input
stringlengths
2.65k
237k
output
stringclasses
1 value
( x ) to if x: converts while ( x ) to while x: """ statements = ('elif', 'for', 'if', 'while',) i = 0 while i < len(aList): if self.is_string_or_comment(aList, i): i = self.skip_string_or_comment(aList, i) elif any(self.match_word(aList, i, z) for z in statements): i = self.handle_keyword(aList, i) # elif ( # self.match_word(aList,i,"if") or # self.match_word(aList,i,"while") or # self.match_word(aList,i,"for") or # self.match_word(aList,i,"elif") # ): # i = self.handle_keyword(aList,i) else: i += 1 # print "handAllKeywords2:", ''.join(aList) #@+node:ekr.20150514063305.182: *7* handle_keyword def handle_keyword(self, aList, i): if self.match_word(aList, i, "if"): i += 2 elif self.match_word(aList, i, "elif"): i += 4 elif self.match_word(aList, i, "while"): i += 5 elif self.match_word(aList, i, "for"): i += 3 else: assert False, 'not a keyword' # Make sure one space follows the keyword. k = i i = self.skip_ws(aList, i) if k == i: c = aList[i] aList[i : i + 1] = [' ', c] i += 1 # Remove '(' and matching ')' and add a ':' if aList[i] == "(": # Look ahead. Don't remove if we span a line. 
j = self.skip_to_matching_bracket(aList, i) k = i found = False while k < j and not found: found = aList[k] == '\n' k += 1 if not found: j = self.removeMatchingBrackets(aList, i) if i < j < len(aList): ch = aList[j] aList[j : j + 1] = [ch, ":", " "] j = j + 2 return j return i #@+node:ekr.20150514063305.183: *6* mungeAllClasses def mungeAllClasses(self, aList): """Scan for a '{' at the top level that is preceeded by ')' """ i = 0 while i < len(aList): progress = i if self.is_string_or_comment(aList, i): i = self.skip_string_or_comment(aList, i) elif self.match_word(aList, i, 'class'): i1 = i i = self.skip_line(aList, i) aList[i - 1 : i] = list(f"{aList[i - 1]}:") s = ''.join(aList[i1:i]) k = s.find(' extends ') if k > -1: k1 = k k = g.skip_id(s, k + 1) k = g.skip_ws(s, k) if k < len(s) and g.is_c_id(s[k]): k2 = g.skip_id(s, k) word = s[k:k2] aList[i1:i] = list(f"{s[:k1]} ({word})") elif self.match_word(aList, i, 'interface'): aList[i : i + len('interface')] = list('class') i = self.skip_line(aList, i) aList[i - 1 : i] = list(f"{aList[i - 1]}: # interface") i = self.skip_line(aList, i) # Essential. else: i += 1 assert i > progress #@+node:ekr.20150514063305.184: *6* mungeAllFunctions & helpers def mungeAllFunctions(self, aList): """Scan for a '{' at the top level that is preceeded by ')' """ prevSemi = 0 # Previous semicolon: header contains all previous text i = 0 firstOpen = None while i < len(aList): progress = i if self.is_string_or_comment(aList, i): j = self.skip_string_or_comment(aList, i) prevSemi = j elif self.match(aList, i, '('): if not firstOpen: firstOpen = i j = i + 1 elif self.match(aList, i, ';'): j = i + 1 prevSemi = j elif self.match(aList, i, "{"): j = self.handlePossibleFunctionHeader( aList, i, prevSemi, firstOpen) prevSemi = j firstOpen = None # restart the scan else: j = i + 1 # Handle unusual cases. 
if j <= progress: j = progress + 1 assert j > progress i = j #@+node:ekr.20150514063305.185: *7* handlePossibleFunctionHeader def handlePossibleFunctionHeader(self, aList, i, prevSemi, firstOpen): """ converts function header lines from typescript format to python format. That is, converts x1..nn w::y ( t1 z1,..tn zn) { C++ (public|private|export) name (t1: z1, ... tn: zn { to def y (z1,..zn): { # (public|private|export) """ assert self.match(aList, i, "{") prevSemi = self.skip_ws_and_nl(aList, prevSemi) close = self.prevNonWsOrNlChar(aList, i) if close < 0 or aList[close] != ')': # Should not increase *Python* indent. return 1 + self.skip_to_matching_bracket(aList, i) if not firstOpen: return 1 + self.skip_to_matching_bracket(aList, i) close2 = self.skip_to_matching_bracket(aList, firstOpen) if close2 != close: return 1 + self.skip_to_matching_bracket(aList, i) open_paren = firstOpen assert aList[open_paren] == '(' head = aList[prevSemi:open_paren] # do nothing if the head starts with "if", "for" or "while" k = self.skip_ws(head, 0) if k >= len(head) or not head[k].isalpha(): return 1 + self.skip_to_matching_bracket(aList, i) kk = self.skip_past_word(head, k) if kk > k: headString = ''.join(head[k:kk]) # C keywords that might be followed by '{' # print "headString:", headString if headString in ["do", "for", "if", "struct", "switch", "while"]: return 1 + self.skip_to_matching_bracket(aList, i) args = aList[open_paren : close + 1] k = 1 + self.skip_to_matching_bracket(aList, i) body = aList[close + 1 : k] head = self.massageFunctionHead(head) args = self.massageFunctionArgs(args) body = self.massageFunctionBody(body) result = [] if head: result.extend(head) if args: result.extend(args) if body: result.extend(body) aList[prevSemi:k] = result return prevSemi + len(result) #@+node:ekr.20150514063305.186: *7* massageFunctionArgs def massageFunctionArgs(self, args): assert args[0] == '(' assert args[-1] == ')' result = ['('] lastWord = [] if self.class_name: for item in 
list("self,"): result.append(item) #can put extra comma i = 1 while i < len(args): i = self.skip_ws_and_nl(args, i) ch = args[i] if ch.isalpha(): j = self.skip_past_word(args, i) lastWord = args[i:j] i = j elif ch == ',' or ch == ')': for item in lastWord: result.append(item) if lastWord != [] and ch == ',': result.append(',') lastWord = [] i += 1 else: i += 1 if result[-1] == ',': del result[-1] result.append(')') result.append(':') return result #@+node:ekr.20150514063305.187: *7* massageFunctionHead (sets .class_name) def massageFunctionHead(self, head): result: List[Any] = [] prevWord = [] self.class_name = '' i = 0 while i < len(head): i = self.skip_ws_and_nl(head, i) if i < len(head) and head[i].isalpha(): result = [] j = self.skip_past_word(head, i) prevWord = head[i:j] i = j # look for ::word2 i = self.skip_ws(head, i) if self.match(head, i, "::"): # Set the global to the class name. self.class_name = ''.join(prevWord) # print(class name:", self.class_name) i = self.skip_ws(head, i + 2) if i < len(head) and (head[i] == '~' or head[i].isalpha()): j = self.skip_past_word(head, i) if head[i:j] == prevWord: result.extend('__init__') elif head[i] == '~' and head[i + 1 : j] == prevWord: result.extend('__del__') else: # result.extend(list('::')) result.extend(head[i:j]) i = j else: result.extend(prevWord) else: i += 1 finalResult = list("def ") finalResult.extend(result) return finalResult #@+node:ekr.20150514063305.188: *7* massageFunctionBody & helper def massageFunctionBody(self, body): # body = self.massageIvars(body) # body = self.removeCasts(body) # body = self.removeTypeNames(body) body = self.dedentBlocks(body) return body #@+node:ekr.20150514063305.189: *8* dedentBlocks def dedentBlocks(self, body): """ Look for '{' preceded by '{' or '}' or ';' (with intervening whitespace and comments). 
""" i = 0 while i < len(body): j = i ch = body[i] if self.is_string_or_comment(body, i): j = self.skip_string_or_comment(body, i) elif ch in '{};': # Look ahead ofr '{' j += 1 while True: k = j j = self.skip_ws_and_nl(body, j) if self.is_string_or_comment(body, j): j = self.skip_string_or_comment(body, j) if k == j: break assert k < j if self.match(body, j, '{'): k = j j = self.skip_to_matching_bracket(body, j) m = '# <Start dedented block>...' body[k : k + 1] = list(m) j += len(m) while k < j: progress = k if body[k] == '\n': k += 1 spaces = 0 while spaces < 4 and k < j: if body[k] == ' ': spaces += 1 k += 1 else: break if spaces > 0: del body[k - spaces : k] k -= spaces j -= spaces else: k += 1 assert progress < k m = ' # <End dedented block>' body[j : j + 1] = list(m) j += len(m) else: j = i + 1 # Defensive programming. if i == j: j += 1 assert i < j i = j return body #@-others #@-others c = self.c TS_To_Python(c).go() c.bodyWantsFocus() #@+node:ekr.20160321042444.1: *3* ccc.import-jupyter-notebook @cmd('import-jupyter-notebook') def importJupyterNotebook(self, event): """Prompt for a Jupyter (.ipynb) file and convert it to a Leo outline.""" try: import nbformat assert nbformat except ImportError: g.es_print('import-jupyter-notebook requires nbformat package') return from leo.plugins.importers.ipynb import Import_IPYNB # was @-others c = self.c x = Import_IPYNB(c) fn = x.get_file_name() if fn: p = c.lastTopLevel() root = p.insertAfter() root.h = fn x.import_file(fn, root) c.redraw(root) c.bodyWantsFocus() #@+node:ekr.20160321072007.1: *3* ccc.export-jupyter-notebook @cmd('export-jupyter-notebook') def exportJupyterNotebook(self, event): """Convert the present outline to a .ipynb file.""" from
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import sys import re import collections import copy __version__ = '1.5.1' class Pyasciigraph: def __init__(self, line_length=79, min_graph_length=50, separator_length=2, force_max_value=None, graphsymbol=None, multivalue=True, human_readable=None, float_format='{0:.0f}', titlebar='#' ): """Constructor of Pyasciigraph :param line_length: the max number of char on a line if any line cannot be shorter, it will go over this limit. Default: 79 :type line_length: int :param min_graph_length: the min number of char used by the graph itself. Default: 50 :type min_graph_length: int :param force_max_value: if provided, force a max value in order to graph each line with respect to it (only taking the actual max value if it is greater). :type: force_max_value: int :param separator_length: the length of field separator. Default: 2 :type separator_length: int :param graphsymbol: the symbol used for the graph bar. Default: '█' :type graphsymbol: str or unicode (length one) :param multivalue: displays all the values if multivalued when True. displays only the max value if False Default: True :type multivalue: boolean :param human_readable: trigger human readable display (K, G, etc) Default: None (raw value display) * 'si' for power of 1000 * 'cs' for power of 1024 * any other value for raw value display) :type human_readable: string (si, cs, none) :param float_format: formatting of the float value Default: '{0:.0f}' (convert to integers). expample: '{:,.2f}' (2 decimals, '.' to separate decimal and int, ',' every three power of tens). 
:param titlebar: sets the character(s) for the horizontal title bar Default: '#' :type titlebar: string """ self.line_length = line_length self.separator_length = separator_length self.min_graph_length = min_graph_length self.max_value = force_max_value self.float_format = float_format self.titlebar = titlebar if graphsymbol is None: self.graphsymbol = self._u('█') else: self.graphsymbol = graphsymbol if self._len_noansi(self.graphsymbol) != 1: raise Exception('Bad graphsymbol length, must be 1', self._len_noansi(self.graphsymbol)) self.multivalue = multivalue self.hsymbols = [self._u(''), self._u('K'), self._u('M'), self._u('G'), self._u('T'), self._u('P'), self._u('E'), self._u('Z'), self._u('Y')] if human_readable == 'si': self.divider = 1000 elif human_readable == 'cs': self.divider = 1024 else: self.divider = None @staticmethod def _len_noansi(string): l = len(re.sub('\x1b[^m]*m', '', string)) return l def _trans_hr(self, value): if self.divider is None: return self.float_format.format(value) vl = value for hs in self.hsymbols: new_val = vl / self.divider if new_val < 1: return self.float_format.format(vl) + hs else: vl = new_val return self.float_format.format(vl * self.divider) + hs @staticmethod def _u(x): """Unicode compat helper """ if sys.version < '3': return x + ''.decode("utf-8") else: return x @staticmethod def _color_string(string, color): """append color to a string + reset to white at the end of the string """ if color is None: return string else: return color + string + '\033[0m' def _get_thresholds(self, data): """get various info (min, max, width... etc) from the data to graph. 
""" all_thre = {} all_thre['value_max_length'] = 0 all_thre['info_max_length'] = 0 all_thre['max_pos_value'] = 0 all_thre['min_neg_value'] = 0 if self.max_value is not None: all_thre['max_pos_value'] = self.max_value # Iterate on all the items for (info, value, color) in data: totalvalue_len = 0 # If we have a list of values for the item if isinstance(value, collections.Iterable): icount = 0 maxvalue = 0 minvalue = 0 for (ivalue, icolor) in value: if ivalue < minvalue: minvalue = ivalue if ivalue > maxvalue: maxvalue = ivalue # if we are in multivalued mode, the value string is # the concatenation of the values, separeted by a ',', # len() must be computed on it # if we are not in multivalued mode, len() is just the # longer str(value) len ( /!\, value can be negative, # which means that it's not simply len(str(max_value))) if self.multivalue: totalvalue_len += len("," + self._trans_hr(ivalue)) else: totalvalue_len = max(totalvalue_len, len(self._trans_hr(ivalue))) if self.multivalue: # remove one comma if multivalues totalvalue_len = totalvalue_len - 1 # If the item only has one value else: totalvalue_len = len(self._trans_hr(value)) maxvalue = value minvalue = value if minvalue < all_thre['min_neg_value']: all_thre['min_neg_value'] = minvalue if maxvalue > all_thre['max_pos_value']: all_thre['max_pos_value'] = maxvalue if self._len_noansi(info) > all_thre['info_max_length']: all_thre['info_max_length'] = self._len_noansi(info) if totalvalue_len > all_thre['value_max_length']: all_thre['value_max_length'] = totalvalue_len return all_thre def _gen_graph_string( self, value, max_value, min_neg_value, graph_length, start_value_pos, color): """Generate the bar + its paddings (left and right) """ def _gen_graph_string_part( value, max_value, min_neg_value, graph_length, color): all_width = max_value + abs(min_neg_value) if all_width == 0: bar_width = 0 else: bar_width = int(abs(float(value)) * float(graph_length) / float(all_width)) return (Pyasciigraph._color_string( 
self.graphsymbol * bar_width, color), bar_width ) all_width = max_value + abs(min_neg_value) if all_width == 0: bar_width = 0 neg_width = 0 pos_width = 0 else: neg_width = int(abs(float(min_neg_value)) * float(graph_length) / float(all_width)) pos_width = int(abs(max_value) * graph_length / all_width) if isinstance(value, collections.Iterable): accuvalue = 0 totalstring = "" totalsquares = 0 sortedvalue = copy.deepcopy(value) sortedvalue.sort(reverse=False, key=lambda tup: tup[0]) pos_value = [x for x in sortedvalue if x[0] >= 0] neg_value = [x for x in sortedvalue if x[0] < 0] # for the negative values, we build the bar + padding from 0 to the left for i in reversed(neg_value): ivalue = i[0] icolor = i[1] scaled_value = ivalue - accuvalue (partstr, squares) = _gen_graph_string_part( scaled_value, max_value, min_neg_value, graph_length, icolor) totalstring = partstr + totalstring totalsquares += squares accuvalue += scaled_value # left padding totalstring = Pyasciigraph._u(' ') * (neg_width - abs(totalsquares)) + totalstring # reset some counters accuvalue = 0 totalsquares = 0 # for the positive values we build the bar from 0 to the right for i in pos_value: ivalue = i[0] icolor = i[1] scaled_value = ivalue - accuvalue (partstr, squares) = _gen_graph_string_part( scaled_value, max_value, min_neg_value, graph_length, icolor) totalstring += partstr totalsquares += squares accuvalue += scaled_value # right padding totalstring += Pyasciigraph._u(' ') * (start_value_pos - neg_width - abs(totalsquares)) return totalstring else: # handling for single value item (partstr, squares) = _gen_graph_string_part( value, max_value, min_neg_value, graph_length, color) if value >= 0: return Pyasciigraph._u(' ') * neg_width + \ partstr + \ Pyasciigraph._u(' ') * (start_value_pos - (neg_width + squares)) else: return Pyasciigraph._u(' ') * (neg_width - squares) +\ partstr +\ Pyasciigraph._u(' ') * (start_value_pos - neg_width) def _gen_info_string(self, info, start_info_pos, 
line_length): """Generate the info string + padding """ number_of_space = (line_length - start_info_pos - self._len_noansi(info)) return info + Pyasciigraph._u(' ') * number_of_space def _gen_value_string(self, value, min_neg_value, color, start_value_pos, start_info_pos): """Generate the value string + padding """ icount = 0 if isinstance(value, collections.Iterable) and self.multivalue: for (ivalue, icolor) in value: if icount == 0: # total_len is needed because the color characters count # with the len() function even when they are not printed to # the screen. totalvalue_len = len(self._trans_hr(ivalue)) totalvalue = Pyasciigraph._color_string( self._trans_hr(ivalue), icolor) else: totalvalue_len += len("," + self._trans_hr(ivalue)) totalvalue += "," + \ Pyasciigraph._color_string( self._trans_hr(ivalue), icolor) icount += 1 elif isinstance(value, collections.Iterable): max_value = min_neg_value color = None for (ivalue, icolor) in value: if ivalue > max_value: max_value = ivalue color = icolor totalvalue_len = len(self._trans_hr(max_value)) totalvalue = Pyasciigraph._color_string( self._trans_hr(max_value), color) else: totalvalue_len = len(self._trans_hr(value)) totalvalue = Pyasciigraph._color_string( self._trans_hr(value), color) number_space = start_info_pos -\ start_value_pos -\ totalvalue_len -\ self.separator_length # This must not be negitive, this happens when the string length is # larger than the separator length if number_space < 0: number_space = 0 return ' ' * number_space + totalvalue +\ ' ' * \ ((start_info_pos - start_value_pos - totalvalue_len) - number_space) def _sanitize_string(self, string): """try to convert strings to UTF-8 """ # get the type of a unicode string unicode_type = type(Pyasciigraph._u('t')) input_type = type(string) if input_type is str: if sys.version < '3': info = unicode(string) else: info = string elif input_type is unicode_type: info = string elif input_type is int or input_type is float: if sys.version < '3': info = 
unicode(string) else: info = str(string) else: info = str(string) return info def _sanitize_value(self, value): """try to values to UTF-8 """ if isinstance(value, collections.Iterable): newcollection = [] for i in value: if len(i) == 1: newcollection.append((i[0], None)) elif len(i) >= 2: newcollection.append((i[0], i[1])) return newcollection else: return value def _sanitize_data(self, data): ret = [] for item in data: if
= 6 ) ), 'polar2014': dict( radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2015': dict( radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2016': dict( radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2017': dict( radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2018': dict( radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'showlegend': False, "height": 1200, "width": 16800, "autosize": False, "title": Site + recommendationName + 'Completeness 2005-2018' } # create a description of the placement of each subplot layout2 = { 'polar2005': dict( domain = dict( x = [0, 1], y = [.96, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2006': dict( domain = dict( x = [0, 1], y = [0.89, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2007': dict( domain = dict( x = [0, 1], y = [0.818, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2008': dict( domain = dict( x = [0, 1], y = [0.746, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2009': dict( domain = dict( x = [0, 1], y = [0.675, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2010': dict( domain = dict( x = [0, 1], y = [0.603, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2011': dict( domain = dict( x = [0, 1], y = [0.531, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2012': dict( domain = dict( x = [0, 1], y = [0.460, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = 
"clockwise", period = 6 ) ), 'polar2013': dict( domain = dict( x = [0, 1], y = [0.388, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2014': dict( domain = dict( x = [0, 1], y = [0.317, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2015': dict( domain = dict( x = [0, 1], y = [0.245, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2016': dict( domain = dict( x = [0, 1], y = [0.174, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2017': dict( domain = dict( x = [0, 1], y = [0.103, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'polar2018': dict( domain = dict( x = [0, 1], y = [0.029, 1] ), radialaxis = dict( angle = 0 ), angularaxis = dict( direction = "clockwise", period = 6 ) ), 'showlegend': False, "height": 32700, "width": 1200, "autosize": False } fig2 = {'data':data,'layout':layout2} pio.write_image(fig2, os.path.join('..','data', recommendationName, Site + recommendationName + '_bigPict_.png')) fig = {'data':data,'layout':layout} pio.write_image(fig, os.path.join('..','data', recommendationName, Site + '_' + recommendationName + '_.png')) crop(os.path.join('..','data', recommendationName, Site+ recommendationName + '_bigPict_.png'), (0, 0, 1200, 16600), os.path.join('..','data', recommendationName, Site+ recommendationName + '_bigPicture_.png')) os.remove(os.path.join('..','data', recommendationName, Site+ recommendationName + '_bigPict_.png')) def CombineAppliedRecommendation(Site, recElements, recommendationName, RecommendationOccurrenceToCombine, RecommendationcountsToCombine=None): # places for all the combined data RecommendationOccurrence = os.path.join("..", "data", recommendationName, "combinedCollections" + '_' + recommendationName + 'Occurrence.csv') RecommendationConcept 
= os.path.join('..','data', recommendationName, "combinedCollections" + '_' + recommendationName + 'Completeness.csv') #RecommendationGraph = os.path.join('..','data', recommendationName, "combinedCollections" + '_' + recommendationName + '_.png') if RecommendationcountsToCombine is not None: RecommendationCounts = os.path.join("..", "data", recommendationName, Site + '_' + recommendationName + 'Counts.csv') CombineXPathCounts(RecommendationcountsToCombine, RecommendationCounts) # combine xpathoccurrence from a specfic site for each year RecommendationCountsDF = pd.read_csv(RecommendationCounts) CombineXPathOccurrence(RecommendationOccurrenceToCombine, RecommendationOccurrence, to_csv=True) RecommendationOccurrenceDF = pd.read_csv(RecommendationOccurrence) # change order of rows to be meaningful for recommendation CollectionRecRows = [] CollectionRecRows.append(["Number of Records"]) CollectionRecColumns = [] CollectionRecColumns.append(["Collection","Record"]) for element in recElements: # find the rows that match each element CollectionElements = list(RecommendationOccurrenceDF['XPath'])[1:] matchingElements = [CollectionElement for CollectionElement in CollectionElements if element in CollectionElement] #append the list to a master list that will be used to order the chart CollectionRecRows.append(matchingElements) CollectionRecColumns.append(matchingElements) CollectionRecRows = [item for sublist in CollectionRecRows for item in sublist] CollectionRecColumns = [item for sublist in CollectionRecColumns for item in sublist] from collections import OrderedDict CollectionRecRows = list(OrderedDict.fromkeys(CollectionRecRows)) CollectionRecColumns = list(OrderedDict.fromkeys(CollectionRecColumns)) if RecommendationcountsToCombine is not None: RecommendationCountsDF = RecommendationCountsDF[CollectionRecColumns] # change order of rows to be meaningful for recommendation RecommendationOccurrenceDF = RecommendationOccurrenceDF.set_index('XPath') 
RecommendationOccurrenceDF = RecommendationOccurrenceDF.loc[CollectionRecRows] RecommendationOccurrenceDF = RecommendationOccurrenceDF.reset_index() # write over the previous csv RecommendationOccurrenceDF.to_csv(RecommendationOccurrence, index=False, mode='w') def Collection_ConceptAnalysis(Site, recommendationName, RecDict, LevelOrder, ConceptOrder, ElementOrder, YearsInvestigated): recMD = ['RecConcept', 'RecLevel', 'RecElement'] # use a sites recommendation elements occurrence table, and add some columns for metadata about the recommendation recOccurDF = pd.read_csv(os.path.join("..","data", recommendationName, "combinedCollections"+"_" + recommendationName + "Occurrence.csv")) recOccurDF.insert(0, "RecElement", 0, allow_duplicates=False) recOccurDF.insert(0, "RecLevel", 0, allow_duplicates=False) recOccurDF.insert(0, "RecConcept", '', allow_duplicates=False) ''' use the RecDict to look at the XPath column and for each key that matches part of any cell, write the value into the same row in the recOccurDF ''' recOccurDF['RecElement'] = recOccurDF['XPath'].apply(lambda x: [value for key, value in RecDict.items() if key in x][0] ) # create a list to order the columns with columnOrder = list(recOccurDF) # don't need xpaths any more columnOrder.remove('XPath') ''' create a pivot table that leverages the dataframe recOccurDF's column for recommendation elements and assigns the highest percentage any of the xpaths of the child elements to that row for a particular year ''' radarElements = pd.pivot_table(recOccurDF, index='RecElement', columns=None, aggfunc='max').reindex(ElementOrder).reset_index() radarElements = radarElements[columnOrder] # fill in the metadata about concepts and recommendation levels radarElements['RecConcept'] = pd.Series(ConceptOrder) radarElements['RecLevel'] = pd.Series(LevelOrder) radarElements = radarElements.fillna(value=0.0) lineConcepts = radarElements # remove the site name from the column #radarElements = 
radarElements.rename(columns={col: col.split('__')[-1] for col in radarElements.columns}) # create recommendation concept csv #lineConcepts = lineConcepts.drop(['RecElement','RecLevel'], axis=1) lineConcepts.loc[-1] = lineConcepts.iloc[1:,:].mean(axis=0, numeric_only=True) lineConcepts.index = lineConcepts.index + 1 # shifting index lineConcepts.fillna('Average Completeness', inplace=True) lineConcepts = lineConcepts.sort_index() lineConcepts.to_csv(os.path.join('..','data', recommendationName, "CombinedCollections"+'_' + recommendationName + 'Complete.csv'), index=False) # remove the site name from the column lineConcepts = lineConcepts.rename(columns={col: col.split('__')[-1] for col in lineConcepts.columns}) lineConcepts.to_csv(os.path.join('..','data', recommendationName, "CombinedCollections"+'_' + recommendationName + 'Completeness.csv'), index=False) # create new version of concept occurrence table radarList = list(radarElements) difference = list(set(YearsInvestigated) - set(radarList[3:])) for year in difference: radarElements.insert(0, year, 0, allow_duplicates=False) RecOccurDFcols = recMD + YearsInvestigated radarElements = radarElements[RecOccurDFcols] ''' Take the occurrence of the conceptual LTER elements from each site's pivot table and plot each years output on a radar chart of 0 to 1 with each RecElement as an axis and the occurrence of records the percentage of color along that axis. ''' # create a structure to add data to. colorList = ['#1f77b4','#ff7f0e','#2ca02c','#d62728','#9467bd', '#8c564b','#e377c2','#7f7f7f','#bcbd22','#17becf'] count = 0 # add the data from each year to a subplot. for year in YearsInvestigated: data = [go.Scatterpolar( name = year, mode = 'lines', r = radarElements[year].tolist()[1:], theta = radarElements['RecElement'].tolist()[1:], line = dict(width = 10, color = colorList[count]), opacity = .75, fill = 'tonext', fillcolor = colorList[count], connectgaps = False, subplot = 'polar', text =
#!/usr/bin/env python
# -*- python -*-
# <NAME> <<EMAIL>>
# Code generation support: emitting files, emitting functions, etc.
#BEGIN_LEGAL
#
#Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import sys
import os
import re
import types
import glob
from genutil import *


def find_dir(d):
    """Walk upward from the current working directory looking for a
    directory named d.

    Returns the full path of the first match, or None when the walk
    reaches the filesystem root without finding one.
    """
    dir = os.getcwd()
    last = ''
    while dir != last:
        target_dir = os.path.join(dir, d)
        if os.path.exists(target_dir):
            return target_dir
        last = dir
        (dir, tail) = os.path.split(dir)
    return None


sys.path.append(find_dir('mbuild'))
try:
    import mbuild
except ImportError:
    # FIX: was a bare "except:"; only an import failure is expected here.
    sys.stderr.write("\nERROR(file: codegen.py): Could not find mbuild. "
                     "Might try setting PYTHONPATH env var.\n\n")
    sys.exit(1)


class ip_header_t(object):
    """Intellectual property (legal) header read from a file and
    re-emitted with language-appropriate comment delimiters."""

    def __init__(self):
        # Raw lines of the header file; populated by read_header().
        self.lines = None

    def read_header(self, fn):
        """Read the raw legal-header text from file fn."""
        fp = base_open_file(fn, "r")
        self.lines = fp.readlines()
        fp.close()  # FIX: the file handle was previously leaked

    def emit_header(self, shell_type=False):
        """Return the header as a list of output lines.

        shell_type=True wraps it in '#' comments (shell/makefile style),
        otherwise in a single C-style block comment.
        """
        eol = '\n'
        out = []
        if shell_type:
            out.append("#BEGIN_LEGAL" + eol)
            for line in self.lines:
                out.append("#" + line)
            out.append("#END_LEGAL" + eol)
        else:
            out.append("/*BEGIN_LEGAL" + eol)
            out.extend(self.lines)
            out.append("END_LEGAL */" + eol)
        return out


class file_emitter_t(object):
    """Attach IP headers, standard includes, and namespace decorations
    to generated files.

    This replaces the file objects I was using for emitting files."""

    header_file_name_pattern = re.compile(r'.[hH]$')
    # note: in the following the '-' must be last or it will (try to) act
    # like a range!
    header_guard_pattern = re.compile(r'[./-]')

    def __init__(self, gendir, file_name, shell_file=False, namespace=None):
        """gendir is the output dir. If shell_file is True, we delimit
        the header differently."""
        self.file_name = file_name
        self.gendir = gendir
        self.namespace = namespace
        # True for shell-like files, False for C++ files. Determines the
        # comment syntax.
        self.shell_file = shell_file
        self.lines = []
        self.full_file_name = mbuild.join(self.gendir, self.file_name)
        self.eol = '\n'
        self.closed = False
        self.header = False
        if file_emitter_t.header_file_name_pattern.search(self.file_name):
            self.header = True
        self.headers = []         # user headers (#include "...")
        self.system_headers = []  # system headers (#include <...>)
        self.misc_header = []     # verbatim lines emitted near the top

    def add_header(self, h):
        """Add h (a header name or a list of them) to the user headers."""
        # FIX: was "type(h) == types.ListType", which only exists on
        # Python 2; isinstance(h, list) behaves identically on 2 and 3.
        if isinstance(h, list):
            self.headers.extend(h)
        else:
            self.headers.append(h)

    def add_system_header(self, h):
        """Add h (a header name or a list of them) to the system headers."""
        if isinstance(h, list):
            self.system_headers.extend(h)
        else:
            self.system_headers.append(h)

    def add_misc_header(self, h):
        """Add h (a verbatim line or a list of them) to the misc headers."""
        if isinstance(h, list):
            self.misc_header.extend(h)
        else:
            self.misc_header.append(h)

    def replace_headers(self, h):
        """Replace the existing user headers with the header(s) h."""
        if isinstance(h, list):
            self.headers = h
        else:
            self.headers = [h]

    def start(self):
        """Call this after creating the object: emits the file banner,
        includes and (for C++ files) the namespace opener."""
        self.emit_header()
        if not self.shell_file:
            self.system_headers_emit()
            self.user_headers_emit()
            self.misc_headers_emit()
            self.namespace_start()

    def count_lines(self):
        """Number of lines buffered so far."""
        return len(self.lines)

    def write(self, str):
        """Replaces the file pointer write() function call."""
        self.lines.append(str)

    def writelines(self, list_of_str):
        """Replaces the file pointer writelines() function call."""
        self.lines.extend(list_of_str)

    def add_code(self, str):
        """Add a line and newline."""
        self.write(str + '\n')

    def add_code_eol(self, str):
        """Add a line with semicolon, newline."""
        self.add_code(str + ';')

    def close(self):
        """Finish the file: close namespace/header guard, write to disk.
        Safe against double-close (warns instead of re-emitting)."""
        if not self.closed:
            self.closed = True
            if not self.shell_file:
                self.namespace_end()
                if self.header:
                    self.emit_header_guard_end()
            self.emit_file()
            del self.lines
        else:
            msge("FE: Closing an already-closed file: " + self.full_file_name)

    def emit_file(self):
        """Flush the buffered lines to the output file."""
        msge("FE:EMIT_FILE " + self.full_file_name)
        fp = self.open_file(self.full_file_name, "w")
        fp.writelines(self.lines)
        fp.close()

    # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

    def open_file(self, fn, rw):
        fp = base_open_file(fn, rw)
        return fp

    def emit_ip_header(self, ip_header_file_name):
        """Read the legal header file and return its formatted lines."""
        iph = ip_header_t()
        iph.read_header(ip_header_file_name)
        s = iph.emit_header(self.shell_file)
        return s

    def emit_header(self):
        """Emit the doxygen @file banner and, for .h files, open the
        include guard."""
        self.dox('@file ' + self.file_name)
        self.emit()
        self.cmt('This file was automatically generated.')
        self.cmt('Do not edit this file.')
        self.emit()
        if self.header:
            self.emit_header_guard_start()

    def emit_header_guard_start(self):
        # Derive the guard macro from the file name: [./-] -> '_'.
        s = file_emitter_t.header_guard_pattern.sub('_', self.file_name)
        defname = '_%s_' % s.upper()
        self.emit_eol('#if !defined(' + defname + ')')
        self.emit_eol('# define ' + defname)

    def emit_header_guard_end(self):
        self.emit_eol("#endif")

    def dox(self, s):
        """Emit a doxygen comment line in the file's comment syntax."""
        if self.shell_file:
            self.emit_eol('# ' + s)
        else:
            self.emit_eol('/// ' + s)

    def cmt(self, s):
        """Emit a plain comment line in the file's comment syntax."""
        if self.shell_file:
            self.emit_eol('# ' + s)
        else:
            self.emit_eol('// ' + s)

    def emit(self, s='\n'):
        self.lines.append(s)

    def emit_eol(self, s=''):
        self.emit(s + '\n')

    def user_headers_emit(self):
        for h in self.headers:
            self.emit_eol('#include \"%s\"' % h)

    def system_headers_emit(self):
        for h in self.system_headers:
            self.emit_eol('#include <%s>' % h)

    def misc_headers_emit(self):
        for h in self.misc_header:
            self.emit_eol(h)

    def namespace_start(self):
        if self.namespace:
            self.emit_eol(''.join(['namespace ', self.namespace, ' {']))

    def namespace_end(self):
        if self.namespace:
            msge("FE:NAMESPACE " + self.full_file_name)
            self.emit_eol('} // namespace')


class xed_file_emitter_t(file_emitter_t):
    """Attach IP headers, standard includes, and namespace decorations
    to generated files.

    This replaces the file objects I was using for emitting files."""

    def __init__(self, xeddir, gendir, file_name, shell_file=False,
                 namespace=None, is_private=True):
        file_emitter_t.__init__(self, gendir, file_name, shell_file, namespace)
        self.xeddir = xeddir
        if is_private:
            self.headers.append('xed-internal-header.h')

    def start(self):
        """Override the parent's start() function to apply the IP header."""
        self.emit_header()
        ip_header_file_name = mbuild.join(self.xeddir, 'misc',
                                          'apache-header.txt')
        for line in self.emit_ip_header(ip_header_file_name):
            self.emit(line)
        if not self.shell_file:
            self.system_headers_emit()
            self.user_headers_emit()
            self.misc_headers_emit()
            self.namespace_start()


#inline_string = "inline"
# NOTE(review): "inline_sring" is a long-standing typo; kept for any
# external importers, with a correctly-spelled alias added.
inline_sring = "XED_INLINE"
inline_string = inline_sring


class function_object_t(object):
    """Builder for an emitted C/C++ function: signature, qualifiers and
    body lines, with helpers to render the declaration and definition."""

    inline_string = "XED_INLINE"

    def __init__(self, name, return_type='xed_bool_t', static=False,
                 inline=False, doxgroup=None, force_no_inline=False,
                 dll_export=False):
        self.function_name = name
        self.doxgroup = doxgroup
        self.return_type = return_type
        self.static = static
        self.inline = inline
        self.dll_export = dll_export
        self.body = []   # statement lines, without trailing newline
        self.args = []   # argument declaration strings, in order
        self.const_member = False
        self.ref_return = False
        self.force_no_inline = force_no_inline

    def set_function_name(self, fname):
        self.function_name = fname

    def lines(self):
        """Number of body lines (used for output-file size budgeting)."""
        return len(self.body)

    def add_arg(self, arg):
        self.args.append(arg)

    def get_arg_num(self):
        return len(self.args)

    def add_comment(self, s):
        self.body.append(''.join(['/* ', s, ' */']))

    def set_const_member(self):
        self.const_member = True

    def set_ref_return(self):
        self.ref_return = True

    def add_code(self, line):
        self.body.append(line)

    def add_code_eol(self, line):
        self.body.append(line + ';')

    def add_lines(self, lines):
        self.body.extend(lines)

    def emit_header_internal(self, class_qualfier='', emit_group=False):
        """Private function that emits the function name and args, but
        no newline or semicolon."""
        s = []
        if emit_group and self.doxgroup:
            s.append("/// @ingroup %s\n" % (self.doxgroup))
        if self.static:
            s.append("static ")
        if self.inline:
            s.append(function_object_t.inline_string + " ")
        if self.force_no_inline:
            s.append('XED_NOINLINE ')
        if self.dll_export:
            s.append('XED_DLL_EXPORT ')
        s.append(self.return_type)
        if self.ref_return:
            s.append('&')
        s.append(' ')
        s.append(class_qualfier)
        s.append(self.function_name)
        s.append('(')
        first_arg = True
        for arg in self.args:
            if first_arg:
                first_arg = False
            else:
                s.append(', ')
            s.append(arg)
        if first_arg:
            # No arguments at all: emit an explicit (void) prototype.
            s.append('void')
        s.append(')')
        if self.const_member:
            s.append(' const')
        return ''.join(s)

    def emit_header(self):
        'emit the header with the semicolon and newline'
        s = [self.emit_header_internal(emit_group=True), ";\n"]
        return ''.join(s)

    def emit(self, class_qualfier=''):
        'emit the function body'
        eol = '\n'
        s = [self.emit_header_internal(class_qualfier), eol]
        s.append('{')
        s.append(eol)
        for bline in self.body:
            s.extend(['  ', bline, eol])
        s.append('}')
        s.append(eol)
        return ''.join(s)

    def emit_file_emitter(self, fe, class_qualfier=''):
        'emit the function body'
        fe.add_code(self.emit_header_internal(class_qualfier))
        fe.add_code('{')
        for bline in self.body:
            fe.add_code(bline)
        fe.add_code('}')

    def emit_body(self):
        'emit function body as string'
        return '\n'.join([bline + ';' for bline in self.body])

############################################################


def dump_flist_2_header(h_file, functions, headers,
                        emit_headers=True, emit_bodies=True):
    '''
    Emits the list of function objects to a header file.

    @type: functions: list of function_object_t
    @param functions: the functions to emit
    @type: h_file: xed_file_emitter_t
    @param h_file: emitting the functions to this header file
    @type: headers: list of strings
    @param headers: include headers to emit
    '''
    for header in headers:
        h_file.add_header(header)
    h_file.start()
    if emit_headers:
        for fo in functions:
            decl = fo.emit_header()
            h_file.add_code(decl)
    if emit_bodies:
        for fo in functions:
            fo.emit_file_emitter(h_file)
    h_file.close()


def emit_function_list(func_list, fn_prefix, xeddir, gendir, hgendir,
                       namespace=None, other_headers=None):
    """Emit a list of functions to a numbered sequence of files,
    breaking them up when the files get too big.

    @type func_list: list of function_object_t objects
    @param func_list: functions to emit
    @type fn_prefix: string
    @param fn_prefix: basis for the output file names.
    @type xeddir: string
    @param xeddir: location of the source directory so that we can find
                   the legal header
    @type gendir: string
    @param gendir: directory where the output files go.
    @type hgendir: string
    @param hgendir: directory where the output hdr files go.
    @type namespace: string
    @param namespace: defaults to XED
    """
    # FIX: other_headers used a shared mutable default ([]).
    if other_headers is None:
        other_headers = []
    file_number = 0
    max_lines_per_file = 3000
    fe = None
    fn_header = "%s.h" % (fn_prefix)
    fe_list = []
    fe_header = xed_file_emitter_t(xeddir, hgendir, fn_header,
                                   shell_file=False, namespace=namespace)
    fe_header.start()
    fe_list.append(fe_header)
    # remove any numbered files that we previously emitted. We won't
    # necessarily overwrite them all each build and do not want
    # stale files remaining from previous builds
    for fn in glob.glob(mbuild.join(gendir, fn_prefix + '-[0-9]*.c')):
        mbuild.remove_file(fn)
    for func in func_list:
        fe_header.write(func.emit_header())
        # Start a new .c file when the current one would exceed the budget.
        if not fe or fe.count_lines() + func.lines() >= max_lines_per_file:
            if fe:
                fe.close()
            fn = "%s-%d.c" % (fn_prefix, file_number)
            fe = xed_file_emitter_t(xeddir, gendir, fn,
                                    shell_file=False, namespace=namespace)
            fe.add_header(fn_header)
            for header in other_headers:
                fe.add_header(header)
            fe.start()
            fe_list.append(fe)
            file_number += 1
        func.emit_file_emitter(fe)
    if fe:  # FIX: guarded; fe is None (and close() crashed) for an empty func_list
        fe.close()
    fe_header.close()
** 2) / grid.rho channel = grid.new_channel_to(instance.grid) channel.copy() instance.stopping_conditions.number_of_steps_detection.enable() ybound = instance.get_boundary_grid('ybound1') self.assertEquals(ybound.shape, (4+8,4,1)) memybound = ybound.copy() memybound.rho = 0.02 | density memybound.rhovx = 0.0 | momentum memybound.rhovy = 0.2 | momentum memybound.rhovz = 0.0 | momentum memybound.energy = p / (instance.parameters.gamma - 1) memybound.energy += 0.5 * (memybound.rhovx ** 2 + memybound.rhovy ** 2 + memybound.rhovz ** 2) / memybound.rho channel = memybound.new_channel_to(ybound) channel.copy() instance.evolve_model(1.0 | generic_unit_system.time) print instance.stopping_conditions.number_of_steps_detection.is_set() print instance.grid.rho rho = instance.grid.rho[0,...,0] self.assertAlmostRelativeEquals(rho[-1], 0.01 | density) self.assertTrue(rho[0] > 0.01 | density) self.assertTrue(instance.grid.rhovy[0,0,0] > 0.1 | momentum) self.assertAlmostRelativeEquals(instance.grid.rhovy[0,-1,0] , 0.1 | momentum) print instance.model_time instance.stopping_conditions.number_of_steps_detection.disable() instance.evolve_model(1.0 | generic_unit_system.time) rho = instance.grid.rho[0,...,0] self.assertAlmostRelativeEquals(rho, 0.02 | density, 8) self.assertAlmostRelativeEquals(instance.grid.rhovy[0,...,0], 0.2 | momentum, 8) print instance.model_time instance.stop() def test19(self): instance=self.new_instance(Athena, number_of_workers = 1) instance.set_parallel_decomposition(1,1,1) instance.parameters.mesh_size = (4,5,6) instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length instance.parameters.x_boundary_conditions = ("periodic", "periodic") instance.parameters.y_boundary_conditions = ("periodic", "periodic") instance.parameters.z_boundary_conditions = ("interface", "outflow") instance.parameters.stopping_conditions_number_of_steps = 1 grid = datamodel.new_regular_grid((4,5,6), [1.0, 1.0, 1.0] | generic_unit_system.length ) density = 
generic_unit_system.density momentum = generic_unit_system.speed * generic_unit_system.density energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length) grid.rho = 0.01 | density grid.rhovx = 0.0 | momentum grid.rhovy = 0.0 | momentum grid.rhovz = 0.1 | momentum p = 1.0 | (generic_unit_system.mass / (generic_unit_system.length * generic_unit_system.time**2)) grid.energy = p / (instance.parameters.gamma - 1) grid.energy += 0.5 * (grid.rhovx ** 2 + grid.rhovy ** 2 + grid.rhovz ** 2) / grid.rho channel = grid.new_channel_to(instance.grid) channel.copy() instance.stopping_conditions.number_of_steps_detection.enable() zbound = instance.get_boundary_grid('zbound1') self.assertEquals(zbound.shape, (4+8,5+8,4)) memzbound = zbound.copy() memzbound.rho = 0.02 | density memzbound.rhovx = 0.0 | momentum memzbound.rhovy = 0.0 | momentum memzbound.rhovz = 0.2 | momentum memzbound.energy = p / (instance.parameters.gamma - 1) memzbound.energy += 0.5 * (memzbound.rhovx ** 2 + memzbound.rhovy ** 2 + memzbound.rhovz ** 2) / memzbound.rho channel = memzbound.new_channel_to(zbound) channel.copy() instance.evolve_model(1.0 | generic_unit_system.time) rho = instance.grid.rho[0,0,...] 
self.assertAlmostRelativeEquals(rho[-1], 0.01 | density) self.assertTrue(rho[0] > 0.01 | density) self.assertTrue(instance.grid.rhovz[0,0,0] > 0.1 | momentum) self.assertAlmostRelativeEquals(instance.grid.rhovz[0,0,-1] , 0.1 | momentum) print instance.model_time instance.stopping_conditions.number_of_steps_detection.disable() instance.evolve_model(1.0 | generic_unit_system.time) rho = instance.grid.rho[0,...,0] self.assertAlmostRelativeEquals(rho, 0.02 | density, 8) self.assertAlmostRelativeEquals(instance.grid.rhovz[0,0,...], 0.2 | momentum, 8) print instance.model_time instance.stop() def test20(self): instance=self.new_instance(Athena, number_of_workers = 4) instance.parameters.parallel_decomposition = (2,2,1) instance.parameters.mesh_size = (4,5,6) instance.parameters.mesh_length = [1.0, 1.0, 1.0] | generic_unit_system.length instance.parameters.x_boundary_conditions = ("periodic", "periodic") instance.parameters.y_boundary_conditions = ("periodic", "periodic") instance.parameters.z_boundary_conditions = ("outflow", "interface") instance.parameters.stopping_conditions_number_of_steps = 1 grid = datamodel.new_regular_grid((4,5,6), [1.0, 1.0, 1.0] | generic_unit_system.length ) density = generic_unit_system.density momentum = generic_unit_system.speed * generic_unit_system.density energy = generic_unit_system.mass / ((generic_unit_system.time**2) * generic_unit_system.length) grid.rho = 0.01 | density grid.rhovx = 0.0 | momentum grid.rhovy = 0.0 | momentum grid.rhovz = -0.1 | momentum p = 1.0 | (generic_unit_system.mass / (generic_unit_system.length * generic_unit_system.time**2)) grid.energy = p / (instance.parameters.gamma - 1) grid.energy += 0.5 * (grid.rhovx ** 2 + grid.rhovy ** 2 + grid.rhovz ** 2) / grid.rho channel = grid.new_channel_to(instance.grid) channel.copy() instance.stopping_conditions.number_of_steps_detection.enable() zbound = instance.get_boundary_grid('zbound2') self.assertEquals(zbound.shape, (4+8,5+8,4)) memzbound = zbound.copy() 
memzbound.rho = 0.02 | density memzbound.rhovx = 0.0 | momentum memzbound.rhovy = 0.0 | momentum memzbound.rhovz = -0.2 | momentum memzbound.energy = p / (instance.parameters.gamma - 1) memzbound.energy += 0.5 * (memzbound.rhovx ** 2 + memzbound.rhovy ** 2 + memzbound.rhovz ** 2) / memzbound.rho channel = memzbound.new_channel_to(zbound) channel.copy() instance.evolve_model(1.0 | generic_unit_system.time) rho = instance.grid.rho[0,0,...] self.assertAlmostRelativeEquals(rho[0], 0.01 | density) self.assertTrue(rho[-1] > 0.01 | density) self.assertTrue(instance.grid.rhovz[0,0,-1] < -0.1 | momentum) self.assertAlmostRelativeEquals(instance.grid.rhovz[0,0,0] , -0.1 | momentum) print instance.model_time instance.stopping_conditions.number_of_steps_detection.disable() instance.evolve_model(1.0 | generic_unit_system.time) rho = instance.grid.rho[0,...,0] self.assertAlmostRelativeEquals(rho, 0.02 | density, 8) self.assertAlmostRelativeEquals(instance.grid.rhovz[0,0,...], -0.2 | momentum, 8) print instance.model_time instance.stop() def test21(self): instance=self.new_instance(Athena) instance.parameters.x_boundary_conditions = ("periodic","periodic") instance.parameters.mesh_length = (20.0, 1, 1) | generic_unit_system.length instance.parameters.mesh_size = (20, 1, 1) for x in instance.itergrids(): inmem = x.copy() inmem.rho = inmem.x/(1| generic_unit_system.length) | generic_unit_system.density inmem.rhovx = 0.0 | generic_unit_system.momentum_density inmem.energy = 1.0 | generic_unit_system.energy_density from_model_to_code = inmem.new_channel_to(x) from_model_to_code.copy() print inmem.rho rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point(0.5| generic_unit_system.length,0.0| generic_unit_system.length,0.0| generic_unit_system.length) self.assertEquals(rho , 0.5 | generic_unit_system.density) for value in numpy.arange(0.5, 19.6, 0.1): rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point( value | generic_unit_system.length, 0.0 | 
generic_unit_system.length, 0.0 | generic_unit_system.length ) self.assertAlmostRelativeEquals(rho , value | generic_unit_system.density) for value in numpy.arange(0.0, 0.6, 0.1): rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point( value | generic_unit_system.length, 0.0 | generic_unit_system.length, 0.0 | generic_unit_system.length ) self.assertAlmostRelativeEquals(rho , ((0.5 + value) * 0.5 + (0.5-value) * 19.5) | generic_unit_system.density) for value in numpy.arange(0.0, 0.5, 0.1): rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point( value + 19.5| generic_unit_system.length, 0.0 | generic_unit_system.length, 0.0 | generic_unit_system.length ) self.assertAlmostRelativeEquals(rho , (19.5 - (value * 19)) | generic_unit_system.density, 9) # out of range rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point( 20.0| generic_unit_system.length, 0.0 | generic_unit_system.length, 0.0 | generic_unit_system.length ) self.assertAlmostRelativeEquals(rho , 0.0 | generic_unit_system.density, 9) def test22(self): instance=self.new_instance(Athena, number_of_workers=2) instance.parameters.x_boundary_conditions = ("periodic","periodic") instance.parameters.y_boundary_conditions = ("periodic","periodic") instance.parameters.mesh_length = (20.0, 20.0, 1) | generic_unit_system.length instance.parameters.mesh_size = (20, 20, 1) for x in instance.itergrids(): inmem = x.copy() inmem.rho = (inmem.x + ((inmem.y - (0.5| generic_unit_system.length))* 20.0))/(1| generic_unit_system.length) | generic_unit_system.density inmem.rhovx = 0.0 | generic_unit_system.momentum_density inmem.energy = 1.0 | generic_unit_system.energy_density from_model_to_code = inmem.new_channel_to(x) from_model_to_code.copy() print inmem.rho[0], inmem.y[0], inmem.x[0] rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point(0.5| generic_unit_system.length,0.5| generic_unit_system.length,0.0| generic_unit_system.length) self.assertEquals(rho , 
0.5 | generic_unit_system.density) for value in numpy.arange(0.5, 19.6, 0.1): rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point( value | generic_unit_system.length, 0.5 | generic_unit_system.length, 0.0 | generic_unit_system.length ) self.assertAlmostRelativeEquals(rho , value | generic_unit_system.density) for x in numpy.arange(8.5, 11.5, 0.25): for y in numpy.arange(0.5, 19.6, 0.25): rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point( x | generic_unit_system.length, y | generic_unit_system.length, 0.0 | generic_unit_system.length ) self.assertAlmostRelativeEquals(rho , x + (20 * (y-0.5)) | generic_unit_system.density) def test23(self): instance=self.new_instance(Athena, number_of_workers=3) instance.parameters.x_boundary_conditions = ("periodic","periodic") instance.parameters.y_boundary_conditions = ("periodic","periodic") instance.parameters.z_boundary_conditions = ("periodic","periodic") instance.parameters.mesh_length = (20.0, 20.0, 20.0) | generic_unit_system.length instance.parameters.mesh_length = (20.0, 20.0, 20.0) | generic_unit_system.length instance.parameters.mesh_size = (20, 20, 20) for x in instance.itergrids(): inmem = x.copy() inmem.rho = ( ( inmem.x + ((inmem.y - (0.5| generic_unit_system.length))* 20.0) + ((inmem.z - (0.5| generic_unit_system.length))* 400.0) ) /(1| generic_unit_system.length) | generic_unit_system.density ) inmem.rhovx = 0.0 | generic_unit_system.momentum_density inmem.energy = 1.0 | generic_unit_system.energy_density from_model_to_code = inmem.new_channel_to(x) from_model_to_code.copy() rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point(0.5| generic_unit_system.length,0.5| generic_unit_system.length,0.5| generic_unit_system.length) self.assertEquals(rho , 0.5 | generic_unit_system.density) for value in numpy.arange(0.5, 19.6, 0.1): rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point( value | generic_unit_system.length, 0.5 | 
generic_unit_system.length, 0.5 | generic_unit_system.length ) self.assertAlmostRelativeEquals(rho , value | generic_unit_system.density) sample = sample = datamodel.new_regular_grid( (4, 4, 76), (2, 2, 19) | generic_unit_system.length ) sample.x += 9.5 | generic_unit_system.length sample.y += 9.5 | generic_unit_system.length sample.z += 0.5 | generic_unit_system.length x = sample.x.flatten() y = sample.y.flatten() z = sample.z.flatten() rho, rhovx, rhovy, rhovx, rhoenergy = instance.get_hydro_state_at_point( x, y, z ) half = 0.5 | generic_unit_system.length self.assertAlmostRelativeEquals(rho , (x + (20 * (y-half)) + (400 * (z-half)))/(1| generic_unit_system.length) | generic_unit_system.density ) def test24(self): instance=self.new_instance(Athena, number_of_workers=1) instance.parameters.x_boundary_conditions = ("periodic","periodic") instance.parameters.y_boundary_conditions = ("periodic","periodic") instance.parameters.z_boundary_conditions = ("periodic","periodic") instance.parameters.mesh_length = (10.0, 1, 1) | generic_unit_system.length instance.parameters.mesh_size = (20, 1, 1) instance.set_has_external_gravitational_potential(1) instance.commit_parameters() potential_grid = instance.potential_grid factor = (2 | generic_unit_system.length / generic_unit_system.time**2) potential_grid.potential = potential_grid.x * factor x = numpy.arange(0,10.25, 0.1) | generic_unit_system.length y = 0.5 |generic_unit_system.length z = 0.5 |generic_unit_system.length interpolated = instance.get_interpolated_gravitational_potential(x,y,z) self.assertAlmostRelativeEquals(interpolated, x * factor) def test25(self): instance=self.new_instance(Athena, number_of_workers=1) instance.parameters.x_boundary_conditions = ("periodic","periodic") instance.parameters.y_boundary_conditions = ("periodic","periodic") instance.parameters.z_boundary_conditions = ("periodic","periodic") instance.parameters.mesh_length = (5.0, 10.0, 1) | generic_unit_system.length 
instance.parameters.mesh_size = (20, 20, 1) instance.set_has_external_gravitational_potential(1) instance.commit_parameters() potential_grid = instance.potential_grid factor = (2 | generic_unit_system.length / generic_unit_system.time**2) potential_grid.potential = potential_grid.y * factor print potential_grid.y * factor y = numpy.arange(0,10.25, 0.1) | generic_unit_system.length x = (y * 0) + (2 |generic_unit_system.length) z = 0.5 |generic_unit_system.length interpolated = instance.get_interpolated_gravitational_potential(x,y,z) print y*factor self.assertAlmostRelativeEquals(interpolated, y * factor)
pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class NeZhaModel(BertPreTrainedModel): def __init__(self, config): super(NeZhaModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def forward(self, input_ids, token_type_ids=None, position_ids=None, attention_mask=None, head_mask=None, **kwargs): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 embedding_output = self.embeddings(input_ids, token_type_ids) encoded_layers = self.encoder(embedding_output, extended_attention_mask) encoded_layers, attention_layers = encoded_layers sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) return sequence_output, pooled_output, None class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() # Need to unty it when we separate the dimensions of hidden and emb self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, str)): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class 
BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0))) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertForPreTraining(BertPreTrainedModel): """BERT model with pre-training heads. 
This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. 
if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForPreTraining, self).__init__(config) self.bert = NeZhaModel(config) self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight) self.init_weights() def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False) prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = nn.CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return total_loss elif masked_lm_labels is not None: loss_fct = nn.CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) total_loss = masked_lm_loss return total_loss else: return prediction_scores, seq_relationship_score class 
NeZhaForMaskedLM(BertPreTrainedModel): """BERT model with the masked language modeling head. This module comprises the BERT model followed by the masked language modeling head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] Outputs: if `masked_lm_labels` is not `None`: Outputs the masked language modeling loss. if `masked_lm_labels` is `None`: Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForMaskedLM(config) masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(NeZhaForMaskedLM, self).__init__(config) self.bert = NeZhaModel(config) self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight) self.init_weights() def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, output_attention=False, **kwargs): sequence_output, _, _ = self.bert(input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = nn.CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) return masked_lm_loss, masked_lm_loss class BertForSequenceClassification(BertPreTrainedModel): """BERT model for classification. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. 
Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000,
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8

# NOTE(review): the "<gh_stars>" line above is scraper residue, not Python;
# it is preserved verbatim because it is not a comment.

# In[ ]:

# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "../../../input/shrutimechlearn_churn-modelling/"
# directory. For example, running this (by clicking run or pressing Shift+Enter) will list
# all files under the input directory.

import os
for dirname, _, filenames in os.walk("../../../input/shrutimechlearn_churn-modelling"):
    for filename in filenames:
        print(os.path.join(dirname, filename))

# Any results you write to the current directory are saved as output.

# # 1. Introduction
# If you're from a **customer-based** company, business or an organization, **customer
# success** is the foundation of your company. When your customers achieve their desired
# outcome through their interactions with your company (focus on customer success), churn
# will not be an issue at all. But in general, churn can happen everywhere. So what if we
# told you there was a way to predict, at least to some degree, how and when your customers
# will cancel? Or how many customers will leave you or stay with you? That's exactly what a
# churn model can do. So basically customer churn is a method used to identify the number of
# customers who have either unsubscribed or cancelled their service contract.

# ![image.png](attachment:image.png)

# # 2. DataSet
# This data set contains details of a bank's customers and the target variable is a binary
# variable reflecting whether the customer left the bank (closed their account) or continues
# to be a customer. This dataset consists of 10,000 customers (rows) and 14 columns. Here
# we're trying to separate customers into two particular groups based on customers' personal
# details and their bank details.

# # 3. Objective
#
# * Build the classification algorithm to classify customers who leave and who stay.

# # 4. Methodology
# Since this is a type of classification problem, I used a supervised deep learning model
# called ***Artificial Neural Network (ANN)*** to classify the customers of the bank.
# First of all, I carry out a data preprocessing step to get a better dataset. Then I start
# building the artificial neural network by splitting the dataset into train and test sets.
# The model's architecture will contain three layers. The first layer will have 6 neurons and
# use the **Relu** activation function, the second layer will also have 6 neurons and use the
# **Relu** activation function, and the third and final layer will have 1 neuron with a
# sigmoid activation function. Then compile the model and give it the **binary_crossentropy**
# loss function (used for binary classification) to measure how well the model performs on
# training, and then give it the Stochastic Gradient Descent **adam** optimizer to improve
# upon the loss. Also, I want to measure the accuracy of the model, so add 'accuracy' to the
# metrics.
#
# ##### Analysis Procedure.
# 1. Exploratory Data Analysis
# 2. Data preprocessing
# 3. Splitting the dataset
# 4. Build the ANN model
# 5. Make Predictions
# 6. Evaluation
# 7. Improve the Model
# 8. Parameter Tuning

# In[ ]:

# Import required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

# In[ ]:

churn = pd.read_csv("../../../input/shrutimechlearn_churn-modelling/Churn_Modelling.csv")

# In[ ]:

churn.head()

# In[ ]:

churn.info()

# In[ ]:

churn.describe()

# We can see that most of the customers are around age 38 with 10000 estimated salary.
# According to the US Census Bureau, this salary range is considered the low-income category.

# # 4. Exploratory Data Analysis

# In[ ]:

sns.set_style('whitegrid')

# In[ ]:

sns.countplot(churn['Geography'])

# It seems like these customers are from a bank located in the European region. As we can
# see, most of the customers are from France, while equal numbers of customers come from
# both Spain and Germany.

# In[ ]:

sns.countplot(churn['Gender'])

# This is the graph of the gender of customers. As we can see, most of the customers are
# males.

# In[ ]:

sns.distplot(churn['Age'], bins=50)
plt.title('Distribution of Age')

# This graph illustrates the distribution of age of the customers. The majority of the
# customers in the bank are around age 38. That means this bank has a middle-aged customer
# base. It seems like there is a low percentage of an older crowd.

# In[ ]:

sns.countplot(churn['NumOfProducts'])

# Most of the customers have either one or two products of the bank. This means the
# intention of the majority of the customers is only to maintain a simple account or have a
# debit or credit card rather than the other products.

# In[ ]:

sns.countplot(churn['HasCrCard'])

# As we can see, most of the customers have a credit card.

# In[ ]:

sns.countplot(churn['IsActiveMember'])

# As we can see, there is no significant difference between the active and non-active
# customers. It seems like most of the customers are not happy with the bank and its
# services; their low income level is maybe the reason for that.

# # 5. Data Preprocessing
# Data pre-processing is an important step in the data mining process. Real-world datasets
# are incomplete, inconsistent and lacking certain behaviours. As a result, this kind of
# data leads to misleading results. Therefore, let's carry out some data preprocessing steps
# to get a better dataset.
# Read my 'Data Pre-processing' article -
# [A simple introduction to Data Pre-processing](http://medium.com/analytics-vidhya/a-simple-introduction-to-data-pre-processing-4cac052df4a4)
# 1. Data Cleaning
# 2. Create Dummy Variables
# 3. Feature Scaling
# 4. Feature Transformation

# ## 5.1 Data Cleaning
#
# If you're interested in reading more about Data Cleaning, read my article on
# [Data Cleaning in Nutshell](https://medium.com/@duminyk95/data-cleaning-in-nutshell-4e017dd86fb6)

# In[ ]:

churn.isnull().sum()

# In[ ]:

churn.duplicated().sum()

# This dataset has no missing values and no duplicated items.

# ## 5.2 Create Dummy Variables
#
# Geography and Gender are the categorical variables in this dataset. To do a proper
# analysis it is best practice to create dummy variables for these variables. So let's
# create dummy variables.

# In[ ]:

geography = pd.get_dummies(churn['Geography'], drop_first=True)
gender = pd.get_dummies(churn['Gender'], drop_first=True)

# In[ ]:

churn.drop(['Geography', 'Gender'], axis=1, inplace=True)

# In[ ]:

churn = pd.concat([churn, geography, gender], axis=1)

# In[ ]:

# Identifier-like columns carry no predictive signal, so drop them.
churn.drop(['RowNumber', 'CustomerId', 'Surname'], axis=1, inplace=True)

# In[ ]:

churn.head()

# ## Divide the variables into a set of independent variables and a single dependent variable

# In[ ]:

X = churn.drop(['Exited'], axis=1).values

# In[ ]:

X

# In[ ]:

y = churn['Exited'].values

# In[ ]:

y

# X and y are numpy arrays

# ## Split the dataset into training and test sets

# In[ ]:

from sklearn.model_selection import train_test_split

# In[ ]:

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

# ## 5.3 Feature Scaling

# In[ ]:

from sklearn.preprocessing import StandardScaler

# In[ ]:

sc = StandardScaler()

# In[ ]:

# Fit on the training set only, so no information leaks from the test set.
sc.fit(X_train)

# ## 5.4 Feature Transformation

# In[ ]:

X_train = sc.transform(X_train)

# In[ ]:

X_test = sc.transform(X_test)

# Now our dataset is well preprocessed and ready for the advanced analysis.

# # 6 Build the ANN model

# In[ ]:

# Import required models and layers and other important libraries
from keras.models import Sequential
from keras.layers import Dense

# In[ ]:

# Initializing the ANN
Classifier = Sequential()

# In[ ]:

# Adding layers
# input layer (11 preprocessed features after dummy encoding and column drops)
Classifier.add(Dense(6, kernel_initializer='uniform', activation='relu', input_dim=11))
# hidden layers
Classifier.add(Dense(6, kernel_initializer='uniform', activation='relu'))
# Output layer (single sigmoid unit for the binary churn target)
Classifier.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))

# In[ ]:

# compiling the model
Classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Compiling means we are applying Stochastic gradient descent on the whole ANN.

# In[ ]:

hist = Classifier.fit(X_train, y_train, validation_data=(X_test, y_test), batch_size=10, epochs=100)

# In[ ]:

plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['loss', 'val_loss'], loc='upper right')
print()

# Here we start with a very high loss during our first couple of epoch runs and then, as the
# weights and biases start to get adjusted, we'll hopefully see a gradual drop in our loss or
# error. Validation loss is also going down. There are some spikes in validation loss here.
# Training loss and validation loss go down and then continue down together.

# In[ ]:

# NOTE(review): this figure plots accuracy but the title string says
# 'Model loss' — likely a copy-paste slip; the string is left unchanged here
# because it is runtime-visible output.
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Model loss')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['accuracy', 'val_accuracy'], loc='upper right')
print()

# Here we start with very low accuracy during the first couple of epoch runs and then, as
# the epoch count increases, the accuracy also tends to increase, and finally it will
# continue to be consistent.

# # 7. Make Predictions

# In[ ]:

predictions = Classifier.predict(X_test)

# In[ ]:

predictions

# These numbers make no sense on their own. Our objective is to classify our customers as
# leave or stay. Therefore we rename our predictions,
# * Predictions > 0.5 = True (Customer will leave the bank)
# * Predictions < 0.5 = False (Customer will stay with the bank)

# In[ ]:

predictions = (predictions > 0.5)

# In[ ]:

predictions

# We made our predictions using test data which our model hasn't seen before. Now it's time
# to evaluate how accurate our model is.

# # 8. Evaluating

# In[ ]:

from sklearn.metrics import accuracy_score

# In[ ]:

accuracy_score(y_test, predictions)

# Our ANN model predicts almost 84% of results correctly.
        patch : matplotlib.patches
            Matplotlib patch for the specified region.
        """
        pass

    # Set operations are not implemented for this (1-dimensional) region type.
    def intersection(self, other):
        raise NotImplementedError

    def symmetric_difference(self, other):
        raise NotImplementedError

    def union(self, other):
        raise NotImplementedError


class Region2D(Region):
    """
    Abstract Region class to define the interface for 2-dimensional
    Region classes.
    """

    def __getstate__(self):
        # Exclude the cached shapely object from pickling; it is restored
        # as None on unpickling (see __setstate__) and presumably rebuilt
        # lazily by the `shapely_object` property — TODO confirm in subclasses.
        state = self.__dict__.copy()
        del state["_shapely_object"]
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.__dict__["_shapely_object"] = None

    @property
    def dimension(self):
        # Number of spatial dimensions of this region type.
        return 2

    @property
    def bounds(self):
        # (min_x, min_y, max_x, max_y) taken from the underlying shapely object.
        min_x, min_y, max_x, max_y = self.shapely_object.bounds
        return min_x, min_y, max_x, max_y

    @property
    def extent(self):
        # Absolute width and height of the bounding box.
        min_x, min_y, max_x, max_y = self.bounds
        return abs(max_x - min_x), abs(max_y - min_y)

    @property
    def bounding_box(self):
        # Axis-aligned bounding Rectangle (angle 0).
        min_x, min_y, max_x, max_y = self.bounds
        return Rectangle((min_x, min_y), max_x - min_x, max_y - min_y, 0)

    @property
    @abstractmethod
    def shapely_object(self):
        """
        Geometric object as defined in `shapely`.

        Returns
        -------
        Shapely object
        """
        pass

    @staticmethod
    def from_shapely(shapely_object):
        """
        Constructor for instantiating Region from `shapely` object.

        Parameters
        ----------
        shapely_object
            `shapely` Geometric object to be converted into Region

        Returns
        -------
        Region
        """
        ptype = shapely_object.geom_type
        if ptype == "Polygon":
            return Polygon.from_shapely(shapely_object)
        elif ptype == "MultiPolygon":
            return MultiPolygon.from_shapely(shapely_object)
        # NOTE(review): other geometry types fall through and return None —
        # confirm whether that is intended or should raise.

    @abstractmethod
    def as_artist(self, origin=(0, 0), **kwargs):
        """
        Matplotlib patch object for this region
        (e.g. `matplotlib.patches.Ellipse`).

        Parameters
        ----------
        origin : array_like
            The (x, y) pixel position of the origin of the displayed image.
            Default is (0, 0).
        kwargs : dict
            Other parameters passed to the `matplotlib.patches` object.

        Returns
        -------
        patch : matplotlib.patches
            Matplotlib patch for the specified region.
        """
        pass

    def plot(self, ax=None, **kwargs):
        """
        Provide plot of region as :class:`matplotlib.axes.Axes` object.

        Parameters
        ----------
        ax : :class:`matplotlib.axes.Axes`
            The axes on which to show the image
        kwargs : dict
            Other parameters passed to the `matplotlib.patches` object.

        Returns
        -------
        :class:`matplotlib.axes.Axes`
            Axes object with the plot.
        """
        if ax is None:
            ax = plt.gca()
        if not self:
            return ax
        artist = self.as_artist(**kwargs)
        ax.add_artist(artist)
        return ax

    # The binary set operations delegate to shapely and wrap the result back
    # into a Region subclass.
    def intersection(self, other):
        shapely_obj = self.shapely_object.intersection(other.shapely_object)
        return Region2D.from_shapely(shapely_obj)

    def symmetric_difference(self, other):
        shapely_obj = self.shapely_object.symmetric_difference(other.shapely_object)
        return Region2D.from_shapely(shapely_obj)

    def union(self, other):
        shapely_obj = self.shapely_object.union(other.shapely_object)
        return Region2D.from_shapely(shapely_obj)

    def buffer(self, distance, **kwargs):
        """
        Extend the region perpendicular by a `distance`.

        Parameters
        ----------
        distance : float
            Distance by which the region is extended.
        kwargs : dict
            Other parameters passed to :func:`shapely.geometry.buffer`.

        Returns
        -------
        Polygon
            The extended region.
        """
        return Region2D.from_shapely(self.shapely_object.buffer(distance, **kwargs))


class Region3D(Region):
    """
    Abstract Region class to define the interface for 3-dimensional
    Region classes.
    """

    @property
    def dimension(self):
        return 3

    @abstractmethod
    def as_artist(self, origin=(0, 0), **kwargs):
        """
        Matplotlib patch object for this region
        (e.g. `matplotlib.patches.Ellipse`).

        Parameters
        ----------
        origin : array_like
            The (x, y) pixel position of the origin of the displayed image.
            Default is (0, 0).
        kwargs : dict
            Other parameters passed to the `matplotlib.patches` object.

        Returns
        -------
        patch : matplotlib.patches
            Matplotlib patch for the specified region.
        """
        pass

    def plot(self, ax=None, **kwargs):
        """
        Provide plot of region as :class:`matplotlib.axes.Axes` object.

        Parameters
        ----------
        ax : :class:`matplotlib.axes.Axes`
            The axes on which to show the image
        kwargs : dict
            Other parameters passed to the `matplotlib.patches` object.

        Returns
        -------
        :class:`matplotlib.axes.Axes`
            Axes object with the plot.
        """
        if ax is None:
            ax = plt.gca()
        if not self:
            return ax
        artist = self.as_artist(**kwargs)
        ax.add_artist(artist)
        return ax

    # Set operations are not implemented for 3-dimensional regions.
    def intersection(self, other):
        raise NotImplementedError

    def symmetric_difference(self, other):
        raise NotImplementedError

    def union(self, other):
        raise NotImplementedError


class RegionND(Region):
    """
    Abstract Region class to define the interface for n-dimensional
    Region classes.
    """

    # Plotting and set operations are not implemented for n-dimensional regions.
    def as_artist(self):
        raise NotImplementedError

    def intersection(self, other):
        raise NotImplementedError

    def symmetric_difference(self, other):
        raise NotImplementedError

    def union(self, other):
        raise NotImplementedError


class EmptyRegion(Region):
    """
    Region class to define an empty region that has no dimension.
    """

    def __init__(self):
        self.shapely_object = shPolygon()

    def __repr__(self):
        return f"{self.__class__.__name__}()"

    def __str__(self):
        return f"{self.__class__.__name__}()"

    @property
    def dimension(self):
        # An empty region has no defined dimensionality.
        return None

    @property
    def points(self):
        return np.array([])

    @property
    def centroid(self):
        return None

    @property
    def max_distance(self):
        return 0

    @property
    def region_measure(self):
        return 0

    @property
    def subregion_measure(self):
        return 0

    @property
    def bounds(self):
        return None

    @property
    def extent(self):
        return None

    @property
    def bounding_box(self):
        return None

    # Set operations with the empty region follow set algebra:
    # intersection is empty, symmetric difference and union are the other set.
    def intersection(self, other):
        return EmptyRegion()

    def symmetric_difference(self, other):
        return other

    def union(self, other):
        return other

    def contains(self, points):
        # No point is contained in the empty region.
        return np.array([])

    def as_artist(self, **kwargs):
        raise NotImplementedError("EmptyRegion cannot return an artist.")

    def buffer(self, distance, **kwargs):
        raise NotImplementedError("EmptyRegion cannot be extended.")

    @classmethod
    def from_shapely(cls, polygon):
        """Construct an EmptyRegion from an empty `shapely` object."""
        if polygon.is_empty:
            return cls()
        else:
            raise TypeError("Shapely object must be an empty.")


class Interval(Region1D):
    """
    Region class to define an interval.

    Parameters
    ----------
    lower_bound : float
        The lower bound of the interval.
    upper_bound : float
        The upper bound of the interval.
    """

    def __init__(self, lower_bound=0, upper_bound=1):
        self._lower_bound = lower_bound
        self._upper_bound = upper_bound
        self._region_specs = (self.lower_bound, self.upper_bound)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.lower_bound}, {self.upper_bound})"

    @classmethod
    def from_intervals(cls, intervals):
        """
        Constructor for instantiating Region from list of (min, max) bounds.
        Takes array-like intervals instead of interval to be consistent with
        `Rectangle.from_intervals`.

        Parameters
        ----------
        intervals : array-like of shape (2,)
            The region bounds for each dimension

        Returns
        -------
        Interval
        """
        lower_bound, upper_bound = intervals
        return cls(lower_bound, upper_bound)

    @property
    def lower_bound(self):
        """
        The lower boundary.

        Returns
        -------
        float
        """
        return self._lower_bound

    @property
    def upper_bound(self):
        """
        The upper boundary.

        Returns
        -------
        float
        """
        return self._upper_bound

    @property
    def bounds(self):
        return self.lower_bound, self.upper_bound

    @property
    def extent(self):
        return abs(self.upper_bound - self.lower_bound)

    @property
    def intervals(self):
        """
        Provide bounds in a tuple (min, max) arrangement.

        Returns
        -------
        Tuple of shape(dimension, 2)
            ((min_x, max_x), ...).
        """
        return (self.bounds,)

    @property
    def region_specs(self):
        """
        Legacy interface to serve legacy RoiRegion.

        Warnings
        --------
        Do not use - will be deprecated.

        Returns
        -------
        tuple
            (lower_bound, upper_bound)
        """
        return self._region_specs

    @property
    def points(self):
        return np.array([self.lower_bound, self.upper_bound])

    @property
    def centroid(self):
        return self.lower_bound + (self.upper_bound - self.lower_bound) / 2

    @property
    def max_distance(self):
        return self.upper_bound - self.lower_bound

    @property
    def region_measure(self):
        return self.upper_bound - self.lower_bound

    @property
    def subregion_measure(self):
        # The boundary of a 1D interval carries no measure here.
        return None

    @property
    def bounding_box(self):
        return self

    def contains(self, points):
        # Half-open membership test: lower bound inclusive, upper exclusive.
        points_ = np.asarray(points)
        condition = (points_ >= self.lower_bound) & (points_ < self.upper_bound)
        inside_indices = condition.nonzero()[0]  # points are 1-dimensional
        return inside_indices

    def as_artist(self, origin=(0, 0), **kwargs):
        raise NotImplementedError

    def buffer(self, distance, **kwargs):
        """Return a new Interval extended by `distance` on both sides."""
        l_bound = self.lower_bound - distance
        u_bound = self.upper_bound + distance
        return Interval(lower_bound=l_bound, upper_bound=u_bound)


class Rectangle(Region2D):
    """
    Region class to define a rectangle.

    Parameters
    ----------
    corner : array-like with shape (2,)
        A point that defines the lower left corner.
    width : float
        The length of a vector describing the edge in x-direction.
    height : float
        The length of a vector describing the edge in y-direction.
    angle : float
        The angle (in degrees) by which the rectangle is rotated
        counterclockwise around the corner point.
""" def __init__(self, corner=(0, 0), width=1, height=1, angle=0): self._corner = corner self._width = width self._height = height self._angle = angle self._region_specs = (corner, width, height, angle) self._shapely_object = None def __repr__(self): return f"{self.__class__.__name__}({tuple(self.corner)}, {self.width}, {self.height}, {self.angle})" def __getattr__(self, attr): """All non-adapted calls are passed to shapely object""" if attr.startswith("__") and attr.endswith( "__" ): # this is needed to enable pickling raise AttributeError return getattr(self.shapely_object, attr) @classmethod def from_intervals(cls, intervals): """ Constructor for instantiating Region from list of (min, max) bounds. Parameters ---------- intervals : tuple, list, np.ndarray of shape (2, 2) The region bounds for each dimension Returns ------- Interval """ min_x, max_x = intervals[0] min_y, max_y = intervals[1] corner = (min_x, min_y) width = max_x - min_x height = max_y - min_y angle = 0 return cls(corner, width, height, angle) @property def corner(self): """ A point that defines the lower left corner. Returns ------- array-like with shape (2,) """ return self._corner @property def width(self): """ The length of a vector describing the edge in x-direction. Returns ------- float """ return self._width @property def height(self): """ The length of a vector describing the edge in y-direction. Returns ------- float """ return self._height @property def angle(self): """ The angle (in degrees) by which the rectangle is rotated counterclockwise around the corner point. Returns ------- float """ return self._angle @property def intervals(self): """ Provide bounds in a tuple (min, max) arrangement. Returns ------- Tuple of shape(dimension, 2) ((min_x, max_x), ...). 
""" lower_bounds = self.bounds[: self.dimension] upper_bounds = self.bounds[self.dimension :] return tuple( ((lower, upper) for lower, upper in zip(lower_bounds, upper_bounds)) ) @property def points(self): rectangle = mpl_patches.Rectangle( self.corner, self.width, self.height,
    with integer labels for rows. As mentioned above, note that both the start
    and stop of the slice are included.

    >>> df.loc[7:9]
       max_speed  shield
    7          1       2
    8          4       5
    9          7       8
    """

    def __init__(self, df_or_s):
        from databricks.koalas.frame import DataFrame
        from databricks.koalas.series import Series
        assert isinstance(df_or_s, (DataFrame, Series)), \
            'unexpected argument type: {}'.format(type(df_or_s))
        if isinstance(df_or_s, DataFrame):
            self._kdf = df_or_s
            self._ks = None
        else:
            # If df_or_col is Column, store both the DataFrame anchored to the Column and
            # the Column itself.
            self._kdf = df_or_s._kdf
            self._ks = df_or_s

    def __getitem__(self, key):
        # Translate a pandas-style ``.loc[rows, cols]`` key into Spark
        # ``where``/``select`` operations on the underlying Spark DataFrame.
        from databricks.koalas.frame import DataFrame
        from databricks.koalas.series import Series

        def raiseNotImplemented(description):
            # Helper: raise for selections that have no Spark equivalent.
            raise SparkPandasNotImplementedError(
                description=description,
                pandas_function=".loc[..., ...]",
                spark_target_function="select, where")

        rows_sel, cols_sel = _unfold(key, self._ks)

        sdf = self._kdf._sdf
        # --- row selection ---
        if isinstance(rows_sel, Series):
            # Boolean-Series row filter: the Series must resolve to a Spark
            # BooleanType column.
            sdf_for_check_schema = sdf.select(rows_sel._scol)
            assert isinstance(sdf_for_check_schema.schema.fields[0].dataType, BooleanType), \
                (str(sdf_for_check_schema), sdf_for_check_schema.schema.fields[0].dataType)
            sdf = sdf.where(rows_sel._scol)
        elif isinstance(rows_sel, slice):
            assert len(self._kdf._internal.index_columns) > 0
            if rows_sel.step is not None:
                raiseNotImplemented("Cannot use step with Spark.")
            if rows_sel == slice(None):
                # If slice is None - select everything, so nothing to do
                pass
            elif len(self._kdf._internal.index_columns) == 1:
                # Label slice on a single-level index: both endpoints are
                # inclusive, matching pandas ``.loc`` semantics.
                start = rows_sel.start
                stop = rows_sel.stop
                index_column = self._kdf.index.to_series()
                index_data_type = index_column.spark_type
                cond = []
                if start is not None:
                    cond.append(index_column._scol >= F.lit(start).cast(index_data_type))
                if stop is not None:
                    cond.append(index_column._scol <= F.lit(stop).cast(index_data_type))
                if len(cond) > 0:
                    sdf = sdf.where(reduce(lambda x, y: x & y, cond))
            else:
                raiseNotImplemented("Cannot use slice for MultiIndex with Spark.")
        elif isinstance(rows_sel, str):
            raiseNotImplemented("Cannot use a scalar value for row selection with Spark.")
        else:
            # Anything else must be an iterable of index labels.
            try:
                rows_sel = list(rows_sel)
            except TypeError:
                raiseNotImplemented("Cannot use a scalar value for row selection with Spark.")
            if len(rows_sel) == 0:
                sdf = sdf.where(F.lit(False))
            elif len(self._kdf._internal.index_columns) == 1:
                index_column = self._kdf.index.to_series()
                index_data_type = index_column.spark_type
                if len(rows_sel) == 1:
                    sdf = sdf.where(
                        index_column._scol == F.lit(rows_sel[0]).cast(index_data_type))
                else:
                    sdf = sdf.where(index_column._scol.isin(
                        [F.lit(r).cast(index_data_type) for r in rows_sel]))
            else:
                raiseNotImplemented("Cannot select with MultiIndex with Spark.")

        # --- column selection ---
        # make cols_sel a 1-tuple of string if a single string
        column_index = self._kdf._internal.column_index
        if isinstance(cols_sel, str):
            kdf = DataFrame(self._kdf._internal.copy(sdf=sdf))
            return kdf._get_from_multiindex_column((cols_sel,))
        elif isinstance(cols_sel, Series):
            cols_sel = _make_col(cols_sel)
        elif isinstance(cols_sel, slice) and cols_sel != slice(None):
            # NOTE(review): ``raiseNotImplemented`` already raises, so the
            # leading ``raise`` is redundant (it would be ``raise None`` if the
            # helper ever returned) — left unchanged here.
            raise raiseNotImplemented("Can only select columns either by name or reference or all")
        elif isinstance(cols_sel, slice) and cols_sel == slice(None):
            cols_sel = None

        if cols_sel is None:
            columns = self._kdf._internal.column_scols
        elif isinstance(cols_sel, spark.Column):
            columns = [cols_sel]
            column_index = None
        elif all(isinstance(key, Series) for key in cols_sel):
            columns = [_make_col(key) for key in cols_sel]
            column_index = [key._internal.column_index[0] for key in cols_sel]
        elif all(isinstance(key, spark.Column) for key in cols_sel):
            columns = cols_sel
            column_index = None
        elif (any(isinstance(key, str) for key in cols_sel)
                and any(isinstance(key, tuple) for key in cols_sel)):
            # Mixing plain labels and multi-index tuples is ambiguous.
            raise TypeError('Expected tuple, got str')
        else:
            if all(isinstance(key, tuple) for key in cols_sel):
                level = self._kdf._internal.column_index_level
                if any(len(key) != level for key in cols_sel):
                    raise ValueError('All the key level should be the same as column index level.')
            # Resolve each requested label against the (data column, index)
            # mapping; a label may match several columns.
            column_to_index = list(zip(self._kdf._internal.data_columns,
                                       self._kdf._internal.column_index))
            columns = []
            column_index = []
            for key in cols_sel:
                found = False
                for column, idx in column_to_index:
                    if idx == key or idx[0] == key:
                        columns.append(_make_col(column))
                        column_index.append(idx)
                        found = True
                if not found:
                    raise KeyError("['{}'] not in index".format(key))

        try:
            sdf = sdf.select(self._kdf._internal.index_scols + columns)
            index_columns = self._kdf._internal.index_columns
            data_columns = [column for column in sdf.columns if column not in index_columns]
            column_scols = [scol_for(sdf, col) for col in data_columns]
            internal = _InternalFrame(sdf=sdf,
                                      index_map=self._kdf._internal.index_map,
                                      column_index=column_index,
                                      column_scols=column_scols)
            kdf = DataFrame(internal)
        except AnalysisException:
            raise KeyError('[{}] don\'t exist in columns'
                           .format([col._jc.toString() for col in columns]))

        if cols_sel is not None and isinstance(cols_sel, spark.Column):
            # A single Spark Column selection yields a Series, not a DataFrame.
            from databricks.koalas.series import _col
            return _col(kdf)
        else:
            return kdf

    def __setitem__(self, key, value):
        # Assign ``value`` into the selected rows/columns; only a limited
        # subset of pandas ``.loc`` assignment is supported on Spark.
        from databricks.koalas.frame import DataFrame
        from databricks.koalas.series import Series, _col

        if (not isinstance(key, tuple)) or (len(key) != 2):
            raise SparkPandasNotImplementedError(
                description="Only accepts pairs of candidates",
                pandas_function=".loc[..., ...] = ...",
                spark_target_function="withColumn, select")

        rows_sel, cols_sel = key

        if (not isinstance(rows_sel, slice)) or (rows_sel != slice(None)):
            if isinstance(rows_sel, list):
                if isinstance(cols_sel, str):
                    cols_sel = [cols_sel]
                kdf = self._kdf.copy()
                for col_sel in cols_sel:
                    # Uses `kdf` to allow operations on different DataFrames.
                    # TODO: avoid temp column name or declare `__` prefix is
                    # reserved for Koalas' internal columns.
                    kdf["__indexing_temp_col__"] = value
                    new_col = kdf["__indexing_temp_col__"]._scol
                    # Conditionally replace values only on the selected rows.
                    kdf[col_sel] = Series(kdf[col_sel]._internal.copy(
                        scol=F.when(
                            kdf._internal.index_scols[0].isin(rows_sel), new_col
                        ).otherwise(kdf[col_sel]._scol)), anchor=kdf)
                    kdf = kdf.drop(labels=['__indexing_temp_col__'])
                self._kdf._internal = kdf._internal.copy()
            else:
                raise SparkPandasNotImplementedError(
                    description="""Can only assign value to the whole dataframe, the row index
                    has to be `slice(None)` or `:`""",
                    pandas_function=".loc[..., ...] = ...",
                    spark_target_function="withColumn, select")

        if not isinstance(cols_sel, (str, list)):
            raise ValueError("""only column names or list of column names can be assigned""")

        if isinstance(value, DataFrame):
            if len(value.columns) == 1:
                self._kdf[cols_sel] = _col(value)
            else:
                raise ValueError("Only a dataframe with one column can be assigned")
        else:
            if isinstance(cols_sel, str):
                cols_sel = [cols_sel]
            if (not isinstance(rows_sel, list)) and (isinstance(cols_sel, list)):
                for col_sel in cols_sel:
                    self._kdf[col_sel] = value


class ILocIndexer(object):
    """
    Purely integer-location based indexing for selection by position.

    ``.iloc[]`` is primarily integer position based (from ``0`` to
    ``length-1`` of the axis), but may also be used with a conditional
    boolean Series.

    Allowed inputs are:

    - An integer for column selection, e.g. ``5``.
    - A list or array of integers for column selection, e.g. ``[4, 3, 0]``.
    - A boolean array for column selection.
    - A slice object with ints for column selection, e.g. ``1:7``.
    - A slice object with ints without start and step for row selection,
      e.g. ``:7``.
    - A conditional boolean Index for row selection.

    Not allowed inputs which pandas allows are:

    - An integer for row selection, e.g. ``5``.
    - A list or array of integers for row selection, e.g. ``[4, 3, 0]``.
    - A boolean array for row selection.
- A ``callable`` function with one argument (the calling Series, DataFrame or Panel) and that returns valid output for indexing (one of the above). This is useful in method chains, when you don't have a reference to the calling object, but would like to base your selection on some value. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow out-of-bounds indexing (this conforms with python/numpy *slice* semantics). See Also -------- DataFrame.loc : Purely label-location based indexer for selection by label. Series.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4}, ... {'a': 100, 'b': 200, 'c': 300, 'd': 400}, ... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }] >>> df = ks.DataFrame(mydict, columns=['a', 'b', 'c', 'd']) >>> df a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 **Indexing just the rows** A scalar integer for row selection is not allowed. >>> df.iloc[0] Traceback (most recent call last): ... databricks.koalas.exceptions.SparkPandasNotImplementedError: ... A list of integers for row selection is not allowed. >>> df.iloc[[0]] Traceback (most recent call last): ... databricks.koalas.exceptions.SparkPandasNotImplementedError: ... With a `slice` object. >>> df.iloc[:3] a b c d 0 1 2 3 4 1 100 200 300 400 2 1000 2000 3000 4000 Conditional that returns a boolean Series >>> df.iloc[df.index % 2 == 0] a b c d 0 1 2 3 4 2 1000 2000 3000 4000 **Indexing both axes** You can mix the indexer types for the index and columns. Use ``:`` to select the entire axis. With scalar integers. >>> df.iloc[:1, 1] 0 2 Name: b, dtype: int64 With lists of integers. >>> df.iloc[:2, [1, 3]] b d 0 2 4 1 200 400 With `slice` objects. >>> df.iloc[:2, 0:3] a b c 0 1 2 3 1 100 200 300 With a boolean array whose length matches the columns. 
>>> df.iloc[:, [True, False, True, False]] a c 0 1 3 1 100 300 2 1000 3000 """ def __init__(self, df_or_s): from databricks.koalas.frame import DataFrame from databricks.koalas.series import Series assert isinstance(df_or_s, (DataFrame, Series)), \ 'unexpected argument type: {}'.format(type(df_or_s)) if isinstance(df_or_s, DataFrame): self._kdf = df_or_s self._ks = None else: # If df_or_col is Column, store both the DataFrame anchored to the Column and #
# Flask blueprint exposing the W.I.L.L user/session HTTP API.
# `db` (a dataset-style DB handle) and `configuration_data` are injected by the
# application at startup; `core.sessions` is the in-memory session registry.
from flask import Blueprint, request, session, redirect, render_template
import core
import tools
import logging
import bcrypt
import json
import traceback
import sys
from whenareyou import whenareyou
try:
    # Py2/Py3 compatibility shim for the queue module.
    import queue as Queue
except ImportError:
    import Queue

db = None                  # set by the host application before requests arrive
configuration_data = None  # parsed app configuration (must contain "admins")
log = logging.getLogger()

api = Blueprint('api', __name__, template_folder='templates')


@api.route('/new_user', methods=["GET","POST"])
def new_user():
    '''
    Create new user in the database
    :param: username
    :param: password
    :param: first_name
    :param: email
    :param: city
    :param: country
    :param: state
    '''
    log.info(":API:/api/new_user")
    response = {"type": None, "data": {}, "text": None}
    try:
        # Accept either a JSON body or form-encoded data.
        if request.is_json:
            request_data = request.get_json()
        else:
            request_data = request.form
        username = str(request_data["username"])
        log.debug("Username is {0}".format(username))
        password = str(request_data["password"])
        first_name = str(request_data["first_name"])
        last_name = str(request_data["last_name"])
        email = str(request_data["email"])
        city = str(request_data["city"])
        country = str(request_data["country"])
        state = str(request_data["state"])
        # Reject any field containing characters outside tools.valid_chars.
        check_list = [username, password, first_name, last_name, email, city, country, state]
        passed = tools.check_string(check_list)
        if passed:
            log.debug("Attempting to create new user with username {0} and email {1}".format(username, email))
            # Check to see if the username exists
            users = db["users"]
            if users.find_one(username=username):
                # If that username is already taken
                taken_message = "Username {0} is already taken".format(username)
                log.debug(taken_message)
                response["type"] = "error"
                response["text"] = taken_message
            else:
                # Add the new user to the database
                log.info(":{0}:Adding a new user to the database".format(username))
                db.begin()
                # Hash the password
                log.debug("Hashing password")
                hashed = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
                log.debug("Hashed password is {0}".format(hashed))
                is_admin = username in configuration_data["admins"]
                try:
                    db['users'].insert({
                        "username": username,
                        "first_name": first_name,
                        "last_name": last_name,
                        "email": email,
                        # NOTE(review): "<PASSWORD>" is a dataset-anonymization
                        # artifact and is not valid Python; the original value
                        # was presumably the `hashed` bcrypt digest — confirm
                        # against upstream before use.
                        "password": <PASSWORD>,
                        "admin": is_admin,
                        "default_plugin": "search",
                        "notifications": json.dumps(["email"]),
                        "ip": request.environ.get('HTTP_X_REAL_IP', request.remote_addr),
                        "news_site": "http://reuters.com",
                        "city": city,
                        "country": country,
                        "state": state,
                        "temp_unit": "fahrenheit",
                        # whenareyou() resolves the city name to a timezone.
                        "timezone": whenareyou(city)
                    })
                    db.commit()
                    response["type"] = "success"
                    response["text"] = "Thank you {0}, you are now registered for W.I.L.L".format(first_name)
                except:
                    # NOTE(review): bare except hides the real insertion error;
                    # at minimum it should be logged.
                    db.rollback()
                    response["type"] = "error"
                    response["text"] = "There was an error in signing you up for W.I.L.L. Please check the information you entered"
        else:
            log.warning(":{0}:Failed SQL evaluation".format(username))
            response["type"] = "error"
            response["text"] = "Invalid input, valid chars are {0}".format(tools.valid_chars)
    except KeyError:
        # One of the required request fields was missing.
        log.error("Needed data not found in new user request")
        response["type"] = "error"
        response["text"] = "Couldn't find required data in request. " \
                           "To create a new user, a username, password, first name, last name," \
                           "and email is required"
    return tools.return_json(response)


@api.route("/settings", methods=["POST"])
def settings():
    """
    :param username:
    :param password:
    :param Optional - setting to be changed:
    Change the users settings
    :return:
    """
    log.info(":API:/api/settings")
    response = {"type": None, "text": None, "data": {}}
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    if "username" in request_data.keys() and "password" in request_data.keys():
        username = request_data["username"]
        password = request_data["password"]
        if tools.check_string(request_data.values()):
            user_table = db["users"].find_one(username=username)
            if user_table:
                db_hash = user_table["password"]
                # NOTE(review): a failed password check falls through with
                # response["type"] still None — no explicit auth-failure branch.
                if bcrypt.checkpw(password.encode('utf8'), db_hash.encode('utf8')):
                    #TODO: write a framework that allows changing of notifications
                    # Settings that must never be changed through this endpoint.
                    immutable_settings = ["username", "admin", "id", "user_token", "notifications", "password"]
                    db.begin()
                    log.info(":{0}:Changing settings for user".format(username))
                    try:
                        for setting in request_data.keys():
                            if setting not in immutable_settings:
                                # NOTE(review): reads request.form[setting] even
                                # for JSON requests — presumably should be
                                # request_data[setting]; confirm.
                                db["users"].upsert({"username": username, setting: request.form[setting]}, ['username'])
                        db.commit()
                        response["type"] = "success"
                        response["text"] = "Updated settings"
                    except Exception as db_error:
                        # NOTE(review): Exception.message does not exist on
                        # Python 3 — this log line itself raises there.
                        log.debug("Exception {0}, {1} occurred while trying to commit changes to the database".format(
                            db_error.message, db_error.args
                        ))
                        response["type"] = "error"
                        response["text"] = "Error encountered while trying to update db, changes not committed"
                        db.rollback()
            else:
                response["type"] = "error"
                response["text"] = "User {0} doesn't exist".format(username)
        else:
            response["type"] = "error"
            response["text"] = "Invalid input"
    else:
        response["type"] = "error"
        response["text"] = "Couldn't find username or password in request data"
    return tools.return_json(response)


@api.route('/get_sessions', methods=["GET", "POST"])
def get_sessions():
    """
    Return list of active sessions for user
    :param: username
    :param: password
    :return: list of sessions
    """
    log.info(":API:/api/get_sessions")
    response = {"type": None, "data": {}, "text": None}
    sessions = core.sessions
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    try:
        username = request_data["username"]
        password = request_data["password"]
        if tools.check_string(request_data.values()):
            db_hash = db['users'].find_one(username=username)["password"]
            user_auth = bcrypt.checkpw(password.encode('utf8'), db_hash.encode('utf8'))
            if user_auth:
                response["data"].update({"sessions":[]})
                for user_session in sessions:
                    if sessions[user_session]["username"] == username:
                        # NOTE(review): appends the flask `session` proxy, not
                        # the matched `user_session` key — looks like a bug;
                        # confirm intended payload.
                        response["data"]["sessions"].append(session)
                response["type"] = "success"
                response["text"] = "Fetched active sessions"
            else:
                response["type"] = "error"
                response["text"] = "Invalid username/password combination"
        else:
            response["type"] = "error"
            response["text"] = "One of the submitted parameters contained an invalid character. " \
                               "Valid characters are {0}".format(tools.valid_chars)
    except KeyError:
        response["type"] = "error"
        response["text"] = "Couldn't find username and password in request"
    return tools.return_json(response)


@api.route('/start_session', methods=["GET","POST"])
def start_session():
    '''
    :param: username
    :param: password
    Generate a session id and start a new session
    :return:
    '''
    log.info(":API:/api/start_session")
    # Check the information that the user has submitted
    response = {"type": None, "data": {}, "text": None}
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    try:
        # POST carries credentials in the body; GET in the query string.
        if request.method == "POST":
            username = request_data["username"]
            password = request_data["password"]
            client = "API-POST"
        elif request.method == "GET":
            username = request.args.get("username", "")
            password = request.args.get("password", "")
            client = "API-GET"
        if not (username and password):
            # Treat missing/empty credentials the same as absent keys.
            raise KeyError()
        if tools.check_string([username, password]):
            log.info(":{0}:Checking password".format(username))
            users = db["users"]
            user_data = users.find_one(username=username)
            if user_data:
                # NOTE(review): redundant second lookup of the same row.
                user_data = db["users"].find_one(username=username)
                # Check the password
                db_hash = user_data["password"]
                user_auth = bcrypt.checkpw(password.encode('utf8'), db_hash.encode('utf8'))
                if user_auth:
                    log.info(":{0}:Authentication successful".format(username))
                    # Return the session id to the user
                    session_id = tools.gen_session(username, client, db)
                    if session_id:
                        response["type"] = "success"
                        response["text"] = "Authentication successful"
                        response["data"].update({"session_id": session_id})
                else:
                    response["type"] = "error"
                    response["text"] = "Invalid username/password"
            else:
                response["type"] = "error"
                response["text"] = "Couldn't find user with username {0}".format(username)
        else:
            response["type"] = "error"
            response["text"] = "Invalid input"
    except KeyError:
        response["type"] = "error"
        response["text"] = "Couldn't find username and password in request data"
    # Render the response as json
    if request.method == "GET":
        # Browser flow: stash the result in the flask session and render HTML.
        session.update({"session_data": response})
        if response["type"] == "success":
            return redirect("/")
        log.debug("Rendering command template")
        return render_template("command.html")
    else:
        return tools.return_json(response)


@api.route('/end_session', methods=["GET", "POST"])
def end_session():
    """
    End a session
    :param session_id:
    :return End the session:
    """
    log.info(":API:/api/end_session")
    response = {"type": None, "data": {}, "text": None}
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    try:
        session_id = request_data["session_id"]
        # Check for the session id in the core.sessions dictionary
        if session_id in core.sessions.keys():
            log.info(":{0}:Ending session".format(session_id))
            del core.sessions[session_id]
            response["type"] = "success"
            response["text"] = "Ended session"
        else:
            response["type"] = "error"
            response["text"] = "Session id {0} wasn't found in core.sessions".format(session_id)
    except KeyError:
        response["type"] = "error"
        response["text"] = "Couldn't find session id in request data"
    # Render the response as json
    return tools.return_json(response)


@api.route('/check_session', methods=["GET", "POST"])
def check_session():
    """
    Check if a session is valid
    :param: session_id
    :return:
    """
    log.info(":API:/api/check_session")
    response = {"type": None, "text": None, "data": {}}
    if request.is_json:
        request_data = request.get_json()
    else:
        request_data = request.form
    try:
        session_id = request_data["session_id"]
        session_valid = (session_id in core.sessions.keys())
        response["data"].update({"valid": session_valid})
        # NOTE(review): type is set to "success" before input validation and is
        # later overwritten to "error" on invalid input — order is fragile.
        response["type"] = "success"
        if tools.check_string(session_id):
            if session_valid:
                response["text"] = "Session id {0} is valid".format(session_id)
            else:
                response["text"] = "Session id {0} is invalid".format(session_id)
        else:
            response["type"] = "error"
            response["text"] = "Invalid input"
    except KeyError:
        response["type"] = "error"
        response["text"] = "Couldn't find session_id in request data"
        response["data"].update({"valid": False})
    return tools.return_json(response)


@api.route('/respond', methods=["GET", "POST"])
def command_response():
    """
    Api path for responding to a command question
    :param session_id:
    :param command_id:
    :return:
    """
    # NOTE(review): this handler is truncated in the available source — the
    # trailing else-branches and return statement are not visible here.
    log.info(":API:/api/respond")
    response = {"type": None, "text": None, "data": {}}
    if request.is_json:
        request_data = request.get_json()
        try:
            log.debug(request_data.keys())
            command_id = request_data["command_id"]
            session_id = request_data["session_id"]
            response_value = request_data["value"]
            #Validate the JSON response object
            if tools.check_string([command_id, session_id]):
                if session_id in core.sessions.keys():
                    session_data = core.sessions[session_id]
                    session_commands = session_data["commands"]
                    # Find the pending command object matching command_id.
                    response_command = None
                    for command_obj in session_commands:
                        if command_obj["id"] == command_id:
                            response_command = command_obj
                    if response_command:
                        if "function" in response_command.keys() and "event" in response_command.keys():
                            response_function = response_command["function"]
                            log.info(":{0}: Executing response function {1} with response {2}".format(
                                command_id, response_function, response_value
                            ))
                            #Execute the response
                            try:
                                response_result = response_function(response_value, response_command["event"])
                                log.info(":{0}:Successfully executed response, returning {1}".format(
                                    session_id, tools.fold(response_result)
                                ))
                                # The callback's return value replaces the
                                # whole response payload.
                                response = response_result
                            except Exception:
                                exc_type, exc_value, exc_traceback = sys.exc_info()
                                error_string = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
                                log.error(error_string)
                                username = session_data["username"]
                                user_table = db["users"].find_one(username=username)
                                if user_table:
                                    response["type"] = "error"
                                    # Admins get the raw traceback; others a
                                    # generic message.
                                    if user_table["admin"]:
                                        response["text"] = error_string
                                    else:
                                        response["text"] = "An error has occurred while trying to fetch a response." \
                                                           "Please contact <EMAIL> to report the error and " \
                                                           "get more information"
                                else:
                                    log.error("USER {0} NOT FOUND IN DATABASE. WARNING.".format(username))
                                    response["type"] = "error"
                                    response["text"] = "A database error has occurred. Please contact <EMAIL>" \
                                                       "to report the error, along with the circumstances under which it" \
                                                       "occurred."
                        else:
                            response["type"] = "error"
                            response["text"] = "Command {0} didn't register for a response or didn't" \
                                               " register the required data for a response.".format(command_id)
                    else:
                        response["type"] = "error"
                        response["text"] = "Couldn't find a command object in session {0} with command id {1}".format(
                            session_id, command_id
                        )
                else:
<gh_stars>1-10
# swaty SWAT-model case driver: configuration parsing, workspace layout and
# TxtInOut staging for a SWAT simulation/calibration case.
import os,stat
import sys
import glob
import shutil
import numpy as np
from pathlib import Path
import tarfile
import subprocess
from shutil import copyfile
from abc import ABCMeta, abstractmethod
import datetime
from shutil import copy2
import json
from json import JSONEncoder
from pyearth.toolbox.data.convert_time_series_daily_to_monthly import convert_time_series_daily_to_monthly
from swaty.auxiliary.text_reader_string import text_reader_string
from swaty.auxiliary.line_count import line_count
from swaty.classes.watershed import pywatershed
from swaty.classes.subbasin import pysubbasin
from swaty.classes.hru import pyhru
from swaty.classes.soil import pysoil
from swaty.classes.swatpara import swatpara

# Default case date stamp: today as YYYYMMDD.
pDate = datetime.datetime.today()
sDate_default = "{:04d}".format(pDate.year) + "{:02d}".format(pDate.month) + "{:02d}".format(pDate.day)

class CaseClassEncoder(JSONEncoder):
    """JSON encoder that knows how to serialize numpy scalars/arrays and the
    swaty model classes (each of which provides a tojson() method)."""
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.float32):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, pywatershed):
            return json.loads(obj.tojson())
        if isinstance(obj, pysubbasin):
            return json.loads(obj.tojson())
        if isinstance(obj, pyhru):
            return json.loads(obj.tojson())
        if isinstance(obj, pysoil):
            return json.loads(obj.tojson())
        if isinstance(obj, swatpara):
            return json.loads(obj.tojson())
        if isinstance(obj, list):
            pass
        return JSONEncoder.default(self, obj)

class swatcase(object):
    """One SWAT model case: holds run flags, simulation period, workspace
    paths and the watershed/subbasin/HRU/soil object hierarchy.

    NOTE(review): this class definition is truncated in the available source —
    setup() and any later methods are cut off.
    """
    __metaclass__ = ABCMeta
    # --- run-control flags (overridden from aConfig_in in __init__) ---
    iCase_index=0
    iSiteID=0
    iFlag_run =0
    iFlag_standalone=1
    iFlag_simulation=1
    iFlag_initialization=1
    iFlag_calibration=0
    iFlag_watershed=0
    iFlag_subbasin=0
    iFlag_hru=0
    iFlag_mode=0
    # --- simulation period ---
    iYear_start=0
    iYear_end=0
    iMonth_start=0
    iMonth_end=0
    iDay_start=0
    iDay_end=0
    nstress=0
    nsegment =0
    nhru=0 #total nhru
    nhru_combination=0 #unique hru
    aConfig_in=None
    #aParameter_watershed = None
    #aParameter_subbasin = None
    #aParameter_subbasin_name = None
    pWatershed = None
    aSubbasin=None
    aHru=None
    #aParameter_hru = None
    #aParameter_hru_name = None
    nParameter=0
    nParameter_watershed=0
    nParameter_subbasin=0
    nParameter_hru=0
    nParameter_soil=0
    # --- workspace and filename settings ---
    sFilename_swat_current = ''
    sFilename_model_configuration=''
    sWorkspace_input=''
    sWorkspace_output=''
    sWorkspace_output_case=''
    sFilename_model_configuration=''
    sFilename_observation_discharge=''
    sFilename_LandUseSoilsReport=''
    sFilename_HRULandUseSoilsReport=''
    sRegion=''
    sModel=''
    sCase=''
    sDate=''
    sSiteID=''
    sDate_start =''
    sDate_end=''
    def __init__(self, aConfig_in,\
        iFlag_read_discretization_in=None,\
        iFlag_standalone_in= None,\
        sDate_in=None, sWorkspace_output_in=None):
        """Build a case from a configuration dict; explicit keyword arguments
        take precedence over the corresponding aConfig_in entries."""
        if 'iFlag_run' in aConfig_in:
            self.iFlag_run = int(aConfig_in['iFlag_run'])
        if iFlag_standalone_in is not None:
            self.iFlag_standalone = iFlag_standalone_in
        else:
            if 'iFlag_standalone' in aConfig_in:
                self.iFlag_standalone = int(aConfig_in['iFlag_standalone'])
            else:
                self.iFlag_standalone=1
        if iFlag_read_discretization_in is not None:
            self.iFlag_read_discretization = 1
        else:
            if 'iFlag_read_discretization' in aConfig_in:
                self.iFlag_read_discretization =int(aConfig_in['iFlag_read_discretization'])
            else:
                self.iFlag_read_discretization=0
        if 'iFlag_initialization' in aConfig_in:
            self.iFlag_initialization = int(aConfig_in['iFlag_initialization'])
        if 'iFlag_calibration' in aConfig_in:
            self.iFlag_calibration = int(aConfig_in['iFlag_calibration'])
        if 'iFlag_simulation' in aConfig_in:
            self.iFlag_simulation = int(aConfig_in['iFlag_simulation'])
        if 'iFlag_watershed' in aConfig_in:
            self.iFlag_watershed = int(aConfig_in['iFlag_watershed'])
        if 'iFlag_subbasin' in aConfig_in:
            self.iFlag_subbasin = int(aConfig_in['iFlag_subbasin'])
        if 'iFlag_hru' in aConfig_in:
            self.iFlag_hru = int(aConfig_in['iFlag_hru'])
        if 'iFlag_mode' in aConfig_in:
            self.iFlag_mode = int( aConfig_in['iFlag_mode'])
        if 'iFlag_replace_parameter' in aConfig_in:
            self.iFlag_replace_parameter= int( aConfig_in['iFlag_replace_parameter'] )
        if 'iYear_start' in aConfig_in:
            self.iYear_start = int( aConfig_in['iYear_start'] )
        if 'iYear_end' in aConfig_in:
            self.iYear_end = int( aConfig_in['iYear_end'] )
        if 'iMonth_start' in aConfig_in:
            self.iMonth_start = int( aConfig_in['iMonth_start'])
        if 'iMonth_end' in aConfig_in:
            self.iMonth_end = int( aConfig_in['iMonth_end'] )
        if 'iDay_start' in aConfig_in:
            self.iDay_start = int( aConfig_in['iDay_start'] )
        if 'iDay_end' in aConfig_in:
            self.iDay_end = int( aConfig_in['iDay_end'] )
        if 'nstress' in aConfig_in:
            self.nstress = int( aConfig_in['nstress'] )
        else:
            pass
        if 'sRegion' in aConfig_in:
            self.sRegion = aConfig_in[ 'sRegion']
        if 'sModel' in aConfig_in:
            self.sModel = aConfig_in[ 'sModel']
        if 'sPython' in aConfig_in:
            self.sPython = aConfig_in[ 'sPython']
        if 'sFilename_model_configuration' in aConfig_in:
            self.sFilename_model_configuration = aConfig_in[ 'sFilename_model_configuration']
        if 'sWorkspace_home' in aConfig_in:
            self.sWorkspace_home = aConfig_in[ 'sWorkspace_home']
        if 'sWorkspace_input' in aConfig_in:
            self.sWorkspace_input = aConfig_in[ 'sWorkspace_input']
        if sWorkspace_output_in is not None:
            self.sWorkspace_output = sWorkspace_output_in
        else:
            if 'sWorkspace_output' in aConfig_in:
                self.sWorkspace_output = aConfig_in[ 'sWorkspace_output']
        #the model can be run as part of hexwatershed or standalone
        if 'sWorkspace_bin' in aConfig_in:
            self.sWorkspace_bin= aConfig_in[ 'sWorkspace_bin']
        if 'iCase_index' in aConfig_in:
            iCase_index = int(aConfig_in['iCase_index'])
        else:
            iCase_index=1
        sCase_index = "{:03d}".format( iCase_index )
        if sDate_in is not None:
            self.sDate= sDate_in
        else:
            if 'sDate' in aConfig_in:
                self.sDate = aConfig_in[ 'sDate']
            else:
                self.sDate = sDate_default
        self.iCase_index = iCase_index
        # Case name is model + datestamp + zero-padded index, e.g. swat20230101001.
        sCase = self.sModel + self.sDate + sCase_index
        self.sCase = sCase
        if self.iFlag_standalone == 1:
            #in standalone case, will add case information
            sPath = str(Path(self.sWorkspace_output) / sCase)
            self.sWorkspace_output_case = sPath
            Path(sPath).mkdir(parents=True, exist_ok=True)
        else:
            #use specified output path, also do not add output or input tag
            self.sWorkspace_output_case = self.sWorkspace_output
        if 'sJob' in aConfig_in:
            self.sJob = aConfig_in['sJob']
        else:
            self.sJob = 'swat'
        if 'sWorkspace_simulation_copy' in aConfig_in:
            self.sWorkspace_simulation_copy= aConfig_in[ 'sWorkspace_simulation_copy']
            # If the given path does not exist, treat it as relative to the
            # input workspace.
            if (os.path.exists(self.sWorkspace_simulation_copy)):
                pass
            else:
                self.sWorkspace_simulation_copy = os.path.join(self.sWorkspace_input, self.sWorkspace_simulation_copy )
                pass
        else:
            self.sWorkspace_simulation_copy='TxtInOut.tar'
            self.sWorkspace_simulation_copy = os.path.join(self.sWorkspace_input, self.sWorkspace_simulation_copy )
        if 'sFilename_LandUseSoilsReport' in aConfig_in:
            self.sFilename_LandUseSoilsReport = aConfig_in[ 'sFilename_LandUseSoilsReport']
        else:
            self.sFilename_LandUseSoilsReport = 'LandUseSoilsReport.txt'
        self.sFilename_LandUseSoilsReport = os.path.join(self.sWorkspace_input, self.sFilename_LandUseSoilsReport )
        if 'sFilename_HRULandUseSoilsReport' in aConfig_in:
            self.sFilename_HRULandUseSoilsReport = aConfig_in[ 'sFilename_HRULandUseSoilsReport']
        else:
            self.sFilename_HRULandUseSoilsReport = 'HRULandUseSoilsReport.txt'
        self.sFilename_HRULandUseSoilsReport = os.path.join(self.sWorkspace_input, self.sFilename_HRULandUseSoilsReport )
        if 'sFilename_hru_combination' in aConfig_in:
            self.sFilename_hru_combination = aConfig_in['sFilename_hru_combination']
        else:
            self.sFilename_hru_combination = 'hru_combination.txt'
        self.sFilename_hru_combination = os.path.join(self.sWorkspace_input, self.sFilename_hru_combination )
        if 'sFilename_watershed_configuration' in aConfig_in:
            self.sFilename_watershed_configuration = aConfig_in['sFilename_watershed_configuration']
        else:
            self.sFilename_watershed_configuration = 'watershed_configuration.txt'
        self.sFilename_watershed_configuration = os.path.join(self.sWorkspace_input, self.sFilename_watershed_configuration )
        if 'sFilename_hru_info' in aConfig_in:
            self.sFilename_hru_info = aConfig_in['sFilename_hru_info']
        else:
            # NOTE(review): this looks up the key 'hru_info.txt' in the config
            # dict, which raises KeyError when absent; the literal filename
            # 'hru_info.txt' was presumably intended — confirm.
            self.sFilename_hru_info = aConfig_in['hru_info.txt']
        self.sFilename_hru_info = os.path.join(self.sWorkspace_input, self.sFilename_hru_info )
        #soil
        self.sFilename_soil_combination = os.path.join(self.sWorkspace_input, 'soil_combination.txt')
        self.sFilename_soil_info = os.path.join(self.sWorkspace_input, 'soil_info.txt')
        #set up instance
        self.pWatershed = pywatershed()
        if self.iFlag_read_discretization == 1:
            #read basin
            dummy = text_reader_string(self.sFilename_watershed_configuration, cDelimiter_in=',')
            dummy1 = np.array(dummy[:,0])
            aSubbasin_info = dummy1.astype(int)
            self.nsubbasin = aSubbasin_info.shape[0]
            self.aSubbasin=list()
            for i in range(self.nsubbasin):
                pdummy = pysubbasin()
                pdummy.lIndex = i+1
                self.aSubbasin.append(pdummy)
            #read hru
            aHru_combination = text_reader_string(self.sFilename_hru_combination, cDelimiter_in=',')
            self.nhru_combination = len(aHru_combination)
            aHru_info = text_reader_string(self.sFilename_hru_info, cDelimiter_in=',')
            self.nhru = len(aHru_info)
            aHru_info= np.reshape(aHru_info, (self.nhru))
            #read soil
            aSoil_info = text_reader_string(self.sFilename_soil_info, cDelimiter_in=',')
            aSoil_info = np.array(aSoil_info)
            aSoil_combinaiton = text_reader_string(self.sFilename_soil_combination, cDelimiter_in=',')
            self.nsoil_combination = len(aSoil_combinaiton)
            self.aHru_combination=list()
            # One pyhru per unique HRU combination, with its soil layers.
            for iHru_combination in range(1, self.nhru_combination+1):
                pdummy = pyhru()
                pdummy.lIndex = iHru_combination
                sHru = aHru_combination[iHru_combination-1]
                dummy_index = np.where(aHru_info == sHru)
                dummy_index2= dummy_index[0][0]
                dummy = aSoil_info[dummy_index2,:]
                pdummy.nSoil_layer= int( dummy[1])
                pdummy.aSoil=list()
                for j in range(pdummy.nSoil_layer):
                    dummy_soil = pysoil()
                    pdummy.aSoil.append(dummy_soil)
                self.aHru_combination.append(pdummy)
            #self.aHru=list()
            #dummy_index = 0
            #for i in range(self.nsubbasin):
            #    nhru = aSubbasin_info[i]
            #    for j in range(nhru):
            #        sHru = aHru_info[dummy_index]
            #        pdummy = pyhru()
            #        pdummy.lIndex = dummy_index + 1
            #        sHru = aHru_combination[iHru_combination-1]
            #        dummy_index = np.where(aHru_info == sHru)
            #        dummy_index2= dummy_index[0][0]
            #        dummy = aSoil_info[dummy_index2,:]
            #        pdummy.nSoil_layer= int( dummy[1])
            #        pdummy.aSoil=list()
            #        for j in range(pdummy.nSoil_layer):
            #            dummy_soil = pysoil()
            #            pdummy.aSoil.append(dummy_soil)
            #        self.aHru_combination.append(pdummy)
        else:
            if 'nsegment' in aConfig_in:
                self.nsegment = int( aConfig_in[ 'nsegment'] )
            if 'nsubbasin' in aConfig_in:
                self.nsubbasin = int (aConfig_in[ 'nsubbasin'])
                self.aSubbasin=list()
                for i in range(self.nsubbasin):
                    pdummy = pysubbasin()
                    pdummy.lIndex = i+1
                    self.aSubbasin.append(pdummy)
            if 'nhru' in aConfig_in:
                # NOTE(review): assigns a local `nhru`, not self.nhru —
                # presumably should set the attribute; confirm.
                nhru = int( aConfig_in['nhru'])
        if 'sFilename_observation_discharge' in aConfig_in:
            self.sFilename_observation_discharge = aConfig_in['sFilename_observation_discharge']
        if 'sFilename_swat' in aConfig_in:
            self.sFilename_swat = aConfig_in[ 'sFilename_swat']
        # Count the number of whole/partial months in the simulation window.
        iMonth_count = 0
        for iYear in range( self.iYear_start, self.iYear_end +1):
            if iYear == self.iYear_start:
                iMonth_start = self.iMonth_start
            else:
                iMonth_start = 1
            if iYear == self.iYear_end :
                iMonth_end = self.iMonth_end
            else:
                iMonth_end = 12
            for iMonth in range(iMonth_start, iMonth_end+1):
                iMonth_count = iMonth_count + 1
            pass
        self.nstress_month = iMonth_count
        if 'nParameter_watershed' in aConfig_in:
            self.nParameter_watershed = int(aConfig_in['nParameter_watershed'] )
        else:
            self.nParameter_watershed = 0
        if 'nParameter_subbasin' in aConfig_in:
            self.nParameter_subbasin = int(aConfig_in['nParameter_subbasin'] )
        else:
            self.nParameter_subbasin = 0
        if 'nParameter_hru' in aConfig_in:
            self.nParameter_hru = int(aConfig_in['nParameter_hru'] )
        else:
            self.nParameter_hru = 0
        # Push the calibration parameter definitions down into the model objects.
        if 'aParameter_watershed' in aConfig_in:
            dummy = aConfig_in['aParameter_watershed']
            self.pWatershed.setup_parameter(dummy)
        if 'aParameter_subbasin' in aConfig_in:
            for i in range(self.nsubbasin):
                dummy = aConfig_in['aParameter_subbasin']
                self.aSubbasin[i].setup_parameter(dummy)
        if 'aParameter_hru' in aConfig_in:
            for i in range(self.nhru_combination):
                dummy = aConfig_in['aParameter_hru']
                self.aHru_combination[i].setup_parameter(dummy)
        if 'aParameter_soil' in aConfig_in:
            for i in range(self.nhru_combination):
                nsoil_layer = self.aHru_combination[i].nSoil_layer
                for j in range(nsoil_layer):
                    dummy = aConfig_in['aParameter_soil']
                    self.aHru_combination[i].aSoil[j].setup_parameter(dummy)
        return
    def copy_TxtInOut_files(self):
        """
        sFilename_configuration_in
        sModel
        """
        # Stage the SWAT TxtInOut input deck into the target case directory,
        # extracting it from a tar archive first when necessary.
        sWorkspace_output_case = self.sWorkspace_output_case
        if self.iFlag_calibration == 1:
            # Calibration runs operate in the current working directory.
            sWorkspace_target_case = os.getcwd()
        else:
            sWorkspace_target_case = sWorkspace_output_case
        Path(sWorkspace_target_case).mkdir(parents=True, exist_ok=True)
        if not os.path.exists(self.sWorkspace_simulation_copy):
            print(self.sWorkspace_simulation_copy)
            print('The simulation copy does not exist!')
            return
        else:
            #we might need to extract
            if os.path.isfile(self.sWorkspace_simulation_copy):
                sBasename = Path(self.sWorkspace_simulation_copy).stem
                #delete previous folder
                sTarget_path = str(Path(self.sWorkspace_output) /sBasename)
                if os.path.exists(sTarget_path):
                    shutil.rmtree(sTarget_path)
                    pass
                pTar = tarfile.open(self.sWorkspace_simulation_copy)
                pTar.extractall(self.sWorkspace_output) # specify which folder to extract to
                pTar.close()
                self.sWorkspace_simulation_copy = sTarget_path
            else:
                #this is a folder
                pass
        sWorkspace_simulation_copy= self.sWorkspace_simulation_copy
        #the following file will be copied
        aExtension = ('.pnd','.rte','.sub','.swq','.wgn','.wus',\
            '.chm','.gw','.hru','.mgt','sdr','.sep',\
            '.sol','ATM','bsn','wwq','deg','.cst',\
            'dat','fig','cio','fin','dat','.pcp','.tmp','.slr','.hmd' )
        #we need to be careful that Tmp is different in python/linux with tmp
        for sExtension in aExtension:
            sDummy = '*' + sExtension
            sRegax = os.path.join(str(Path(sWorkspace_simulation_copy) ) , sDummy )
            if sExtension == '.tmp':
                # .tmp files get a lower-cased name on the copy destination.
                for sFilename in glob.glob(sRegax):
                    sBasename_with_extension = os.path.basename(sFilename)
                    sFilename_new = os.path.join(str(Path(sWorkspace_target_case)) , sBasename_with_extension.lower() )
                    #sFilename_new = sWorkspace_target_case + slash + sBasename_with_extension.lower()
                    copyfile(sFilename, sFilename_new)
            else:
                for sFilename in glob.glob(sRegax):
                    sBasename_with_extension = os.path.basename(sFilename)
                    sFilename_new = os.path.join(str(Path(sWorkspace_target_case)) , sBasename_with_extension )
                    #sFilename_new = sWorkspace_target_case + slash + sBasename_with_extension
                    copyfile(sFilename, sFilename_new)
        print('Finished copying all input files')
    def prepare_pest_template_files(self):
        # Generate the PEST template files at every level of the hierarchy.
        self.swaty_prepare_watershed_template_file()
        self.swaty_prepare_subbasin_template_file()
        self.swaty_prepare_hru_template_file()
        self.swaty_prepare_soil_template_file()
        return
    def setup(self):
        """
        Set up a SWAT case
        """
        # NOTE(review): method truncated in the available source.
        if (self.iFlag_initialization ==1):
            #self.copy_TxtInOut_files()
<reponame>schlamar/latexmk.py
#!/usr/bin/env python
# coding: utf-8
'''
    latexmake
    ~~~~~~~~~

    Python module for latexmk.py which completely
    automates the process of generating a LaTeX document.

    :copyright: (c) 2013 by <NAME>
    :license: MIT, see LICENSE for more details.
'''
from __future__ import with_statement
from collections import defaultdict
from itertools import chain
from optparse import OptionParser
from subprocess import Popen, call
import codecs
import filecmp
import fnmatch
import logging
import os
import re
import shutil
import sys
import time

__author__ = '<NAME>'
__version__ = '0.5dev'
__license__ = 'MIT'

# Patterns scanned in .aux / .log / .bib files to drive the rerun logic.
BIB_PATTERN = re.compile(r'\\bibdata\{(.*)\}')
CITE_PATTERN = re.compile(r'\\citation\{(.*)\}')
BIBCITE_PATTERN = re.compile(r'\\bibcite\{(.*)\}\{(.*)\}')
BIBENTRY_PATTERN = re.compile(r'@.*\{(.*),\s')
ERROR_PATTTERN = re.compile(r'(?:^! (.*\nl\..*)$)|(?:^! (.*)$)|'
                            '(No pages of output.)', re.M)
# Warnings in the latex log that require another latex pass.
LATEX_RERUN_PATTERNS = [re.compile(pattr) for pattr in
                        [r'LaTeX Warning: Reference .* undefined',
                         r'LaTeX Warning: There were undefined references\.',
                         r'LaTeX Warning: Label\(s\) may have changed\.',
                         r'No file .*(\.toc|\.lof)\.']]
TEXLIPSE_MAIN_PATTERN = re.compile(r'^mainTexFile=(.*)(?:\.tex)$', re.M)
LATEX_FLAGS = ['-interaction=nonstopmode', '-shell-escape', '--synctex=1']
MAX_RUNS = 4
NO_LATEX_ERROR = (
    'Could not run command "%s". '
    'Is your latex distribution under your PATH?'
)


class LatexMaker(object):
    '''
    Main class for generation process.

    NOTE(review): the class is truncated in the available source —
    need_latex_rerun() is cut off mid-docstring.
    '''
    def __init__(self, project_name, opt):
        self.opt = opt
        self.log = self._setup_logger()
        # Special name ".texlipse" means: read the real project name from
        # Eclipse/TeXlipse's config file.
        if project_name == '.texlipse':
            self.project_name = self._parse_texlipse_config()
        else:
            self.project_name = project_name
        if self.project_name.endswith('.tex'):
            self.project_name = self.project_name[:-4]
        if self.opt.pdf:
            self.latex_cmd = 'pdflatex'
        else:
            self.latex_cmd = 'latex'
        self.out = ''                 # captured contents of the latest .log
        self.glossaries = dict()      # name -> (input ext, output ext)
        self.latex_run_counter = 0
        self.bib_file = ''

    def _setup_logger(self):
        '''Set up a logger.'''
        log = logging.getLogger('latexmk.py')
        handler = logging.StreamHandler()
        log.addHandler(handler)
        if self.opt.verbose:
            log.setLevel(logging.INFO)
        return log

    def _parse_texlipse_config(self):
        '''
        Read the project name from the
        texlipse config file ".texlipse".
        '''
        # If Eclipse's workspace refresh, the
        # ".texlipse"-File will be newly created,
        # so try again after short sleep if
        # the file is still missing.
        if not os.path.isfile('.texlipse'):
            time.sleep(0.1)
            if not os.path.isfile('.texlipse'):
                self.log.error('! Fatal error: File .texlipse is missing.')
                self.log.error('! Exiting...')
                sys.exit(1)
        with open('.texlipse') as fobj:
            content = fobj.read()
        match = TEXLIPSE_MAIN_PATTERN.search(content)
        if match:
            project_name = match.groups()[0]
            self.log.info('Found inputfile in ".texlipse": %s.tex'
                          % project_name)
            return project_name
        else:
            self.log.error('! Fatal error: Parsing .texlipse failed.')
            self.log.error('! Exiting...')
            sys.exit(1)

    def _read_latex_files(self):
        '''
        Check if some latex output files exist
        before first latex run, process them and
        return the generated data.

        - Parsing *.aux for citations counter and
          existing glossaries.
        - Getting content of files to detect changes.
            - *.toc file
            - all available glossaries files
        '''
        if os.path.isfile('%s.aux' % self.project_name):
            cite_counter = self.generate_citation_counter()
            self.read_glossaries()
        else:
            cite_counter = {'%s.aux' % self.project_name: defaultdict(int)}
        fname = '%s.toc' % self.project_name
        if os.path.isfile(fname):
            with open(fname) as fobj:
                toc_file = fobj.read()
        else:
            toc_file = ''
        gloss_files = dict()
        for gloss in self.glossaries:
            ext = self.glossaries[gloss][1]
            filename = '%s.%s' % (self.project_name, ext)
            if os.path.isfile(filename):
                with open(filename) as fobj:
                    gloss_files[gloss] = fobj.read()
        return cite_counter, toc_file, gloss_files

    def _is_toc_changed(self, toc_file):
        '''
        Test if the *.toc file changed during
        the first latex run.
        '''
        # Implicitly returns None (falsy) when unchanged or file missing.
        fname = '%s.toc' % self.project_name
        if os.path.isfile(fname):
            with open(fname) as fobj:
                if fobj.read() != toc_file:
                    return True

    def _need_bib_run(self, old_cite_counter):
        '''
        Determine if you need to run "bibtex".

        1. Check if *.bib exists.
        2. Check latex output for hints.
        3. Test if the numbers of citations changed
           during first latex run.
        4. Examine *.bib for changes.
        '''
        with open('%s.aux' % self.project_name) as fobj:
            match = BIB_PATTERN.search(fobj.read())
        if not match:
            return False
        else:
            self.bib_file = match.group(1)
        if not os.path.isfile('%s.bib' % self.bib_file):
            self.log.warning('Could not find *.bib file.')
            return False
        if (re.search('No file %s.bbl.' % self.project_name, self.out) or
                re.search('LaTeX Warning: Citation .* undefined', self.out)):
            return True
        if old_cite_counter != self.generate_citation_counter():
            return True
        # Compare against the snapshot saved by bibtex_run() last time.
        if os.path.isfile('%s.bib.old' % self.bib_file):
            new = '%s.bib' % self.bib_file
            old = '%s.bib.old' % self.bib_file
            if not filecmp.cmp(new, old):
                return True

    def read_glossaries(self):
        '''
        Read all existing glossaries in the main aux-file.
        '''
        filename = '%s.aux' % self.project_name
        with open(filename) as fobj:
            main_aux = fobj.read()
        pattern = r'\\@newglossary\{(.*)\}\{.*\}\{(.*)\}\{(.*)\}'
        for match in re.finditer(pattern, main_aux):
            name, ext_i, ext_o = match.groups()
            self.glossaries[name] = (ext_i, ext_o)

    def check_errors(self):
        '''
        Check if errors occured during a latex run by
        scanning the output.
        '''
        errors = ERROR_PATTTERN.findall(self.out)
        # "errors" is a list of tuples
        if errors:
            self.log.error('! Errors occurred:')
            self.log.error('\n'.join(
                [error.replace('\r', '').strip() for error
                 in chain(*errors) if error.strip()]
            ))
            self.log.error('! See "%s.log" for details.' % self.project_name)
            if self.opt.exit_on_error:
                self.log.error('! Exiting...')
                sys.exit(1)

    def generate_citation_counter(self):
        '''
        Generate dictionary with the number of citations in all
        included files. If this changes after the first latex run,
        you have to run "bibtex".
        '''
        cite_counter = dict()
        filename = '%s.aux' % self.project_name
        with open(filename) as fobj:
            main_aux = fobj.read()
        cite_counter[filename] = _count_citations(filename)
        # Included files contribute their own .aux files.
        for match in re.finditer(r'\\@input\{(.*.aux)\}', main_aux):
            filename = match.groups()[0]
            try:
                counter = _count_citations(filename)
            except IOError:
                pass
            else:
                cite_counter[filename] = counter
        return cite_counter

    def latex_run(self):
        '''
        Start latex run.
        '''
        self.log.info('Running %s...' % self.latex_cmd)
        cmd = [self.latex_cmd]
        cmd.extend(LATEX_FLAGS)
        cmd.append('%s.tex' % self.project_name)
        try:
            with open(os.devnull, 'w') as null:
                Popen(cmd, stdout=null, stderr=null).wait()
        except OSError:
            self.log.error(NO_LATEX_ERROR % self.latex_cmd)
        self.latex_run_counter += 1
        # Read back the log (tolerating non-UTF8 bytes) for error/rerun scans.
        fname = '%s.log' % self.project_name
        with codecs.open(fname, 'r', 'utf-8', 'replace') as fobj:
            self.out = fobj.read()
        self.check_errors()

    def bibtex_run(self):
        '''
        Start bibtex run.
        '''
        self.log.info('Running bibtex...')
        try:
            with open(os.devnull, 'w') as null:
                Popen(['bibtex', self.project_name], stdout=null).wait()
        except OSError:
            self.log.error(NO_LATEX_ERROR % 'bibtex')
            sys.exit(1)
        # Snapshot the .bib so _need_bib_run can detect future edits.
        shutil.copy('%s.bib' % self.bib_file,
                    '%s.bib.old' % self.bib_file)

    def makeindex_runs(self, gloss_files):
        '''
        Check for each glossary if it has to be regenerated
        with "makeindex".

        @return: True if "makeindex" was called.
        '''
        gloss_changed = False
        for gloss in self.glossaries:
            make_gloss = False
            ext_i, ext_o = self.glossaries[gloss]
            fname_in = '%s.%s' % (self.project_name, ext_i)
            fname_out = '%s.%s' % (self.project_name, ext_o)
            if re.search('No file %s.' % fname_in, self.out):
                make_gloss = True
            if not os.path.isfile(fname_out):
                make_gloss = True
            else:
                with open(fname_out) as fobj:
                    try:
                        if gloss_files[gloss] != fobj.read():
                            make_gloss = True
                    except KeyError:
                        # No snapshot from before the run -> regenerate.
                        make_gloss = True
            if make_gloss:
                self.log.info('Running makeindex (%s)...' % gloss)
                try:
                    cmd = ['makeindex', '-q', '-s',
                           '%s.ist' % self.project_name,
                           '-o', fname_in, fname_out]
                    with open(os.devnull, 'w') as null:
                        Popen(cmd, stdout=null).wait()
                except OSError:
                    self.log.error(NO_LATEX_ERROR % 'makeindex')
                    sys.exit(1)
                gloss_changed = True
        return gloss_changed

    def open_preview(self):
        '''
        Try to open a preview of the generated document.
        Currently only supported on Windows.
        '''
        self.log.info('Opening preview...')
        if self.opt.pdf:
            ext = 'pdf'
        else:
            ext = 'dvi'
        filename = '%s.%s' % (self.project_name, ext)
        if sys.platform == 'win32':
            try:
                os.startfile(filename)
            except OSError:
                self.log.error(
                    'Preview-Error: Extension .%s is not linked to a '
                    'specific application!' % ext
                )
        elif sys.platform == 'darwin':
            call(['open', filename])
        else:
            self.log.error(
                'Preview-Error: Preview function is currently not '
                'supported on Linux.'
            )

    def need_latex_rerun(self):
        '''
        Test for all rerun patterns if they match the output.
''' for pattern in LATEX_RERUN_PATTERNS: if pattern.search(self.out): return True return False def run(self): '''Run the LaTeX compilation.''' # store files self.old_dir = [] if self.opt.clean: self.old_dir = os.listdir('.') cite_counter, toc_file, gloss_files = self._read_latex_files() self.latex_run() self.read_glossaries() gloss_changed = self.makeindex_runs(gloss_files) if gloss_changed or self._is_toc_changed(toc_file): self.latex_run() if self._need_bib_run(cite_counter): self.bibtex_run() self.latex_run() while (self.latex_run_counter < MAX_RUNS): if not self.need_latex_rerun(): break self.latex_run() if self.opt.check_cite: cites = set() with open('%s.aux' % self.project_name) as fobj: aux_content = fobj.read() for match in BIBCITE_PATTERN.finditer(aux_content): name = match.groups()[0] cites.add(name) with open('%s.bib' % self.bib_file) as fobj: bib_content = fobj.read() for match in BIBENTRY_PATTERN.finditer(bib_content): name = match.groups()[0] if name not in cites: self.log.info('Bib entry not cited: "%s"' % name) if self.opt.clean: ending = '.dvi' if self.opt.pdf: ending = '.pdf' for fname in os.listdir('.'): if not (fname in self.old_dir or fname.endswith(ending)): try: os.remove(fname) except IOError: pass if self.opt.preview: self.open_preview() def _count_citations(aux_file): ''' Counts the citations in an aux-file. @return: defaultdict(int) - {citation_name: number, ...} ''' counter = defaultdict(int) with open(aux_file) as fobj: content = fobj.read() for match in CITE_PATTERN.finditer(content): name = match.groups()[0] counter[name] += 1 return counter def main(): ''' Set up "optparse" and pass the options to a new instance of L{LatexMaker}. 
''' prog = 'latexmk.py' version = __version__ usage = '%prog [options] [filename]' # Read description from doc doc_text = '' for line in __doc__.splitlines(): if line.find('#') == 0: break doc_text += ' %s\n' % line parser = OptionParser(prog=prog, usage=usage, version=version) parser.add_option('-c', '--clean', action='store_true', dest='clean', default=False, help='clean all temporary files after converting') parser.add_option('-q', '--quiet', action='store_false', dest='verbose', default=True, help='don\'t print status messages to stdout') parser.add_option('-n', '--no-exit', action='store_false', dest='exit_on_error', default=True, help='don\'t exit if error occurs') parser.add_option('-p', '--preview', action='store_true', dest='preview', default=False, help='try to open preview of generated document') parser.add_option('--dvi', action='store_false', dest='pdf', default=True, help='use "latex" instead of pdflatex')
228: return 'tpgrd' if table2Version == 129 and indicatorOfParameter == 227: return 'crnhgrd' if table2Version == 129 and indicatorOfParameter == 226: return 'htlcgrd' if table2Version == 129 and indicatorOfParameter == 225: return 'htccgrd' if table2Version == 129 and indicatorOfParameter == 224: return 'vdhgrd' if table2Version == 129 and indicatorOfParameter == 223: return 'ctmwgrd' if table2Version == 129 and indicatorOfParameter == 222: return 'ctzwgrd' if table2Version == 129 and indicatorOfParameter == 221: return 'nsgdgrd' if table2Version == 129 and indicatorOfParameter == 220: return 'ewgdgrd' if table2Version == 129 and indicatorOfParameter == 219: return 'vdmwgrd' if table2Version == 129 and indicatorOfParameter == 218: return 'vdzwgrd' if table2Version == 129 and indicatorOfParameter == 217: return 'dhlcgrd' if table2Version == 129 and indicatorOfParameter == 216: return 'dhccgrd' if table2Version == 129 and indicatorOfParameter == 215: return 'dhvdgrd' if table2Version == 129 and indicatorOfParameter == 214: return 'dhrgrd' if table2Version == 129 and indicatorOfParameter == 212: return 'tisrgrd' if table2Version == 129 and indicatorOfParameter == 211: return 'strcgrd' if table2Version == 129 and indicatorOfParameter == 210: return 'ssrcgrd' if table2Version == 129 and indicatorOfParameter == 209: return 'ttrcgrd' if table2Version == 129 and indicatorOfParameter == 208: return 'tsrcgrd' if table2Version == 129 and indicatorOfParameter == 207: return '10sigrd' if table2Version == 129 and indicatorOfParameter == 206: return 'tco3grd' if table2Version == 129 and indicatorOfParameter == 205: return 'rogrd' if table2Version == 129 and indicatorOfParameter == 204: return 'pawgrd' if table2Version == 129 and indicatorOfParameter == 203: return 'o3grd' if table2Version == 129 and indicatorOfParameter == 202: return 'mn2tgrd' if table2Version == 129 and indicatorOfParameter == 201: return 'mx2tgrd' if table2Version == 129 and indicatorOfParameter == 200: 
return 'vsogrd' if table2Version == 129 and indicatorOfParameter == 199: return 'veggrd' if table2Version == 129 and indicatorOfParameter == 198: return 'srcgrd' if table2Version == 129 and indicatorOfParameter == 197: return 'gwdgrd' if table2Version == 129 and indicatorOfParameter == 196: return 'mgwsgrd' if table2Version == 129 and indicatorOfParameter == 195: return 'lgwsgrd' if table2Version == 129 and indicatorOfParameter == 194: return 'btmpgrd' if table2Version == 129 and indicatorOfParameter == 193: return 'neovgrd' if table2Version == 129 and indicatorOfParameter == 192: return 'nwovgrd' if table2Version == 129 and indicatorOfParameter == 191: return 'nsovgrd' if table2Version == 129 and indicatorOfParameter == 190: return 'ewovgrd' if table2Version == 129 and indicatorOfParameter == 189: return 'sundgrd' if table2Version == 129 and indicatorOfParameter == 188: return 'hccgrd' if table2Version == 129 and indicatorOfParameter == 187: return 'mccgrd' if table2Version == 129 and indicatorOfParameter == 186: return 'lccgrd' if table2Version == 129 and indicatorOfParameter == 185: return 'cccgrd' if table2Version == 129 and indicatorOfParameter == 184: return 'swl3grd' if table2Version == 129 and indicatorOfParameter == 183: return 'stl3grd' if table2Version == 129 and indicatorOfParameter == 182: return 'egrd' if table2Version == 129 and indicatorOfParameter == 181: return 'nsssgrd' if table2Version == 129 and indicatorOfParameter == 180: return 'ewssgrd' if table2Version == 129 and indicatorOfParameter == 179: return 'ttrgrd' if table2Version == 129 and indicatorOfParameter == 178: return 'tsrgrd' if table2Version == 129 and indicatorOfParameter == 177: return 'strgrd' if table2Version == 129 and indicatorOfParameter == 176: return 'ssrgrd' if table2Version == 129 and indicatorOfParameter == 175: return 'strdgrd' if table2Version == 129 and indicatorOfParameter == 174: return 'algrd' if table2Version == 129 and indicatorOfParameter == 173: return 'srgrd' if 
table2Version == 129 and indicatorOfParameter == 172: return 'lsmgrd' if table2Version == 129 and indicatorOfParameter == 171: return 'swl2grd' if table2Version == 129 and indicatorOfParameter == 170: return 'stl2grd' if table2Version == 129 and indicatorOfParameter == 169: return 'ssrdgrd' if table2Version == 129 and indicatorOfParameter == 168: return '2dgrd' if table2Version == 129 and indicatorOfParameter == 167: return '2tgrd' if table2Version == 129 and indicatorOfParameter == 166: return '10vgrd' if table2Version == 129 and indicatorOfParameter == 165: return '10ugrd' if table2Version == 129 and indicatorOfParameter == 164: return 'tccgrd' if table2Version == 129 and indicatorOfParameter == 163: return 'slorgrd' if table2Version == 129 and indicatorOfParameter == 162: return 'anorgrd' if table2Version == 129 and indicatorOfParameter == 161: return 'isorgrd' if table2Version == 129 and indicatorOfParameter == 160: return 'sdorgrd' if table2Version == 129 and indicatorOfParameter == 159: return 'blhgrd' if table2Version == 129 and indicatorOfParameter == 158: return 'tspgrd' if table2Version == 129 and indicatorOfParameter == 157: return 'rgrd' if table2Version == 129 and indicatorOfParameter == 156: return 'ghgrd' if table2Version == 129 and indicatorOfParameter == 155: return 'dgrd' if table2Version == 129 and indicatorOfParameter == 154: return 'lwhrgrd' if table2Version == 129 and indicatorOfParameter == 153: return 'swhrgrd' if table2Version == 129 and indicatorOfParameter == 152: return 'lnspgrd' if table2Version == 129 and indicatorOfParameter == 151: return 'mslgrd' if table2Version == 129 and indicatorOfParameter == 150: return 'tnrgrd' if table2Version == 129 and indicatorOfParameter == 149: return 'snrgrd' if table2Version == 129 and indicatorOfParameter == 148: return 'chnkgrd' if table2Version == 129 and indicatorOfParameter == 147: return 'slhfgrd' if table2Version == 129 and indicatorOfParameter == 146: return 'sshfgrd' if table2Version == 129 
and indicatorOfParameter == 145: return 'bldgrd' if table2Version == 129 and indicatorOfParameter == 144: return 'sfgrd' if table2Version == 129 and indicatorOfParameter == 143: return 'cpgrd' if table2Version == 129 and indicatorOfParameter == 142: return 'lspgrd' if table2Version == 129 and indicatorOfParameter == 141: return 'sdgrd' if table2Version == 129 and indicatorOfParameter == 140: return 'swl1grd' if table2Version == 129 and indicatorOfParameter == 139: return 'stl1grd' if table2Version == 129 and indicatorOfParameter == 138: return 'vogrd' if table2Version == 129 and indicatorOfParameter == 137: return 'tcwvgrd' if table2Version == 129 and indicatorOfParameter == 136: return 'tcwgrd' if table2Version == 129 and indicatorOfParameter == 135: return 'wgrd' if table2Version == 129 and indicatorOfParameter == 134: return 'spgrd' if table2Version == 129 and indicatorOfParameter == 133: return 'qgrd' if table2Version == 129 and indicatorOfParameter == 132: return 'vgrd' if table2Version == 129 and indicatorOfParameter == 131: return 'ugrd' if table2Version == 129 and indicatorOfParameter == 130: return 'tgrd' if table2Version == 129 and indicatorOfParameter == 129: return 'zgrd' if table2Version == 129 and indicatorOfParameter == 128: return 'bvgrd' if table2Version == 129 and indicatorOfParameter == 127: return 'atgrd' if table2Version == 129 and indicatorOfParameter == 126: return '~' if table2Version == 129 and indicatorOfParameter == 125: return '~' if table2Version == 129 and indicatorOfParameter == 123: return '10fg6grd' if table2Version == 129 and indicatorOfParameter == 122: return 'mn2t6grd' if table2Version == 129 and indicatorOfParameter == 121: return 'mx2t6grd' if table2Version == 129 and indicatorOfParameter == 120: return '~' if table2Version == 129 and indicatorOfParameter == 119: return '~' if table2Version == 129 and indicatorOfParameter == 118: return '~' if table2Version == 129 and indicatorOfParameter == 117: return '~' if table2Version == 
129 and indicatorOfParameter == 116: return '~' if table2Version == 129 and indicatorOfParameter == 115: return '~' if table2Version == 129 and indicatorOfParameter == 114: return '~' if table2Version == 129 and indicatorOfParameter == 113: return '~' if table2Version == 129 and indicatorOfParameter == 112: return '~' if table2Version == 129 and indicatorOfParameter == 111: return '~' if table2Version == 129 and indicatorOfParameter == 110: return '~' if table2Version == 129 and indicatorOfParameter == 109: return '~' if table2Version == 129 and indicatorOfParameter == 108: return '~' if table2Version == 129 and indicatorOfParameter == 107: return '~' if table2Version == 129 and indicatorOfParameter == 106: return '~' if table2Version == 129 and indicatorOfParameter == 105: return '~' if table2Version == 129 and indicatorOfParameter == 104: return '~' if table2Version == 129 and indicatorOfParameter == 103: return '~' if table2Version == 129 and indicatorOfParameter == 102: return '~' if table2Version == 129 and indicatorOfParameter == 101: return '~' if table2Version == 129 and indicatorOfParameter == 100: return '~' if table2Version == 129 and indicatorOfParameter == 99: return '~' if table2Version == 129 and indicatorOfParameter ==
= BatchNormalization()(conv2_1) relu2_1 = Activation('relu')(bn2_1) conv2_2 = Conv1D(128, 4, padding='same')(relu2_1) bn2_2 = BatchNormalization()(conv2_2) relu2_2 = Activation('relu')(bn2_2) cnn2 = MaxPooling1D(pool_size=4)(relu2_2) # kernel_size = 5 conv3_1 = Conv1D(256, 5, padding='same')(embed) bn3_1 = BatchNormalization()(conv3_1) relu3_1 = Activation('relu')(bn3_1) conv3_2 = Conv1D(128, 5, padding='same')(relu3_1) bn3_2 = BatchNormalization()(conv3_2) relu3_2 = Activation('relu')(bn3_2) cnn3 = MaxPooling1D(pool_size=4)(relu3_2) # conc = Concatenate()([cnn1,cnn2,cnn3]) #print(conc.shape) #attention_mul = SelfAttention1DLayer(similarity="linear",dropout_rate=0.2)(conc) #print(attention_mul.shape) #flat = Flatten()(attention_mul) #drop = Dropout(0.5)(flat) #print(drop.shape) #attention_mul = SelfAttention2DLayer(similarity="linear",dropout_rate=0.2)(drop) #print(attention_mul.shape) #fc = Dense(2048)(drop) #bn = BatchNormalization(name='bn')(fc) model = Model(inputs = main_input, outputs = conc) #model.summary() return model def text_attent_cnnmodel_base(index,classes): base_model = text_attent_cnnmodel(classes) for layer in base_model.layers: layer.trainable=False layer.name = layer.name + str("_")+str(index) res = base_model.output #print res.shape model = Model(inputs=base_model.input, outputs=res) return model #es = EarlyStopping(monitor='val_loss', patience=1) #model.fit(x=X_train,y=Y_train,epochs=20,batch_size=32,validation_data=(X_val, Y_val),callbacks=[es]) #tt=build_pspnet(102, 50, input_shape=(224,224), activation='softmax') def mult_text_attent_cnnmodel(classes): capt1_model=text_attent_cnnmodel_base(0,classes) capt1_feature=capt1_model.output capt1_in=capt1_model.input capt2_model=text_attent_cnnmodel_base(1,classes) capt2_feature=capt2_model.output capt2_in=capt2_model.input capt3_model=text_attent_cnnmodel_base(2,classes) capt3_feature=capt3_model.output capt3_in=capt3_model.input capt4_model=text_attent_cnnmodel_base(3,classes) 
capt4_feature=capt4_model.output capt4_in=capt4_model.input capt5_model=text_attent_cnnmodel_base(4,classes) capt5_feature=capt5_model.output capt5_in=capt5_model.input capt6_model=text_attent_cnnmodel_base(5,classes) capt6_feature=capt6_model.output capt6_in=capt6_model.input capt7_model=text_attent_cnnmodel_base(6,classes) capt7_feature=capt7_model.output capt7_in=capt7_model.input capt8_model=text_attent_cnnmodel_base(7,classes) capt8_feature=capt8_model.output capt8_in=capt8_model.input capt9_model=text_attent_cnnmodel_base(8,classes) capt9_feature=capt9_model.output capt9_in=capt9_model.input capt10_model=text_attent_cnnmodel_base(9,classes) capt10_feature=capt10_model.output capt10_in=capt10_model.input outs = Concatenate()([capt1_feature, capt2_feature,capt3_feature, capt4_feature,capt5_feature,capt6_feature,capt7_feature, capt8_feature,capt9_feature, capt10_feature]) print(outs.shape) attention_mul = SelfAttention1DLayer(similarity="multiplicative",dropout_rate=0.2)(outs) print(attention_mul.shape) flat = Flatten()(attention_mul) drop = Dropout(0.5)(flat) #print(drop.shape) fc = Dense(2048)(drop) bn = BatchNormalization(name='bn')(fc) model = Model(inputs= [capt1_in,capt2_in,capt3_in,capt4_in,capt5_in,capt6_in,capt7_in,capt8_in,capt9_in,capt10_in], outputs=bn,name='mult_text_cnnmodel') model.summary() return model def true_attent_ResNet50(classes): base_model = RResNet50(input_shape=(224,224,3),classes=200) base_model.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5') for layer in base_model.layers: layer.trainable=False res = base_model.get_layer('activation_49').output #print(res.shape) #attention_mul = SelfAttention2DLayer(similarity="dot_product",dropout_rate=None)(res) #attention_mul = SelfAttention2DLayer(output_size=(7,7),similarity="additive",d_a=10,dropout_rate=None)(res) attention_mul = Attention2DLayer(similarity="dot_product",dropout_rate=None)(res) #print(attention_mul.shape) res = BatchNormalization()(attention_mul) model = 
Model(inputs=base_model.input, outputs=res,name='true-ResNet50') #model.summary() return model def fake2_attent_ResNet50(index,ki,classes): base_model1 = RResNet50(input_shape=(224,224,3),classes=200) base_model1.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5') for layer in base_model1.layers: layer.trainable=False layer.name = layer.name + str("_")+str(index) #base_model1.summary() Num1=(index+2)*49++ki*6 res_layer1='activation_'+str(Num1)+ str("_")+str(index) #print(res_layer1) res1 = base_model1.get_layer(res_layer1).output #res1 = SelfAttention2DLayer(similarity="dot_product",dropout_rate=None)(res1) res1 = Attention2DLayer(similarity="dot_product",dropout_rate=None)(res1) #res1 = SelfAttention2DLayer(output_size=(7,7),similarity="additive",d_a=10,dropout_rate=None)(res1) res1 = BatchNormalization()(res1) in1=base_model1.input base_model2 = RResNet50(input_shape=(224,224,3),classes=200) base_model2.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5') Index=index+1 for layer in base_model2.layers: layer.trainable=False layer.name = layer.name + str("_")+str(Index) #base_model2.summary() Num2=(Index+2)*49++ki*6 res_layer2='activation_'+str(Num2)+ str("_")+str(Index) #print(res_layer2) res2 = base_model2.get_layer(res_layer2).output #res2 = SelfAttention2DLayer(similarity="dot_product",dropout_rate=None)(res2) #res2 = SelfAttention2DLayer(output_size=(7,7),similarity="additive",d_a=10,dropout_rate=None)(res2) res2 = Attention2DLayer(similarity="dot_product",dropout_rate=None)(res2) res2 = BatchNormalization()(res2) in2=base_model2.input mult_fakeimage_feature =Average()([res1, res2]) #mult_fakeimage_feature =Concatenate()([res1, res2]) nIndex=index*2 caption_model=text_attent_cnnmodel_base(nIndex,classes) caption_feature=caption_model.output #caption_feature = SelfAttention1DLayer(similarity="dot_product",dropout_rate=None)(caption_feature) caption_feature = 
Attention1DLayer(similarity="dot_product",dropout_rate=None)(caption_feature) print(caption_feature.shape) #caption_feature = SelfAttention1DLayer(kernel_size=(16,384),similarity="additive",dropout_rate=None)(caption_feature) caption_feature = Flatten()(caption_feature) caption_feature = Dropout(0.5)(caption_feature) caption_feature = Dense(2048)(caption_feature) caption_feature = BatchNormalization(name='bn')(caption_feature) in3=caption_model.input merged=Add()([mult_fakeimage_feature,caption_feature]) Flat= Flatten()(merged) Dor=Dropout(0.1)(Flat) fc = Dense(2048)(Dor) model = Model(inputs= [in1,in2,in3], outputs=fc,name='fake2-ResNet50') return model def fake2_attent1_ResNet50(index,ki,classes): base_model1 = RResNet50(input_shape=(224,224,3),classes=200) base_model1.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5') for layer in base_model1.layers: layer.trainable=False layer.name = layer.name + str("_")+str(index) #base_model1.summary() Num1=(index+2)*49++ki*6 res_layer1='activation_'+str(Num1)+ str("_")+str(index) #print(res_layer1) res1 = base_model1.get_layer(res_layer1).output res1 = BatchNormalization()(res1) in1=base_model1.input base_model2 = RResNet50(input_shape=(224,224,3),classes=200) base_model2.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5') Index=index+1 for layer in base_model2.layers: layer.trainable=False layer.name = layer.name + str("_")+str(Index) #base_model2.summary() Num2=(Index+2)*49++ki*6 res_layer2='activation_'+str(Num2)+ str("_")+str(Index) #print(res_layer2) res2 = base_model2.get_layer(res_layer2).output res2 = BatchNormalization()(res2) in2=base_model2.input mult_fakeimage_feature =Average()([res1, res2]) #mult_fakeimage_feature = Reshape((49,2048))(mult_fakeimage_feature) #mult_fakeimage_feature =Concatenate()([res1, res2]) nIndex=index*2 caption_model=text_attent_cnnmodel_base(nIndex,classes) caption_feature=caption_model.output caption_feature= Flatten()(caption_feature) 
caption_feature=Dropout(0.5)(caption_feature) caption_feature = Dense(2048)(caption_feature) #caption_feature = SelfAttention1DLayer(similarity="dot_product",dropout_rate=None)(caption_feature) #caption_feature = Attention1DLayer(similarity="dot_product",dropout_rate=None)([mult_fakeimage_feature,caption_feature]) print(caption_feature.shape) #caption_feature = SelfAttention1DLayer(kernel_size=(16,384),similarity="additive",dropout_rate=None)(caption_feature) #caption_feature = Flatten()(caption_feature) #caption_feature = Dropout(0.5)(caption_feature) #caption_feature = Dense(2048)(caption_feature) #caption_feature = BatchNormalization(name='bn')(caption_feature) in3=caption_model.input merged=Add()([mult_fakeimage_feature,caption_feature]) #Flat= Flatten()(merged) #Dor=Dropout(0.1)(Flat) #fc = Dense(2048)(Dor) model = Model(inputs= [in1,in2,in3], outputs=merged,name='fake2-ResNet50') return model def fake1_attent_ResNet50(index,ki,classes): base_model1 = RResNet50(input_shape=(224,224,3),classes=200) base_model1.load_weights('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5') for layer in base_model1.layers: layer.trainable=False layer.name = layer.name + str("_")+str(index) base_model1.summary() Num1=(index+2)*49++ki*6 res_layer1='activation_'+str(Num1)+ str("_")+str(index) print(res_layer1) res1 = base_model1.get_layer(res_layer1).output res1 = SelfAttention2DLayer(similarity="additive",dropout_rate=0.5)(res1) res1 = BatchNormalization()(res1) in1=base_model1.input nIndex=index*2 caption_model=text_attent_cnnmodel_base(nIndex,classes) caption_feature=caption_model.output caption_feature = SelfAttention1DLayer(similarity="additive",dropout_rate=0.5)(caption_feature) caption_feature = Flatten()(caption_feature) caption_feature = Dropout(0.5)(caption_feature) caption_feature = Dense(2048)(caption_feature) caption_feature = BatchNormalization(name='bn')(caption_feature) in2=caption_model.input merged=Add()([res1,caption_feature]) Flat= Flatten()(merged) 
Dor=Dropout(0.1)(Flat) fc = Dense(2048)(Dor) model = Model(inputs= [in1,in2], outputs=fc,name='fake2-ResNet50') return model ###similarity="multiplicative""additive""linear""dot_product" def Muit_fake1_k1_attent_model(classes): print('bulid true image model') true_image_model = true_attent_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake3_Feature_model') print('bulid caption_fakeImage model') fakeCaption_model1=fake1_attent_ResNet50(0,0,classes) fakeCaption_featuer1=fakeCaption_model1.output in1=fakeCaption_model1.input #mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5]) merged=Add()([true_image_feature,fakeCaption_featuer1]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) model = Model(inputs= [in0,in1[0],in1[1]], outputs=Den,name='Muit_fake3__k1_Feature_model') model.summary() return model def Muit_fake1_k2_attent_model(classes): print('bulid true image model') true_image_model = true_attent_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake3_Feature_model') print('bulid caption_fakeImage model') fakeCaption_model1=fake2_attent_ResNet50(0,0,classes) fakeCaption_featuer1=fakeCaption_model1.output in1=fakeCaption_model1.input #mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5]) merged=Add()([true_image_feature,fakeCaption_featuer1]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) model = Model(inputs= [in0,in1[0],in1[1],in1[2]], outputs=Den,name='Muit_fake3__k2_Feature_model') model.summary() return model def Muit_fake1_k2_attent1_model(classes): 
print('bulid true image model') true_image_model = true_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake3_Feature_model') print('bulid caption_fakeImage model') fakeCaption_model1=fake2_attent1_ResNet50(0,0,classes) fakeCaption_featuer1=fakeCaption_model1.output in1=fakeCaption_model1.input #mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5]) #merged=Add()([true_image_feature,fakeCaption_featuer1]) merged = Attention2DLayer(similarity="additive",dropout_rate=0.1)([true_image_feature,fakeCaption_featuer1]) #merged = Attention2DLayer(output_size=(7,7),similarity="additive",dropout_rate=0.1)([true_image_feature,fakeCaption_featuer1]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) model = Model(inputs= [in0,in1[0],in1[1],in1[2]], outputs=Den,name='Muit_fake3__k2_Feature_model') model.summary() return model def Muit_fake1_k2_attent1_dot_model(classes): print('bulid true image model') true_image_model = true_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake3_Feature_model') print('bulid caption_fakeImage model') fakeCaption_model1=fake2_attent1_ResNet50(0,0,classes) fakeCaption_featuer1=fakeCaption_model1.output in1=fakeCaption_model1.input #mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5]) #merged=Add()([true_image_feature,fakeCaption_featuer1]) true_image_feature=Conv2D(49, kernel_size=(3,3), padding='same')(true_image_feature) fakeCaption_featuer1=Conv2D(49, kernel_size=(3,3), padding='same')(fakeCaption_featuer1) merged = Attention2DLayer(similarity="dot_product",dropout_rate=None)([true_image_feature,fakeCaption_featuer1]) #merged = 
Attention2DLayer(output_size=(7,7),similarity="additive",dropout_rate=0.1)([true_image_feature,fakeCaption_featuer1]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) model = Model(inputs= [in0,in1[0],in1[1],in1[2]], outputs=Den,name='Muit_fake3__k2_Feature_model') model.summary() return model def Muit_fake1_k2_Feature_model(classes): print('bulid true image model') true_image_model = true_attent_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake3_Feature_model') print('bulid caption_fakeImage model') fakeCaption_model1=fake2_ResNet50(0,0,classes) fakeCaption_featuer1=fakeCaption_model1.output in1=fakeCaption_model1.input #mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5]) merged=Add()([true_image_feature,fakeCaption_featuer1]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) model = Model(inputs= [in0,in1[0],in1[1],in1[2]], outputs=Den,name='Muit_fake3__k2_Feature_model') model.summary() return model def Muit_fake5_k2_Feature_model(classes): print('bulid true image model') true_image_model = true_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake3_Feature_model') print('bulid caption_fakeImage model') fakeCaption_model1=fake2_ResNet50(0,0,classes) fakeCaption_featuer1=fakeCaption_model1.output in1=fakeCaption_model1.input fakeCaption_model2=fake2_ResNet50(2,1,classes) fakeCaption_featuer2=fakeCaption_model2.output in2=fakeCaption_model2.input fakeCaption_model3=fake2_ResNet50(4,2,classes) fakeCaption_featuer3=fakeCaption_model3.output in3=fakeCaption_model3.input fakeCaption_model4=fake2_ResNet50(6,3,classes) 
fakeCaption_featuer4=fakeCaption_model4.output in4=fakeCaption_model4.input fakeCaption_model5=fake2_ResNet50(8,4,classes) fakeCaption_featuer5=fakeCaption_model5.output in5=fakeCaption_model5.input mult_fake3_caption_feature =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5]) merged=Add()([true_image_feature,mult_fake3_caption_feature]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) model = Model(inputs= [in0,in1[0],in1[1],in1[2],in2[0],in2[1],in2[2],in3[0],in3[1],in3[2],in4[0],in4[1],in4[2],in5[0],in5[1],in5[2]], outputs=Den,name='Muit_fake3__k2_Feature_model') model.summary() return model def finnal_muilt5Feature_k2_model(classes): print('bulid true image model') true_image_model = true_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake3_Feature_model') mult_fake2_caption_model=Muit_fake5_k2_Feature_model(classes) mult_fake2_caption_feature=mult_fake2_caption_model.output in1=mult_fake3_caption_model.input merged=Add()([true_image_feature,mult_fake3_caption_feature]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) m_model=Model(inputs=[in0,in1], outputs=Den) #plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True) m_model.summary() return m_model def fake3_ResNet50(index,classes): t1=index+0 fake_base_model1=fake_ResNet50_base(t1,input_shape = (224, 224, 3),classes=200) temp_feature1=fake_base_model1.output in1=fake_base_model1.input t2=index+1 fake_base_model2=fake_ResNet50_base(t2,input_shape = (224, 224, 3),classes=200) temp_feature2=fake_base_model2.output in2=fake_base_model2.input t3=index+2 fake_base_model3=fake_ResNet50_base(t3,input_shape = (224, 224, 3),classes=200) 
temp_feature3=fake_base_model3.output in3=fake_base_model3.input outs =Average()([temp_feature1, temp_feature2,temp_feature3]) model = Model(inputs= [in1,in2,in3], outputs=outs,name='fake-ResNet50') return model def caption_fake3_ResNet50(index,classes): print('merge the fake images') mult_fake_model=fake3_ResNet50(classes) mult_fakeimage_feature=mult_fake_model.output in1=mult_fake_model.input nIndex=index*3 caption_model=text_cnnmodel_base(nIndex,classes) caption_feature=caption_model.output in2=caption_model.input merged=Add()([mult_fakeimage_feature,caption_feature]) Flat= Flatten()(merged) Dor=Dropout(0.1)(Flat) fc = Dense(2048)(Dor) model=Model(inputs=[in1,in2],outputs=fc,name='caption_fake3_ResNet50') return model def Muit_fake3_Feature_model(classes): print('bulid caption_fakeImage model') fakeCaption_model1=caption_fake3_ResNet50(0,classes) fakeCaption_featuer1=fakeCaption_model1.output in1=fakeCaption_model1.input fakeCaption_model2=caption_fake3_ResNet50(1,classes) fakeCaption_featuer2=fakeCaption_model2.output in2=fakeCaption_model2.input fakeCaption_model3=caption_fake3_ResNet50(2,classes) fakeCaption_featuer3=fakeCaption_model3.output in3=fakeCaption_model3.input fakeCaption_model4=caption_fake3_ResNet50(3,classes) fakeCaption_featuer4=fakeCaption_model4.output in4=fakeCaption_model4.input fakeCaption_model5=caption_fake3_ResNet50(4,classes) fakeCaption_featuer5=fakeCaption_model5.output in5=fakeCaption_model5.input fakeCaption_model6=caption_fake3_ResNet50(5,classes) fakeCaption_featuer6=fakeCaption_model6.output in6=fakeCaption_model6.input fakeCaption_model7=caption_fake3_ResNet50(6,classes) fakeCaption_featuer7=fakeCaption_model7.output in7=fakeCaption_model7.input fakeCaption_model8=caption_fake3_ResNet50(7,classes) fakeCaption_featuer8=fakeCaption_model8.output in8=fakeCaption_model8.input fakeCaption_model9=caption_fake3_ResNet50(8,classes) fakeCaption_featuer9=fakeCaption_model9.output in9=fakeCaption_model9.input 
fakeCaption_model10=caption_fake3_ResNet50(9,classes) fakeCaption_featuer10=fakeCaption_model10.output in10=fakeCaption_model10.input outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5,fakeCaption_featuer6,fakeCaption_featuer7, fakeCaption_featuer8,fakeCaption_featuer9, fakeCaption_featuer10]) model = Model(inputs= [in1,in2,in3,in4,in5,in6,in7,in8,in9,in10], outputs=outs,name='Muit_fake3_Feature_model') return model def finnal_muilt3Feature_model(classes): print('bulid true image model') true_image_model = true_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake3_Feature_model') mult_fake3_caption_model=Muit_fake3_Feature_model(classes) mult_fake3_caption_feature=mult_fake3_caption_model.output in1=mult_fake3_caption_model.input merged=Add()([true_image_feature,mult_fake3_caption_feature]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) m_model=Model(inputs=[in0,in1], outputs=Den) #plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True) m_model.summary() return m_model def fake5_ResNet50(classes): fake_base_model1=fake_ResNet50_base55(0,input_shape = (224, 224, 3),classes=200) temp_feature1=fake_base_model1.output in1=fake_base_model1.input fake_base_model2=fake_ResNet50_base55(1,input_shape = (224, 224, 3),classes=200) temp_feature2=fake_base_model2.output in2=fake_base_model2.input fake_base_model3=fake_ResNet50_base55(2,input_shape = (224, 224, 3),classes=200) temp_feature3=fake_base_model3.output in3=fake_base_model3.input fake_base_model4=fake_ResNet50_base55(3,input_shape = (224, 224, 3),classes=200) temp_feature4=fake_base_model4.output in4=fake_base_model4.input fake_base_model5=fake_ResNet50_base55(4,input_shape = (224, 224, 3),classes=200) temp_feature5=fake_base_model5.output 
in5=fake_base_model5.input #ins =Add()([inputall[0], inputall[1],inputall[2], inputall[3],inputall[4], inputall[5],inputall[6], inputall[7],inputall[8], inputall[9]]) outs =Average()([temp_feature1, temp_feature2,temp_feature3, temp_feature4,temp_feature5]) model = Model(inputs= [in1,in2,in3,in4,in5], outputs=outs,name='fake-ResNet50') return model def caption_fake5_ResNet50(index,classes): print('merge the fake images') mult_fake_model=fake5_ResNet50(classes) mult_fakeimage_feature=mult_fake_model.output in1=mult_fake_model.input caption_model=text_cnnmodel_base(index,classes) caption_feature=caption_model.output in2=caption_model.input merged=Add()([mult_fakeimage_feature,caption_feature]) Flat= Flatten()(merged) Dor=Dropout(0.1)(Flat) fc = Dense(2048)(Dor) model=Model(inputs=[in1,in2],outputs=fc,name='caption_fake5_ResNet50') return model def Muit_fake5_Feature_model(classes): print('bulid caption_fakeImage model') fakeCaption_model1=caption_fake5_ResNet50(0,classes) fakeCaption_featuer1=fakeCaption_model1.output in1=fakeCaption_model1.input fakeCaption_model2=caption_fake5_ResNet50(1,classes) fakeCaption_featuer2=fakeCaption_model2.output in2=fakeCaption_model2.input fakeCaption_model3=caption_fake5_ResNet50(2,classes) fakeCaption_featuer3=fakeCaption_model3.output in3=fakeCaption_model3.input fakeCaption_model4=caption_fake5_ResNet50(3,classes) fakeCaption_featuer4=fakeCaption_model4.output in4=fakeCaption_model4.input fakeCaption_model5=caption_fake5_ResNet50(4,classes) fakeCaption_featuer5=fakeCaption_model5.output in5=fakeCaption_model5.input fakeCaption_model6=caption_fake5_ResNet50(5,classes) fakeCaption_featuer6=fakeCaption_model6.output in6=fakeCaption_model6.input fakeCaption_model7=caption_fake5_ResNet50(6,classes) fakeCaption_featuer7=fakeCaption_model7.output in7=fakeCaption_model7.input fakeCaption_model8=caption_fake5_ResNet50(7,classes) fakeCaption_featuer8=fakeCaption_model8.output in8=fakeCaption_model8.input 
fakeCaption_model9=caption_fake5_ResNet50(8,classes) fakeCaption_featuer9=fakeCaption_model9.output in9=fakeCaption_model9.input fakeCaption_model10=caption_fake5_ResNet50(9,classes) fakeCaption_featuer10=fakeCaption_model10.output in10=fakeCaption_model10.input outs =Average()([fakeCaption_featuer1, fakeCaption_featuer2,fakeCaption_featuer3,fakeCaption_featuer4, fakeCaption_featuer5,fakeCaption_featuer6,fakeCaption_featuer7, fakeCaption_featuer8,fakeCaption_featuer9, fakeCaption_featuer10]) model = Model(inputs= [in1,in2,in3,in4,in5,in6,in7,in8,in9,in10], outputs=outs,name='Muit_fake3_Feature_model') return model def finnal_muilt5Feature_model(classes): print('bulid true image model') true_image_model = true_ResNet50(classes) true_image_feature=true_image_model.output in0=true_image_model.input print('build Muit_fake5_Feature_model') mult_fake5_caption_model=Muit_fake5_Feature_model(classes) mult_fake5_caption_feature=mult_fake5_caption_model.output in1=mult_fake5_caption_model.input merged=Add()([true_image_feature,mult_fake3_caption_feature]) Flat= Flatten()(merged) Dor=Dropout(0.5)(Flat) fc = Dense(512)(Dor) bnn = BatchNormalization(name='bn2')(fc) Den=Dense(classes, activation='softmax')(bnn) m_model=Model(inputs=[in0,in1], outputs=Den) #plot_model(s_model, to_file='true-fake-restnet50-fine-20181104.png',show_shapes=True) m_model.summary() return m_model ###======================== PREPARE DATA ====================================### #build myself data generator #imgInfo_file_path: pickle (file name with path) #classInfo_file_path: pickle( file class) #image_direction: true image path #fackimage_direction: fack image path #txt_direction: text path #image_size: input image size of model #num: the value of K(StackMGAN++) tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',lower=True,split=" ") Alltxt=open('birds-dataset/birds/vacab.txt','r') Alltext=Alltxt.read() tokenizer.fit_on_texts(Alltext) vocab = tokenizer.word_index import cv2 def 
data_generator_5(imgInfo_file_path,classInfo_file_path,image_direction,txt_direction,fackimage0_direction,fackimage1_direction,image_size,BATCHSIZE,num): testfilenames = open(imgInfo_file_path,'rb') rmesf= pickle.load(testfilenames) testfilenames = open(classInfo_file_path,'rb') rmesc= pickle.load(testfilenames) txt1=[] txt2=[] txt3=[] txt4=[] txt5=[] fake01=[] fake02=[] fake03=[] fake04=[] fake05=[] fake11=[] fake12=[] fake13=[] fake14=[] fake15=[] images=[] labels=[] imagefile=[] textfile=[] iclass=[] imagename=[] num_of_examples=len(rmesf) for i in range(len(rmesf)): temp=rmesf[i] tempimagename=image_direction+temp #print(tempimagename) if os.path.isfile(tempimagename)==False: print('error! no such ture file: %s' %tempimagename) continue else: #class_001/image_00000.txt img=cv2.imread(tempimagename) img=cv2.resize(img,(image_size[0], image_size[1])) img=np.array(img) ttemp=rmesc[i] #print(ttemp) templable=int(ttemp) templable1=int(ttemp)-1 templable='%03d' % templable #print(templable) ftemp=temp[:-4] txtPath=txt_direction+'class_'+templable+'/'+ftemp+'.txt' #print(txtPath) if os.path.isfile(txtPath)==False: print('error! no such caption file: %s' %txtPath) continue else: temptxt=[] tempfake0=[] tempfake1=[] tmask0=False tmask1=False mm=0 for line in open(txtPath,'r'): if mm<5: fftemp=temp[:-4] fakefname0=fackimage0_direction+fftemp+'_sentence'+str(mm)+'.png' fakefname1=fackimage1_direction+fftemp+'_sentence'+str(mm)+'.png' mm=mm+1 #print(fakefname) if os.path.isfile(fakefname0)==False: print('error! no such fake0 image file: %s' %fakefname0) tmask0=False continue else: if os.path.isfile(fakefname1)==False: print('error! no such
from collections import Counter from Bio import SeqIO import numpy as np import warnings import math warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim') from gensim.models import Word2Vec Max_length = 100 # maximum length of used peptides def check_length(file): length = [] global Max_length with open(file) as f: for i in f: if i[0] != ">": length.append(len(i)) temp_max = max(length) if temp_max > Max_length: Max_length = temp_max def add(x, i): x_copy = x.copy() x_copy[i] = 1 return x_copy def BLOSUM62(seq): blosum62 = { 'A': [4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0], # A 'R': [-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3], # R 'N': [-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3], # N 'D': [-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3], # D 'C': [0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1], # C 'Q': [-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2], # Q 'E': [-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2], # E 'G': [0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3], # G 'H': [-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3], # H 'I': [-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3], # I 'L': [-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1], # L 'K': [-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2], # K 'M': [-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1], # M 'F': [-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1], # F 'P': [-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2], # P 'S': [1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2], # S 'T': [0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 
0], # T 'W': [-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3], # W 'Y': [-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1], # Y 'V': [0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4], # V '-': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], # - } pad_len = Max_length - len(seq) seqs = [] for aa in seq: seqs.append(blosum62[aa]) for _ in range(pad_len): seqs.append(blosum62['-']) return seqs def Count(aaSet, sequence): number = 0 for aa in sequence: if aa in aaSet: number = number + 1 cutoffNums = [1, math.floor(0.25 * number), math.floor(0.50 * number), math.floor(0.75 * number), number] cutoffNums = [i if i >= 1 else 1 for i in cutoffNums] code = [] for cutoff in cutoffNums: myCount = 0 for i in range(len(sequence)): if sequence[i] in aaSet: myCount += 1 if myCount == cutoff: code.append((i + 1) / len(sequence) * Max_length) break if myCount == 0: code.append(0) return code def CTDD(seq): group1 = { 'hydrophobicity_PRAM900101': 'RKEDQN', 'hydrophobicity_ARGP820101': 'QSTNGDE', 'hydrophobicity_ZIMJ680101': 'QNGSWTDERA', 'hydrophobicity_PONP930101': 'KPDESNQT', 'hydrophobicity_CASG920101': 'KDEQPSRNTG', 'hydrophobicity_ENGD860101': 'RDKENQHYP', 'hydrophobicity_FASG890101': 'KERSQD', 'normwaalsvolume': 'GASTPDC', 'polarity': 'LIFWCMVY', 'polarizability': 'GASDT', 'charge': 'KR', 'secondarystruct': 'EALMQKRH', 'solventaccess': 'ALFCGIVW' } group2 = { 'hydrophobicity_PRAM900101': 'GASTPHY', 'hydrophobicity_ARGP820101': 'RAHCKMV', 'hydrophobicity_ZIMJ680101': 'HMCKV', 'hydrophobicity_PONP930101': 'GRHA', 'hydrophobicity_CASG920101': 'AHYMLV', 'hydrophobicity_ENGD860101': 'SGTAW', 'hydrophobicity_FASG890101': 'NTPG', 'normwaalsvolume': 'NVEQIL', 'polarity': 'PATGS', 'polarizability': 'CPNVEQIL', 'charge': 'ANCQGHILMFPSTWYV', 'secondarystruct': 'VIYCWFT', 'solventaccess': 'RKQEND' } group3 = { 'hydrophobicity_PRAM900101': 'CLVIMFW', 'hydrophobicity_ARGP820101': 'LYPFIW', 
'hydrophobicity_ZIMJ680101': 'LPFYI', 'hydrophobicity_PONP930101': 'YMFWLCVI', 'hydrophobicity_CASG920101': 'FIWC', 'hydrophobicity_ENGD860101': 'CVLIMF', 'hydrophobicity_FASG890101': 'AYHWVMFLIC', 'normwaalsvolume': 'MHKFRYW', 'polarity': 'HQRKNED', 'polarizability': 'KMHFRYW', 'charge': 'DE', 'secondarystruct': 'GNPSD', 'solventaccess': 'MSPTHY' } groups = [group1, group2, group3] property = ( 'hydrophobicity_PRAM900101', 'hydrophobicity_ARGP820101', 'hydrophobicity_ZIMJ680101', 'hydrophobicity_PONP930101', 'hydrophobicity_CASG920101', 'hydrophobicity_ENGD860101', 'hydrophobicity_FASG890101', 'normwaalsvolume', 'polarity', 'polarizability', 'charge', 'secondarystruct', 'solventaccess') encodings = [] code = [] for p in property: code = code + Count(group1[p], seq) + Count(group2[p], seq) + Count(group3[p], seq) encodings.append(code) return encodings def DPC(seq): AA = 'ACDEFGHIKLMNPQRSTVWY' encodings = [] diPeptides = [aa1 + aa2 for aa1 in AA for aa2 in AA] # header = ['#'] + diPeptides # encodings.append(header) AADict = {} for i in range(len(AA)): AADict[AA[i]] = i # for i in fastas: # name, sequence = i[0], re.sub('-', '', i[1]) code = [] tmpCode = [0] * 400 for j in range(len(seq) - 2 + 1): tmpCode[AADict[seq[j]] * 20 + AADict[seq[j + 1]]] = tmpCode[AADict[seq[j]] * 20 + AADict[ seq[j + 1]]] + 1 if sum(tmpCode) != 0: tmpCode = [i / sum(tmpCode) for i in tmpCode] code = code + tmpCode encodings.append(code) return encodings def AAC(seq): AA = 'ACDEFGHIKLMNPQRSTVWY' # AA = 'ARNDCQEGHILKMFPSTWYV' encodings = [] # for i in fastas: # name, sequence = i[0], re.sub('-', '', i[1]) count = Counter(seq) for key in count: count[key] = count[key] / len(seq) code = [] for aa in AA: code.append(count[aa]) encodings.append(code) return encodings def ZSCALE(seq): zscale = { 'A': [0.24, -2.32, 0.60, -0.14, 1.30], # A 'C': [0.84, -1.67, 3.71, 0.18, -2.65], # C 'D': [3.98, 0.93, 1.93, -2.46, 0.75], # D 'E': [3.11, 0.26, -0.11, -0.34, -0.25], # E 'F': [-4.22, 1.94, 1.06, 0.54, 
-0.62], # F 'G': [2.05, -4.06, 0.36, -0.82, -0.38], # G 'H': [2.47, 1.95, 0.26, 3.90, 0.09], # H 'I': [-3.89, -1.73, -1.71, -0.84, 0.26], # I 'K': [2.29, 0.89, -2.49, 1.49, 0.31], # K 'L': [-4.28, -1.30, -1.49, -0.72, 0.84], # L 'M': [-2.85, -0.22, 0.47, 1.94, -0.98], # M 'N': [3.05, 1.62, 1.04, -1.15, 1.61], # N 'P': [-1.66, 0.27, 1.84, 0.70, 2.00], # P 'Q': [1.75, 0.50, -1.44, -1.34, 0.66], # Q 'R': [3.52, 2.50, -3.50, 1.99, -0.17], # R 'S': [2.39, -1.07, 1.15, -1.39, 0.67], # S 'T': [0.75, -2.18, -1.12, -1.46, -0.40], # T 'V': [-2.59, -2.64, -1.54, -0.85, -0.02], # V 'W': [-4.36, 3.94, 0.59, 3.44, -1.59], # W 'Y': [-2.54, 2.44, 0.43, 0.04, -1.47], # Y '-': [0.00, 0.00, 0.00, 0.00, 0.00], # - } encodings = [] # header = ['#'] # for p in range(1, len(fastas[0][1]) + 1): # for z in ('1', '2', '3', '4', '5'): # header.append('Pos' + str(p) + '.ZSCALE' + z) # encodings.append(header) # for i in fastas: # name, sequence = i[0], i[1] code = [] for _ in range(Max_length - len(seq)): code = code + zscale['-'] for aa in seq: code = code + zscale[aa] encodings.append(code) return encodings def TPC(seq): AA = 'ACDEFGHIKLMNPQRSTVWY' encodings = [] triPeptides = [aa1 + aa2 + aa3 for aa1 in AA for aa2 in AA for aa3 in AA] AADict = {} for i in range(len(AA)): AADict[AA[i]] = i # for i in fastas: # name, sequence = i[0], re.sub('-', '', i[1]) code = [] tmpCode = [0] * 8000 for j in range(len(seq) - 3 + 1): tmpCode[AADict[seq[j]] * 400 + AADict[seq[j + 1]] * 20 + AADict[seq[j + 2]]] = tmpCode[AADict[seq[j]] * 400 + AADict[seq[j + 1]] * 20 + AADict[seq[j
#! /bin/python3 import os import re import sys import HTSeq import argparse import textwrap import itertools import pandas as pd from tqdm import tqdm from pandas import Series, DataFrame from collections import defaultdict # Regular expression patterns for parsing SAM alignment read names (mainly use NOT_COLLAPSED) COLLAPSED_REGEXP = r'[PB\.\d]+\|[\w\:\(\)\-\+]+\|c\d+\/f(\d+)p(\d+)\/\d+' NOT_COLLAPSED_REGEXP = r'c\d+\/f(\d+)p(\d+)\/\d+' ############################## ### Data Loading functions ### ############################## def check_file_exist(file_path): """Make sure the file exists""" if not os.path.isfile(file_path): file_name = os.path.basename(file_path) print(f"{file_name} was not found. Check path") print(f"Full path to file: {file_path}") sys.exit(1) def load_gff(gff_path): check_file_exist(gff_path) gff_reader = HTSeq.GFF_Reader(gff_path, end_included=True) return gff_reader def load_bed(bed_path): check_file_exist(bed_path) bed_reader = HTSeq.BED_Reader(bed_path) # also call it a sam reader for ease return bed_reader def load_bam(bam_path): check_file_exist(bam_path) bam_reader = HTSeq.BAM_Reader(bam_path) return bam_reader def get_filename(bamPath): '''Gets the filename from each bamfile without the path or the extension. Can use as default sample names.''' path, filenameExt = os.path.split(bamPath) filename, ext = os.path.splitext(filenameExt) return filename def load_bam_list(bamPathList): '''Loads all the bams and returns a list of bamReader objects. 
Also returns a list of filenames''' bamReaderList = [] bamFilenameList = [] for bamPath in bamPathList: # Load bamReader objects from baths bamReader = load_bam(bamPath) bamReaderList.append(bamReader) # Grab filenames filename = get_filename(bamPath) bamFilenameList.append(filename) return bamReaderList, bamFilenameList ######################################### ### Read sorting based on input exons ### ######################################### def get_bed_list(bedReader): '''Obtain a list of genomic intervals from the bed file and return number of bed objects ''' bedList = [] for bed in bedReader: bedList.append(bed) bedNum = len(bedList) return bedList, bedNum def get_interval_range(bedList): '''Calculate the minimum start coordinate and maximum end coordinate of all the exons from the input bed. Variable is called intervalRange which is a genomic interval''' smallestStart = 0 largestEnd = 0 chrom = '' strand = '' for i, bed in enumerate(bedList): if i == 0: smallestStart = bed.iv.start largestEnd = bed.iv.end chrom = bed.iv.chrom strand = bed.iv.strand else: if bed.iv.start < smallestStart: smallestStart = bed.iv.start if bed.iv.end > largestEnd: largestEnd = bed.iv.end intervalRange = HTSeq.GenomicInterval(chrom, smallestStart, largestEnd, strand) return intervalRange def read_name_pattern(collapsed): '''Choose regular expression patters for format of pacbio read names''' if collapsed: name_pattern = COLLAPSED_REGEXP else: name_pattern = NOT_COLLAPSED_REGEXP return name_pattern def grab_fullLength_count(name_pattern, readAlnObj): '''Gets the full-length read count from the pacbio read name''' result = re.search(name_pattern, readAlnObj.read.name) fullLengthCount = int(result.group(1)) return fullLengthCount def sort_reads(bamReader, intervalRange): '''Only select reads that contain the full range of all exons provided in bed file. This sorting is rather strict because the read must contain the interval completely. 
If it overlaps the interval in a partial way it will be put in the readsOverlapInterval pile (total reads). Also keeps counts for stats.''' # Contains does not mean all exons match, they just contain the coordinates of the intervalRange readsContainInterval = [] # note name changes to lists readsOverlapInterval = [] # This is less string # Keep Track for stats readStatsDict = {} readsContainInterval_clusterCount = 0 readsContainInterval_flCount = 0 readsOverlapInterval_clusterCount = 0 readsOverlapInterval_flCount = 0 allReads_clusterCount = 0 allReads_flCounts = 0 for read in tqdm(bamReader): flCount = grab_fullLength_count(NOT_COLLAPSED_REGEXP, read) if read.iv.contains(intervalRange): readsContainInterval.append(read) readsContainInterval_clusterCount += 1 readsContainInterval_flCount += flCount if read.iv.overlaps(intervalRange): readsOverlapInterval.append(read) readsOverlapInterval_clusterCount += 1 readsOverlapInterval_flCount += flCount allReads_clusterCount += 1 allReads_flCounts += flCount # Fill readStatsDict readStatsDict['readsContainInterval_clusterCount'] = readsContainInterval_clusterCount readStatsDict['readsContainInterval_flCount'] = readsContainInterval_flCount readStatsDict['readsOverlapInterval_clusterCount'] = readsOverlapInterval_clusterCount readStatsDict['readsOverlapInterval_flCount'] = readsOverlapInterval_flCount readStatsDict['allReads_clusterCount'] = allReads_clusterCount readStatsDict['allReads_flCounts'] = allReads_flCounts return readsContainInterval, readsOverlapInterval, readStatsDict def sort_reads_from_bamReaderList(bamReaderList, bamFilenameList, intervalRange, addReadStart=0, addReadEnd=0): '''Sort the reads of the whole bamReaderList and return a dictionary of lists for each sample that contain the intervalRange and reads that do not. This interval can be adjusted using the addReadStart, addReadEnd. 
''' # Adjust intervalRange for sorting reads readIntervalRange = intervalRange.copy() # make a copy called readIntervalRange so not to clobber original range readIntervalRange.start += addReadStart readIntervalRange.end += addReadEnd # master dictionary of samples containing another dictionary of sorted sample reads (containsExons vs not) readDict = defaultdict(dict) for filename, bamReader in zip(bamFilenameList, bamReaderList): # Sort current bam's reads readsContainInterval, readsOverlapInterval, readStatsDict = sort_reads(bamReader, readIntervalRange) # Add it to the master dictionary readDict[filename] = {'readsContainInterval': readsContainInterval, 'readsOverlapInterval': readsOverlapInterval, 'readStatsDict': readStatsDict} return readDict, readIntervalRange ################################################## ### Determine splicing patterns of annotations ### ################################################## # Need to redesign script to remove the generation of a boolian matrix. It is # an unnessisary step that doesn't scale well. A better method would be to # directly store the patterns found in the annotation and bam reads def create_boolMatrix(bedList, bedNum): '''Create a binary matrix of all possible patterns of exons and convert to boolean matrix. 
BoolMatrix is used determining splicing patterns of transcripts and reads''' binMatrix = [] # create a binary matrix as reference for the boolean matrix # Fill the binary matrix with binary numbers zero to exonNum-1 # I call it a matrix, but its really a python list of lists for i in tqdm(range(2**bedNum)): # bedNum is the count of number of exons in bed file binNum = f'{i:0{bedNum}b}' binMatrix.append(binNum) # Create an boolean matrix of the same size as the binary one, but all 'False' entries boolMatrix=[] for i in tqdm(range(2**bedNum)): l = [] for j in range(bedNum): l.append(False) boolMatrix.append(l) # Create boolean matrix full of False # Now change the entries to match the binary matrix 1 = True, 0 = False for i in tqdm(range(2**bedNum)): for j in range(bedNum): if binMatrix[i][j] == '0': boolMatrix[i][j] = False elif binMatrix[i][j] == '1': boolMatrix[i][j] = True return boolMatrix # Lets return the boolMatrix too so we can use it for the transcript dictionary def sort_trans(gffReader, intervalRange, addTransStart=0, addTransEnd=0): # Only select transcripts that contain the full range of all exons profided # (start of ftransIntervalRangest exon and end of last exon transContainInterval = [] # contain not mean all exons match, just included in the interval transOverlapInterval = [] # Adjust the interval range transIntervalRange = intervalRange.copy() # make a copy transIntervalRange.start += addTransStart transIntervalRange.end += addTransEnd print('Sorting transcripts...') for annot in tqdm(gffReader): if annot.type == 'transcript': # only look at annotated transcripts this time if annot.iv.contains(transIntervalRange): transContainInterval.append(annot) if annot.iv.overlaps(transIntervalRange): transOverlapInterval.append(annot) return transContainInterval, transOverlapInterval, transIntervalRange def group_trans_exons(transContainInterval, gffReader): transDict = defaultdict(list) print('Grouping exons by transcript...') for annot in gffReader: if 
annot.type == 'exon': # Only look at exons for trans in transContainInterval: if trans.attr['transcript_id'] == annot.attr['transcript_id']: transDict[trans.attr['transcript_id']].append(annot) return transDict def check_trans_patterns(transDict, bedList, bedNum, boolMatrix): # Dict similar to the readsBoolCountDict, except instead of fl_read counts # It will contain a list of transcript ids that share that splice pattern transBoolDict = {} print('Checking transcript patterns...') for i in tqdm(range(2**bedNum)): # bedNum = number of exons in the bed file # Turn the list into a tuple, and assign it a '' string as default transBoolDict[tuple(boolMatrix[i])] = '' # Now fill up the annotated ones with the transcript Ids for transId, transExonList in tqdm(transDict.items()): matchList = [False] * bedNum # Store matched exons (Default False) for i, bed in enumerate(bedList): for transExon in transExonList: if bed.iv.overlaps(transExon.iv): matchList[i] = True matchTuple = tuple(matchList) if transBoolDict[matchTuple] == '': transBoolDict[matchTuple] = f'{transId}' # the first, simple assignment else: transBoolDict[matchTuple] += f', {transId}' # otherwise add a comma and space # # Fill up the non-annotated patterns # for boolKey, transIdList in transBoolDict.items(): # if not transIdList: # transBoolDict[boolKey] += 'NaN' return transBoolDict def gen_boolTransDf(transBoolDict, boolMatrix, bedList): boolTransDf = pd.DataFrame(boolMatrix) # turn boolMatrix into a dataframe # Get names of bed/exons from bedlist and assign as column names bedNames = [] for bed in bedList: bedNames.append(bed.name) boolTransDf.columns = bedNames for i, (patternKey, transIdList) in enumerate(transBoolDict.items()): boolTransDf.at[i, 'transcript_ids'] = transIdList return boolTransDf ####################################################### ### Determine and quantify splice patterns in reads ### ####################################################### def rm_indels_in_cigar(cigar_obj): """Pacbio 
data has many long-reads with insertion and deletion (InDel) mistakes. In the sam alignment file, we want to get coordinates of whole exons in order to determine if they are annotated or not. However, InDels split up exon coordinates ('M') in the cigar
expiration(self) -> Optional['outputs.BucketLifecycleConfigurationV2RuleExpiration']: """ Configuration block that specifies the expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker documented below. """ return pulumi.get(self, "expiration") @property @pulumi.getter def filter(self) -> Optional['outputs.BucketLifecycleConfigurationV2RuleFilter']: """ Configuration block used to identify objects that a Lifecycle Rule applies to documented below. """ return pulumi.get(self, "filter") @property @pulumi.getter(name="noncurrentVersionExpiration") def noncurrent_version_expiration(self) -> Optional['outputs.BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration']: """ Configuration block that specifies when noncurrent object versions expire documented below. """ return pulumi.get(self, "noncurrent_version_expiration") @property @pulumi.getter(name="noncurrentVersionTransitions") def noncurrent_version_transitions(self) -> Optional[Sequence['outputs.BucketLifecycleConfigurationV2RuleNoncurrentVersionTransition']]: """ Set of configuration blocks that specify the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class documented below. """ return pulumi.get(self, "noncurrent_version_transitions") @property @pulumi.getter def prefix(self) -> Optional[str]: """ Prefix identifying one or more objects to which the rule applies. This has been deprecated by Amazon S3 and `filter` should be used instead. """ return pulumi.get(self, "prefix") @property @pulumi.getter def transitions(self) -> Optional[Sequence['outputs.BucketLifecycleConfigurationV2RuleTransition']]: """ Set of configuration blocks that specify when an Amazon S3 object transitions to a specified storage class documented below. 
""" return pulumi.get(self, "transitions") @pulumi.output_type class BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "daysAfterInitiation": suggest = "days_after_initiation" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketLifecycleConfigurationV2RuleAbortIncompleteMultipartUpload.__key_warning(key) return super().get(key, default) def __init__(__self__, *, days_after_initiation: Optional[int] = None): """ :param int days_after_initiation: The number of days after which Amazon S3 aborts an incomplete multipart upload. """ if days_after_initiation is not None: pulumi.set(__self__, "days_after_initiation", days_after_initiation) @property @pulumi.getter(name="daysAfterInitiation") def days_after_initiation(self) -> Optional[int]: """ The number of days after which Amazon S3 aborts an incomplete multipart upload. """ return pulumi.get(self, "days_after_initiation") @pulumi.output_type class BucketLifecycleConfigurationV2RuleExpiration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "expiredObjectDeleteMarker": suggest = "expired_object_delete_marker" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketLifecycleConfigurationV2RuleExpiration. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketLifecycleConfigurationV2RuleExpiration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketLifecycleConfigurationV2RuleExpiration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, date: Optional[str] = None, days: Optional[int] = None, expired_object_delete_marker: Optional[bool] = None): """ :param str date: The date the object is to be moved or deleted. Should be in GMT ISO 8601 Format. :param int days: The lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. :param bool expired_object_delete_marker: Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action. """ if date is not None: pulumi.set(__self__, "date", date) if days is not None: pulumi.set(__self__, "days", days) if expired_object_delete_marker is not None: pulumi.set(__self__, "expired_object_delete_marker", expired_object_delete_marker) @property @pulumi.getter def date(self) -> Optional[str]: """ The date the object is to be moved or deleted. Should be in GMT ISO 8601 Format. """ return pulumi.get(self, "date") @property @pulumi.getter def days(self) -> Optional[int]: """ The lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. """ return pulumi.get(self, "days") @property @pulumi.getter(name="expiredObjectDeleteMarker") def expired_object_delete_marker(self) -> Optional[bool]: """ Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. If set to `true`, the delete marker will be expired; if set to `false` the policy takes no action. 
""" return pulumi.get(self, "expired_object_delete_marker") @pulumi.output_type class BucketLifecycleConfigurationV2RuleFilter(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "and": suggest = "and_" elif key == "objectSizeGreaterThan": suggest = "object_size_greater_than" elif key == "objectSizeLessThan": suggest = "object_size_less_than" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketLifecycleConfigurationV2RuleFilter. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketLifecycleConfigurationV2RuleFilter.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketLifecycleConfigurationV2RuleFilter.__key_warning(key) return super().get(key, default) def __init__(__self__, *, and_: Optional['outputs.BucketLifecycleConfigurationV2RuleFilterAnd'] = None, object_size_greater_than: Optional[int] = None, object_size_less_than: Optional[int] = None, prefix: Optional[str] = None, tag: Optional['outputs.BucketLifecycleConfigurationV2RuleFilterTag'] = None): """ :param 'BucketLifecycleConfigurationV2RuleFilterAndArgs' and_: Configuration block used to apply a logical `AND` to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the `and` block. :param int object_size_greater_than: Minimum object size to which the rule applies. :param int object_size_less_than: Maximum object size to which the rule applies. :param str prefix: Prefix identifying one or more objects to which the rule applies. :param 'BucketLifecycleConfigurationV2RuleFilterTagArgs' tag: A configuration block for specifying a tag key and value documented below. 
""" if and_ is not None: pulumi.set(__self__, "and_", and_) if object_size_greater_than is not None: pulumi.set(__self__, "object_size_greater_than", object_size_greater_than) if object_size_less_than is not None: pulumi.set(__self__, "object_size_less_than", object_size_less_than) if prefix is not None: pulumi.set(__self__, "prefix", prefix) if tag is not None: pulumi.set(__self__, "tag", tag) @property @pulumi.getter(name="and") def and_(self) -> Optional['outputs.BucketLifecycleConfigurationV2RuleFilterAnd']: """ Configuration block used to apply a logical `AND` to two or more predicates. The Lifecycle Rule will apply to any object matching all of the predicates configured inside the `and` block. """ return pulumi.get(self, "and_") @property @pulumi.getter(name="objectSizeGreaterThan") def object_size_greater_than(self) -> Optional[int]: """ Minimum object size to which the rule applies. """ return pulumi.get(self, "object_size_greater_than") @property @pulumi.getter(name="objectSizeLessThan") def object_size_less_than(self) -> Optional[int]: """ Maximum object size to which the rule applies. """ return pulumi.get(self, "object_size_less_than") @property @pulumi.getter def prefix(self) -> Optional[str]: """ Prefix identifying one or more objects to which the rule applies. """ return pulumi.get(self, "prefix") @property @pulumi.getter def tag(self) -> Optional['outputs.BucketLifecycleConfigurationV2RuleFilterTag']: """ A configuration block for specifying a tag key and value documented below. """ return pulumi.get(self, "tag") @pulumi.output_type class BucketLifecycleConfigurationV2RuleFilterAnd(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "objectSizeGreaterThan": suggest = "object_size_greater_than" elif key == "objectSizeLessThan": suggest = "object_size_less_than" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketLifecycleConfigurationV2RuleFilterAnd. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketLifecycleConfigurationV2RuleFilterAnd.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketLifecycleConfigurationV2RuleFilterAnd.__key_warning(key) return super().get(key, default) def __init__(__self__, *, object_size_greater_than: Optional[int] = None, object_size_less_than: Optional[int] = None, prefix: Optional[str] = None, tags: Optional[Mapping[str, str]] = None): """ :param int object_size_greater_than: Minimum object size to which the rule applies. :param int object_size_less_than: Maximum object size to which the rule applies. :param str prefix: Prefix identifying one or more objects to which the rule applies. This has been deprecated by Amazon S3 and `filter` should be used instead. """ if object_size_greater_than is not None: pulumi.set(__self__, "object_size_greater_than", object_size_greater_than) if object_size_less_than is not None: pulumi.set(__self__, "object_size_less_than", object_size_less_than) if prefix is not None: pulumi.set(__self__, "prefix", prefix) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="objectSizeGreaterThan") def object_size_greater_than(self) -> Optional[int]: """ Minimum object size to which the rule applies. """ return pulumi.get(self, "object_size_greater_than") @property @pulumi.getter(name="objectSizeLessThan") def object_size_less_than(self) -> Optional[int]: """ Maximum object size to which the rule applies. """ return pulumi.get(self, "object_size_less_than") @property @pulumi.getter def prefix(self) -> Optional[str]: """ Prefix identifying one or more objects to which the rule applies. This has been deprecated by Amazon S3 and `filter` should be used instead. 
""" return pulumi.get(self, "prefix") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: return pulumi.get(self, "tags") @pulumi.output_type class BucketLifecycleConfigurationV2RuleFilterTag(dict): def __init__(__self__, *, key: str, value: str): """ :param str key: Name of the object key. :param str value: Value of the tag. """ pulumi.set(__self__, "key", key) pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> str: """ Name of the object key. """ return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> str: """ Value of the tag. """ return pulumi.get(self, "value") @pulumi.output_type class BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "newerNoncurrentVersions": suggest = "newer_noncurrent_versions" elif key == "noncurrentDays": suggest = "noncurrent_days" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketLifecycleConfigurationV2RuleNoncurrentVersionExpiration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None)
from __future__ import absolute_import, print_function, division

import os
import numpy
import warnings

from six import integer_types

import theano
from theano import Apply, tensor, config, Variable
from theano.scalar import as_scalar, constant, Log
from theano.gradient import DisconnectedType, grad_not_implemented
from theano.gof import Optimizer, local_optimizer, COp
from theano.gof.type import CDataType
from theano.compile import optdb
from theano.compile.ops import shape_i
from theano.tensor.nnet import LogSoftmax, SoftmaxGrad
from theano.tensor.nnet.abstract_conv import (get_conv_output_shape,
                                              assert_conv_shape)
from theano.tensor.signal.pool import (Pool, MaxPoolGrad, AveragePoolGrad)
from theano.tensor.nnet import bn
from theano.sandbox.cuda.type import CudaNdarrayType
from theano.sandbox.cuda import GpuOp, dnn_available
from theano.sandbox.cuda import dnn_version as version
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable,
                                           host_from_gpu, gpu_contiguous,
                                           HostFromGpu, gpu_alloc, GpuAlloc,
                                           gpu_alloc_empty, GpuAllocEmpty,
                                           GpuElemwise)
from theano.sandbox.cuda.blas import (GpuConv, GpuDownsampleFactorMax,
                                      GpuDownsampleFactorMaxGrad)
from theano.sandbox.cuda.nnet import GpuSoftmax
from theano.sandbox.cuda.opt_util import (alpha_merge, output_merge,
                                          pad_dims, unpad_dims)
from theano.sandbox.cuda import gpu_seqopt, register_opt, register_inplace
from theano.sandbox.cuda.nvcc_compiler import NVCC_compiler
from theano.tensor.nnet.abstract_conv import (AbstractConv2d,
                                              AbstractConv2d_gradWeights,
                                              AbstractConv2d_gradInputs,
                                              AbstractConv3d,
                                              AbstractConv3d_gradWeights,
                                              AbstractConv3d_gradInputs)


def c_define_tensor_desc(desc):
    """Return C code declaring a cudnnTensorDescriptor_t named *desc*."""
    return """
cudnnTensorDescriptor_t %(desc)s;
""" % dict(desc=desc)


def c_init_tensor_desc(desc, err, fail):
    """Return C code that allocates tensor descriptor *desc*.

    On cudnnCreateTensorDescriptor failure, sets a Python MemoryError with
    the cuDNN error string and jumps to the *fail* label.
    """
    return """
%(desc)s = NULL;
if ((%(err)s = cudnnCreateTensorDescriptor(&%(desc)s)) != CUDNN_STATUS_SUCCESS) {
PyErr_Format(PyExc_MemoryError, "could not allocate tensor descriptor "
             ": %%s", cudnnGetErrorString(%(err)s));
%(fail)s
}
""" % dict(desc=desc, err=err, fail=fail)


def c_set_tensor4d(var, desc, err, fail):
    """Return C code filling tensor descriptor *desc* from CudaNdarray *var*.

    Zero strides (broadcast dims) are replaced by the contiguous stride
    computed from the remaining dimensions, since cuDNN rejects 0 strides.
    """
    return """
{
    int str0, str1, str2, str3;
    str3 = CudaNdarray_HOST_STRIDES(%(var)s)[3]?CudaNdarray_HOST_STRIDES(%(var)s)[3]:1;
    str2 = CudaNdarray_HOST_STRIDES(%(var)s)[2]?CudaNdarray_HOST_STRIDES(%(var)s)[2]:CudaNdarray_HOST_DIMS(%(var)s)[3];
    str1 = CudaNdarray_HOST_STRIDES(%(var)s)[1]?CudaNdarray_HOST_STRIDES(%(var)s)[1]:CudaNdarray_HOST_DIMS(%(var)s)[2]*CudaNdarray_HOST_DIMS(%(var)s)[3];
    str0 = CudaNdarray_HOST_STRIDES(%(var)s)[0]?CudaNdarray_HOST_STRIDES(%(var)s)[0]:CudaNdarray_HOST_DIMS(%(var)s)[2]*CudaNdarray_HOST_DIMS(%(var)s)[3]*CudaNdarray_HOST_DIMS(%(var)s)[1];
%(err)s = cudnnSetTensor4dDescriptorEx(
    %(desc)s, CUDNN_DATA_FLOAT,
    CudaNdarray_HOST_DIMS(%(var)s)[0],
    CudaNdarray_HOST_DIMS(%(var)s)[1],
    CudaNdarray_HOST_DIMS(%(var)s)[2],
    CudaNdarray_HOST_DIMS(%(var)s)[3],
    str0, str1, str2, str3
);
if (%(err)s != CUDNN_STATUS_SUCCESS) {
    PyErr_Format(PyExc_RuntimeError,
                 "could not set tensor4d descriptor: %%s"
                 "shapes=%%d %%d %%d %%d strides=%%d %%d %%d %%d",
                 cudnnGetErrorString(%(err)s),
                 CudaNdarray_HOST_DIMS(%(var)s)[0],
                 CudaNdarray_HOST_DIMS(%(var)s)[1],
                 CudaNdarray_HOST_DIMS(%(var)s)[2],
                 CudaNdarray_HOST_DIMS(%(var)s)[3],
                 str0, str1, str2, str3
    );
    %(fail)s
}
}
""" % dict(var=var, err=err, desc=desc, fail=fail)


def c_clean_tensor_desc(desc):
    """Return C code destroying tensor descriptor *desc* when non-NULL."""
    return """
if(%(desc)s!= NULL)
cudnnDestroyTensorDescriptor(%(desc)s);
""" % dict(desc=desc)


class DnnBase(GpuOp, COp):
    """
    Creates a handle for cudnn and pulls in the cudnn libraries and headers.
    """
    # dnn does not know about broadcasting, so we do not need to assert
    # the input broadcasting pattern.
    check_broadcast = False

    def __init__(self):
        COp.__init__(self, "dnn_base.c")

    def c_headers(self):
        return ['cudnn.h', 'cudnn_helper.h']

    def c_header_dirs(self):
        return [os.path.dirname(__file__), config.dnn.include_path]

    def c_libraries(self):
        return ['cudnn']

    def c_lib_dirs(self):
        return [config.dnn.library_path]

    def c_compile_args(self):
        # Embed the cuDNN library path so the compiled module finds it at load time.
        return ['-Wl,-rpath,' + config.dnn.library_path]

    def c_code_cache_version(self):
        # Include the cuDNN version so the C code cache is invalidated on upgrade.
        return (super(DnnBase, self).c_code_cache_version(), version())


class GpuDnnConvDesc(GpuOp):
    """
    This Op builds a convolution descriptor for use in the other
    convolution operations.

    See the doc of :func:`dnn_conv` for a description of the parameters.
    """

    __props__ = ('border_mode', 'subsample', 'conv_mode', 'precision')

    def c_headers(self):
        return ['cudnn.h', 'cudnn_helper.h']

    def c_header_dirs(self):
        return [os.path.dirname(__file__), config.dnn.include_path]

    def c_libraries(self):
        return ['cudnn']

    def c_lib_dirs(self):
        return [config.dnn.library_path]

    def c_compiler(self):
        return NVCC_compiler

    def do_constant_folding(self, node):
        # Never constant-fold: the output is an opaque CDataType handle.
        return False

    def __init__(self, border_mode, subsample=(1, 1), conv_mode='conv',
                 precision="float32"):
        # A single int is broadcast to one pad value per spatial dimension.
        if isinstance(border_mode, integer_types):
            border_mode = (border_mode,) * len(subsample)
        if isinstance(border_mode, tuple):
            assert len(border_mode) == len(subsample)
            border_mode = tuple(map(int, border_mode))
        if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or
                border_mode in ('valid', 'full', 'half')):
            raise ValueError(
                'invalid border_mode {}, which must be either '
                '"valid", "full", "half", an integer or a pair of'
                ' integers'.format(border_mode))
        self.border_mode = border_mode
        assert len(subsample) in [2, 3]
        self.subsample = subsample
        assert conv_mode in ('conv', 'cross')
        self.conv_mode = conv_mode
        assert precision in ['float16', 'float32', 'float64']
        self.precision = precision

    def make_node(self, img_shape, kern_shape):
        # Both inputs are 1D int64 shape vectors, not the actual tensors.
        if img_shape.type.ndim != 1 or img_shape.type.dtype != 'int64':
            raise TypeError('img must be 1D shape tensor')
        if kern_shape.type.ndim != 1 or kern_shape.type.dtype != 'int64':
            raise TypeError('kern must be 1D shape tensor')

        node = Apply(self, [img_shape, kern_shape],
                     [CDataType("cudnnConvolutionDescriptor_t",
                                freefunc="cudnnDestroyConvolutionDescriptor")()])
        # DebugMode cannot compare the values of CDataType variables, so by
        # default it returns False all the time. To prevent DebugMode from
        # complaining because of the MergeOptimizer, we make this variable
        # always compare to True.
        out = node.outputs[0]
        out.tag.values_eq_approx = tensor.type.values_eq_approx_always_true
        return node

    def c_code(self, node, name, inputs, outputs, sub):
        img_shape, kern_shape = inputs
        desc, = outputs

        nb_dim = len(self.subsample)

        if isinstance(self.border_mode, tuple):
            pad_desc = tuple(map(int, self.border_mode))
            assert min(pad_desc) >= 0
            bmode = 1
        else:
            pad_desc = [0] * nb_dim

            if self.border_mode == "valid":
                bmode = 1
            elif self.border_mode == "half":
                bmode = 2
            else:
                assert self.border_mode == "full"
                bmode = 0

        if self.conv_mode == 'conv':
            conv_flag = 'CUDNN_CONVOLUTION'
        else:
            conv_flag = 'CUDNN_CROSS_CORRELATION'

        pad_str = ", ".join([str(s) for s in pad_desc])
        subsample_str = ", ".join([str(s) for s in self.subsample])
        upscale_str = ", ".join(["1"] * nb_dim)

        if self.precision == 'float16':
            precision = 'CUDNN_DATA_HALF'
        elif self.precision == 'float32':
            precision = 'CUDNN_DATA_FLOAT'
        else:
            assert self.precision == 'float64'
            precision = 'CUDNN_DATA_DOUBLE'

        return """
{
  cudnnStatus_t err;

  if ((err = cudnnCreateConvolutionDescriptor(&%(desc)s)) != CUDNN_STATUS_SUCCESS) {
    PyErr_Format(PyExc_MemoryError, "could not allocate convolution "
                 "descriptor: %%s", cudnnGetErrorString(err));
    %(fail)s
  }

#if defined(CUDNN_VERSION) && CUDNN_VERSION >= 30

  int pad[%(nb_dim)d] = {%(pad_str)s};
  int subsample[%(nb_dim)d] = {%(subsample_str)s};
  int upscale[%(nb_dim)d] = {%(upscale_str)s};

  // Adjust padding values if using full convolution
  if (%(bmode)d == 0) {
    pad[0] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 2) - 1;
    pad[1] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 3) - 1;
    if (%(nb_dim)d >= 3) {
        pad[2] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 4) - 1;
    }
  }
  // Adjust padding values if using half convolution
  else if (%(bmode)d == 2) {
    pad[0] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 2) / 2;
    pad[1] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 3) / 2;
    if (%(nb_dim)d >= 3) {
        pad[2] = *(npy_int64 *)PyArray_GETPTR1(%(kern_shape)s, 4) / 2;
    }
  }

  err = cudnnSetConvolutionNdDescriptor(
  %(desc)s,
  %(nb_dim)d,
  pad, subsample, upscale,
  %(conv_flag)s, %(precision)s
  );
#else
  PyErr_Format(PyExc_RuntimeError, "could not set op descriptor: CUDNN_VERSION must be >= 30");
#endif
  if (err != CUDNN_STATUS_SUCCESS) {
    PyErr_Format(PyExc_RuntimeError, "could not set op descriptor: %%s",
                 cudnnGetErrorString(err));
    %(fail)s
  }
}
""" % dict(name=name, img_shape=img_shape, kern_shape=kern_shape, desc=desc,
           bmode=bmode, conv_flag=conv_flag, fail=sub['fail'],
           pad_str=pad_str, subsample_str=subsample_str,
           upscale_str=upscale_str, nb_dim=nb_dim, precision=precision)

    def c_code_cache_version(self):
        return (4, version())


# scalar constants
_zero = constant(numpy.asarray(0.0, dtype='float32'))
_one = constant(numpy.asarray(1.0, dtype='float32'))


def ensure_float(val, default, name):
    """Coerce *val* to a float32 theano scalar; return a clone of *default*
    when *val* is None. Raises TypeError for non-scalar or non-float32 input."""
    if val is None:
        return default.clone()
    if not isinstance(val, Variable):
        val = constant(val)
    if hasattr(val, 'ndim') and val.ndim == 0:
        val = as_scalar(val)
    if not isinstance(val.type, theano.scalar.Scalar):
        raise TypeError("%s: expected a scalar value" % (name,))
    if not val.type.dtype == 'float32':
        raise TypeError("%s: type is not float32" % (name,))
    return val


class GpuDnnConv(DnnBase, COp):
    """
    The forward convolution.

    Parameters
    ----------
    image
    kernel
    descr
        The convolution descriptor.
    workmem
        *deprecated*, use parameter algo instead.
    algo : {'none', 'small', 'large', 'fft', 'fft_tiling', 'guess_once',
            'winograd', 'guess_on_shape_change', 'time_once',
            'time_on_shape_change'}
        Default is the value of :attr:`config.dnn.conv.algo_fwd`.

    """

    __props__ = ('algo', 'inplace')
    __input_name__ = ('image', 'kernel', 'output',
                      'descriptor', 'alpha', 'beta')

    def __init__(self, workmem=None, inplace=False, algo=None):
        COp.__init__(self, ["dnn_base.c", "dnn_conv_base.c", "dnn_fwd.c"],
                     "APPLY_SPECIFIC(conv_fwd)")

        # 'workmem' is the old name of 'algo'; accept it but warn.
        if workmem is not None:
            warnings.warn(("GpuDnnConv: parameter 'workmem' is deprecated. "
                           "Use 'algo' instead."), stacklevel=3)
            assert algo is None
            self.algo = workmem
        else:
            if algo is None:
                algo = config.dnn.conv.algo_fwd
            self.algo = algo

        self.inplace = inplace
        if self.inplace:
            # Output (input index 2) is overwritten in place.
            self.destroy_map = {0: [2]}

        if version() < (5000, 5000):
            if self.algo == 'winograd':
                raise RuntimeError("cuDNN winograd convolution requires "
                                   "cuDNN v5 or more recent")

        assert self.algo in ['none', 'small', 'large', 'fft', 'fft_tiling',
                             'winograd', 'guess_once', 'guess_on_shape_change',
                             'time_once', 'time_on_shape_change']

    def __setstate__(self, d):
        # Migrate old pickles that predate the 'algo'/'inplace' attributes.
        self.__dict__.update(d)
        if not hasattr(self, 'algo'):
            if hasattr(self, 'workmem'):
                self.algo = self.workmem
            else:
                self.algo = config.dnn.conv.algo_fwd
        if not hasattr(self, 'inplace'):
            self.inplace = False
        # Work around to reload old pickle.
        # We need to find the new file name and reload c code.
        self.load_c_code(["dnn_base.c", "dnn_conv_base.c", "dnn_fwd.c"])

    def get_op_params(self):
        # Translate self.algo/self.inplace into C #defines for the COp sources.
        if self.inplace:
            inpl_def = [('CONV_INPLACE', '1')]
        else:
            inpl_def = []

        choose_alg = '0'
        choose_alg_once = '0'
        choose_alg_time = '0'
        if version() == -1:
            alg = "0"
        else:
            if self.algo == 'none':
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM'
            elif self.algo == 'small':
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM'
            elif self.algo == 'large':
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_GEMM'
            elif self.algo == 'direct':
                # need v2
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_DIRECT'
            elif self.algo == 'fft':
                # need v3
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_FFT'
            elif self.algo == 'fft_tiling':
                # need v4 for conv2d, need v5 for conv3d
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING'
            elif self.algo == 'winograd':
                # need v5
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD'
            elif self.algo in ['guess_once', 'guess_on_shape_change']:
                # The convolution implementation should be choosen according
                # to a heuristic
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM'
                choose_alg = '1'
                if self.algo == 'guess_once':
                    choose_alg_once = '1'
            elif self.algo in ['time_once', 'time_on_shape_change']:
                # The convolution implementation should be choosen by timing
                # every available implementation
                alg = 'CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM'
                choose_alg = '1'
                choose_alg_time = '1'
                if self.algo == 'time_once':
                    choose_alg_once = '1'

        alg_def = ('CONV_ALGO', alg)
        alg_choose_def = ('CHOOSE_ALGO', choose_alg)
        alg_choose_once_def = ('CHOOSE_ALGO_ONCE', choose_alg_once)
        alg_choose_time_def = ('CHOOSE_ALGO_TIME', choose_alg_time)

        return [alg_def, alg_choose_def, alg_choose_once_def,
                alg_choose_time_def] + inpl_def

    # NOTE(review): make_node is truncated at the end of this mangled span;
    # the remainder of its body is missing from this chunk.
    def make_node(self, img, kern, output, desc, alpha=None, beta=None):
        img = as_cuda_ndarray_variable(img)
        kern = as_cuda_ndarray_variable(kern)
        output = as_cuda_ndarray_variable(output)
        if img.type.ndim != 4:
            raise TypeError('img must be 4D tensor')
        if kern.type.ndim != 4:
= len([a for a in activities if a is not None and a.name == 'upload_episode'])
    # NOTE(review): the assignment target of the line above (and the rest of the
    # enclosing query-notifications handler's head) is cut off at the start of
    # this mangled span.
    num_shares = len([a for a in activities
                      if a is not None and a.name in ['share_existing', 'share_new']])
    num_unshares = len([a for a in activities if a is not None and a.name == 'unshare'])
    num_comment_posts = len([a for a in activities if a is not None and a.name == 'post_comment'])

    logging.info('QUERY NOTIFICATIONS: user: %d, device: %d, start_key: %r, count: %d, '
                 '%d uploads, %d shares, %d unshares, %d comment posts' %
                 (user_id, device_id, start_key, len(notifications), num_uploads,
                  num_shares, num_unshares, num_comment_posts))

    # Disable notification responses for older clients.
    if request['headers']['original_version'] < Message.UPDATE_SHARE_VERSION:
        raise gen.Return({'notifications': []})
    else:
        raise gen.Return(response)


@gen.coroutine
def QueryUsers(client, obj_store, user_id, device_id, request):
    """Queries users by user id, filtering by friendships."""
    user_friend_list = yield gen.Task(User.QueryUsers, client, user_id, request['user_ids'])

    # One metadata dict per (user, forward_friend, reverse_friend) triple.
    user_dicts = yield [gen.Task(user.MakeUserMetadataDict, client, user_id,
                                 forward_friend, reverse_friend)
                        for user, forward_friend, reverse_friend in user_friend_list]

    response = {'users': user_dicts}

    logging.info('QUERY USERS: user: %d, device: %d, %d users' %
                 (user_id, device_id, len(user_dicts)))

    raise gen.Return(response)


@gen.coroutine
def QueryViewpoints(client, obj_store, user_id, device_id, request):
    """Queries viewpoint metadata, as well as associated followers and episodes.
    """
    @gen.coroutine
    def _QueryFollowers():
        """Produces list of (followers, last_key) tuples, one for each
        viewpoint in the request."""
        tasks = []
        for vp_dict in request['viewpoints']:
            if vp_dict.get('get_followers', False):
                # follower_start_key comes in as a string; the query expects an int.
                start_key = vp_dict.get('follower_start_key', None)
                tasks.append(Viewpoint.QueryFollowers(client,
                                                      vp_dict['viewpoint_id'],
                                                      excl_start_key=int(start_key) if start_key is not None else None,
                                                      limit=limit))
            else:
                tasks.append(util.GenConstant(None))

        follower_results = yield tasks
        raise gen.Return(follower_results)

    @gen.coroutine
    def _QueryActivities():
        """Produces list of (activities, last_key) tuples, one for each
        viewpoint in the request."""
        tasks = []
        for vp_dict in request['viewpoints']:
            if vp_dict.get('get_activities', False):
                tasks.append(gen.Task(Viewpoint.QueryActivities, client,
                                      vp_dict['viewpoint_id'],
                                      excl_start_key=vp_dict.get('activity_start_key', None),
                                      limit=limit))
            else:
                tasks.append(util.GenConstant(None))

        activity_results = yield tasks
        raise gen.Return(activity_results)

    @gen.coroutine
    def _QueryEpisodes():
        """Produces list of (episodes, last_key) tuples, one for each
        viewpoint in the request."""
        tasks = []
        for vp_dict in request['viewpoints']:
            if vp_dict.get('get_episodes', False):
                tasks.append(gen.Task(Viewpoint.QueryEpisodes, client,
                                      vp_dict['viewpoint_id'],
                                      excl_start_key=vp_dict.get('episode_start_key', None),
                                      limit=limit))
            else:
                tasks.append(util.GenConstant(None))

        episode_results = yield tasks
        raise gen.Return(episode_results)

    @gen.coroutine
    def _QueryComments():
        """Produces list of (comments, last_key) tuples, one for each
        viewpoint in the request."""
        tasks = []
        for vp_dict in request['viewpoints']:
            if vp_dict.get('get_comments', False):
                tasks.append(gen.Task(Viewpoint.QueryComments, client,
                                      vp_dict['viewpoint_id'],
                                      excl_start_key=vp_dict.get('comment_start_key', None),
                                      limit=limit))
            else:
                tasks.append(util.GenConstant(None))

        comment_results = yield tasks
        raise gen.Return(comment_results)

    limit = request.get('limit', None)

    viewpoint_keys = [db_client.DBKey(vp_dict['viewpoint_id'], None)
                      for vp_dict in request['viewpoints']]
    follower_keys = [db_client.DBKey(user_id, vp_dict['viewpoint_id'])
                     for vp_dict in request['viewpoints']]

    # Fan out all queries in parallel; each helper yields one result per viewpoint.
    results = yield [gen.Task(Viewpoint.BatchQuery, client, viewpoint_keys, None, must_exist=False),
                     gen.Task(Follower.BatchQuery, client, follower_keys, None, must_exist=False),
                     _QueryFollowers(),
                     _QueryActivities(),
                     _QueryEpisodes(),
                     _QueryComments()]

    viewpoints, followers, follower_id_results, activity_results, episode_results, comment_results = results
    zip_list = zip(request['viewpoints'], viewpoints, followers, follower_id_results,
                   activity_results, episode_results, comment_results)

    num_followers = 0
    num_activities = 0
    num_episodes = 0
    num_comments = 0
    response_vp_dicts = []
    for vp_dict, viewpoint, follower, follower_result, activity_result, episode_result, comment_result in zip_list:
        # Only return the viewpoint metadata if the caller is a follower of the viewpoint.
        if follower is not None:
            response_vp_dict = {'viewpoint_id': viewpoint.viewpoint_id}

            # Only return viewpoint metadata if "get_attributes" is True.
            if vp_dict.get('get_attributes', False):
                response_vp_dict.update(_MakeViewpointMetadataDict(viewpoint, follower, obj_store))

            # Only return followers if the follower is not removed and "get_followers" is True.
            if not follower.IsRemoved() and vp_dict.get('get_followers', False):
                followers, last_key = follower_result
                response_vp_dict['followers'] = [foll.MakeFriendMetadataDict() for foll in followers]
                if last_key is not None:
                    response_vp_dict['follower_last_key'] = www_util.FormatIntegralLastKey(last_key)
                num_followers += len(followers)

            # Only return content about viewpoint if follower is allowed to view it.
            if _CanViewViewpointContent(viewpoint, follower):
                # Only return activities if "get_activities" is True.
                if vp_dict.get('get_activities', False):
                    activities, last_key = activity_result
                    response_vp_dict['activities'] = [act.MakeMetadataDict() for act in activities]
                    if last_key is not None:
                        response_vp_dict['activity_last_key'] = last_key
                    num_activities += len(activities)

                # Only return episodes if "get_episodes" is True.
                if vp_dict.get('get_episodes', False):
                    episodes, last_key = episode_result
                    response_vp_dict['episodes'] = [ep._asdict() for ep in episodes]
                    if last_key is not None:
                        response_vp_dict['episode_last_key'] = last_key
                    num_episodes += len(episodes)

                # Only return comments if "get_comments" is True.
                if vp_dict.get('get_comments', False):
                    comments, last_key = comment_result
                    response_vp_dict['comments'] = [co._asdict() for co in comments]
                    if last_key is not None:
                        response_vp_dict['comment_last_key'] = last_key
                    num_comments += len(comments)

            response_vp_dicts.append(response_vp_dict)

    logging.info('QUERY VIEWPOINTS: user: %d, device: %d, %d viewpoints, %d followers, '
                 '%d activities, %d episodes, %d comments' %
                 (user_id, device_id, len(response_vp_dicts), num_followers,
                  num_activities, num_episodes, num_comments))

    raise gen.Return({'viewpoints': response_vp_dicts})


def RecordSubscription(client, obj_store, user_id, device_id, request, callback):
    """Records an external subscription."""
    def _OnRecord(verify_response, op):
        callback({'subscription': Subscription.CreateFromITunes(user_id, verify_response).MakeMetadataDict()})

    def _OnVerify(environment, verify_response):
        # Prod rejects sandbox receipts with a specific status; retry against dev.
        if (environment == 'prod' and
            verify_response.GetStatus() == VerifyResponse.SANDBOX_ON_PROD_ERROR):
            ITunesStoreClient.Instance('dev').VerifyReceipt(receipt_data, partial(_OnVerify, 'dev'))
            return
        if not verify_response.IsValid():
            logging.warning('record_subscription: invalid signature; request: %r', request)
            raise web.HTTPError(400, 'invalid receipt signature')
        if environment == 'prod':
            op_request = {
                'headers': request['headers'],
                'user_id': user_id,
                'verify_response_str': verify_response.ToString(),
            }
            Operation.CreateAndExecute(client, user_id, device_id,
                                       'Subscription.RecordITunesTransactionOperation',
                                       op_request, partial(_OnRecord, verify_response))
        else:
            # Accept sandbox receipts, but do not record them. This is required
            # for app store approval (reviewers will attempt to subscribe with
            # sandbox accounts and we must not return an error).
            callback({'subscription': Subscription.CreateFromITunes(user_id, verify_response).MakeMetadataDict()})

    receipt_data = base64.b64decode(request['receipt_data'])
    # We must support both prod and sandbox itunes instances: Even release
    # builds will use the sandbox when the app is under review. There is no
    # (supported) way for an app to know whether a receipt is from a prod
    # or sandbox purchase until we attempt to verify the signature. Apple
    # recommends always trying prod first, and falling back to sandbox
    # upon receiving an appropriate error code.
    # https://developer.apple.com/library/ios/#technotes/tn2259/_index.html#//apple_ref/doc/uid/DTS40009578-CH1-FREQUENTLY_ASKED_QUESTIONS
    ITunesStoreClient.Instance('prod').VerifyReceipt(receipt_data, partial(_OnVerify, 'prod'))


@gen.coroutine
def RemoveContacts(client, obj_store, user_id, device_id, request):
    """Remove contacts."""
    request['user_id'] = user_id
    yield gen.Task(Operation.CreateAndExecute, client, user_id, device_id,
                   'RemoveContactsOperation.Execute', request)

    logging.info('REMOVE CONTACTS: user: %d, device: %d, contact_count: %d' %
                 (user_id, device_id, len(request['contacts'])))
    raise gen.Return({})


@gen.coroutine
def RemoveFollowers(client, obj_store, user_id, device_id, request):
    """Remove followers of an existing viewpoint."""
    request['user_id'] = user_id
    yield Activity.VerifyActivityId(client, user_id, device_id, request['activity']['activity_id'])

    yield gen.Task(Operation.CreateAndExecute, client, user_id, device_id,
                   'RemoveFollowersOperation.Execute', request)

    logging.info('REMOVE FOLLOWERS: user: %d, device: %d, %d followers' %
                 (user_id, device_id, len(request['remove_ids'])))
    raise gen.Return({})


@gen.coroutine
def RemovePhotos(client, obj_store, user_id, device_id, request):
    """Removes photos from a user's personal library. To be more precise,
    *posts* are marked as removed. This means that if a photo has been
    uploaded or saved to the library multiple times, every instance of that
    photo (i.e. post) should be marked as removed.
    """
    request['user_id'] = user_id
    yield gen.Task(Operation.CreateAndExecute, client, user_id, device_id,
                   'RemovePhotosOperation.Execute', request)

    num_photos = sum(len(ep_dict['photo_ids']) for ep_dict in request['episodes'])
    logging.info('REMOVE PHOTOS: user: %d, device: %d, %d photos' %
                 (user_id, device_id, num_photos))
    raise gen.Return({})


@gen.coroutine
def RemoveViewpoint(client, obj_store, user_id, device_id, request):
    """Remove a viewpoint from a user's inbox."""
    request['user_id'] = user_id
    viewpoint_id = request['viewpoint_id']

    # Check that the user isn't trying to remove their default viewpoint. We do it here
    # because it saves us a query for user during the operation and the default viewpoint id
    # can't change.
    if base.ViewfinderContext.current().user.private_vp_id == viewpoint_id:
        raise PermissionError('User is not allowed to remove their default viewpoint: %s' % viewpoint_id)

    yield gen.Task(Operation.CreateAndExecute, client, user_id, device_id,
                   'RemoveViewpointOperation.Execute', request)

    logging.info('REMOVE VIEWPOINT: user: %d, device: %d, viewpoint: %s' %
                 (user_id, device_id, viewpoint_id))

    raise gen.Return({})


@gen.coroutine
def ResolveContacts(client, obj_store, user_id, device_id, request):
    """Resolves contact identities to user ids."""
    ident_tasks = []
    for ident in request['identities']:
        # Validate identity key.
        Identity.ValidateKey(ident)
        if ident.startswith(('Email:', 'Phone:')):
            # Only allow email addresses and phone numbers to be resolved through this interface. Other
            # identity types (e.g. FacebookGraph) are denser and could be exhaustively enumerated, and
            # there is little use in allowing users to enter them directly.
            ident_tasks.append(gen.Task(Identity.Query, client, ident, None, must_exist=False))
        else:
            ident_tasks.append(util.GenConstant(None))
    ident_results = yield ident_tasks

    user_tasks = []
    for ident in ident_results:
        if ident is not None and ident.user_id is not None:
            user_tasks.append(gen.Task(User.Query, client, ident.user_id, None, must_exist=False))
        else:
            user_tasks.append(util.GenConstant(None))
    user_results = yield user_tasks

    results = []
    for request_ident, ident, user in zip(request['identities'], ident_results, user_results):
        result_contact = {'identity': request_ident}
        if user is not None:
            assert ident is not None and user.user_id == ident.user_id
            assert ident.key == request_ident
            result_contact['user_id'] = ident.user_id
            util.SetIfNotNone(result_contact, 'name', user.name)
            util.SetIfNotNone(result_contact, 'given_name', user.given_name)
            util.SetIfNotNone(result_contact, 'family_name', user.family_name)
            result_contact['labels'] = user.MakeLabelList(False)
        results.append(result_contact)

    raise gen.Return({'contacts': results})


@gen.coroutine
def SavePhotos(client, obj_store, user_id, device_id, request):
    """Saves photos from existing episodes to new episodes in the current
    user's default viewpoint. This is used to implement the "save photos to
    library" functionality.
    """
    # NOTE(review): this function is truncated at the end of the mangled span.
    request['user_id'] = user_id
    yield Activity.VerifyActivityId(client, user_id, device_id, request['activity']['activity_id'])

    vp_ids = request.get('viewpoint_ids', [])

    ep_dicts =
                                                   0.1, 0.1, 0.1,
    ...                                            A=A, mu=0.0)
    >>> static_conesta = StaticCONESTA(max_iter=10000)
    >>> beta1 = static_conesta.run(function, np.zeros((50, 1)))
    >>> beta2 = np.dot(np.linalg.pinv(X), y)
    >>> np.linalg.norm(beta1 - beta2)  # doctest: +ELLIPSIS
    0.96629070...
    """
    INTERFACES = [properties.NesterovFunction,
                  properties.StepSize,
                  properties.ProximalOperator,
                  properties.Continuation,
                  properties.DualFunction]

    INFO_PROVIDED = [Info.ok,
                     Info.converged,
                     Info.num_iter,
                     Info.continuations,
                     Info.time,
                     Info.fvalue,
                     Info.func_val,
                     Info.mu,
                     Info.verbose]

    def __init__(self, mu_min=consts.TOLERANCE, tau=0.5, exponent=1.52753,
                 info=[], eps=consts.TOLERANCE, max_iter=10000, min_iter=1,
                 callback=None, simulation=False):
        # NOTE(review): mutable default `info=[]` matches the surrounding
        # codebase's convention; the base class is assumed to copy it.

        super(StaticCONESTA, self).__init__(info=info,
                                            max_iter=max_iter,
                                            min_iter=min_iter)

        # Clamp all hyper-parameters into their valid ranges.
        self.mu_min = max(consts.FLOAT_EPSILON, float(mu_min))
        self.tau = max(consts.TOLERANCE,
                       min(float(tau), 1.0 - consts.TOLERANCE))
        self.exponent = max(1.001, min(float(exponent), 2.0))
        self.eps = max(consts.TOLERANCE, float(eps))
        self.callback = callback
        self.simulation = bool(simulation)
        # Lazily computed cache for _harmonic_number_approx().
        self._harmonic = None

    def _harmonic_number_approx(self):
        """Interpolated approximation of the generalised harmonic number
        H(exponent), cached in self._harmonic after the first call.
        """
        if self._harmonic is None:
            # Tabulated (exponent, H) pairs used as interpolation knots.
            x = [1.001, 1.00125, 1.0025, 1.005, 1.01, 1.025, 1.05, 1.075, 1.1,
                 1.2, 1.3, 1.4, 1.5, 1.52753, 1.6, 1.7, 1.8, 1.9, 1.95, 2.0]
            y = [1000.58, 800.577, 400.577, 200.578, 100.578, 40.579, 20.5808,
                 13.916, 10.5844, 5.59158, 3.93195, 3.10555, 2.61238, 2.50988,
                 2.28577, 2.05429, 1.88223, 1.74975, 1.69443, 1.6449340668]

            f = interp1(x, y)
            self._harmonic = f(self.exponent)

        return self._harmonic

    def _approximate_eps(self, function, beta0):
        """Estimate an initial precision eps from a proximal gradient step at
        beta0 evaluated with the smallest smoothing parameter mu_min.
        """
        old_mu = function.set_mu(self.mu_min)
        step = function.step(beta0)
        D1 = maths.norm(function.prox(-step * function.grad(beta0),
                                      step,
                                      # Arbitrary eps ...
                                      eps=np.sqrt(consts.TOLERANCE),
                                      max_iter=self.max_iter))
        function.set_mu(old_mu)

        return (2.0 / step) * D1 * self._harmonic_number_approx()

    @bases.force_reset
    @bases.check_compatibility
    def run(self, function, beta):
        """Run the static CONESTA continuation loop: repeatedly run FISTA with
        a decreasing precision eps and smoothing parameter mu until converged
        or the iteration budget is exhausted. Returns the final beta.
        """
        # Copy the allowed info keys for FISTA.
        fista_info = list()
        for nfo in self.info_copy():
            if nfo in FISTA.INFO_PROVIDED:
                fista_info.append(nfo)
        # Create the inner algorithm.
        algorithm = FISTA(info=fista_info, eps=self.eps,
                          max_iter=self.max_iter, min_iter=self.min_iter)

        # Not ok until the end.
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)

        # Time the init computation.
        if self.info_requested(Info.time):
            init_time = utils.time()

        # Estimate the initial precision, eps, and the smoothing parameter mu.
        gM = function.eps_max(1.0)  # gamma * M
        if maths.norm(beta) > consts.TOLERANCE:
            mu = function.estimate_mu(beta)
            eps = mu * gM
        else:
            eps = self._approximate_eps(function, beta)
            mu = eps / gM

        function.set_mu(mu)

        # Initialise info variables. Info variables have the suffix "_".
        if self.info_requested(Info.time):
            t_ = []
            init_time = utils.time() - init_time
        if self.info_requested(Info.fvalue) \
                or self.info_requested(Info.func_val):
            f_ = []
        if self.info_requested(Info.converged):
            self.info_set(Info.converged, False)
        if self.info_requested(Info.mu):
            mu_ = []

        i = 0  # Iteration counter.
        while True:
            converged = False

            # Give current parameters to the algorithm.
            algorithm.set_params(eps=eps,
                                 max_iter=self.max_iter - self.num_iter)
            # Run FISTA.
            beta_new = algorithm.run(function, beta)

            # Update global iteration count.
            self.num_iter += algorithm.num_iter

            # Get info from algorithm.
            if Info.time in algorithm.info and \
                    self.info_requested(Info.time):
                t_ += algorithm.info_get(Info.time)
                if i == 0:  # Add init time to first iteration.
                    t_[0] += init_time
            if Info.func_val in algorithm.info \
                    and self.info_requested(Info.func_val):
                f_ += algorithm.info_get(Info.func_val)
            elif Info.fvalue in algorithm.info \
                    and self.info_requested(Info.fvalue):
                f_ += algorithm.info_get(Info.fvalue)
            if self.info_requested(Info.mu):
                mu_ += [mu] * algorithm.num_iter

            # Unless this is a simulation, you want the algorithm to stop when
            # it has converged.
            if not self.simulation:
                # Stopping criterion.
                step = function.step(beta_new)
                if maths.norm(beta_new - beta) < step * self.eps:
                    if self.info_requested(Info.converged):
                        self.info_set(Info.converged, True)
                    converged = True

            beta = beta_new

            if self.callback is not None:
                self.callback(locals())

            if self.info_requested(Info.verbose):
                print("StaticCONESTA ite: %i, eps: %g, mu: %g"
                      % (i, eps, mu))

            # All combined stopping criteria.
            if (converged or self.num_iter >= self.max_iter) \
                    and self.num_iter >= self.min_iter:
                break

            # Update the precision eps.
            eps = self.tau * eps
            # Compute and update mu.
            mu = max(self.mu_min, eps / gM)
            function.set_mu(mu)

            i = i + 1

        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, self.num_iter)
        if self.info_requested(Info.continuations):
            self.info_set(Info.continuations, i + 1)
        if self.info_requested(Info.time):
            self.info_set(Info.time, t_)
        if self.info_requested(Info.func_val):
            self.info_set(Info.func_val, f_)
        if self.info_requested(Info.fvalue):
            self.info_set(Info.fvalue, f_)
        if self.info_requested(Info.mu):
            self.info_set(Info.mu, mu_)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)

        return beta


#class ProjectionADMM(bases.ExplicitAlgorithm):
#    """ The Alternating direction method of multipliers, where the functions
#    have projection operators onto the corresponding convex sets.
#    """
#    INTERFACES = [properties.Function,
#                  properties.ProjectionOperator]
#
#    def __init__(self, output=False,
#                 eps=consts.TOLERANCE,
#                 max_iter=consts.MAX_ITER, min_iter=1):
#
#        self.output = output
#        self.eps = eps
#        self.max_iter = max_iter
#        self.min_iter = min_iter
#
#    def run(self, function, x):
#        """Finds the projection onto the intersection of two sets.
#
#        Parameters
#        ----------
#        function : List or tuple with two Functions. The two functions.
#
#        x : Numpy array. The point that we wish to project.
#        """
#        self.check_compatibility(function[0], self.INTERFACES)
#        self.check_compatibility(function[1], self.INTERFACES)
#
#        z = x
#        u = np.zeros(x.shape)
#        for i in xrange(1, self.max_iter + 1):
#            x = function[0].proj(z - u)
#            z = function[1].proj(x + u)
#            u = u + x - z
#
#            if maths.norm(z - x) / maths.norm(z) < self.eps \
#                    and i >= self.min_iter:
#                break
#
#        return z


class ADMM(bases.ExplicitAlgorithm,
           bases.IterativeAlgorithm,
           bases.InformationAlgorithm):
    """The alternating direction method of multipliers (ADMM). Computes the
    minimum of the sum of two functions with associated proximal or projection
    operators. Solves problems on the form

        min. f(x, y) = g(x) + h(y)
        s.t. y = x

    The functions have associated proximal or projection operators.

    Parameters
    ----------
    rho : Positive float. The penalty parameter.

    mu : Float, greater than 1. The factor within which the primal and dual
            variables should be kept. Set to less than or equal to 1 if you
            don't want to update the penalty parameter rho dynamically.

    tau : Float, greater than 1. Increase rho by a factor tau.

    info : List or tuple of utils.consts.Info. What, if any, extra run
            information should be stored. Default is an empty list, which
            means that no run information is computed nor returned.

    eps : Positive float. Tolerance for the stopping criterion.

    max_iter : Non-negative integer. Maximum allowed number of iterations.

    min_iter : Non-negative integer less than or equal to max_iter. Minimum
            number of iterations that must be performed. Default is 1.
    """
    INTERFACES = [properties.SplittableFunction,
                  properties.AugmentedProximalOperator,
                  properties.OR(properties.ProximalOperator,
                                properties.ProjectionOperator)]

    INFO_PROVIDED = [Info.ok,
                     Info.num_iter,
                     Info.time,
                     Info.fvalue,
                     Info.converged]

    def __init__(self, rho=1.0, mu=10.0, tau=2.0,
                 info=[],
                 eps=consts.TOLERANCE,
                 max_iter=consts.MAX_ITER, min_iter=1,
                 simulation=False):
        # TODO: Investigate what is a good default value here!

        super(ADMM, self).__init__(info=info,
                                   max_iter=max_iter,
                                   min_iter=min_iter)

        # Clamp parameters into their valid ranges.
        self.rho = max(consts.FLOAT_EPSILON, float(rho))
        self.mu = max(1.0, float(mu))
        self.tau = max(1.0, float(tau))
        self.eps = max(consts.FLOAT_EPSILON, float(eps))
        self.simulation = bool(simulation)

    @bases.force_reset
    @bases.check_compatibility
    def run(self, functions, xy):
        """Finds the minimum of two functions with associated proximal
        operators.

        Parameters
        ----------
        functions : List or tuple with two Functions or a SplittableFunction.
                The two functions.

        xy : List or tuple with two elements, numpy arrays. The starting
                points for the minimisation.
        """
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, False)

        if self.info_requested(Info.time):
            t = []
        if self.info_requested(Info.fvalue):
            f = []
        if self.info_requested(Info.converged):
            self.info_set(Info.converged, False)

        funcs = [functions.g, functions.h]

        x_new = xy[0]
        y_new = xy[1]
        z_new = x_new.copy()
        u_new = y_new.copy()
        for i in range(1, self.max_iter + 1):

            if self.info_requested(Info.time):
                tm = utils.time_cpu()

            x_old = x_new
            z_old = z_new
            u_old = u_new

            # x-update: prox if available, otherwise projection.
            if isinstance(funcs[0], properties.ProximalOperator):
                x_new = funcs[0].prox(z_old - u_old)
            else:
                x_new = funcs[0].proj(z_old - u_old)

            y_new = x_new  # TODO: Allow a linear operator here.

            # z-update: prox if available, otherwise projection.
            if isinstance(funcs[1], properties.ProximalOperator):
                z_new = funcs[1].prox(y_new + u_old)
            else:
                z_new = funcs[1].proj(y_new + u_old)

            # The order here is important! Do not change!
            u_new = (y_new - z_new) + u_old

            if self.info_requested(Info.time):
                t.append(utils.time_cpu() - tm)
            if self.info_requested(Info.fvalue):
                fval = funcs[0].f(z_new) + funcs[1].f(z_new)
                f.append(fval)

            if not self.simulation:
                # Absolute criterion on the first pass, relative thereafter.
                if i == 1:
                    if maths.norm(x_new - x_old) < self.eps \
                            and i >= self.min_iter:
                        # print "Stopping criterion kicked in!"
                        if self.info_requested(Info.converged):
                            self.info_set(Info.converged, True)
                        break
                else:
                    if maths.norm(x_new - x_old) / maths.norm(x_old) < self.eps \
                            and i >= self.min_iter:
                        # print "Stopping criterion kicked in!"
                        if self.info_requested(Info.converged):
                            self.info_set(Info.converged, True)
                        break

            # Update the penalty parameter, rho, dynamically.
            if self.mu > 1.0:
                r = x_new - z_new        # Primal residual.
                s = (z_new - z_old) * -self.rho  # Dual residual.
                norm_r = maths.norm(r)
                norm_s = maths.norm(s)
                # print "norm(r): ", norm_r, ", norm(s): ", norm_s, ", rho:", \
                #     self.rho

                if norm_r > self.mu * norm_s:
                    self.rho *= self.tau
                    u_new *= 1.0 / self.tau  # Rescale dual variable.
                elif norm_s > self.mu * norm_r:
                    self.rho /= self.tau
                    u_new *= self.tau  # Rescale dual variable.

                # Update the penalty parameter in the functions.
                functions.set_rho(self.rho)

        self.num_iter = i
        if self.info_requested(Info.num_iter):
            self.info_set(Info.num_iter, i)
        if self.info_requested(Info.time):
            self.info_set(Info.time, t)
        if self.info_requested(Info.fvalue):
            self.info_set(Info.fvalue, f)
        if self.info_requested(Info.ok):
            self.info_set(Info.ok, True)

        return z_new


class DykstrasProximalAlgorithm(bases.ExplicitAlgorithm):
    """Dykstra's proximal algorithm. Computes the minimum of the sum of two
    proximal operators.

    The functions have proximal operators (ProjectionOperator.prox).
    """
    INTERFACES = [properties.Function,
                  properties.ProximalOperator]

    def __init__(self, eps=consts.TOLERANCE,
                 max_iter=1000, min_iter=1):
        # TODO: Investigate what good default value are here!
<filename>rtk/analyses/pof/PhysicsOfFailure.py
#!/usr/bin/env python
"""
=========================
Physics of Failure Module
=========================
"""

# -*- coding: utf-8 -*-
#
# rtk.analyses.pof.PhysicsOfFailure.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> <EMAIL>rew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
#    may be used to endorse or promote products derived from this software
#    without specific prior written permission.
#
#    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
#    PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
#    OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#    EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#    PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
#    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
#    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
#    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Import modules for localization support.
import gettext
import locale

# Import other RTK modules.
try:
    import Configuration
except ImportError:                         # pragma: no cover
    import rtk.Configuration as Configuration
from Mechanism import Model as Mechanism
from Load import Model as Load
from Stress import Model as Stress
from Method import Model as Method

__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'

try:
    locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:                        # pragma: no cover
    locale.setlocale(locale.LC_ALL, '')

_ = gettext.gettext


class ParentError(Exception):
    """
    Exception raised when None is passed for the hardware ID when initializing
    an instance of the PoF model.
    """

    pass


class Model(object):
    """
    The Physics of Failure (PoF) data model aggregates the Mechanism, Load,
    Stress and Method data models to produce an overall PoF analysis.  A
    Hardware item will consist of one PoF analysis.  The attributes of a PoF
    are:

    :ivar dict dicMechanisms: Dictionary of the Mechanisms associated with the
                              PoF.  Key is the Mechanism ID; value is a pointer
                              to the instance of the Mechanism data model.
    :ivar int assembly_id: the Hardware item ID the PoF analysis is associated
                           with.
    """

    def __init__(self, assembly_id):
        """
        Method to initialize a PoF data model instance.

        :param int assembly_id: the Hardware item ID that the PoF analysis
                                will be associated with.
        :raise ParentError: if assembly_id is None.
        """
        # Model must be associated with either a Function or Hardware item.
        if assembly_id is None:
            raise ParentError

        # Define private dictionary attributes.

        # Define private list attributes.

        # Define private scalar attributes.

        # Define public dictionary attributes.
        self.dicMechanisms = {}

        # Define public list attributes.

        # Define public scalar attributes.
        self.assembly_id = assembly_id


class PoF(object):
    """
    The PoF data controller provides an interface between the PoF data model
    and an RTK view model.  A single PoF data controller can manage one or
    more PoF data models.

    :ivar _dao: the :py:class:`rtk.dao.DAO.DAO` used to communicate with the
                RTK Project database.
    :ivar dict dicPoF: Dictionary of the PoF data models controlled.  Key is
                       the Hardware ID; value is a pointer to the instance of
                       the PoF data model.
    """

    def __init__(self):
        """
        Method to initialize a PoF controller instance.
        """
        # Define private dictionary attributes.

        # Define private list attributes.

        # Define private scalar attributes.
        self._dao = None

        # Define public dictionary attributes.
        self.dicPoF = {}

        # Define public list attributes.

        # Define public scalar attributes.

    def request_pof(self, dao, assembly_id=None):   # pylint: disable=R0914
        """
        Method to load the entire PoF for a Hardware item.  Starting at the
        Mechanism level, the steps to create the PoF are:

        #. Create an instance of the PoF (Mechanism, Load, Stress, Method)
           data model.
        #. Add instance pointer to the PoF dictionary for the passed
           Hardware item.
        #. Retrieve the mechanisms (loads, stresses, methods) from the RTK
           Project database.
        #. Create an instance of the data model.
        #. Set the attributes of the data model instance from the returned
           results.
        #. Add instance pointer to the Mechanism (Load, Stress, Method)
           dictionary.

        :param dao: the :py:class:`rtk.dao.DAO.DAO` to use for communicating
                    with the RTK Project database.
        :keyword int assembly_id: the Hardware item ID that the PoF will be
                                  associated with.
        :return: False if successful or True if an error is encountered.
        :rtype: bool
        """
        # Controller must be associated with a Hardware item.
        if assembly_id is None:
            raise ParentError

        self._dao = dao

        _pof = Model(assembly_id)
        self.dicPoF[assembly_id] = _pof

        _query = "SELECT fld_assembly_id, fld_mechanism_id, fld_description \
                  FROM rtk_mechanisms \
                  WHERE fld_assembly_id={0:d} \
                  AND fld_include_pof=1 \
                  ORDER BY fld_mechanism_id ASC".format(assembly_id)
        (_results, _error_code, __) = self._dao.execute(_query)
        try:
            _n_mechanisms = len(_results)
        except TypeError:
            # A failed query returns a non-sequence; treat as no mechanisms.
            _n_mechanisms = 0

        for i in range(_n_mechanisms):
            _mechanism = Mechanism()
            _mechanism.set_attributes(_results[i])
            _pof.dicMechanisms[_mechanism.mechanism_id] = _mechanism

            _query = "SELECT * FROM rtk_op_loads \
                      WHERE fld_mechanism_id={0:d}".format(
                          _mechanism.mechanism_id)
            (_loads, _error_code, __) = self._dao.execute(_query,
                                                          commit=False)
            try:
                _n_loads = len(_loads)
            except TypeError:
                _n_loads = 0

            # NOTE(review): this inner loop reuses the loop variable 'i' from
            # the outer mechanism loop.  Harmless today because _results[i] is
            # read before this point, but fragile — consider renaming.
            for i in range(_n_loads):
                _load = Load()
                _load.set_attributes(_loads[i])
                _mechanism.dicLoads[_load.load_id] = _load

                _query = "SELECT * FROM rtk_op_stress \
                          WHERE fld_load_id={0:d}".format(
                              _load.load_id)
                (_stresses, _error_code, __) = self._dao.execute(_query)
                try:
                    _n_stresses = len(_stresses)
                except TypeError:
                    _n_stresses = 0

                for j in range(_n_stresses):
                    _stress = Stress()
                    _stress.set_attributes(_stresses[j])
                    _load.dicStresses[_stress.stress_id] = _stress

                    _query = "SELECT * FROM rtk_test_methods \
                              WHERE fld_stress_id={0:d}".format(
                                  _stress.stress_id)
                    (_methods, _error_code, __) = self._dao.execute(_query)
                    try:
                        _n_methods = len(_methods)
                    except TypeError:
                        _n_methods = 0

                    for k in range(_n_methods):
                        _method = Method()
                        _method.set_attributes(_methods[k])
                        _stress.dicMethods[_method.method_id] = _method

        return False

    def add_pof(self, hardware_id=None):
        """
        Method to add a new PoF to the dictionary of profiles managed by this
        controller.

        :keyword int hardware_id: the Hardware item ID to add the PoF.
        :return: False if successful or True if an error is encountered.
        :rtype: bool
        """
        self.request_pof(self._dao, hardware_id)

        return False

    def add_mechanism(self, hardware_id):
        """
        Method to add a new Mechanism to the selected Hardware item.

        :param int hardware_id: the Hardware ID to add the Mechanism.
        :return: (_results, _error_code, _last_id)
        :rtype: tuple
        """
        _pof = self.dicPoF[hardware_id]

        _query = "INSERT INTO rtk_mechanisms \
                  (fld_assembly_id, fld_mode_id, fld_include_pof) \
                  VALUES ({0:d}, 10000, 1)".format(hardware_id)
        (_results,
         _error_code,
         _last_id) = self._dao.execute(_query, commit=True)

        _mechanism = Mechanism()
        _mechanism.set_attributes((hardware_id, _last_id, ''))
        _pof.dicMechanisms[_last_id] = _mechanism

        return(_results, _error_code, _last_id)

    def delete_mechanism(self, hardware_id, mechanism_id):
        """
        Method to delete the selected Mechanism.

        :param int hardware_id: the Hardware ID of the Mechanism to delete.
        :param int mechanism_id: the Mechanism ID to delete.
        :return: (_results, _error_code)
        :rtype: tuple
        """
        _pof = self.dicPoF[hardware_id]

        _query = "DELETE FROM rtk_mechanisms \
                  WHERE fld_mechanism_id={0:d}".format(mechanism_id)
        (_results,
         _error_code,
         _last_id) = self._dao.execute(_query, commit=True)

        try:
            _pof.dicMechanisms.pop(mechanism_id)
        except KeyError:
            # Signal "not found" via error code rather than raising.
            _error_code = 60

        return(_results, _error_code)

    def add_load(self, hardware_id, mechanism_id):
        """
        Method to add a new Operating Load to the selected Mechanism.

        :param int hardware_id: the Hardware ID to add the Load.
        :param int mechanism_id: the Mechanism ID to add the Load.
        :return: (_results, _error_code, _last_id)
        :rtype: tuple
        """
        _pof = self.dicPoF[hardware_id]
        _mechanism = _pof.dicMechanisms[mechanism_id]

        _query = "INSERT INTO rtk_op_loads \
                  (fld_mechanism_id) \
                  VALUES ({0:d})".format(mechanism_id)
        (_results,
         _error_code,
         _last_id) = self._dao.execute(_query, commit=True)

        _load = Load()
        _load.set_attributes((mechanism_id, _last_id, 'Test Load', 0))
        _mechanism.dicLoads[_last_id] = _load

        return(_results, _error_code, _last_id)

    def delete_load(self, hardware_id, mechanism_id, load_id):
        """
        Method to delete the selected operating Load.

        :param int hardware_id: the Hardware ID of the operating Load to
                                delete.
        :param int mechanism_id: the Mechanism ID of the operating Load to
                                 delete.
        :param int load_id: the operating Load ID to delete.
        :return: (_results, _error_code)
        :rtype: tuple
        """
        _pof = self.dicPoF[hardware_id]
        _mechanism = _pof.dicMechanisms[mechanism_id]

        _query = "DELETE FROM rtk_op_loads \
                  WHERE fld_load_id={0:d}".format(load_id)
        (_results, _error_code, __) = self._dao.execute(_query,
                 rdz=False, realspace=False):
        # Store `data` in the slot selected by (rdz, realspace).
        if realspace:
            if rdz:
                self._rdz_real = data
            else:
                self._xyz_real = data
        else:
            if rdz:
                self._rdz = data
            else:
                self._xyz = data

    def _get_gals(self, rdz=False, realspace=False):
        """Return galaxy coordinates, either as (ra, dec, z) or Cartesian,
        optionally with velocity distortion applied.
        """
        if rdz:
            xyz = self.get_data(realspace=realspace)
            xyz = util.ra_dec_z(xyz - self.origin, np.zeros_like(xyz),
                                self.simbox.cosmo, self.zprec)
        else:
            xyz = util.xyz_array(self.simbox.gals)[self.selection]
            if not realspace:
                # Use raw vz unless halo/galaxy velocity factors are set.
                if (self.halo_vel_factor is None) and (
                        self.gal_vel_factor is None):
                    vz = self.simbox.gals["vz"][self.selection]
                else:
                    vz = util.factor_velocity(
                        self.simbox.gals["vz"][self.selection],
                        self.simbox.gals["halo_vz"][self.selection],
                        halo_vel_factor=self.halo_vel_factor,
                        gal_vel_factor=self.gal_vel_factor,
                        inplace=False)
                xyz = self._apply_distortion(xyz, vz)
        return xyz

    def _apply_distortion(self, xyz, vz):
        # Redshift-space distortion along the z axis only.
        return return_xyz_formatted_array(*xyz.T, self.simbox.Lbox,
                                          self.simbox.cosmo,
                                          self.simbox.redshift, velocity=vz,
                                          velocity_distortion_dimension="z")


class MockField:
    """
    MockField(simbox, **kwargs)

    Conduct a mock observation of a single field of the populated galaxies
    of a simbox via celestial selection (i.e., galaxies selected by ra, dec,
    redshift)

    Default Values:

    - **center** = `simbox.Lbox`/2.
    - **center_rdz** = [0., 0., `simbox.redshift`]
    - **scheme** = "square"
    - **sqdeg** = 15.
    - **delta_z** = 0.1
    - **collision_fraction** = 0.
    - **realspace_selection** = False
    - **cartesian_selection** = False
    - **cartesian_distortion** = False
    - **empty** = False
    - **rand_density_factor** = 20.
    - **zprec** = 1e-3

    Arguments
    ---------
    simbox : SimBox object
        SimBox object containing halos and galaxies from which to select and
        observe

    Keyword Arguments
    -----------------
    center : array_like
        Cartesian coordinate between [0,0,0] and `simbox.Lbox` which
        specifies the position of [ra,dec,z] = [0, 0, `simbox.redshift`].

    center_rdz : array_like, with shape (2,) or (3,)
        [ra,dec] at which to center the field around (redshift index
        ignored).

    scheme : string
        Shape of the field, e.g. "circle", "square", "hexagon".

    sqdeg : float
        Field size in square degrees.

    delta_z : float
        Select redshift within `simbox.redshift` :math:`\pm` 0.5* `delta_z`.

    collision_fraction : float, between 0 and 1
        Fraction of galaxies to randomly exclude from the observation.

    realspace_selection : boolean
        If true, select galaxies before applying velocity distortion.

    cartesian_selection : boolean
        If true, select galaxies by Cartesian coordinates, with x-y
        selections applied as if the entire sample was at the redshift of
        the simbox.

    cartesian_distortion : boolean
        If true, apply velocity distortion along the :math:`\\hat{z}`
        direction instead of the exact line-of-sight direction.

    empty : boolean
        If true, don't actually select any galaxies. Necessary if the simbox
        has not been populated.

    rand_density_factor : float
        If randoms are generated by self.make_rands(), then generate this
        many times more data than galaxies.

    zprec : float
        The precision of the redshift cubic interpolation grid. Smaller
        values provide more accurate redshifts, but this can be expensive.

    Useful Methods
    --------------
    - get_data(rdz=False, realspace=False)
    - get_rands(rdz=False, realspace=True)
    - get_vel()
    - get_redshift(realspace=False)
    - get_dist(realspace=False)
    - get_mgid()
    - get_shape(rdz=False)
    - make_rands()
    """
    defaults = {
        "cartesian_distortion": False,
        "cartesian_selection": False,
        "realspace_selection": False,
        "collision_fraction": 0.,
        "scheme": "square",
        "sqdeg": 15.,
        "delta_z": 0.1,
        "zprec": 1e-3,
        "rand_density_factor": 20.,
    }

    def __init__(self, simbox, **kwargs):
        # Keep the constructor arguments for later reconstruction.
        self._kwargs_ = {**kwargs, "simbox": simbox}
        self.simbox = simbox
        self.center = self.simbox.Lbox / 2.
        self.center_rdz = np.array([0., 0., simbox.redshift])
        self.empty = simbox.empty
        self.__dict__.update(self.defaults)
        self.halo_vel_factor = None
        self.gal_vel_factor = None
        # self.cartesian_distortion = False
        # self.cartesian_selection = False
        # self.realspace_selection = False
        # self.collision_fraction = 0.
        # self.scheme = "square"
        # self.sqdeg = 15.
        # self.delta_z = 0.1
        # self.zprec = 1e-3
        # self.rand_density_factor = 20.

        # Update default parameters with any keyword arguments
        util.kwargs2attributes(self, kwargs)

        self._gals = {}
        self._rands = {}
        self.origin, self.Lbox_rdz = self._centers_to_origin()
        self.field_selector, self.redshift_selector = self.get_selectors()

        # Create field selection from FieldSelector, given sqdeg and scheme
        if not self.empty:
            self._initialize()

    def _initialize(self):
        # Build the galaxy selection mask (or select everything when the
        # simbox has no populated galaxies yet).
        if hasattr(self.simbox, "gals"):
            self.selection = self._make_selection()
        else:
            self.selection = slice(None)
        for key in self._gals:
            self._gals[key] = self._gals[key][self.selection]
        # gc.collect()

    def get_selectors(self):
        # Selector pair depends on whether selection is Cartesian or on-sky.
        if self.cartesian_selection:
            return CartesianSelector(self), RedshiftSelector(self)
        else:
            return CelestialSelector(self), RedshiftSelector(self)

    # Public member functions for data access
    # =======================================

    def get_data(self, rdz=False, realspace=False):
        if rdz:
            return self._get_rdz(dataset=self._gals, realspace=realspace)
        else:
            return self._get_xyz(dataset=self._gals, realspace=realspace)

    def get_rands(self, rdz=False, realspace=True):
        # Randoms are always realspace; the flag is accepted for API symmetry.
        del realspace
        if rdz:
            return self._get_rdz(dataset=self._rands)
        else:
            return self._get_xyz(dataset=self._rands)

    def get_vel(self, halo_vel_factor=None, gal_vel_factor=None):
        return self._get_vel(halo_vel_factor=halo_vel_factor,
                             gal_vel_factor=gal_vel_factor)

    def get_redshift(self, realspace=False):
        return self._get_redshift(realspace=realspace)

    def get_dist(self, realspace=False):
        # Comoving distance from the observer (origin) to each galaxy.
        xyz = self.get_data(realspace=realspace)
        return np.sqrt(np.sum(xyz ** 2, axis=1))

    def get_mgid(self):
        return np.asarray(self.simbox.gals["mgid"][self.selection])

    def get_shape(self, rdz=False, deg=False):
        if self.cartesian_selection:
            selector = CartesianSelector(self)
        else:
            selector = CelestialSelector(self)
        shape = selector.get_fieldshape(rdz=rdz)
        if deg:
            # Convert the angular components from radians to degrees.
            shape[:2] *= 180. / np.pi
        return shape

    def get_lims(self, rdz=False, overestimation_factor=1., deg=False):
        shape = self.get_shape(rdz=rdz, deg=deg) * overestimation_factor
        center = self.center_rdz if rdz else self.center
        xlim = [center[0] - shape[0] / 2., center[0] + shape[0] / 2.]
        ylim = [center[1] - shape[1] / 2., center[1] + shape[1] / 2.]
        zlim = [center[2] - shape[2] / 2., center[2] + shape[2] / 2.]
        return np.array([xlim, ylim, zlim], dtype=np.float32)

    def make_rands(self, density_factor=None, seed=None):
        if density_factor is None:
            density_factor = self.rand_density_factor
        else:
            self.rand_density_factor = density_factor

        density_gals = self.simbox.get_density()
        density_rands = density_factor * density_gals

        if self.cartesian_selection:
            volume = np.product(self.get_shape(rdz=False))
            Nran = int(density_rands * volume + 0.5)
            # Cartesian selection
            if not seed is None:
                np.random.seed(seed)
            rands = (np.random.random((Nran, 3)).astype(np.float32)
                     - 0.5) * self.get_shape(rdz=False)[None, :] \
                + self.center[None, :]
        else:
            # Celestial (ra,dec) selection
            ralim, declim, _ = self.get_lims(rdz=True,
                                             overestimation_factor=1.02)
            zlim = self.get_lims(rdz=False,
                                 overestimation_factor=1.02)[2] \
                - self.origin[2]

            volume = util.volume_rdz(ralim, declim, zlim)
            Nran = int(density_rands * volume + 0.5)
            rands = util.rand_rdz(Nran, ralim, declim, zlim, seed=seed)
            rands = self.rdz2xyz(rands, input_is_distance=True)

        self._rands = {
            "x_real": rands[:, 0],
            "y_real": rands[:, 1],
            "z_real": rands[:, 2]
        }

        # Keep only randoms inside the field AND inside the simulation box.
        selection = self._make_selection(dataset='rands') \
            & self._select_within_simbox(rands)
        for key in self._rands:
            self._rands[key] = self._rands[key][selection]

    # Share BoxField's public documentation for the shared accessors.
    get = BoxField.get
    get_data.__doc__ = BoxField.get_data.__doc__
    get_rands.__doc__ = BoxField.get_rands.__doc__
    get_vel.__doc__ = BoxField.get_vel.__doc__
    get_redshift.__doc__ = BoxField.get_redshift.__doc__
    get_dist.__doc__ = BoxField.get_dist.__doc__
    get_mgid.__doc__ = BoxField.get_mgid.__doc__
    get_shape.__doc__ = BoxField.get_shape.__doc__
    make_rands.__doc__ = BoxField.make_rands.__doc__

    def xyz2rdz(self, xyz, vel=None):
        # Observer-relative Cartesian -> (ra, dec, z).
        return util.ra_dec_z(xyz - self.origin, vel,
                             cosmo=self.simbox.cosmo, zprec=self.zprec)

    def rdz2xyz(self, rdz, input_is_distance=False):
        # When the third column is already a distance, skip cosmology lookup.
        cosmo = None if input_is_distance else self.simbox.cosmo
        return util.rdz2xyz(rdz, cosmo=cosmo) + self.origin

    def volume(self):
        """Analytic field volume (exact for celestial cone sections)."""
        (_, _, (d1, d2)), v, _ = self._measure_volume_setup(oef=1)
        if self.cartesian_selection:
            return v
        else:
            omega = self.sqdeg * (np.pi / 180.) ** 2
            return omega / 3. * (d2 ** 3 - d1 ** 3)

    def measure_volume(self, precision=1e-3, recursion_lim=10):
        """Monte-Carlo estimate of the field volume to a given relative
        precision, raising RecursionError if the budget is exceeded.
        """
        lims, volume, rand_generator = self._measure_volume_setup()

        N0 = 10000
        N = 0
        n = 0
        r = 0
        # order of magnitude estimation only
        sigma = lambda: np.sqrt((1 - r) / (r * (N - 1)))
        get_Nmore = lambda: min([
            int(N0 - N + 1 + (1 - r) / (r * precision ** 2)), 2 ** 24])

        Nmore = N0
        i = 0
        while (not r) or (sigma() > precision):
            i += 1
            if i > recursion_lim:
                s = sigma() if r else np.inf
                raise RecursionError(f"After {recursion_lim} "
                                     f"iterations, {N} randoms were generated. "
                                     f"precision={s:e}, short of the required "
                                     f"precision of {precision:e}.")
            rand = rand_generator(Nmore, *lims)
            n += self.apply_selection(rand, input_is_distance=True).sum()
            N += Nmore
            r = n / N
            Nmore = get_Nmore() if r else N0

        return r * volume

    def apply_selection(self, data, input_is_distance=False):
        # Combine the angular and redshift/distance selections.
        return (self.field_selector(data[:, :2])
                & self.redshift_selector(data[:, 2], input_is_distance))

    def _measure_volume_setup(self, oef=1.5):
        # Build (limits, bounding volume, random generator) for either the
        # Cartesian or the celestial selection geometry.
        if self.cartesian_selection:
            lims = self.get_lims(rdz=False, overestimation_factor=oef)
            lims[-1] -= self.origin[-1]
            volume = np.product(np.diff(lims, axis=1))
            rand_generator = (lambda N, xlim, ylim, zlim:
                              (np.random.random((N, 3))
                               * np.diff(lims, axis=1).T) + lims[:, :1].T)
        else:
            lims = self.get_lims(rdz=True, overestimation_factor=oef)
            lims[2] = self.simbox.redshift2distance(lims[2])
            volume = util.volume_rdz(*lims)
            rand_generator = util.rand_rdz

        return lims, volume, rand_generator

    def _get_rdz(self, dataset=None, realspace=False):
        dataset, datanames, _ = self._get_dataset(dataset)
        if dataset is self._rands:
            realspace = True
        if realspace:
            zkey = 'redshift_real'
        else:
            zkey = 'redshift'
        # Only compute (ra, dec, z) once; afterwards read from the table.
        already_done = {'ra', 'dec', zkey}.issubset(datanames)
        if not already_done:
            rdz = self._redshift_distortion_rdz(realspace=realspace,
                                                dataset=dataset)
            if self.cartesian_distortion:
                rdz2 = self._get_redshift(realspace=realspace,
                                          dataset=dataset)
            else:
                rdz2 = rdz[:, 2]
            util.update_table(dataset, {'ra': rdz[:, 0], 'dec': rdz[:, 1],
                                        zkey: rdz2})

        rdz = util.xyz_array(dataset, keys=['ra', 'dec', zkey])
        return rdz

    def _get_xyz(self, dataset=None, realspace=False):
        dataset, datanames, selection = self._get_dataset(dataset)
        if dataset is self._rands:
            realspace = True
        if realspace:
            if dataset is self._gals:
                return util.xyz_array(self.simbox.gals)[selection]
            elif dataset is self.simbox.gals:
                return util.xyz_array(self.simbox.gals)
        # update_table(dataset, {"x_real":x, "y_real":y, "z_real":z})
        # Choose table keys by distortion mode; simbox tables use *_red keys.
        if realspace:
            xkey, ykey, zkey = "x_real", "y_real", "z_real"
        elif self.cartesian_distortion:
            xkey, ykey, zkey = "x_real", "y_real", "z"
            if dataset is self.simbox.gals:
                zkey = "z_red"
        else:
            xkey, ykey, zkey = "x", "y", "z"
            if dataset is self.simbox.gals:
                xkey, ykey, zkey = "x_red", "y_red", "z_red"

        if realspace or self.cartesian_distortion:
            already_done = {xkey, ykey, zkey}.issubset(datanames)
            if not already_done:
                xyz = self._cartesian_distortion_xyz(realspace=realspace,
                                                     dataset=dataset)
                util.update_table(dataset, {xkey: xyz[:, 0], ykey: xyz[:,
            # NOTE(review): chunk begins mid-method, inside a try block whose
            # header is not visible; this is the rebalance/recover worker loop.
            rebalance_tuple = self._GetRebalanceTuple()
            
            recover_tuple = self._GetRecoverTuple()
            
            # move misplaced file prefixes back to their correct locations
            while recover_tuple is not None:
                
                if job_key.IsCancelled():
                    
                    break
                    
                ( prefix, recoverable_location, correct_location ) = recover_tuple
                
                text = 'Recovering \'' + prefix + '\' from ' + recoverable_location + ' to ' + correct_location
                
                HydrusData.Print( text )
                
                job_key.SetVariable( 'popup_text_1', text )
                
                recoverable_path = os.path.join( recoverable_location, prefix )
                correct_path = os.path.join( correct_location, prefix )
                
                HydrusPaths.MergeTree( recoverable_path, correct_path )
                
                recover_tuple = self._GetRecoverTuple()
                
        finally:
            
            # always close out the popup job, even if recovery failed
            job_key.SetVariable( 'popup_text_1', 'done!' )
            
            job_key.Finish()
            
            job_key.Delete()
            
    
    def RebalanceWorkToDo( self ):
        # True if any prefix still needs rebalancing; read lock only.
        
        with self._rwlock.read:
            
            return self._GetRebalanceTuple() is not None
            
    
    def RegenerateThumbnail( self, media ):
        # Rebuild the thumbnail for media from its original file.
        # Raises FileMissingException if the original file is absent.
        
        hash = media.GetHash()
        mime = media.GetMime()
        
        with self._rwlock.read:
            
            file_path = self._GenerateExpectedFilePath( hash, mime )
            
            if not os.path.exists( file_path ):
                
                # NOTE(review): chunk ends mid-string-literal; the message and the
                # rest of this method continue beyond this view.
                raise HydrusExceptions.FileMissingException( 'The thumbnail for file ' + hash.hex() + ' could not be regenerated from the original file because the original file is missing! This event could indicate hard drive corruption. 
Please check everything is ok.') thumbnail_bytes = self._GenerateThumbnailBytes( file_path, media ) with self._rwlock.write: self._AddThumbnailFromBytes( hash, thumbnail_bytes ) def LocklessRegenerateThumbnail( self, media ): if HG.file_report_mode: hash = media.GetHash() mime = media.GetMime() HydrusData.ShowText( 'Thumbnail regen request: ' + str( ( hash, mime ) ) ) self._GenerateAndSaveThumbnail( media ) def LocklessRegenerateThumbnailIfWrongSize( self, media ): do_it = False try: hash = media.GetHash() mime = media.GetMime() ( media_width, media_height ) = media.GetResolution() path = self._GenerateExpectedThumbnailPath( hash ) numpy_image = ClientImageHandling.GenerateNumpyImage( path, mime ) ( current_width, current_height ) = ClientImageHandling.GetNumPyImageResolution( numpy_image ) bounding_dimensions = self._controller.options[ 'thumbnail_dimensions' ] ( expected_width, expected_height ) = HydrusImageHandling.GetThumbnailResolution( ( media_width, media_height ), bounding_dimensions ) if current_width != expected_width or current_height != expected_height: do_it = True except: do_it = True if do_it: self.LocklessRegenerateThumbnail( media ) return do_it class DataCache( object ): def __init__( self, controller, cache_size, timeout = 1200 ): self._controller = controller self._cache_size = cache_size self._timeout = timeout self._keys_to_data = {} self._keys_fifo = collections.OrderedDict() self._total_estimated_memory_footprint = 0 self._lock = threading.Lock() self._controller.sub( self, 'MaintainCache', 'memory_maintenance_pulse' ) def _Delete( self, key ): if key not in self._keys_to_data: return deletee_data = self._keys_to_data[ key ] del self._keys_to_data[ key ] self._RecalcMemoryUsage() def _DeleteItem( self ): ( deletee_key, last_access_time ) = self._keys_fifo.popitem( last = False ) self._Delete( deletee_key ) def _RecalcMemoryUsage( self ): self._total_estimated_memory_footprint = sum( ( data.GetEstimatedMemoryFootprint() for data in 
list(self._keys_to_data.values()) ) ) def _TouchKey( self, key ): # have to delete first, rather than overwriting, so the ordereddict updates its internal order if key in self._keys_fifo: del self._keys_fifo[ key ] self._keys_fifo[ key ] = HydrusData.GetNow() def Clear( self ): with self._lock: self._keys_to_data = {} self._keys_fifo = collections.OrderedDict() self._total_estimated_memory_footprint = 0 def AddData( self, key, data ): with self._lock: if key not in self._keys_to_data: while self._total_estimated_memory_footprint > self._cache_size: self._DeleteItem() self._keys_to_data[ key ] = data self._TouchKey( key ) self._RecalcMemoryUsage() def DeleteData( self, key ): with self._lock: self._Delete( key ) def GetData( self, key ): with self._lock: if key not in self._keys_to_data: raise Exception( 'Cache error! Looking for ' + str( key ) + ', but it was missing.' ) self._TouchKey( key ) return self._keys_to_data[ key ] def GetIfHasData( self, key ): with self._lock: if key in self._keys_to_data: self._TouchKey( key ) return self._keys_to_data[ key ] else: return None def HasData( self, key ): with self._lock: return key in self._keys_to_data def MaintainCache( self ): with self._lock: while True: if len( self._keys_fifo ) == 0: break else: ( key, last_access_time ) = next( iter(self._keys_fifo.items()) ) if HydrusData.TimeHasPassed( last_access_time + self._timeout ): self._DeleteItem() else: break class FileViewingStatsManager( object ): def __init__( self, controller ): self._controller = controller self._lock = threading.Lock() self._pending_updates = {} self._last_update = HydrusData.GetNow() self._my_flush_job = self._controller.CallRepeating( 5, 60, self.REPEATINGFlush ) def REPEATINGFlush( self ): self.Flush() def Flush( self ): with self._lock: if len( self._pending_updates ) > 0: content_updates = [] for ( hash, ( preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta ) ) in self._pending_updates.items(): row = ( hash, 
preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta ) content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILE_VIEWING_STATS, HC.CONTENT_UPDATE_ADD, row ) content_updates.append( content_update ) service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : content_updates } # non-synchronous self._controller.Write( 'content_updates', service_keys_to_content_updates, do_pubsubs = False ) self._pending_updates = {} def Update( self, viewtype, hash, views_delta, viewtime_delta ): if not HG.client_controller.new_options.GetBoolean( 'file_viewing_statistics_active' ): return with self._lock: preview_views_delta = 0 preview_viewtime_delta = 0 media_views_delta = 0 media_viewtime_delta = 0 if viewtype == 'preview': preview_views_delta = views_delta preview_viewtime_delta = viewtime_delta elif viewtype == 'media': media_views_delta = views_delta media_viewtime_delta = viewtime_delta if hash not in self._pending_updates: self._pending_updates[ hash ] = ( preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta ) else: ( existing_preview_views_delta, existing_preview_viewtime_delta, existing_media_views_delta, existing_media_viewtime_delta ) = self._pending_updates[ hash ] self._pending_updates[ hash ] = ( existing_preview_views_delta + preview_views_delta, existing_preview_viewtime_delta + preview_viewtime_delta, existing_media_views_delta + media_views_delta, existing_media_viewtime_delta + media_viewtime_delta ) row = ( hash, preview_views_delta, preview_viewtime_delta, media_views_delta, media_viewtime_delta ) content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_FILE_VIEWING_STATS, HC.CONTENT_UPDATE_ADD, row ) service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : [ content_update ] } HG.client_controller.pub( 'content_updates_data', service_keys_to_content_updates ) HG.client_controller.pub( 'content_updates_gui', service_keys_to_content_updates ) class 
LocalBooruCache( object ): def __init__( self, controller ): self._controller = controller self._lock = threading.Lock() self._RefreshShares() self._controller.sub( self, 'RefreshShares', 'refresh_local_booru_shares' ) self._controller.sub( self, 'RefreshShares', 'restart_client_server_service' ) def _CheckDataUsage( self ): if not self._local_booru_service.BandwidthOK(): raise HydrusExceptions.InsufficientCredentialsException( 'This booru has used all its monthly data. Please try again next month.' ) def _CheckFileAuthorised( self, share_key, hash ): self._CheckShareAuthorised( share_key ) info = self._GetInfo( share_key ) if hash not in info[ 'hashes_set' ]: raise HydrusExceptions.NotFoundException( 'That file was not found in that share.' ) def _CheckShareAuthorised( self, share_key ): self._CheckDataUsage() info = self._GetInfo( share_key ) timeout = info[ 'timeout' ] if timeout is not None and HydrusData.TimeHasPassed( timeout ): raise HydrusExceptions.InsufficientCredentialsException( 'This share has expired.' ) def _GetInfo( self, share_key ): try: info = self._keys_to_infos[ share_key ] except: raise HydrusExceptions.NotFoundException( 'Did not find that share on this booru.' 
) if info is None: info = self._controller.Read( 'local_booru_share', share_key ) hashes = info[ 'hashes' ] info[ 'hashes_set' ] = set( hashes ) media_results = self._controller.Read( 'media_results', hashes ) info[ 'media_results' ] = media_results hashes_to_media_results = { media_result.GetHash() : media_result for media_result in media_results } info[ 'hashes_to_media_results' ] = hashes_to_media_results self._keys_to_infos[ share_key ] = info return info def _RefreshShares( self ): self._local_booru_service = self._controller.services_manager.GetService( CC.LOCAL_BOORU_SERVICE_KEY ) self._keys_to_infos = {} share_keys = self._controller.Read( 'local_booru_share_keys' ) for share_key in share_keys: self._keys_to_infos[ share_key ] = None def CheckShareAuthorised( self, share_key ): with self._lock: self._CheckShareAuthorised( share_key ) def CheckFileAuthorised( self, share_key, hash ): with self._lock: self._CheckFileAuthorised( share_key, hash ) def GetGalleryInfo( self, share_key ): with self._lock: self._CheckShareAuthorised( share_key ) info
#I. cleangot(): clean dfgot from wikiling.de
#1. insert links()
#2. every lemma() to own row
#3. occurences() to own col
#4. certainty() to own col
#5. reconstructedness() to own col
#6.a clean col lemma
#6.b clean col lemma
#6. translations()
#7.a activate got-ipa transcription file
#8 clean English translations
#9.a activate dictionary for translating pos-tag-names from wikiling to nltk
#9.b translate pos-tags from wikiling to nltk notation
#11. write empty file to fill in substitutions
#12. write clean dfgot.csv

#II. cleanuralonet(): clean uralonet_raw.csv from uralonet.nytud.hu
#1. turn sound to C for consonant or V for vowel
#2. get phonotactic profile of word
#3. activate transcription files with copy2epitran if not already activated while cleaning dfgot
#4. clean uralonet_raw.csv

#III. mine and clean zaicz.csv
#1. mine pdf
#2. create dictionary from webscraped txts of English-Hungarian dictionary (web-address: )
#3. create dictionary of word-origin pairs from zaicz.pdf
#4. read annex of zaicz pdf and transform to csv (main input file)
#5. add missing translations with google translate
#6. add pos-tags with spacy
#!works on python 3.5. and spacy 2.0.12 (Hungarian pos_tagger)
#!create virtual environment via anaconda navigator for this function
#7. converts spacy's pos-tags to nltk's. https://spacy.io/api/annotation
#8.a translate origin-tags from Hungarian to English with google translate
#8.b insert translated origin-tags to df
#9.a convert origin to tags "U, FU, Ug"
#9.b insert new origin-tags "U, FU, Ug"
#10. remove brackets
#11.
# translate info-col to English
#11.b insert translated info-col to df

#imports for cleangot() and cleanuralonet()
import pandas as pd
import re #reconstr(), clemma(), ctransl()
import epitran #transcribe gothic to ipa
import os #copy2epitran()
import shutil #copy2epitran()
import itertools #for deletion()
from lingpy import ipa2tokens
from loanpy import word2struc

#imports for zaicz.csv
from bs4 import BeautifulSoup
import pdfminer
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.layout import LAParams
from pdfminer.converter import TextConverter
from io import StringIO
from pdfminer.pdfpage import PDFPage
from googletrans import Translator

translator = Translator()

# module-level caches, filled lazily by the helpers below
hu2en_origdict={}
origtagdict={}
posdict={}

# IPA consonant and vowel inventories used for phonotactic profiling
cns="jwʘǀǃǂǁk͡pɡ͡bcɡkqɖɟɠɢʄʈʛbb͡ddd̪pp͡ttt̪ɓɗb͡βk͡xp͡ɸq͡χɡ͡ɣɢ͡ʁc͡çd͡ʒt͡ʃɖ͡ʐɟ͡ʝʈ͡ʂb͡vd̪͡z̪d̪͡ðd̪͡ɮ̪d͡zd͡ɮd͡ʑp͡ft̪͡s̪t̪͡ɬ̪t̪͡θt͡st͡ɕt͡ɬxçħɣʁʂʃʐʒʕʝχfss̪vzz̪ðɸβθɧɕɬɬ̪ɮʑɱŋɳɴmnn̪ɲʀʙʟɭɽʎrr̪ɫɺɾhll̪ɦðʲt͡ʃʲnʲʃʲC"
vow="ɑɘɞɤɵʉaeiouyæøœɒɔəɘɵɞɜɛɨɪɯɶʊɐʌʏʔɥɰʋʍɹɻɜ¨ȣ∅"

# change to folder "data/pre" next to this file
# FIX: build the path with os.path.join instead of concatenating a
# Windows-only backslash literal, so the script also runs on POSIX systems
os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data", "pre"))

#I. Clean dfgot
#1. insert col links
def links(n_pages=280, per_page=20, n_entries=5582):
    """Return the wikiling source URL for every entry.

    Generalized: page count, entries per page and total entry count are now
    parameters; links() with no arguments reproduces the original hard-coded
    result of 5582 URLs over 280 pages of 20 entries each.
    """
    linkliste=[]
    for i in range(1, n_pages+1): #number of pages
        linkliste.append(per_page*["https://koeblergerhard.de/wikiling/?query=&f=got&mod=0&page="+str(i)]) #entries per page
    linkliste=[item for sublist in linkliste for item in sublist][:n_entries] #flatten list
    return linkliste

#2. explode lemmas
def explemma(graw):
    """Split comma-separated alternative lemma forms into rows of their own."""
    graw.at[1373,"got_lemma"]=str(graw.iloc[1373,1]).replace("lat.got., ","") #clean line 1373
    graw["got_lemma"]=graw["got_lemma"].str.split(", ") #several base forms are separated by ', ' in the lemma column
    graw=graw.explode("got_lemma").reset_index(drop=True) #insert these alternative forms as new rows
    return graw

#3. occurences to own col
def occurences(entry):
    """Return every run of digits in *entry* as a list of strings."""
    return re.findall("[0-9]+",entry)

#4. level of certainty to own col
def certainty(entry):
    """Return 'uncertain' if the lemma is marked with '?', else 'certain'."""
    if "?" in entry:
        return "uncertain"
    else:
        return "certain"
#5.
# reconstructedness to own col
def reconstr(entry):
    """Classify how much of a lemma is reconstructed (asterisk notation).

    See https://www.koeblergerhard.de/got/3A/got_vorwort.html (ctrl+f: "Stern").
    Returns "form" if only the basic form is reconstructed, "word" if the
    word itself is reconstructed, "" otherwise.
    """
    if re.search(r"\*\?? ?[0-9]* ?$",entry) is not None:
        return "form" #other forms documented, basic form reconstructed
    elif re.search(r"^\*",entry) is not None:
        return "word" #other forms not documented, word itself reconstructed
    else:
        return ""

#6.a clean col "got_lemma"
def helpclean(filename,column):
    """Return the set of characters occurring in *column* of csv *filename*."""
    chars=[]
    df=pd.read_csv(filename,encoding="utf-8")
    df=df.fillna("")
    for i in df[column].tolist():
        chars+=i
    return set(chars)

#6.b clean col "got_lemma"
def clemma(entry):
    """Normalize a lemma: drop 'Pl.', lowercase, keep letters only.

    FIX: the original discarded the results of str.replace() and str.lower()
    (strings are immutable), so neither transformation was ever applied;
    the return values are now assigned back to *entry*.
    """
    entry = entry.replace("Pl.","")
    entry = entry.lower()
    return re.sub(r"[^a-zA-ZÀ-ÿþāēīōūƕ]+", "", entry) #use helpclean() to find out what to keep

#7. copy files to epitran\data\map and epitran\data\post to piggyback epitran
def copy2epitran():
    """Copy the custom transliteration tables into epitran's data folders."""
    # NOTE(review): these paths are Windows-specific (backslash literals);
    # confirm before running on POSIX
    epipath=epitran.__file__[:-(len("\epitran.py"))]+r"data"
    dstmap = epipath+r"\map"
    dstpost = epipath+r"\post"
    srcgotmap = os.getcwd()+r"\got-translit.csv"
    srcgotpost = os.getcwd()+r"\got-translit.txt"
    srcuew = os.getcwd()+r"\uew-scrape.csv"
    shutil.copy(srcgotmap,dstmap)
    shutil.copy(srcgotpost,dstpost) #special rules go to folder "post"
    shutil.copy(srcuew,dstmap)

#8 clean English translations
def ctransl(entry):
    """Clean an English translation string into 'word, word, ...' form."""
    entry=re.sub(r" ?\([^)]+\)", "", entry) #remove parentheses and their content
    entry=entry.replace(', ',',').replace(' (','(').replace(' ','_')
    entry=re.sub(r"[^0-9A-Za-z,_äéþōƕ]+", "", entry) #use helpclean() to find out what to keep
    entry=entry.replace(",", ", ")
    return entry

#9.a activate dictionary of wikiling-pos-tags to nltk-pos-tags
def getposdict():
    """Populate the module-level posdict mapping wikiling tags to nltk tags."""
    poskeys="Abkürzung,Adj.,Adv.,Art.,Buchstabe,F.,Interj.,Konj.,LN.,M.,N.,Num.,ON.,Partikel,PN.,Präp.,"\
    "Pron.,Sb.,V.,Wort," #last comma important
    posvalues=["r","a","r","r","r","n","r","r","n","n","n","r","n","r","n","r","r","n","v","n","nvar"]
    global posdict
    posdict = dict(zip(poskeys.split(','), posvalues))

#9.b translate wikiling pos-tags to nltk-pos tags
def nltktags(entry):
    """Translate a ', '-separated wikiling pos string; 'nvar' if unknown."""
    if posdict=={}:
        getposdict()
    nltktags=""
    for i in entry.split(", "):
        try:
            nltktags+=posdict[i]
        except KeyError:
            return "nvar"
    return nltktags

#11. write fillitout.csv
def fillitout(column): #automate this function later
    """Write fillitout.csv listing every token of *column* with an empty
    'substitution' column, to be filled out by hand."""
    fillout = pd.DataFrame({"to_substitute" : sorted(list(set([i for s in column.apply(ipa2tokens, merge_vowels=False, merge_geminates=False).tolist() for i in s])))})
    fillout["substitution"]=""
    fillout.to_csv("fillitout.csv",encoding="utf-8",index=False)

def cleangot(filename): #e.g. g_raw.csv
    """Clean the raw wikiling dump, write dfgot.csv and return the frame."""
    graw=pd.read_csv(filename, encoding="utf-8")
    graw=graw.rename({"Lemma":"got_lemma"}, axis=1) #rename column
    graw=graw.drop(['#', 'Sprachen'], axis=1)
    graw=graw.fillna("") #else problems with nans
    graw["links"] = links()
    graw=explemma(graw)
    graw["occurences"]=graw["got_lemma"].apply(occurences)
    graw["got_certainty"]=graw["got_lemma"].apply(certainty)
    graw["got_reconstructedness"]=graw["got_lemma"].apply(reconstr)
    graw["got_lemma"]=graw["got_lemma"].apply(clemma)
    graw=graw[graw["got_lemma"].astype(bool)] #remove rows where lemma turned empty after cleaning
    copy2epitran() #copy files to epitran-folder
    graw["got_ipa"]=graw["got_lemma"].apply(epitran.Epitran("got-translit").transliterate)
    graw["got_en"]=graw["Englische Bedeutung"].apply(ctransl)
    graw["got_pos"]=graw["Wortart"].apply(nltktags)
    gotclean=graw
    gotclean.to_csv("dfgot.csv",encoding="utf-8",index=False)
    return gotclean

#################################################################################################################
#II. Clean uralonet_raw.csv
#1. turn sound to C for consonant or V for vowel
#2. get phonotactic profile of word
#3. activate transcription files with copy2epitran if not already activated while cleaning dfgot
#4.
# clean uralonet_raw.csv
def cleanuralonet(filename): #in: uralonet_raw.csv
    """Transliterate uralonet_raw.csv, write uralonet.csv, return the frame."""
    df=pd.read_csv(filename,encoding="utf-8")
    copy2epitran()
    df["New"]=df.New_orth.apply(epitran.Epitran('hun-Latn').transliterate)
    df["Old"]=df.Old_orth.apply(epitran.Epitran('uew-scrape').transliterate)
    df["old_struc"]=df.Old.apply(word2struc) #phonotactic profile, e.g. CVCV
    df.to_csv("uralonet.csv",encoding="utf-8",index=False)
    return df

###################################################################################################################
#III mine and clean zaicz.csv
#1. mine pdf
#2. create dictionary from webscraped txts of English-Hungarian dictionary (web-address: )
#3. create dictionary of word-origin pairs from zaicz.pdf
#4. read annex of zaicz pdf and transform to csv (main input file)
#5. add missing translations with google translate
#6. add pos-tags with spacy
#!works on python 3.5. and spacy 2.0.12 (Hungarian pos_tagger)
#!create virtual environment via anaconda navigator for this function
#7. converts spacy's pos-tags to nltk's. https://spacy.io/api/annotation
#8.a translate origin-tags from Hungarian to English with google translate
#8.b insert translated origin-tags to df
#9.a convert origin to tags "U, FU, Ug"
#9.b insert new origin-tags "U, FU, Ug"
#10. remove brackets
#11. translate info-col to English
#11.b insert translated info-col to df

#1. mine pdf
def get_pdf_file_content(path_to_pdf):
    """Extract the full text of the PDF at *path_to_pdf* as one string.

    FIX: the original leaked the file handle, the converter and the StringIO
    buffer if pdfminer raised while parsing; cleanup now happens in a
    finally block.
    """
    resource_manager = PDFResourceManager(caching=True)
    out_text = StringIO()
    laParams = LAParams()
    text_converter = TextConverter(resource_manager, out_text,laparams=laParams)
    fp = open(path_to_pdf, 'rb')
    try:
        interpreter = PDFPageInterpreter(resource_manager,text_converter)
        for page in PDFPage.get_pages(fp,pagenos=set(),maxpages=0,password="",caching=True,check_extractable=True):
            interpreter.process_page(page)
        text= out_text.getvalue()
    finally:
        fp.close()
        text_converter.close()
        out_text.close()
    return text

#2.
# create dictionary from webscraped txts of English-Hungarian dictionary (web-address: )
def getdict_huen():
    """Build a Hungarian->English dictionary from the scraped szotar*.txt files
    and write it to hunendict.py.

    NOTE(review): returns the *string* "hunendict=..." that was written to
    disk, not the dict itself — confirm callers expect that.
    """
    hunendict={}
    for i in range(ord('a'), ord('z')+1): #dictionary entries from a-z
        print(chr(i))
        if i != ord("q"): #only letter missing from dictionary's website is "q"
            hul=[]
            enl=[]
            subdict={}
            soup1=BeautifulSoup(open("szotar"+chr(i)+".txt").read())
            soup1=soup1.body #cut out trash from beginning and end
            for s in soup1.select('script'): #cut off anything between tag "script"(bottom)
                s.extract()
            for s in soup1.select('center'): #cut off anything between tag "center" (top)
                s.extract()
            zl= re.sub(r'\<.*?\>', '', str(soup1)) #remove tags <body> and <html> from top and bottom
            if i == ord("z"): #z has some extra strings in the end that cause errors
                zl=zl[1:-9] #cut off the troublesome strings
            zlsplit=zl.split("\n\n ")[1:-1] #cut off first and last element, they cause errors
            for j in zlsplit:
                wordpair=j.split(" -» ") #split into hu and en word
                hul.append(wordpair[0].replace("õ","ő").replace("û","ű")) #correct wrong encoding
                enl.append(wordpair[1])
            for index, j in enumerate(hul):
                if j in hunendict:
                    hunendict[j].append(enl[index]) #add meaning if already in dict
                else:
                    hunendict[j]=[enl[index]] #else create new entry
    hunendict="hunendict="+str(hunendict)
    with open('hunendict.py','w',encoding="utf-8") as data:
        data.write(hunendict)
    return hunendict

#3. create dictionary of word-origin pairs from zaicz.pdf
def getdictorig():
    """Build a word->origin-tag dictionary from the second half of the zaicz
    annex text and write it to dictorig.py.

    NOTE(review): `zaicz` is not defined in this function's scope (it is a
    local of zaicz2csv) — calling this as-is raises NameError; it likely
    needs the text passed in as a parameter. Also note the loop variable `i`
    is shadowed by the inner `for i in paratxt.split(", ")` loop.
    """
    zaiczcsv=pd.DataFrame(columns=['word','year','info','disambiguated','suffix',"en"]) #dffinal
    #zaicz1: year, zaicz2: origin
    zaicz2=zaicz.split(' \n \n\n \n\n\x0cA SZAVAK EREDET SZERINTI CSOPORTOSÍTÁSA* \n\n \n \n \n \n \n \n',1)[1]
    dictorig={}
    zlist=zaicz2.split("\n \n")
    for index,i in enumerate(zlist):
        if index<101:
            # first 101 chunks: origin tag is on the chunk's first line
            para=i.split("\n",1)
            paratag=para[0]
            if len(para)>1:
                paratxt=para[1]
                for i in paratxt.split(", "):
                    if i[-1]=="?":
                        dictorig[i.replace("x0c","").replace("\n","").replace("?","").replace(" ","")]=paratag+"?"
                    else:
                        dictorig[i.replace("x0c","").replace("\n","").replace(" ","")]=paratag
        if index>=101 and (index % 2) ==0:
            # afterwards tags and word lists alternate chunk by chunk
            for j in i.split(", "):
                if i[-1]=="?":
                    dictorig[j.replace("x0c","").replace("\n","").replace(" ","")]=zlist[index-1]+"?"
                else:
                    dictorig[j.replace("x0c","").replace("\n","").replace(" ","")]=zlist[index-1]
    dictorig="dictorig="+str(dictorig)
    with open('dictorig.py','w',encoding="utf-8") as data:
        data.write(dictorig)
    return dictorig

#4. read annex of zaicz pdf and transform to csv (main input file)
def zaicz2csv():
    """Mine the zaicz annex PDF into the main word/year/info csv frame."""
    try:
        from hunendict import hunendict
    except:
        print("create hunendict with getdict_huen()")
    try:
        from dictorig import dictorig
    except:
        # NOTE(review): message says getdict_huen() but the helper that
        # creates dictorig.py is getdictorig() — runtime string left untouched
        print("create dictorig with getdict_huen()")
    zaiczcsv=pd.DataFrame(columns=['word','year','info','disambiguated','suffix',"en","orig","pos_hun", "wordipa"]) #dffinal
    #zaicz1: year, zaicz2: origin
    path_to_pdf = r"C:\Users\Viktor\OneDrive\PhD cloud\Vorgehensweisen\loanpy6\szotar\TAMOP_annex.pdf"
    zaicz=get_pdf_file_content(path_to_pdf)
    zaicz=zaicz.replace("Valószínűleg ősi szavak","Valószínűleg ősi szavak\n") #correct typo in dictionary
    zaicz1=zaicz.split(' \n \n\n \n\n\x0cA SZAVAK EREDET SZERINTI CSOPORTOSÍTÁSA* \n\n \n \n \n \n \n \n',1)[0]
    zaicz2=zaicz.split(' \n \n\n \n\n\x0cA SZAVAK EREDET SZERINTI CSOPORTOSÍTÁSA* \n\n \n \n \n \n \n \n',1)[1]
    #zaicz1 (year):
    for index,i in enumerate(zaicz1.split('[')): #list of year-word pairs
        if ':' in i: #otherwise error
            zaiczcsv.at[index,'word']=i.split(':')[1].replace(" ","").replace("\n","") .replace("1951-től","").replace("1000-ig","")
            zaiczcsv.at[index,'year']=re.sub("[^0-9]", "", i.split(':')[0].split(',')[-1]) #the sure year
            zaiczcsv.at[index,'info']=i.split(':')[0][:-1].replace("\n","") #all other info
    for index,row in zaiczcsv.iterrows():
        zaiczcsv.at[index,'word']=row['word'].split(',') #explode only works with lists, hence split()
    #explode, reset index,drop old index,remove rows with empty cells in column "word"
    zcsv=zaiczcsv.explode('word')
    zcsv=zcsv.reset_index(drop=True)
    zcsv= zcsv[zcsv.word != '']
    # NOTE(review): nesting of the statements below is reconstructed from a
    # whitespace-mangled extraction — verify against the original file.
    for index,row in zcsv.iterrows():
        if len(row['year'])==2: #e.g. 20 is left from "20.sz" (20th century) so we'll append "00"
            zcsv.at[index,'year']=row['year']+'00'
        if row['word'][-2:].isnumeric(): #remove headers (like "1001-1100")
            zcsv.at[index,'word']=row['word'][:-9] #4+4+1 (year1+hyphen+year2)
        zcsv.at[index,'disambiguated']=row['word']
        if row['word'][-1].isnumeric(): #disambiguation to other column
            zcsv.at[index,'word']=row['word'][:-1]
        zcsv.at[index,'word']=row['word'].replace("~","/").split("/") #explode needs list, so
            # NOTE(review): chunk begins mid-function (cross-pol metric helper);
            # preceding lines not visible — "... both antpols"
            # both antpols
            for ap in antpols:
                per_ant_corr_cross_pol_metrics[(a1, ap)] = np.nanmax([np.nanmean(d) for d in diffs])
    return per_ant_corr_cross_pol_metrics


def load_antenna_metrics(filename):
    """Load cut decisions and metrics from an HDF5 into python dictionary.

    Loading is handled via hera_qm.metrics_io.load_metric_file

    Parameters
    ----------
    filename : str
        Full path to the filename of the metric to load. Must be either
        HDF5 (recommended) or JSON (Depreciated in Future) file type.

    Returns
    -------
    metrics : dict
        Dictionary of metrics stored in the input file.

    """
    return metrics_io.load_metric_file(filename)


#######################################################################
# High level functionality for HERA
#######################################################################


class AntennaMetrics():
    """Container for holding data and meta-data for ant metrics calculations.

    This class creates an object for holding relevant visibility data and
    metadata, and provides interfaces to two antenna metrics: one for
    identifying dead / not correlating antennas and the other for identifying
    cross-polarized antennas. These metrics can be used iteratively to identify
    bad antennas. The object handles all storage of metrics, and supports
    writing metrics to an HDF5 filetype. The analysis functions are designed to
    work on raw data from one or more observations with all four polarizations.

    """

    # NOTE(review): apriori_xants=[] is a mutable default argument; it is only
    # iterated (never mutated) here, but replacing it with None would be safer.
    def __init__(self, sum_files, diff_files=None, apriori_xants=[],
                 Nbls_per_load=None):
        """Initialize an AntennaMetrics object and load mean visibility amplitudes.

        Parameters
        ----------
        sum_files : str or list of str
            Path to file or files of raw sum data to calculate antenna metrics on
        diff_files : str or list of str
            Path to file or files of raw diff data to calculate antenna metrics on
            If not provided, even/odd correlations will be inferred with interleaving.
            Assumed to match sum_files in metadata. Flags will be ORed with sum_files.
        apriori_xants : list of integers or tuples, optional
            List of integer antenna numbers or antpol tuples e.g. (0, 'Jee') to mark
            as excluded apriori. These are included in self.xants, but not
            self.dead_ants or self.crossed_ants when writing results to disk.
        Nbls_per_load : integer, optional
            Number of baselines to load simultaneously. Trades speed for memory
            efficiency. Default None means load all baselines.

        Attributes
        ----------
        hd_sum : HERAData
            HERAData object generated from sum_files.
        hd_diff : HERAData
            HERAData object generated from diff_files.
        ants : list of tuples
            List of antenna-polarization tuples to assess
        antnums : list of ints
            List of antenna numbers
        antpols : List of str
            List of antenna polarization strings. Typically ['Jee', 'Jnn']
        bls : list of ints
            List of baselines in HERAData object.
        datafile_list_sum : list of str
            List of sum data filenames that went into this calculation.
        datafile_list_diff : list of str
            List of diff data filenames that went into this calculation.
        abs_vis_stats : dictionary
            Dictionary mapping baseline keys e.g. (0, 1, 'ee') to single floats
            representing visibility amplitudes.
        version_str : str
            The version of the hera_qm module used to generate these metrics.
        history : str
            History to append to the metrics files when writing out files.

        """
        # Instantiate HERAData object and figure out baselines
        from hera_cal.io import HERAData
        if isinstance(sum_files, str):
            sum_files = [sum_files]
        if isinstance(diff_files, str):
            diff_files = [diff_files]
        if (diff_files is not None) and (len(diff_files) != len(sum_files)):
            raise ValueError(f'The number of sum files ({len(sum_files)}) does not match the number of diff files ({len(diff_files)}).')
        self.datafile_list_sum = sum_files
        self.hd_sum = HERAData(sum_files)
        if diff_files is None or len(diff_files) == 0:
            self.datafile_list_diff = None
            self.hd_diff = None
        else:
            self.datafile_list_diff = diff_files
            self.hd_diff = HERAData(diff_files)
        if len(self.hd_sum.filepaths) > 1:
            # only load baselines in all files
            self.bls = sorted(set.intersection(*[set(bls) for bls in self.hd_sum.bls.values()]))
        else:
            self.bls = self.hd_sum.bls

        # Figure out polarizations in the data:
        from hera_cal.utils import split_bl, comply_pol, split_pol
        self.pols = set([bl[2] for bl in self.bls])
        self.cross_pols = [pol for pol in self.pols if split_pol(pol)[0] != split_pol(pol)[1]]
        self.same_pols = [pol for pol in self.pols if split_pol(pol)[0] == split_pol(pol)[1]]

        # Figure out which antennas are in the data
        self.split_bl = split_bl  # prevents the need for importing again later
        self.ants = set([ant for bl in self.bls for ant in split_bl(bl)])
        self.antnums = set([ant[0] for ant in self.ants])
        self.antpols = set([ant[1] for ant in self.ants])

        # Parse apriori_xants
        if not (isinstance(apriori_xants, list) or isinstance(apriori_xants, np.ndarray)):
            raise ValueError('apriori_xants must be a list or numpy array.')
        self.apriori_xants = set([])
        for ant in apriori_xants:
            if isinstance(ant, int):
                # a bare antenna number excludes it in every polarization
                for ap in self.antpols:
                    self.apriori_xants.add((ant, ap))
            elif isinstance(ant, tuple):
                if (len(ant) != 2) or (comply_pol(ant[1]) not in self.antpols):
                    raise ValueError(f'{ant} is not a valid entry in apriori_xants.')
                self.apriori_xants.add((ant[0], comply_pol(ant[1])))
            else:
                raise ValueError(f'{ant} is not a valid entry in apriori_xants.')

        # Set up metadata and summary stats
        self.version_str = hera_qm_version_str
        self.history = ''
        self._reset_summary_stats()

        # Load and summarize data
        self._load_corr_stats(Nbls_per_load=Nbls_per_load)

    def _reset_summary_stats(self):
        """Reset all the internal summary statistics back to empty."""
        self.xants, self.crossed_ants, self.dead_ants = [], [], []
        self.iter = 0
        self.removal_iteration = {}
        self.all_metrics = {}
        self.final_metrics = {}
        for ant in self.apriori_xants:
            self.xants.append(ant)
            # -1 marks apriori removal, i.e. before iterative flagging
            self.removal_iteration[ant] = -1

    def _load_corr_stats(self, Nbls_per_load=None):
        """Loop through groups of baselines to calculate self.corr_stats using
        calc_corr_stats()
        """
        if Nbls_per_load is None:
            bl_load_groups = [self.bls]
        else:
            bl_load_groups = [self.bls[i:i + Nbls_per_load] for i in range(0, len(self.bls), Nbls_per_load)]

        # loop through baseline load groups, computing corr_stats
        self.corr_stats = {}
        for blg in bl_load_groups:
            data_sum, flags, _ = self.hd_sum.read(bls=blg, axis='blt')
            data_diff = None
            if self.hd_diff is not None:
                data_diff, flags_diff, _ = self.hd_diff.read(bls=blg, axis='blt')
                # OR diff flags into sum flags, per the class contract
                for bl in flags:
                    flags[bl] |= flags_diff[bl]
            self.corr_stats.update(calc_corr_stats(data_sum, data_diff=data_diff, flags=flags))

    def _find_totally_dead_ants(self, verbose=False):
        """Flag antennas whose median correlation coefficient is 0.0.

        These antennas are marked as dead. They do not appear in recorded antenna
        metrics or zscores. Their removal iteration is -1 (i.e. before iterative
        flagging).
        """
        # assign corr_stats to antennas
        corr_stats_by_ant = {ant: [] for ant in self.ants}
        for bl in self.corr_stats:
            for ant in self.split_bl(bl):
                corr_stats_by_ant[ant].append(self.corr_stats[bl])

        # remove antennas that are totally dead and all nans
        for ant, corrs in corr_stats_by_ant.items():
            med = np.nanmedian(corrs)
            if ~np.isfinite(med) or (med == 0):
                self.xants.append(ant)
                self.dead_ants.append(ant)
                self.removal_iteration[ant] = -1
                if verbose:
                    print(f'Antenna {ant} appears totally dead and is removed.')

    def _run_all_metrics(self):
        """Local call for all metrics as part of iterative flagging method.
        """
        # Compute all raw metrics
        metNames = []
        metVals = []
        metNames.append('corr')
        metVals.append(corr_metrics(self.corr_stats, xants=self.xants, pols=self.same_pols))
        metNames.append('corrXPol')
        metVals.append(corr_cross_pol_metrics(self.corr_stats, xants=self.xants))

        # Save all metrics
        metrics = {}
        for metric, metName in zip(metVals, metNames):
            metrics[metName] = metric
            for key in metric:
                # final_metrics keeps the most recent value per antenna
                if metName in self.final_metrics:
                    self.final_metrics[metName][key] = metric[key]
                else:
                    self.final_metrics[metName] = {key: metric[key]}
        self.all_metrics.update({self.iter: metrics})

    def iterative_antenna_metrics_and_flagging(self, crossCut=0, deadCut=0.4, verbose=False):
        """Run corr metric and crosspol metrics and stores results in self.

        Parameters
        ----------
        crossCut : float, optional
            Cut in cross-pol correlation metric below which to flag antennas as
            cross-polarized. Default is 0.
        deadCut : float, optional
            Cut in correlation metric below which antennas are most likely dead
            / not correlating. Default is 0.4.

        """
        self._reset_summary_stats()
        self._find_totally_dead_ants(verbose=verbose)
        self.crossCut, self.deadCut = crossCut, deadCut

        # iteratively remove antennas, removing only the worst antenna
        for iteration in range(len(self.antpols) * len(self.ants)):
            self.iter = iteration
            self._run_all_metrics()
            worstDeadCutDiff = 1
            worstCrossCutDiff = 1

            # Find most likely dead/crossed antenna
            deadMetrics = {ant: metric for ant, metric in self.all_metrics[iteration]['corr'].items() if np.isfinite(metric)}
            crossMetrics = {ant: np.max(metric) for ant, metric in self.all_metrics[iteration]['corrXPol'].items() if np.isfinite(metric)}
            if (len(deadMetrics) == 0) or (len(crossMetrics) == 0):
                break  # no unflagged antennas remain
            worstDeadAnt = min(deadMetrics, key=deadMetrics.get)
            worstDeadCutDiff = np.abs(deadMetrics[worstDeadAnt]) - deadCut
            worstCrossAnt = min(crossMetrics, key=crossMetrics.get)
            worstCrossCutDiff = crossMetrics[worstCrossAnt] - crossCut

            # Find the single worst antenna, remove it, log it, and run again
            if (worstCrossCutDiff <= worstDeadCutDiff) and (worstCrossCutDiff < 0):
                # if crossed remove both polarizations
                for antpol in self.antpols:
                    crossed_ant = (worstCrossAnt[0], antpol)
                    self.xants.append(crossed_ant)
                    self.crossed_ants.append(crossed_ant)
                    self.removal_iteration[crossed_ant] = iteration
                    if verbose:
                        print(f'On iteration {iteration} we flag {crossed_ant} with cross-pol corr metric of {crossMetrics[worstCrossAnt]}.')
            elif (worstDeadCutDiff < worstCrossCutDiff) and (worstDeadCutDiff < 0):
                dead_ants = set([worstDeadAnt])
                for dead_ant in dead_ants:
                    self.xants.append(dead_ant)
                    self.dead_ants.append(dead_ant)
                    self.removal_iteration[dead_ant] = iteration
                    if verbose:
                        print(f'On iteration {iteration} we flag {dead_ant} with corr metric z of {deadMetrics[worstDeadAnt]}.')
            else:
                break

    # NOTE(review): chunk ends inside this method's docstring; the remainder
    # is not visible here.
    def save_antenna_metrics(self, filename, overwrite=False):
        """Output all meta-metrics and cut decisions to HDF5 file.
Saves all cut decisions and meta-metrics in an HDF5 that can be loaded back into a dictionary using hera_qm.ant_metrics.load_antenna_metrics() Parameters ---------- filename :
# Copyright (c) 2018, 2019, 2020 Nordic Semiconductor ASA # Copyright 2018, 2019 Foundries.io Ltd # # SPDX-License-Identifier: Apache-2.0 ''' Parser and abstract data types for west manifests. ''' import configparser import enum import errno import logging import os from pathlib import PurePosixPath, Path import re import shlex import subprocess import sys from typing import Any, Callable, Dict, Iterable, List, NoReturn, \ NamedTuple, Optional, Set, Tuple, TYPE_CHECKING, Union from packaging.version import parse as parse_version import pykwalify.core import yaml from west import util from west.util import PathType import west.configuration as cfg # # Public constants # #: Index in a Manifest.projects attribute where the `ManifestProject` #: instance for the workspace is stored. MANIFEST_PROJECT_INDEX = 0 #: A git revision which points to the most recent `Project` update. MANIFEST_REV_BRANCH = 'manifest-rev' #: A fully qualified reference to `MANIFEST_REV_BRANCH`. QUAL_MANIFEST_REV_BRANCH = 'refs/heads/' + MANIFEST_REV_BRANCH #: Git ref space used by west for internal purposes. QUAL_REFS_WEST = 'refs/west/' #: The latest manifest schema version supported by this west program. #: #: This value changes when a new version of west includes new manifest #: file features not supported by earlier versions of west. SCHEMA_VERSION = '0.10' # MAINTAINERS: # # If you want to update the schema version, you need to make sure that # it has the exact same value as west.version.__version__ when the # next release is cut. # # Internal helpers # # Type aliases # The value of a west-commands as passed around during manifest # resolution. It can become a list due to resolving imports, even # though it's just a str in each individual file right now. WestCommandsType = Union[str, List[str]] # Type for the importer callback passed to the manifest constructor. # (ImportedContentType is just an alias for what it gives back.) 
ImportedContentType = Optional[Union[str, List[str]]] ImporterType = Callable[['Project', str], ImportedContentType] # Type for an import map filter function, which takes a Project and # returns a bool. The various allowlists and blocklists are used to # create these filter functions. A None value is treated as a function # which always returns True. ImapFilterFnType = Optional[Callable[['Project'], bool]] # A list of group names to enable and disable, like ['+foo', '-bar']. GroupFilterType = List[str] # A list of group names belonging to a project, like ['foo', 'bar'] GroupsType = List[str] # The parsed contents of a manifest YAML file as returned by _load(), # after sanitychecking with validate(). ManifestDataType = Union[str, Dict] # Logging _logger = logging.getLogger(__name__) # Type for the submodule value passed through the manifest file. class Submodule(NamedTuple): '''Represents a Git submodule within a project.''' path: str name: Optional[str] = None # Submodules may be a list of values or a bool. SubmodulesType = Union[List[Submodule], bool] # Manifest locating, parsing, loading, etc. class _defaults(NamedTuple): remote: Optional[str] revision: str _DEFAULT_REV = 'master' _WEST_YML = 'west.yml' _SCHEMA_PATH = os.path.join(os.path.dirname(__file__), "manifest-schema.yml") _SCHEMA_VER = parse_version(SCHEMA_VERSION) _EARLIEST_VER_STR = '0.6.99' # we introduced the version feature after 0.6 _VALID_SCHEMA_VERS = [_EARLIEST_VER_STR, '0.7', '0.8', '0.9', SCHEMA_VERSION] def _is_yml(path: PathType) -> bool: return Path(path).suffix in ['.yml', '.yaml'] def _load(data: str) -> Any: try: return yaml.safe_load(data) except yaml.scanner.ScannerError as e: raise MalformedManifest(data) from e def _west_commands_list(west_commands: Optional[WestCommandsType]) -> \ List[str]: # Convert the raw data from a manifest file to a list of # west_commands locations. (If it's already a list, make a # defensive copy.) 
    if west_commands is None:
        return []
    elif isinstance(west_commands, str):
        return [west_commands]
    else:
        return list(west_commands)

def _west_commands_maybe_delist(west_commands: List[str]) -> WestCommandsType:
    # Convert a west_commands list to a string if there's
    # just one element, otherwise return the list itself.
    # (Inverse of _west_commands_list for single-element data, so values
    # round-trip through resolution the way they were written.)
    if len(west_commands) == 1:
        return west_commands[0]
    else:
        return west_commands

def _west_commands_merge(wc1: List[str], wc2: List[str]) -> List[str]:
    # Merge two west_commands lists, filtering out duplicates.
    # Order is preserved: all of wc1 first, then wc2 entries not in wc1.
    if wc1 and wc2:
        return wc1 + [wc for wc in wc2 if wc not in wc1]
    else:
        return wc1 or wc2

def _mpath(cp: Optional[configparser.ConfigParser] = None,
           topdir: Optional[PathType] = None) -> Tuple[str, str]:
    # Return the value of the manifest.path configuration option
    # in *cp*, a ConfigParser. If not given, create a new one and
    # load configuration options with the given *topdir* as west
    # workspace root.
    #
    # TODO: write a cfg.get(section, key)
    # wrapper, with friends for update and delete, to avoid
    # requiring this boilerplate.
    if cp is None:
        cp = cfg._configparser()
        cfg.read_config(configfile=cfg.ConfigFile.LOCAL, config=cp,
                        topdir=topdir)

    try:
        path = cp.get('manifest', 'path')
        # the manifest file name defaults to west.yml if not configured
        filename = cp.get('manifest', 'file', fallback=_WEST_YML)
        return (path, filename)
    except (configparser.NoOptionError, configparser.NoSectionError) as e:
        raise MalformedConfig('no "manifest.path" config option is set') from e

# Manifest import handling

def _default_importer(project: 'Project', file: str) -> NoReturn:
    # Fallback importer used when the Manifest caller supplies none:
    # any attempt to resolve an import is an error.
    raise ManifestImportFailed(project, file)

def _manifest_content_at(project: 'Project', path: PathType,
                         rev: str = QUAL_MANIFEST_REV_BRANCH) \
                         -> ImportedContentType:
    # Get a list of manifest data from project at path
    #
    # The data are loaded from Git at ref QUAL_MANIFEST_REV_BRANCH,
    # *NOT* the file system.
    #
    # If path is a tree at that ref, the contents of the YAML files
    # inside path are returned, as strings.
If it's a file at that # ref, it's a string with its contents. # # Though this module and the "west update" implementation share # this code, it's an implementation detail, not API. path = os.fspath(path) _logger.debug(f'{project.name}: looking up path {path} type at {rev}') # Returns 'blob', 'tree', etc. for path at revision, if it exists. out = project.git(['ls-tree', rev, path], capture_stdout=True, capture_stderr=True).stdout if not out: # It's a bit inaccurate to raise FileNotFoundError for # something that isn't actually file, but this is internal # API, and git is a content addressable file system, so close # enough! raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), path) ptype = out.decode('utf-8').split()[1] if ptype == 'blob': # Importing a file: just return its content. return project.read_at(path, rev=rev).decode('utf-8') elif ptype == 'tree': # Importing a tree: return the content of the YAML files inside it. ret = [] # Use a PurePosixPath because that's the form git seems to # store internally, even on Windows. pathobj = PurePosixPath(path) for f in filter(_is_yml, project.listdir_at(path, rev=rev)): ret.append(project.read_at(pathobj / f, rev=rev).decode('utf-8')) return ret else: raise MalformedManifest(f"can't decipher project {project.name} " f'path {path} revision {rev} ' f'(git type: {ptype})') class _import_map(NamedTuple): file: str name_allowlist: List[str] path_allowlist: List[str] name_blocklist: List[str] path_blocklist: List[str] path_prefix: str def _is_imap_list(value: Any) -> bool: # Return True if the value is a valid import map 'blocklist' or # 'allowlist'. Empty strings and lists are OK, and list nothing. return (isinstance(value, str) or (isinstance(value, list) and all(isinstance(item, str) for item in value))) def _imap_filter(imap: _import_map) -> ImapFilterFnType: # Returns either None (if no filter is necessary) or a # filter function for the given import map. 
    if any([imap.name_allowlist, imap.path_allowlist,
            imap.name_blocklist, imap.path_blocklist]):
        return lambda project: _is_imap_ok(imap, project)
    else:
        # no filtering requested; callers treat None as "accept everything"
        return None

def _ensure_list(item: Union[str, List[str]]) -> List[str]:
    # Converts item to a list containing it if item is a string, or
    # returns item.
    if isinstance(item, str):
        return [item]
    return item

def _is_imap_ok(imap: _import_map, project: 'Project') -> bool:
    # Return True if a project passes an import map's filters,
    # and False otherwise.
    # Normalize the four allow/block lists: a bare string is shorthand
    # for a one-element list.
    nwl, pwl, nbl, pbl = [_ensure_list(lst) for lst in
                          (imap.name_allowlist, imap.path_allowlist,
                           imap.name_blocklist, imap.path_blocklist)]
    name = project.name
    path = Path(project.path)
    # names match exactly; paths use Path.match() glob semantics
    blocked = (name in nbl) or any(path.match(p) for p in pbl)
    allowed = (name in nwl) or any(path.match(p) for p in pwl)
    no_allowlists = not (nwl or pwl)

    if blocked:
        # an explicit allow entry overrides a block entry
        return allowed
    else:
        # unblocked: pass if explicitly allowed, or if there are no
        # allowlists at all to be excluded from
        return allowed or no_allowlists

class _import_ctx(NamedTuple):
    # Holds state that changes as we recurse down the manifest import tree.

    # The current map from already-defined project names to Projects.
    #
    # This is shared, mutable state between Manifest() constructor
    # calls that happen during resolution. We mutate this directly
    # when handling 'manifest: projects:' lists. Manifests which are
    # imported earlier get higher precedence: if a 'projects:' list
    # contains a name which is already present here, we ignore that
    # element.
    projects: Dict[str, 'Project']

    # The current shared group filter. This is mutable state in the
    # same way 'projects' is. Manifests which are imported earlier get
    # higher precedence here too.
    #
    # This is done by prepending (NOT appending) any 'manifest:
    # group-filter:' lists we encounter during import resolution onto
    # this list. Since group-filter lists have "last entry wins"
    # semantics, earlier manifests take precedence.
    group_filter: GroupFilterType

    # The current restrictions on which projects the importing
    # manifest is interested in.
# # These accumulate as we pick up additional allowlists
<reponame>poleha/google-maps-services-python<gh_stars>1-10 # # Copyright 2015 Google Inc. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # """Performs requests to the Google Places API.""" from uuid import uuid4 as places_autocomplete_session_token from googlemaps import convert PLACES_FIND_FIELDS_BASIC = set([ "formatted_address", "geometry", "icon", "id", "name", "permanently_closed", "photos", "place_id", "plus_code", "scope", "types", ]) PLACES_FIND_FIELDS_CONTACT = set(["opening_hours",]) PLACES_FIND_FIELDS_ATMOSPHERE = set(["price_level", "rating"]) PLACES_FIND_FIELDS = (PLACES_FIND_FIELDS_BASIC ^ PLACES_FIND_FIELDS_CONTACT ^ PLACES_FIND_FIELDS_ATMOSPHERE) PLACES_DETAIL_FIELDS_BASIC = set([ "address_component", "adr_address", "alt_id", "formatted_address", "geometry", "icon", "id", "name", "permanently_closed", "photo", "place_id", "plus_code", "scope", "type", "url", "utc_offset", "vicinity", ]) PLACES_DETAIL_FIELDS_CONTACT = set([ "formatted_phone_number", "international_phone_number", "opening_hours", "website", ]) PLACES_DETAIL_FIELDS_ATMOSPHERE = set(["price_level", "rating", "review",]) PLACES_DETAIL_FIELDS = (PLACES_DETAIL_FIELDS_BASIC ^ PLACES_DETAIL_FIELDS_CONTACT ^ PLACES_DETAIL_FIELDS_ATMOSPHERE) def find_place(client, input, input_type, fields=None, location_bias=None, language=None): """ A Find Place request takes a text input, and returns a place. 
    The text input can be any kind of Places data, for example, a name,
    address, or phone number.

    :param input: The text input specifying which place to search for (for
                  example, a name, address, or phone number).
    :type input: string

    :param input_type: The type of input. This can be one of either 'textquery'
                       or 'phonenumber'.
    :type input_type: string

    :param fields: The fields specifying the types of place data to return,
                   separated by a comma. For full details see:
                   https://developers.google.com/places/web-service/search#FindPlaceRequests
    :type fields: list

    :param location_bias: Prefer results in a specified area, by specifying
                          either a radius plus lat/lng, or two lat/lng pairs
                          representing the points of a rectangle. See:
                          https://developers.google.com/places/web-service/search#FindPlaceRequests
    :type location_bias: string

    :param language: The language in which to return results.
    :type language: string

    :rtype: result dict with the following keys:
            status: status code
            candidates: list of places
    """
    params = {"input": input, "inputtype": input_type}

    # validate input_type locally before making a network request
    if input_type != "textquery" and input_type != "phonenumber":
        raise ValueError("Valid values for the `input_type` param for "
                         "`find_place` are 'textquery' or 'phonenumber', "
                         "the given value is invalid: '%s'" % input_type)

    if fields:
        # reject any field name not in the documented whitelist
        invalid_fields = set(fields) - PLACES_FIND_FIELDS
        if invalid_fields:
            raise ValueError("Valid values for the `fields` param for "
                             "`find_place` are '%s', these given field(s) "
                             "are invalid: '%s'" % (
                                 "', '".join(PLACES_FIND_FIELDS),
                                 "', '".join(invalid_fields)))
        params["fields"] = convert.join_list(",", fields)

    if location_bias:
        # locationbias values are prefixed with their kind, e.g.
        # "circle:radius@lat,lng"; only the prefix is validated here
        valid = ["ipbias", "point", "circle", "rectangle"]
        if location_bias.split(":")[0] not in valid:
            raise ValueError("location_bias should be prefixed with one of: %s"
                             % valid)
        params["locationbias"] = location_bias
    if language:
        params["language"] = language

    return client._request("/maps/api/place/findplacefromtext/json", params)


def places(client, query, location=None,
radius=None, language=None, min_price=None, max_price=None, open_now=False, type=None, region=None, page_token=None): """ Places search. :param query: The text string on which to search, for example: "restaurant". :type query: string :param location: The latitude/longitude value for which you wish to obtain the closest, human-readable address. :type location: string, dict, list, or tuple :param radius: Distance in meters within which to bias results. :type radius: int :param language: The language in which to return results. :type language: string :param min_price: Restricts results to only those places with no less than this price level. Valid values are in the range from 0 (most affordable) to 4 (most expensive). :type min_price: int :param max_price: Restricts results to only those places with no greater than this price level. Valid values are in the range from 0 (most affordable) to 4 (most expensive). :type max_price: int :param open_now: Return only those places that are open for business at the time the query is sent. :type open_now: bool :param type: Restricts the results to places matching the specified type. The full list of supported types is available here: https://developers.google.com/places/supported_types :type type: string :param region: The region code, optional parameter. See more @ https://developers.google.com/places/web-service/search :type region: string :param page_token: Token from a previous search that when provided will returns the next page of results for the same search. 
:type page_token: string :rtype: result dict with the following keys: results: list of places html_attributions: set of attributions which must be displayed next_page_token: token for retrieving the next page of results """ return _places(client, "text", query=query, location=location, radius=radius, language=language, min_price=min_price, max_price=max_price, open_now=open_now, type=type, region=region, page_token=page_token) def places_nearby(client, location=None, radius=None, keyword=None, language=None, min_price=None, max_price=None, name=None, open_now=False, rank_by=None, type=None, page_token=None): """ Performs nearby search for places. :param location: The latitude/longitude value for which you wish to obtain the closest, human-readable address. :type location: string, dict, list, or tuple :param radius: Distance in meters within which to bias results. :type radius: int :param region: The region code, optional parameter. See more @ https://developers.google.com/places/web-service/search :type region: string :param keyword: A term to be matched against all content that Google has indexed for this place. :type keyword: string :param language: The language in which to return results. :type language: string :param min_price: Restricts results to only those places with no less than this price level. Valid values are in the range from 0 (most affordable) to 4 (most expensive). :type min_price: int :param max_price: Restricts results to only those places with no greater than this price level. Valid values are in the range from 0 (most affordable) to 4 (most expensive). :type max_price: int :param name: One or more terms to be matched against the names of places. :type name: string or list of strings :param open_now: Return only those places that are open for business at the time the query is sent. :type open_now: bool :param rank_by: Specifies the order in which results are listed. 
Possible values are: prominence (default), distance :type rank_by: string :param type: Restricts the results to places matching the specified type. The full list of supported types is available here: https://developers.google.com/places/supported_types :type type: string :param page_token: Token from a previous search that when provided will returns the next page of results for the same search. :type page_token: string :rtype: result dict with the following keys: status: status code results: list of places html_attributions: set of attributions which must be displayed next_page_token: token for retrieving the next page of results """ if not location and not page_token: raise ValueError("either a location or page_token arg is required") if rank_by == "distance": if not (keyword or name or type): raise ValueError("either a keyword, name, or type arg is required " "when rank_by is set to distance") elif radius is not None: raise ValueError("radius cannot be specified when rank_by is set to " "distance") return _places(client, "nearby", location=location, radius=radius, keyword=keyword, language=language, min_price=min_price, max_price=max_price, name=name, open_now=open_now, rank_by=rank_by, type=type, page_token=page_token) def places_radar(client, location, radius, keyword=None, min_price=None, max_price=None, name=None, open_now=False, type=None): """ Performs radar search for places. :param location: The latitude/longitude value for which you wish to obtain the closest, human-readable address. :type location: string, dict, list, or tuple :param radius: Distance in meters within which to bias results. :type radius: int :param keyword: A term to be matched against all content that Google has indexed for this place. :type keyword: string :param min_price: Restricts results to only those places with no less than this price level. Valid values are in the range from 0 (most affordable) to 4 (most expensive). 
:type min_price: int :param max_price: Restricts results to only those places with no greater than this price level. Valid values are in the range from 0 (most affordable) to 4 (most expensive). :type max_price: int :param name: One or more terms to be matched against the names of places. :type name: string or list of strings :param open_now: Return only those places that are open for business at the time the query is sent. :type open_now: bool :param type: Restricts the results to places matching the specified type. The full list of supported types is available here: https://developers.google.com/places/supported_types :type type: string :rtype: result dict with the following keys: status: status code
<filename>benchmarks/benchmarks/go_benchmark_functions/go_funcs_D.py # -*- coding: utf-8 -*- from __future__ import division, print_function, absolute_import import numpy as np from numpy import abs, cos, exp, arange, pi, sin, sqrt, sum, zeros, tanh from numpy.testing import assert_almost_equal from .go_benchmark import Benchmark class Damavandi(Benchmark): """ Damavandi objective function. This class defines the Damavandi [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Damavandi}}(x) = \left[ 1 - \lvert{\frac{ \sin[\pi (x_1 - 2)]\sin[\pi (x2 - 2)]}{\pi^2 (x_1 - 2)(x_2 - 2)}} \rvert^5 \right] \left[2 + (x_1 - 7)^2 + 2(x_2 - 7)^2 \right] Here, :math:`n` represents the number of dimensions and :math:`x_i \in [0, 14]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0.0` for :math:`x_i = 2` for :math:`i = 1, ..., n` .. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, 2) self._bounds = zip([0.0] * self.N, [14.0] * self.N) self.global_optimum = [[2 for _ in range(self.N)]] self.fglob = np.nan def fun(self, x, *args): self.nfev += 1 try: num = sin(pi * (x[0] - 2.0)) * sin(pi * (x[1] - 2.0)) den = (pi ** 2) * (x[0] - 2.0) * (x[1] - 2.0) factor1 = 1.0 - (abs(num / den)) ** 5.0 factor2 = 2 + (x[0] - 7.0) ** 2.0 + 2 * (x[1] - 7.0) ** 2.0 return factor1 * factor2 except ZeroDivisionError: return np.nan def success(self, x): """Is a candidate solution at the global minimum""" val = self.fun(x) if np.isnan(val): return True try: assert_almost_equal(val, 0., 4) return True except AssertionError: return False return False class Deb01(Benchmark): """ Deb 1 objective function. This class defines the Deb 1 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\text{Deb01}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6(5 \pi x_i) Here, :math:`n` represents the number of dimensions and :math:`x_i \in [-1, 1]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x_i) = 0.0`. The number of global minima is :math:`5^n` that are evenly spaced in the function landscape, where :math:`n` represents the dimension of the problem. .. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self.change_dimensionality = True self._bounds = zip([-1.0] * self.N, [1.0] * self.N) self.global_optimum = [[0.3, -0.3]] self.fglob = -1.0 def fun(self, x, *args): self.nfev += 1 return -(1.0 / self.N) * sum(sin(5 * pi * x) ** 6.0) class Deb03(Benchmark): """ Deb 3 objective function. This class defines the Deb 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Deb02}}(x) = - \frac{1}{N} \sum_{i=1}^n \sin^6 \left[ 5 \pi \left ( x_i^{3/4} - 0.05 \right) \right ] Here, :math:`n` represents the number of dimensions and :math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = 0.0`. The number of global minima is :math:`5^n` that are evenly spaced in the function landscape, where :math:`n` represents the dimension of the problem. .. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. 
""" def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self.change_dimensionality = True self._bounds = zip([-1.0] * self.N, [1.0] * self.N) self.global_optimum = [[0.93388314, 0.68141781]] self.fglob = -1.0 def fun(self, x, *args): self.nfev += 1 return -(1.0 / self.N) * sum(sin(5 * pi * (x ** 0.75 - 0.05)) ** 6.0) class Decanomial(Benchmark): """ Decanomial objective function. This class defines the Decanomial function global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Decanomial}}(x) = 0.001 \left(\lvert{x_{2}^{4} + 12 x_{2}^{3} + 54 x_{2}^{2} + 108 x_{2} + 81.0}\rvert + \lvert{x_{1}^{10} - 20 x_{1}^{9} + 180 x_{1}^{8} - 960 x_{1}^{7} + 3360 x_{1}^{6} - 8064 x_{1}^{5} + 13340 x_{1}^{4} - 15360 x_{1}^{3} + 11520 x_{1}^{2} - 5120 x_{1} + 2624.0}\rvert\right)^{2} with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = 0` for :math:`x = [2, -3]` .. [1] <NAME>. Global Optimization Benchmarks and AMPGO retrieved 2015 """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-10.0] * self.N, [10.0] * self.N) self.custom_bounds = [(0, 2.5), (-2, -4)] self.global_optimum = [[2.0, -3.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev += 1 val = x[1] ** 4 + 12 * x[1] ** 3 + 54 * x[1] ** 2 + 108 * x[1] + 81.0 val2 = x[0] ** 10. - 20 * x[0] ** 9 + 180 * x[0] ** 8 - 960 * x[0] ** 7 val2 += 3360 * x[0] ** 6 - 8064 * x[0] ** 5 + 13340 * x[0] ** 4 val2 += - 15360 * x[0] ** 3 + 11520 * x[0] ** 2 - 5120 * x[0] + 2624 return 0.001 * (abs(val) + abs(val2)) ** 2. class Deceptive(Benchmark): """ Deceptive objective function. This class defines the Deceptive [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\text{Deceptive}}(x) = - \left [\frac{1}{n} \sum_{i=1}^{n} g_i(x_i) \right ]^{\beta} Where :math:`\beta` is a fixed non-linearity factor; in this exercise, :math:`\beta = 2`. 
The function :math:`g_i(x_i)` is given by: .. math:: g_i(x_i) = \begin{cases} - \frac{x}{\alpha_i} + \frac{4}{5} & \textrm{if} \hspace{5pt} 0 \leq x_i \leq \frac{4}{5} \alpha_i \\ \frac{5x}{\alpha_i} -4 & \textrm{if} \hspace{5pt} \frac{4}{5} \alpha_i \le x_i \leq \alpha_i \\ \frac{5(x - \alpha_i)}{\alpha_i-1} & \textrm{if} \hspace{5pt} \alpha_i \le x_i \leq \frac{1 + 4\alpha_i}{5} \\ \frac{x - 1}{1 - \alpha_i} & \textrm{if} \hspace{5pt} \frac{1 + 4\alpha_i}{5} \le x_i \leq 1 \end{cases} Here, :math:`n` represents the number of dimensions and :math:`x_i \in [0, 1]` for :math:`i = 1, ..., n`. *Global optimum*: :math:`f(x) = -1` for :math:`x_i = \alpha_i` for :math:`i = 1, ..., n` .. [1] <NAME>. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: this function was taken from the Gavana website. The following code is based on his code. His code and the website don't match, the equations are wrong. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([0.0] * self.N, [1.0] * self.N) alpha = arange(1.0, self.N + 1.0) / (self.N + 1.0) self.global_optimum = [alpha] self.fglob = -1.0 self.change_dimensionality = True def fun(self, x, *args): self.nfev += 1 alpha = arange(1.0, self.N + 1.0) / (self.N + 1.0) beta = 2.0 g = zeros((self.N, )) for i in range(self.N): if x[i] <= 0.0: g[i] = x[i] elif x[i] < 0.8 * alpha[i]: g[i] = -x[i] / alpha[i] + 0.8 elif x[i] < alpha[i]: g[i] = 5.0 * x[i] / alpha[i] - 4.0 elif x[i] < (1.0 + 4 * alpha[i]) / 5.0: g[i] = 5.0 * (x[i] - alpha[i]) / (alpha[i] - 1.0) + 1.0 elif x[i] <= 1.0: g[i] = (x[i] - 1.0) / (1.0 - alpha[i]) + 4.0 / 5.0 else: g[i] = x[i] - 1.0 return -((1.0 / self.N) * sum(g)) ** beta class DeckkersAarts(Benchmark): """ Deckkers-Aarts objective function. This class defines the Deckkers-Aarts [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\text{DeckkersAarts}}(x) = 10^5x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2 + 10^{-5}(x_1^2 + x_2^2)^4 with :math:`x_i \in [-20, 20]` for :math:`i = 1, 2`. *Global optimum*: :math:`f(x) = -24776.518242168` for :math:`x = [0, \pm 14.9451209]` .. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: jamil solution and global minimum are slightly wrong. """ def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions) self._bounds = zip([-20.0] * self.N, [20.0] * self.N) self.custom_bounds = ([-1, 1], [14, 16]) self.global_optimum = [[0.0, 14.9451209]] self.fglob = -24776.518342168 def fun(self, x, *args): self.nfev += 1 return (1.e5 * x[0] ** 2 + x[1] ** 2 -
<filename>examples/faster-rcnn/util.py<gh_stars>1-10 # ---------------------------------------------------------------------------- # Copyright 2016 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ Utility functions for Faster-RCNN example and demo. Reference: "Faster R-CNN" https://arxiv.org/pdf/1506.01497 https://github.com/rbgirshick/py-faster-rcnn """ from __future__ import division from __future__ import print_function from builtins import zip from builtins import range import numpy as np import os import pickle from neon.initializers import Constant, Xavier, Gaussian from neon.transforms import Rectlin, Identity, Softmax, PixelwiseSoftmax from neon.layers import Conv, Pooling, Affine, BranchNode, Tree, Dropout from neon.models import Model from neon.util.persist import load_obj from neon.data.datasets import Dataset from voc_eval import voc_eval from roi_pooling import RoiPooling from proposal_layer import ProposalLayer def add_vgg_layers(): # setup layers init1_vgg = Xavier(local=True) relu = Rectlin() conv_params = {'strides': 1, 'padding': 1, 'init': init1_vgg, 'bias': Constant(0), 'activation': relu} # Set up the model layers vgg_layers = [] # set up 3x3 conv stacks with different feature map sizes vgg_layers.append(Conv((3, 3, 64), name="skip", **conv_params)) vgg_layers.append(Conv((3, 3, 64), name="skip", **conv_params)) vgg_layers.append(Pooling(2, strides=2)) 
    vgg_layers.append(Conv((3, 3, 128), name="skip", **conv_params))
    vgg_layers.append(Conv((3, 3, 128), name="skip", **conv_params))
    vgg_layers.append(Pooling(2, strides=2))

    vgg_layers.append(Conv((3, 3, 256), **conv_params))
    vgg_layers.append(Conv((3, 3, 256), **conv_params))
    vgg_layers.append(Conv((3, 3, 256), **conv_params))
    vgg_layers.append(Pooling(2, strides=2))

    vgg_layers.append(Conv((3, 3, 512), **conv_params))
    vgg_layers.append(Conv((3, 3, 512), **conv_params))
    vgg_layers.append(Conv((3, 3, 512), **conv_params))
    vgg_layers.append(Pooling(2, strides=2))

    vgg_layers.append(Conv((3, 3, 512), **conv_params))
    vgg_layers.append(Conv((3, 3, 512), **conv_params))
    vgg_layers.append(Conv((3, 3, 512), **conv_params))

    # not used after this layer
    # vgg_layers.append(Pooling(2, strides=2))
    # vgg_layers.append(Affine(nout=4096, init=initfc, bias=Constant(0), activation=relu))
    # vgg_layers.append(Dropout(keep=0.5))
    # vgg_layers.append(Affine(nout=4096, init=initfc, bias=Constant(0), activation=relu))
    # vgg_layers.append(Dropout(keep=0.5))
    # vgg_layers.append(Affine(nout=1000, init=initfc, bias=Constant(0), activation=Softmax()))
    return vgg_layers


def scale_bbreg_weights(model, means, stds, num_classes):
    """Fold bbox-regression target normalization back into model weights.

    Tiles *means* and *stds* across *num_classes* (assumes they are Python
    lists, since ``num_classes * means`` repeats them -- TODO confirm) and
    rescales, in place, the weights of the layers at indices [-3] (scaled by
    stds) and [-2] (scaled by stds, then offset by means) inside the nested
    Tree structure. Returns *model*.

    NOTE(review): the hard-coded layer path
    ``model.layers.layers[2].layers[1].layers[1]`` mirrors the Tree built in
    build_model -- it must be kept in sync if that structure changes.
    """
    means = np.array(num_classes * means)
    stds = np.array(num_classes * stds)

    # move the scale/offset arrays onto the backend (GPU/CPU) first
    means_be = model.be.array(means)
    stds_be = model.be.array(stds)
    model.layers.layers[2].layers[1].layers[1].layers[-3].W[:] = \
        model.layers.layers[2].layers[1].layers[1].layers[-3].W * stds_be
    model.layers.layers[2].layers[1].layers[1].layers[-2].W[:] = \
        model.layers.layers[2].layers[1].layers[1].layers[-2].W * stds_be + means_be
    return model


def load_vgg_all_weights(model, path):
    # load a pre-trained VGG16 from Neon model zoo to the local
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/'
    filename = 'VGG_D.p'
    size = 554227541

    # download the serialized weights if not already cached at *path*
    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)
print('De-serializing the pre-trained VGG16 model...') pdict = load_obj(filepath) param_layers = [l for l in model.layers.layers[0].layers] param_dict_list = pdict['model']['config']['layers'] i = 0 for layer, ps in zip(param_layers, param_dict_list): i += 1 if i == 43: break layer.load_weights(ps, load_states=True) print(layer.name + " <-- " + ps['config']['name']) # to load the fc6 and fc7 from caffe into neon fc layers after ROI pooling neon_fc_layers = model.layers.layers[2].layers[1].layers[0].layers[2:5] +\ model.layers.layers[2].layers[1].layers[0].layers[6:9] vgg_fc_layers = param_dict_list[44:47] + param_dict_list[48:51] for layer, ps in zip(neon_fc_layers, vgg_fc_layers): layer.load_weights(ps, load_states=True) print(layer.name + " <-- " + ps['config']['name']) def load_vgg_weights(model, path): url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/' filename = 'VGG_D_Conv.p' size = 169645138 workdir, filepath = Dataset._valid_path_append(path, '', filename) if not os.path.exists(filepath): Dataset.fetch_dataset(url, filename, filepath, size) print('De-serializing the pre-trained VGG16 model...') pdict = load_obj(filepath) param_layers = [l for l in model.layers.layers[0].layers] param_dict_list = pdict['model']['config']['layers'] for layer, ps in zip(param_layers, param_dict_list): layer.load_weights(ps, load_states=True) print(layer.name + " <-- " + ps['config']['name']) def build_model(dataset, frcn_rois_per_img, inference=False): """ Returns the Faster-RCNN model. For inference, also returns a reference to the proposal layer. Faster-RCNN contains three modules: VGG, the Region Proposal Network (RPN), and the Classification Network (ROI-pooling + Fully Connected layers), organized as a tree. 
Tree has 4 branches: VGG -> b1 -> Conv (3x3) -> b2 -> Conv (1x1) -> CrossEntropyMulti (objectness label) b2 -> Conv (1x1) -> SmoothL1Loss (bounding box targets) b1 -> PropLayer -> ROI -> Affine -> Affine -> b3 -> Affine -> CrossEntropyMulti b3 -> Affine -> SmoothL1Loss When the model is constructed for inference, several elements are different: - The number of regions to keep before and after non-max suppression is (6000, 300) for training and (12000, 2000) for inference. - The out_shape of the proposalLayer of the network is equal to post_nms_N (number of rois to keep after performaing nms). This is configured by passing the inference flag to the proposalLayer constructor. Arguments: dataset (objectlocalization): Dataset object. frcn_rois_per_img (int): Number of ROIs per image considered by the classification network. inference (bool): Construct the model for inference. Default is False. Returns: model (Model): Faster-RCNN model. proposalLayer (proposalLayer): Reference to proposalLayer in the model. Returned only for inference=True. """ num_classes = dataset.num_classes # define the branch points b1 = BranchNode(name="conv_branch") b2 = BranchNode(name="rpn_branch") b3 = BranchNode(name="roi_branch") # define VGG VGG = add_vgg_layers() # define RPN rpn_init = dict(strides=1, init=Gaussian(scale=0.01), bias=Constant(0)) # these references are passed to the ProposalLayer. 
RPN_3x3 = Conv((3, 3, 512), activation=Rectlin(), padding=1, **rpn_init) RPN_1x1_obj = Conv((1, 1, 18), activation=PixelwiseSoftmax(c=2), padding=0, **rpn_init) RPN_1x1_bbox = Conv((1, 1, 36), activation=Identity(), padding=0, **rpn_init) # inference uses different network settings if not inference: pre_nms_N = 12000 post_nms_N = 2000 else: pre_nms_N = 6000 post_nms_N = 300 proposalLayer = ProposalLayer([RPN_1x1_obj, RPN_1x1_bbox], dataset.get_global_buffers(), pre_nms_N=pre_nms_N, post_nms_N=post_nms_N, num_rois=frcn_rois_per_img, inference=inference) # define ROI classification network ROI = [proposalLayer, RoiPooling(HW=(7, 7)), Affine(nout=4096, init=Gaussian(scale=0.005), bias=Constant(.1), activation=Rectlin()), Dropout(keep=0.5), Affine(nout=4096, init=Gaussian(scale=0.005), bias=Constant(.1), activation=Rectlin()), Dropout(keep=0.5)] ROI_category = Affine(nout=num_classes, init=Gaussian(scale=0.01), bias=Constant(0), activation=Softmax()) ROI_bbox = Affine(nout=4 * num_classes, init=Gaussian(scale=0.001), bias=Constant(0), activation=Identity()) # build the model # the four branches of the tree mirror the branches listed above frcn_tree = Tree([ROI + [b3, ROI_category], [b3, ROI_bbox] ]) model = Model(layers=Tree([VGG + [b1, RPN_3x3, b2, RPN_1x1_obj], [b2, RPN_1x1_bbox], [b1] + [frcn_tree], ])) if inference: return (model, proposalLayer) else: return model def get_bboxes(outputs, proposals, num_proposals, num_classes, im_shape, im_scale, max_per_image=100, thresh=0.001, nms_thresh=0.4): """ Returns bounding boxes for detected objects, organized by class. Transforms the proposals from the region proposal network to bounding box predictions using the bounding box regressions from the classification network: (1) Applying bounding box regressions to the region proposals. (2) For each class, take proposed boxes where the corresponding objectness score is greater then THRESH. 
(3) Apply non-maximum suppression across classes using NMS_THRESH (4) Limit the maximum number of detections over all classes to MAX_PER_IMAGE Arguments: outputs (list of tensors): Faster-RCNN model outputs proposals (Tensor): Proposed boxes from the model's proposalLayer num_proposals (int): Number of proposals num_classes (int): Number of classes im_shape (tuple): Shape of image im_scale (float): Scaling factor of image max_per_image (int): Maximum number of allowed detections per image. Default is 100. None indicates no enforced maximum. thresh (float): Threshold for objectness score. Default is 0.001. nms_thresh (float): Threshold for non-maximum suppression. Default is 0.4. Returns: detections (list): List of bounding box detections, organized by class. Each element contains a numpy array of bounding boxes for detected objects of that class. """ detections = [[] for _ in range(num_classes)] proposals = proposals.get()[:num_proposals, :] # remove padded proposals boxes = proposals[:, 1:5] / im_scale # scale back to real image space # obtain bounding box corrections from the frcn layers scores = outputs[2][0].get()[:, :num_proposals].T bbox_deltas = outputs[2][1].get()[:, :num_proposals].T # apply bounding box corrections to the region proposals pred_boxes = bbox_transform_inv(boxes, bbox_deltas) pred_boxes = clip_boxes(pred_boxes, im_shape) # Skip the background class, start processing from class 1 for j in range(1, num_classes): inds = np.where(scores[:, j] > thresh)[0] # obtain class-specific boxes and scores cls_scores = scores[inds, j] cls_boxes = pred_boxes[inds, j * 4:(j + 1) * 4] cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False) # apply non-max suppression keep = nms(cls_dets, nms_thresh) cls_dets = cls_dets[keep, :] # store results detections[j] = cls_dets # Limit to max_per_image detections *over all classes* if max_per_image is not None: # obtain flattened list of all image scores image_scores = 
np.hstack([detections[j][:, -1] for j in range(1, num_classes)]) if len(image_scores) > max_per_image: # compute threshold needed to keep the top max_per_image image_thresh = np.sort(image_scores)[-max_per_image] # apply threshold for j in range(1, num_classes): keep = np.where(detections[j][:, -1] >= image_thresh)[0] detections[j] = detections[j][keep, :] return detections # -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by <NAME> # -------------------------------------------------------- def nms(dets, thresh): """Pure Python NMS baseline.""" x1 =
# tagmerge.py - merge .hgtags files
#
# Copyright 2014 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# This module implements an automatic merge algorithm for mercurial's tag files
#
# The tagmerge algorithm implemented in this module is able to resolve most
# merge conflicts that currently would trigger a .hgtags merge conflict. The
# only case that it does not (and cannot) handle is that in which two tags point
# to different revisions on each merge parent _and_ their corresponding tag
# histories have the same rank (i.e. the same length). In all other cases the
# merge algorithm will choose the revision belonging to the parent with the
# highest ranked tag history. The merged tag history is the combination of both
# tag histories (special care is taken to try to combine common tag histories
# where possible).
#
# In addition to actually merging the tags from two parents, taking into
# account the base, the algorithm also tries to minimize the difference
# between the merged tag file and the first parent's tag file (i.e. it tries to
# make the merged tag order as similar as possible to the first parent's tag
# file order).
#
# The algorithm works as follows:
# 1. read the tags from p1, p2 and the base
#    - when reading the p1 tags, also get the line numbers associated to each
#      tag node (these will be used to sort the merged tags in a way that
#      minimizes the diff to p1). Ignore the file numbers when reading p2 and
#      the base
# 2. recover the "lost tags" (i.e. those that are found in the base but not on
#    p1 or p2) and add them back to p1 and/or p2
#    - at this point the only tags that are on p1 but not on p2 are those new
#      tags that were introduced in p1. Same thing for the tags that are on p2
#      but not on p1
# 3. 
take all tags that are only on p1 or only on p2 (but not on the base) # - Note that these are the tags that were introduced between base and p1 # and between base and p2, possibly on separate clones # 4. for each tag found both on p1 and p2 perform the following merge algorithm: # - the tags conflict if their tag "histories" have the same "rank" (i.e. # length) AND the last (current) tag is NOT the same # - for non conflicting tags: # - choose which are the high and the low ranking nodes # - the high ranking list of nodes is the one that is longer. # In case of draw favor p1 # - the merged node list is made of 3 parts: # - first the nodes that are common to the beginning of both # the low and the high ranking nodes # - second the non common low ranking nodes # - finally the non common high ranking nodes (with the last # one being the merged tag node) # - note that this is equivalent to putting the whole low ranking # node list first, followed by the non common high ranking nodes # - note that during the merge we keep the "node line numbers", which will # be used when writing the merged tags to the tag file # 5. write the merged tags taking into account to their positions in the first # parent (i.e. try to keep the relative ordering of the nodes that come # from p1). This minimizes the diff between the merged and the p1 tag files # This is done by using the following algorithm # - group the nodes for a given tag that must be written next to each other # - A: nodes that come from consecutive lines on p1 # - B: nodes that come from p2 (i.e. 
whose associated line number is # None) and are next to one of the a nodes in A # - each group is associated with a line number coming from p1 # - generate a "tag block" for each of the groups # - a tag block is a set of consecutive "node tag" lines belonging to # the same tag and which will be written next to each other on the # merged tags file # - sort the "tag blocks" according to their associated number line # - put blocks whose nodes come all from p2 first # - write the tag blocks in the sorted order from __future__ import absolute_import import operator from .i18n import _ from .node import ( hex, nullid, ) from .import ( tags as tagsmod, util, ) hexnullid = hex(nullid) def readtagsformerge(ui, repo, lines, fn='', keeplinenums=False): '''read the .hgtags file into a structure that is suitable for merging Depending on the keeplinenums flag, clear the line numbers associated with each tag. This is done because only the line numbers of the first parent are useful for merging. ''' filetags = tagsmod._readtaghist(ui, repo, lines, fn=fn, recode=None, calcnodelines=True)[1] for tagname, taginfo in filetags.items(): if not keeplinenums: for el in taginfo: el[1] = None return filetags def grouptagnodesbyline(tagnodes): ''' Group nearby nodes (i.e. those that must be written next to each other) The input is a list of [node, position] pairs, corresponding to a given tag The position is the line number where the node was found on the first parent .hgtags file, or None for those nodes that came from the base or the second parent .hgtags files. This function groups those [node, position] pairs, returning a list of groups of nodes that must be written next to each other because their positions are consecutive or have no position preference (because their position is None). 
    The result is a list of [position, [consecutive node list]]
    '''
    # find the position of the first node that has one (nodes that came from
    # the base or p2 carry a position of None)
    firstlinenum = None
    for hexnode, linenum in tagnodes:
        firstlinenum = linenum
        if firstlinenum is not None:
            break
    if firstlinenum is None:
        # no node has a position preference: emit a single position-less group
        return [[None, [el[0] for el in tagnodes]]]
    # anchor the leading node(s) to the first known position so that they are
    # written together with the first positioned node
    tagnodes[0][1] = firstlinenum
    groupednodes = [[firstlinenum, []]]
    prevlinenum = firstlinenum
    for hexnode, linenum in tagnodes:
        if linenum is not None and linenum - prevlinenum > 1:
            # a gap in the p1 line numbers starts a new group; None positions
            # never start a group and are appended to the current one
            groupednodes.append([linenum, []])
        groupednodes[-1][1].append(hexnode)
        if linenum is not None:
            prevlinenum = linenum
    return groupednodes

def writemergedtags(repo, mergedtags):
    '''
    write the merged tags while trying to minimize the diff to the first parent

    This function uses the ordering info stored on the merged tags dict to
    generate an .hgtags file which is correct (in the sense that its contents
    correspond to the result of the tag merge)
    while also being as close as possible to the first parent's .hgtags file.
    '''
    # group the node-tag pairs that must be written next to each other
    for tname, taglist in mergedtags.items():
        mergedtags[tname] = grouptagnodesbyline(taglist)

    # convert the grouped merged tags dict into a format that resembles the
    # final .hgtags file (i.e. 
a list of blocks of 'node tag' pairs) def taglist2string(tlist, tname): return '\n'.join(['%s %s' % (hexnode, tname) for hexnode in tlist]) finaltags = [] for tname, tags in mergedtags.items(): for block in tags: block[1] = taglist2string(block[1], tname) finaltags += tags # the tag groups are linked to a "position" that can be used to sort them # before writing them # the position is calculated to ensure that the diff of the merged .hgtags # file to the first parent's .hgtags file is as small as possible finaltags.sort(key=operator.itemgetter(0)) # finally we can join the sorted groups to get the final contents of the # merged .hgtags file, and then write it to disk mergedtagstring = '\n'.join([tags for rank, tags in finaltags if tags]) fp = repo.wfile('.hgtags', 'wb') fp.write(mergedtagstring + '\n') fp.close() def singletagmerge(p1nodes, p2nodes): ''' merge the nodes corresponding to a single tag Note that the inputs are lists of node-linenum pairs (i.e. not just lists of nodes) ''' if not p2nodes: return p1nodes if not p1nodes: return p2nodes # there is no conflict unless both tags point to different revisions # and
<filename>reconcile/cli.py import json import logging import os import sys import re import click import sentry_sdk from reconcile.utils import config from reconcile.utils import gql import reconcile.dyn_traffic_director import reconcile.github_org import reconcile.github_owners import reconcile.github_users import reconcile.github_scanner import reconcile.github_validator import reconcile.openshift_clusterrolebindings import reconcile.openshift_rolebindings import reconcile.openshift_groups import reconcile.openshift_limitranges import reconcile.openshift_resourcequotas import reconcile.openshift_users import reconcile.openshift_resources import reconcile.openshift_vault_secrets import reconcile.openshift_routes import reconcile.openshift_namespace_labels import reconcile.openshift_namespaces import reconcile.openshift_network_policies import reconcile.openshift_serviceaccount_tokens import reconcile.openshift_saas_deploy import reconcile.openshift_saas_deploy_wrapper import reconcile.openshift_saas_deploy_trigger_moving_commits import reconcile.openshift_saas_deploy_trigger_upstream_jobs import reconcile.openshift_saas_deploy_trigger_configs import reconcile.openshift_saas_deploy_trigger_cleaner import reconcile.openshift_tekton_resources import reconcile.saas_file_owners import reconcile.gitlab_ci_skipper import reconcile.gitlab_labeler import reconcile.saas_file_validator import reconcile.quay_membership import reconcile.gcr_mirror import reconcile.quay_mirror import reconcile.quay_mirror_org import reconcile.quay_repos import reconcile.quay_permissions import reconcile.ldap_users import reconcile.terraform_resources import reconcile.terraform_resources_wrapper import reconcile.terraform_users import reconcile.terraform_vpc_peerings import reconcile.terraform_tgw_attachments import reconcile.github_repo_invites import reconcile.github_repo_permissions_validator import reconcile.jenkins_roles import reconcile.jenkins_plugins import reconcile.jenkins_job_builder 
import reconcile.jenkins_job_cleaner import reconcile.jenkins_webhooks import reconcile.jenkins_webhooks_cleaner import reconcile.jira_watcher import reconcile.unleash_watcher import reconcile.openshift_upgrade_watcher import reconcile.slack_usergroups import reconcile.slack_cluster_usergroups import reconcile.gitlab_integrations import reconcile.gitlab_permissions import reconcile.gitlab_housekeeping import reconcile.gitlab_fork_compliance import reconcile.gitlab_members import reconcile.gitlab_owners import reconcile.gitlab_mr_sqs_consumer import reconcile.gitlab_projects import reconcile.aws_garbage_collector import reconcile.aws_iam_keys import reconcile.aws_iam_password_reset import reconcile.aws_ecr_image_pull_secrets import reconcile.aws_support_cases_sos import reconcile.ocm_groups import reconcile.ocm_clusters import reconcile.ocm_external_configuration_labels import reconcile.ocm_machine_pools import reconcile.ocm_upgrade_scheduler import reconcile.ocm_addons import reconcile.ocm_aws_infrastructure_access import reconcile.ocm_github_idp import reconcile.ocm_additional_routers import reconcile.email_sender import reconcile.sentry_helper import reconcile.requests_sender import reconcile.service_dependencies import reconcile.sentry_config import reconcile.sql_query import reconcile.user_validator import reconcile.integrations_validator import reconcile.dashdotdb_cso import reconcile.ocp_release_mirror import reconcile.ecr_mirror import reconcile.kafka_clusters import reconcile.terraform_aws_route53 import reconcile.prometheus_rules_tester import reconcile.dashdotdb_dvo import reconcile.sendgrid_teammates import reconcile.osd_mirrors_data_updater import reconcile.dashdotdb_slo import reconcile.jenkins_job_builds_cleaner import reconcile.cluster_deployment_mapper import reconcile.gabi_authorized_users import reconcile.status_page_components from reconcile.status import ExitCodes from reconcile.status import RunningState from reconcile.utils.gql import 
(GqlApiErrorForbiddenSchema, GqlApiIntegrationNotFound) from reconcile.utils.aggregated_list import RunnerException from reconcile.utils.binary import binary, binary_version from reconcile.utils.environ import environ from reconcile.utils.unleash import get_feature_toggle_state TERRAFORM_VERSION = '0.13.7' TERRAFORM_VERSION_REGEX = r'^Terraform\sv([\d]+\.[\d]+\.[\d]+)$' OC_VERSION = '4.8.11' OC_VERSION_REGEX = r'^Client\sVersion:\s([\d]+\.[\d]+\.[\d]+)$' LOG_FMT = '[%(asctime)s] [%(levelname)s] ' \ '[%(filename)s:%(funcName)s:%(lineno)d] - %(message)s' LOG_DATEFMT = '%Y-%m-%d %H:%M:%S' def before_breadcrumb(crumb, hint): # https://docs.sentry.io/platforms/python/configuration/filtering/ # Configure breadcrumb to filter error mesage if 'category' in crumb and crumb['category'] == 'subprocess': # remove cluster token crumb['message'] = re.sub( r'--token \S*\b', '--token ***', crumb['message'] ) return crumb # Enable Sentry if os.getenv('SENTRY_DSN'): sentry_sdk.init( # pylint: disable=abstract-class-instantiated os.environ['SENTRY_DSN'], before_breadcrumb=before_breadcrumb ) def config_file(function): help_msg = 'Path to configuration file in toml format.' function = click.option('--config', 'configfile', required=True, default=os.environ.get('QONTRACT_CONFIG'), help=help_msg)(function) return function def log_level(function): function = click.option('--log-level', help='log-level of the command. 
Defaults to INFO.', type=click.Choice([ 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']))(function) return function def dry_run(function): help_msg = ('If `true`, it will only print the planned actions ' 'that would be performed, without executing them.') function = click.option('--dry-run/--no-dry-run', default=False, help=help_msg)(function) return function def validate_schemas(function): help_msg = 'Fail integration if it queries forbidden schemas' function = click.option('--validate-schemas/--no-validate-schemas', default=True, help=help_msg)(function) return function def dump_schemas(function): help_msg = 'Dump schemas to a file' function = click.option('--dump-schemas', 'dump_schemas_file', help=help_msg)(function) return function def gql_sha_url(function): help_msg = ('If `false`, it will not use the sha_url endpoint ' 'of graphql (prevent stopping execution on data reload).') function = click.option('--gql-sha-url/--no-gql-sha-url', default=True, help=help_msg)(function) return function def gql_url_print(function): help_msg = ('If `false`, it will not print the url endpoint of graphql.') function = click.option('--gql-url-print/--no-gql-url-print', default=True, help=help_msg)(function) return function def threaded(**kwargs): def f(function): opt = '--thread-pool-size' msg = 'number of threads to run in parallel.' function = click.option(opt, default=kwargs.get('default', 10), help=msg)(function) return function return f def take_over(**kwargs): def f(function): help_msg = 'manage resources exclusively (take over existing ones).' function = click.option('--take-over/--no-take-over', help=help_msg, default=True)(function) return function return f def internal(**kwargs): def f(function): help_msg = 'manage resources in internal or external clusters only.' function = click.option('--internal/--external', help=help_msg, default=None)(function) return function return f def use_jump_host(**kwargs): def f(function): help_msg = 'use jump host if defined.' 
function = click.option('--use-jump-host/--no-use-jump-host', help=help_msg, default=True)(function) return function return f def print_only(function): function = click.option('--print-only/--no-print-only', help='only print the config file.', default=False)(function) return function def config_name(function): function = click.option('--config-name', help='jenkins config name to print out.' 'must works with --print-only mode', default=None)(function) return function def job_name(function): function = click.option('--job-name', help='jenkins job name to print out.', default=None)(function) return function def instance_name(function): function = click.option('--instance-name', help='jenkins instance name to act on.', default=None)(function) return function def throughput(function): function = click.option('--io-dir', help='directory of input/output files.', default='throughput/')(function) return function def vault_input_path(function): function = click.option('--vault-input-path', help='path in Vault to find input resources.', default='')(function) return function def vault_output_path(function): function = click.option('--vault-output-path', help='path in Vault to store output resources.', default='')(function) return function def vault_throughput_path(function): function = click.option('--vault-throughput-path', help='path in Vault to find input resources ' 'and store output resources.', default='')(function) return function def cluster_name(function): function = click.option('--cluster-name', help='cluster name to act on.', default=None)(function) return function def namespace_name(function): function = click.option('--namespace-name', help='namespace name to act on.', default=None)(function) return function def account_name(function): function = click.option('--account-name', help='aws account name to act on.', default=None)(function) return function def gitlab_project_id(function): function = click.option('--gitlab-project-id', help='gitlab project id to submit 
PRs to. ' 'not required if mergeRequestGateway ' 'is not set to gitlab', default=None)(function) return function def saas_file_name(function): function = click.option('--saas-file-name', help='saas-file to act on.', default=None)(function) return function def enable_deletion(**kwargs): def f(function): opt = '--enable-deletion/--no-enable-deletion' msg = 'enable destroy/replace action.' function = click.option(opt, default=kwargs.get('default', True), help=msg)(function) return function return f def send_mails(**kwargs): def f(function): opt = '--send-mails/--no-send-mails' msg = 'send email notification to users.' function = click.option(opt, default=kwargs.get('default', False), help=msg)(function) return function return f def enable_rebase(**kwargs): def f(function): opt = '--enable-rebase/--no-enable-rebase' msg = 'enable the merge request rebase action.' function = click.option(opt, default=kwargs.get('default', True), help=msg)(function) return function return f def run_integration(func_container, ctx, *args, **kwargs): try: int_name = func_container.QONTRACT_INTEGRATION.replace('_', '-') running_state = RunningState() running_state.integration = int_name except AttributeError: sys.stderr.write("Integration missing QONTRACT_INTEGRATION.\n") sys.exit(ExitCodes.ERROR) try: gql.init_from_config(sha_url=ctx['gql_sha_url'], integration=int_name, validate_schemas=ctx['validate_schemas'], print_url=ctx['gql_url_print']) except GqlApiIntegrationNotFound as e: sys.stderr.write(str(e) + "\n") sys.exit(ExitCodes.INTEGRATION_NOT_FOUND) unleash_feature_state = get_feature_toggle_state(int_name) if not unleash_feature_state: logging.info('Integration toggle is disabled, skipping integration.') sys.exit(ExitCodes.SUCCESS) dry_run = ctx.get('dry_run', False) try: func_container.run(dry_run, *args, **kwargs) except RunnerException as e: sys.stderr.write(str(e) + "\n") sys.exit(ExitCodes.ERROR) except GqlApiErrorForbiddenSchema as e: sys.stderr.write(str(e) + "\n") 
sys.exit(ExitCodes.FORBIDDEN_SCHEMA) finally: if ctx.get('dump_schemas_file'): gqlapi = gql.get_api() with open(ctx.get('dump_schemas_file'), 'w') as f: f.write(json.dumps(gqlapi.get_queried_schemas())) def init_log_level(log_level): level = getattr(logging, log_level) if log_level else logging.INFO logging.basicConfig(format=LOG_FMT, datefmt=LOG_DATEFMT, level=level) @click.group() @config_file @dry_run @validate_schemas @dump_schemas @gql_sha_url @gql_url_print @log_level @click.pass_context def integration(ctx, configfile, dry_run, validate_schemas, dump_schemas_file, log_level, gql_sha_url, gql_url_print): ctx.ensure_object(dict) init_log_level(log_level) config.init_from_toml(configfile) ctx.obj['dry_run'] = dry_run ctx.obj['validate_schemas'] = validate_schemas ctx.obj['gql_sha_url'] = gql_sha_url ctx.obj['gql_url_print'] = gql_url_print ctx.obj['dump_schemas_file'] = dump_schemas_file @integration.command() @print_only @threaded() @binary(['terraform']) @binary_version('terraform', ['version'], TERRAFORM_VERSION_REGEX, TERRAFORM_VERSION) @enable_deletion(default=False) @click.pass_context def terraform_aws_route53(ctx, print_only, enable_deletion, thread_pool_size): run_integration(reconcile.terraform_aws_route53, ctx.obj, print_only, enable_deletion, thread_pool_size) @integration.command() @click.pass_context def github(ctx): run_integration(reconcile.github_org, ctx.obj) @integration.command() @click.pass_context def github_owners(ctx): run_integration(reconcile.github_owners, ctx.obj) @integration.command() @environ(['gitlab_pr_submitter_queue_url']) @gitlab_project_id @threaded() @enable_deletion(default=False) @send_mails(default=False) @click.pass_context def github_users(ctx, gitlab_project_id, thread_pool_size, enable_deletion, send_mails): run_integration(reconcile.github_users, ctx.obj, gitlab_project_id, thread_pool_size, enable_deletion, send_mails) @integration.command() @environ(['gitlab_pr_submitter_queue_url']) @gitlab_project_id @threaded() 
# qontract-reconcile CLI sub-commands.  Each function registers one
# integration with the `integration` click group and immediately delegates
# to run_integration(); the shared decorators (@threaded, @internal,
# @use_jump_host, @environ, @binary, @binary_version, ...) inject the
# corresponding parameters / preconditions.
# NOTE(review): the leading decorators of github_scanner (at least
# @integration.command()) are cut off at the start of this chunk.
@binary(['git', 'git-secrets'])
@click.pass_context
def github_scanner(ctx, gitlab_project_id, thread_pool_size):
    run_integration(reconcile.github_scanner, ctx.obj, gitlab_project_id, thread_pool_size)

@integration.command()
@click.pass_context
def github_validator(ctx):
    run_integration(reconcile.github_validator, ctx.obj)

# --- OpenShift integrations that talk to clusters: all take a thread-pool
# --- size plus the internal / jump-host cluster-selection flags.
@integration.command()
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_clusterrolebindings(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(reconcile.openshift_clusterrolebindings, ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_rolebindings(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(reconcile.openshift_rolebindings, ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_groups(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(reconcile.openshift_groups, ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_users(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(reconcile.openshift_users, ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@vault_output_path
@click.pass_context
def openshift_serviceaccount_tokens(ctx, thread_pool_size, internal, use_jump_host, vault_output_path):
    run_integration(reconcile.openshift_serviceaccount_tokens, ctx.obj, thread_pool_size, internal, use_jump_host, vault_output_path)

# --- Jenkins integrations -------------------------------------------------
@integration.command()
@click.pass_context
def jenkins_roles(ctx):
    run_integration(reconcile.jenkins_roles, ctx.obj)

@integration.command()
@click.pass_context
def jenkins_plugins(ctx):
    run_integration(reconcile.jenkins_plugins, ctx.obj)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@print_only
@config_name
@job_name
@instance_name
@throughput
@click.pass_context
def jenkins_job_builder(ctx, io_dir, print_only, config_name, job_name, instance_name):
    run_integration(reconcile.jenkins_job_builder, ctx.obj, io_dir, print_only, config_name, job_name, instance_name)

@integration.command()
@click.pass_context
def jenkins_job_builds_cleaner(ctx):
    run_integration(reconcile.jenkins_job_builds_cleaner, ctx.obj)

@integration.command()
@click.pass_context
def jenkins_job_cleaner(ctx):
    run_integration(reconcile.jenkins_job_cleaner, ctx.obj)

@integration.command()
@click.pass_context
def jenkins_webhooks(ctx):
    run_integration(reconcile.jenkins_webhooks, ctx.obj)

@integration.command()
@click.pass_context
def jenkins_webhooks_cleaner(ctx):
    run_integration(reconcile.jenkins_webhooks_cleaner, ctx.obj)

# --- watchers: need the app-interface state bucket ------------------------
@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@click.pass_context
def jira_watcher(ctx):
    run_integration(reconcile.jira_watcher, ctx.obj)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@click.pass_context
def unleash_watcher(ctx):
    run_integration(reconcile.unleash_watcher, ctx.obj)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@threaded()
@internal()
@use_jump_host()
@click.pass_context
def openshift_upgrade_watcher(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(reconcile.openshift_upgrade_watcher, ctx.obj, thread_pool_size, internal, use_jump_host)

# --- Slack / GitLab integrations ------------------------------------------
@integration.command()
@click.pass_context
def slack_usergroups(ctx):
    run_integration(reconcile.slack_usergroups, ctx.obj)

@integration.command()
@click.pass_context
def slack_cluster_usergroups(ctx):
    run_integration(reconcile.slack_cluster_usergroups, ctx.obj)

@integration.command()
@click.pass_context
def gitlab_integrations(ctx):
    run_integration(reconcile.gitlab_integrations, ctx.obj)

@integration.command()
@threaded()
@click.pass_context
def gitlab_permissions(ctx, thread_pool_size):
    run_integration(reconcile.gitlab_permissions, ctx.obj, thread_pool_size)

@integration.command()
@click.option('--wait-for-pipeline/--no-wait-for-pipeline', default=False, help='wait for pending/running pipelines before acting.')
@click.pass_context
def gitlab_housekeeping(ctx, wait_for_pipeline):
    run_integration(reconcile.gitlab_housekeeping, ctx.obj, wait_for_pipeline)

@integration.command()
@environ(['gitlab_pr_submitter_queue_url'])
@click.argument('gitlab-project-id')
@click.pass_context
def gitlab_mr_sqs_consumer(ctx, gitlab_project_id):
    run_integration(reconcile.gitlab_mr_sqs_consumer, ctx.obj, gitlab_project_id)

# --- AWS integrations -----------------------------------------------------
@integration.command()
@throughput
@threaded()
@click.pass_context
def aws_garbage_collector(ctx, thread_pool_size, io_dir):
    run_integration(reconcile.aws_garbage_collector, ctx.obj, thread_pool_size, io_dir)

@integration.command()
@threaded()
@account_name
@click.pass_context
def aws_iam_keys(ctx, thread_pool_size, account_name):
    run_integration(reconcile.aws_iam_keys, ctx.obj, thread_pool_size, account_name=account_name)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@click.pass_context
def aws_iam_password_reset(ctx):
    run_integration(reconcile.aws_iam_password_reset, ctx.obj)

@integration.command()
@vault_output_path
@click.pass_context
def aws_ecr_image_pull_secrets(ctx, vault_output_path):
    run_integration(reconcile.aws_ecr_image_pull_secrets, ctx.obj, vault_output_path)

@integration.command()
@environ(['gitlab_pr_submitter_queue_url'])
@gitlab_project_id
@threaded()
@click.pass_context
def aws_support_cases_sos(ctx, gitlab_project_id, thread_pool_size):
    run_integration(reconcile.aws_support_cases_sos, ctx.obj, gitlab_project_id, thread_pool_size)

# --- OpenShift resource / SaaS-deploy integrations ------------------------
@integration.command()
@threaded(default=20)
@binary(['oc', 'ssh', 'amtool'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@cluster_name
@namespace_name
@click.pass_context
def openshift_resources(ctx, thread_pool_size, internal, use_jump_host, cluster_name, namespace_name):
    run_integration(reconcile.openshift_resources, ctx.obj, thread_pool_size, internal, use_jump_host, cluster_name=cluster_name, namespace_name=namespace_name)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@environ(['gitlab_pr_submitter_queue_url'])
@gitlab_project_id
@threaded(default=20)
@throughput
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@click.option('--saas-file-name', default=None, help='saas-file to act on.')
@click.option('--env-name', default=None, help='environment to deploy to.')
@click.pass_context
def openshift_saas_deploy(ctx, thread_pool_size, io_dir, saas_file_name, env_name, gitlab_project_id):
    run_integration(reconcile.openshift_saas_deploy, ctx.obj, thread_pool_size, io_dir, saas_file_name, env_name, gitlab_project_id)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@environ(['gitlab_pr_submitter_queue_url'])
@gitlab_project_id
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@throughput
@click.pass_context
def openshift_saas_deploy_wrapper(ctx, thread_pool_size, io_dir, gitlab_project_id):
    run_integration(reconcile.openshift_saas_deploy_wrapper, ctx.obj, thread_pool_size, io_dir, gitlab_project_id)

@integration.command()
@click.pass_context
def saas_file_validator(ctx):
    run_integration(reconcile.saas_file_validator, ctx.obj)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_saas_deploy_trigger_moving_commits(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(
        reconcile.openshift_saas_deploy_trigger_moving_commits,
        ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_saas_deploy_trigger_upstream_jobs(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(
        reconcile.openshift_saas_deploy_trigger_upstream_jobs,
        ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_saas_deploy_trigger_configs(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(
        reconcile.openshift_saas_deploy_trigger_configs,
        ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_saas_deploy_trigger_cleaner(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(
        reconcile.openshift_saas_deploy_trigger_cleaner,
        ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@internal()
@use_jump_host()
@saas_file_name
@click.pass_context
def openshift_tekton_resources(ctx, thread_pool_size, internal, use_jump_host, saas_file_name):
    run_integration(reconcile.openshift_tekton_resources, ctx.obj, thread_pool_size, internal, use_jump_host, saas_file_name)

# --- merge-request helpers (invoked with a project id + MR id) ------------
@integration.command()
@throughput
@click.argument('gitlab-project-id')
@click.argument('gitlab-merge-request-id')
@click.option('--compare/--no-compare', default=True, help='compare between current and desired state.')
@click.pass_context
def saas_file_owners(ctx, gitlab_project_id, gitlab_merge_request_id, io_dir, compare):
    run_integration(reconcile.saas_file_owners, ctx.obj, gitlab_project_id, gitlab_merge_request_id, io_dir, compare)

@integration.command()
@click.argument('gitlab-project-id')
@click.argument('gitlab-merge-request-id')
@click.pass_context
def gitlab_ci_skipper(ctx, gitlab_project_id, gitlab_merge_request_id):
    run_integration(reconcile.gitlab_ci_skipper, ctx.obj, gitlab_project_id, gitlab_merge_request_id)

@integration.command()
@click.argument('gitlab-project-id')
@click.argument('gitlab-merge-request-id')
@click.pass_context
def gitlab_labeler(ctx, gitlab_project_id, gitlab_merge_request_id):
    run_integration(reconcile.gitlab_labeler, ctx.obj, gitlab_project_id, gitlab_merge_request_id)

# --- OpenShift namespace-scoped integrations ------------------------------
@integration.command()
@threaded()
@environ(['APP_INTERFACE_STATE_BUCKET', 'APP_INTERFACE_STATE_BUCKET_ACCOUNT'])
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_namespace_labels(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(reconcile.openshift_namespace_labels, ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_namespaces(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(reconcile.openshift_namespaces, ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_network_policies(ctx, thread_pool_size, internal, use_jump_host):
    run_integration(reconcile.openshift_network_policies, ctx.obj, thread_pool_size, internal, use_jump_host)

@integration.command()
@threaded()
@take_over()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_limitranges(ctx, thread_pool_size, internal, use_jump_host, take_over):
    run_integration(reconcile.openshift_limitranges, ctx.obj, thread_pool_size, internal, use_jump_host, take_over)

@integration.command()
@threaded()
@take_over()
@binary(['oc', 'ssh'])
@binary_version('oc', ['version', '--client'], OC_VERSION_REGEX, OC_VERSION)
@internal()
@use_jump_host()
@click.pass_context
def openshift_resourcequotas(ctx, thread_pool_size, internal, use_jump_host, take_over):
    # NOTE(review): the source chunk is truncated mid-call here.
    run_integration(reconcile.openshift_resourcequotas, ctx.obj, thread_pool_size,
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ multiPlot version 1.2 cleans up to make Pi and Windows use easier multiPlot version 1.1 adds sunrise sunset times for location of lat long in first file requires suntime library https://github.com/SatAgro/suntime multiPlot version v1.0 plots up to 10 PSWS "rawdata" files and average value modified from WWV_plt2.py @authors dkazdan jgibbons expects a homepath directory with raw files in subdirs, leaves plot in Mplot directory plots files from multiple subdir to compare node results plot title from first file windows version hardcoded homepath directory location for Pi comment out windows homepath and uncomment Pi lines uses WWV_utility2.py <NAME>, KD8CGH, 7/29/2021 create text file "plotfiles.txt" in homepath directory keyword ('Doppler' or 'Power') subdir/filename1 subdir/filename2 ... if found 'Doppler' will plot Doppler shifts, else will plot Power loads file names in list plots first file and create axis and title info plots rest in loop as curves on first plot calculates average and plots leaves plotfile in Mplot directory uses WWV_utility2.py 20 February 2020 WWV utility file Routines and classes used in WWV file management and graphing <NAME>, AD8Y <NAME>, N8OBJ - mods to plot header 2/3/20 """ #import os # uncomment for pi from os import path import sys import csv #import shutil # uncomment for pi #from datetime import date, timedelta # uncomment for pi import numpy as np import matplotlib.pyplot as plt from scipy.signal import filtfilt, butter import datetime from suntime import Sun, SunTimeException #import subprocess from WWV_utility2 import time_string_to_decimals ''' #uncomment for Pi # ~ points to users home directory - usually /home/pi/ homepath = os.path.expanduser('~') # imbed the trailing / in the home path homepath = homepath + "/PSWS/" #comment out windows homepath ''' homepath = "E:\\Documents\\PSWS\\" # set your windows path, comment out for Pi names = open(homepath+"plotfiles.txt","r") 
PlotTarget = names.readline() PlotTarget = PlotTarget.strip('\n') PlotAverage = names.readline() if PlotAverage[0:7] == 'Average': doavg=True else: doavg=False Filenames=['a' for a in range (10)] Filedates=['a' for a in range (10)] PrFilenames=['a' for a in range (10)] nfiles = 0 colors=['b','g','r','c','m','y','tab:orange','tab:gray','tab:purple','tab:brown'] while True: temp = names.readline() if len(temp) == 0: break Filenames[nfiles]=temp.strip("\n") Filedates[nfiles]=temp[17:23] nfiles=nfiles + 1 #print(Filenames[0:9]) #print(Filedates[0:9]) print('number of files',nfiles) if nfiles > 10 : print('10 file limit') sys.exit(0) PROCESSDIR = homepath #saved plot directrory PlotDir = homepath + 'Mplot/' ''' read first file ''' PrFilenames=(PROCESSDIR + Filenames[0]) if (path.exists(PrFilenames)): print('File ' + PrFilenames + ' found!\nProcessing...') else: print('File ' + PrFilenames + ' not available.\nExiting disappointed...') sys.exit(0) with open(PrFilenames, 'r') as dataFile: dataReader=csv.reader(dataFile) data = list(dataReader) Header = data.pop(0) #Figure out which header format reading NewHdr = 'Unknown' print('Header to check=',Header) # Check if First header line is of new format example #,2020-05-16T00:00:00Z,N00001,EN91fh,41.3219273, -81.5047731, 284.5,Macedonia Ohio,G1,WWV5 if (Header[0] == "#"): print('New Header String Detected') # Have new header format - pull the data fields out NewHdr = 'New' UTCDTZ = Header[1] print('\nUTCDTZ Original Header from file read = ' + UTCDTZ) UTC_DT = UTCDTZ[:10] # Strip off time and ONLY keep UTC Date print('\nExtracted UTC_DT only = ' + UTC_DT) UTCDTZ=UTCDTZ.replace(':','') # remove the semicolons print('\ncorrected UTCDTZ =', UTCDTZ) node= Header[2] # print('Node =', node) GridSqr = Header[3] # print('GridSqr =', GridSqr) Lat = Header[4] print('Lat =', Lat) Long = Header[5] print('Long =', Long) Elev = Header[6] # print('Elev =', Elev) citystate = Header[7] # print('City State =', citystate) RadioID = Header[8] # 
# (diagnostic print for RadioID kept disabled, matching its siblings)
# print('Radio ID =', RadioID)
beacon = Header[9]
# print('Beacon =', beacon)
if NewHdr == 'Unknown':
    ChkDate = Header[0]  # load in first row entry
    Cent = ChkDate[:2]  # first two digits are '20' for a date-led old header
    print(ChkDate, 'Header Yields Century of', Cent)  # diag printout
    if Cent == "20":
        print('Old Header String Detected')
        # Have old header format - pull the data fields out, example:
        # 2020-05-15,N8OBJ Macedonia Ohio EN91fh,LB GPSDO,41.3219273, -81.5047731, 284.5
        UTCDTZ = Header[0]
        UTC_DT = UTCDTZ[:10]  # keep only the UTC date
        UTCDTZ = UTCDTZ.replace(':', '')  # remove the colons
        print('UTCDTZ =', UTCDTZ)
        Lat = Header[3]
        # print('Lat =', Lat)
        Long = Header[4]
        # print('Long =', Long)
        Elev = Header[5]
        # print('Elev =', Elev)
        # NOTE(review): 'mh' is never imported in this script, so this
        # old-header path raises NameError as written -- confirm and add
        # 'import maidenhead as mh' at the top of the file.
        GridSqr = mh.to_maiden(float(Lat), float(Long))
        print('GridSqr =', GridSqr)
        citystate = Header[1]
        # print('City State =', citystate)
        RadioID = 'G1'
        # print('Radio ID =', RadioID)
        # beacon = "Unknown"
        # print('Beacon =', beacon)
        NewHdr = 'Old'
print('Header Decode =', NewHdr)
if NewHdr == 'Unknown':
    print('Unknown File header Structure - Aborting!')
    sys.exit(0)
print('Ready to start processing records')

# Prepare per-file data arrays: one slot per possible input file (10 max).
hours = [[], [], [], [], [], [], [], [], [], []]
Doppler = [[], [], [], [], [], [], [], [], [], []]
Vpk = [[], [], [], [], [], [], [], [], [], []]
Power_dB = [[], [], [], [], [], [], [], [], [], []]  # second data set, received power
filtDoppler = [[], [], [], [], [], [], [], [], [], []]
filtPower = [[], [], [], [], [], [], [], [], [], []]
LateHour = False  # flag for loop going past 23:00 hours

# Skip the metadata rows at the start of the file: scan for the 'UTC'
# CSV column-header row, then parse every data row after it.
FindUTC = 0
recordcnt = 0
freqcalc = 0
calccnt = 0
plot = 0
for row in data:
    if FindUTC == 0:
        # print('looking for UTC - row[0] =', row[0])
        if row[0] == 'UTC':
            FindUTC = 1
            # print('UTC found =', row[0])
    else:
        decHours = time_string_to_decimals(row[0])
        if NewHdr != 'New':
            # Old-format files: accumulate col 1 over the first records to
            # form an average frequency.
            # NOTE(review): the cap of 101 records combined with /100 per
            # record looks like an off-by-one -- confirm intent.
            if calccnt < 101:
                # BUGFIX: was 'calcnt = calcnt+1' -- a typo for 'calccnt'
                # that raised NameError on the first old-format record.
                calccnt = calccnt + 1
                freqcalc = freqcalc + (float(row[1]) / 100)
        # if decHours > 23:
        #     LateHour = True  # went past 23:00 hours
        if (not LateHour) or (LateHour and (decHours > 23)):
            # Otherwise past 23:59:59. Omit time past midnight.
            hours[0].append(decHours)  # already float (decimal hours)
            Doppler[0].append(float(row[2]))  # frequency offset from col 2
            Vpk[0].append(float(row[3]))  # volts peak from col 3
            Power_dB[0].append(float(row[4]))  # log power from col 4
print('nf ', 0, 'len hours', len(hours[0]))

###############################################################################
# Find max and min of each series for graph preparation:
min_power = np.amin(Power_dB[0])  # will use for graph axis min
max_power = np.amax(Power_dB[0])  # will use for graph axis max
min_Vpk = np.amin(Vpk[0])
max_Vpk = np.amax(Vpk[0])
min_Doppler = np.amin(Doppler[0])
max_Doppler = np.amax(Doppler[0])
print('\nDoppler min: ', min_Doppler, '; Doppler max: ', max_Doppler)
print('Vpk min: ', min_Vpk, '; Vpk max: ', max_Vpk)
print('dB min: ', min_power, '; dB max: ', max_power)

#%% Create a lowpass Butterworth filter (digital, analog=False).
# Filtering at .01 to .004 times the Nyquist rate seems "about right."
# Wn = .01 represents filtering at .05 Hz, i.e. 20 second weighted
# averaging, matching the 20 s symmetric window used in the 1 October 2019
# Excel spreadsheet for the Festival of Frequency Measurement data.
FILTERBREAK = 0.005  # filter breakpoint in Nyquist rates; N. rate here is 1/sec, so this is in Hz
FILTERORDER=6 b, a = butter(FILTERORDER, FILTERBREAK, analog=False, btype='low') #print (b, a) #%% # Use the just-created filter coefficients for a noncausal filtering (filtfilt is forward-backward noncausal) #print ('Filter Doppler shift data') filtDoppler[0] = filtfilt(b, a, Doppler[0]) #print ('Filter power data') filtPower[0] = filtfilt(b, a, Power_dB[0]) ################################################################################################## # sunrise sunset times in UTC sun = Sun(float(Lat), float(Long)) print(UTC_DT) SDAY=int(UTC_DT[8:10]) SMON=int(UTC_DT[5:7]) SYEAR=int(UTC_DT[0:4]) sdate = datetime.date(SYEAR, SMON, SDAY) today_sr = sun.get_sunrise_time(sdate) today_ss = sun.get_sunset_time(sdate) #print(today_sr) srh=int(format(today_sr.strftime('%H'))) srm=int(format(today_sr.strftime('%M'))) srx=srh+srm/60 ssh=int(format(today_ss.strftime('%H'))) ssm=int(format(today_ss.strftime('%M'))) ssx=ssh+ssm/60 # set up x-axis with time fig = plt.figure(figsize=(19,10)) # inches x, y with 72 dots per inch ax = fig.add_subplot(111) ax.set_xlabel('UTC Hour') ax.set_xlim(0,24) # UTC day ax.set_xticks([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24], minor=False) # plot first curve if (PlotTarget == 'Doppler'): ax.plot([srx,srx],[-1,1],'y',label='sunrise',linestyle='dashed') ax.plot([ssx,ssx],[-1,1],'b',label='sunset',linestyle='dashed') ax.plot(hours[0], filtDoppler[0], colors[0],label=Filedates[0]) # color k for black ax.set_ylabel('Doppler shift, Hz '+ Filedates[0]) ax.set_ylim([-1.0, 1.0]) # -1 to 1 Hz for Doppler shift plt.axhline(y=0, color="gray", lw=1) # plot a zero freq reference line for 0.000 Hz Doppler shift else: ax.plot([srx,srx],[-90,0],'y',label='sunrise',linestyle='dashed') ax.plot([ssx,ssx],[-90,0],'b',label='sunset',linestyle='dashed') ax.plot(hours[0], filtPower[0], colors[0],label=Filedates[0]) # color k for black ax.set_ylabel('Power, dB '+ Filedates[0]) ax.set_ylim(-90, 0) # add grid lines - RLB plt.grid(axis='both') 
''' ###################################################################### read and plot files loop ''' for nf in range(1, nfiles): # splot second curve # read second file, skip header print('process file ',nf, Filenames[nf]) PrFilenames=(PROCESSDIR + Filenames[nf]) with open(PrFilenames, 'r') as dataFile: # read second set dataReader=csv.reader(dataFile) data = list(dataReader) FindUTC = 0 recordcnt = 0 freqcalc = 0 calccnt = 0 for row in data: if (FindUTC == 0): #print('looking for UTC - row[0] =',row[0]) if (row[0] == 'UTC'): FindUTC = 1 # print('UTC found =', row[0]) else: decHours=time_string_to_decimals(row[0]) hours[nf].append(decHours) # already in float because of conversion to decimal hours. Doppler[nf].append(float(row[2])) # frequency offset from col 2 Vpk[nf].append (float(row[3])) # Get Volts peak from col 3 Power_dB[nf].append (float(row[4])) # log power from col 4 # filter file data filtDoppler[nf] = filtfilt(b, a, Doppler[nf]) filtPower[nf] = filtfilt(b, a, Power_dB[nf]) # print('nf ',nf,'hours',len(hours[nf])) # print('filtDoppler',len(filtDoppler[nf])) # print(filtDoppler[nf][0:9]) # print(hours[nf][0:9]) #ax2 = ax1.twinx() if (PlotTarget == 'Doppler'): ax.plot(hours[nf], filtDoppler[nf], colors[nf], label=Filedates[nf]) # color k for black else: ax.plot(hours[nf], filtPower[nf], colors[nf], label=Filedates[nf]) # color k for black ''' ############################################################################# end for read and plot loop, start average ''' # find shortest data set, limit average to that if doavg : al=1000000 ak=0 for k in range(nfiles): templ=len(hours[k]) if templ
<gh_stars>0 # # Autogenerated by Thrift Compiler (0.9.2) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py:twisted # from thrift.Thrift import TType, TMessageType, TException, TApplicationException from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None class ObjectInfo: """ Attributes: - object_id - object_name - object_reference - object_reference_versioned - type_string - save_date - version - saved_by - workspace_id - workspace_name - object_checksum - object_size - object_metadata """ thrift_spec = ( None, # 0 (1, TType.I64, 'object_id', None, None, ), # 1 (2, TType.STRING, 'object_name', None, None, ), # 2 (3, TType.STRING, 'object_reference', None, None, ), # 3 (4, TType.STRING, 'object_reference_versioned', None, None, ), # 4 (5, TType.STRING, 'type_string', None, None, ), # 5 (6, TType.STRING, 'save_date', None, None, ), # 6 (7, TType.I64, 'version', None, None, ), # 7 (8, TType.STRING, 'saved_by', None, None, ), # 8 (9, TType.I64, 'workspace_id', None, None, ), # 9 (10, TType.STRING, 'workspace_name', None, None, ), # 10 (11, TType.STRING, 'object_checksum', None, None, ), # 11 (12, TType.I64, 'object_size', None, None, ), # 12 (13, TType.MAP, 'object_metadata', (TType.STRING,None,TType.STRING,None), None, ), # 13 ) def __init__(self, object_id=None, object_name=None, object_reference=None, object_reference_versioned=None, type_string=None, save_date=None, version=None, saved_by=None, workspace_id=None, workspace_name=None, object_checksum=None, object_size=None, object_metadata=None,): self.object_id = object_id self.object_name = object_name self.object_reference = object_reference self.object_reference_versioned = object_reference_versioned self.type_string = type_string self.save_date = save_date self.version = version self.saved_by = saved_by self.workspace_id = workspace_id self.workspace_name = 
workspace_name self.object_checksum = object_checksum self.object_size = object_size self.object_metadata = object_metadata def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I64: self.object_id = iprot.readI64(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.object_name = iprot.readString(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.object_reference = iprot.readString(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.object_reference_versioned = iprot.readString(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.type_string = iprot.readString(); else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.save_date = iprot.readString(); else: iprot.skip(ftype) elif fid == 7: if ftype == TType.I64: self.version = iprot.readI64(); else: iprot.skip(ftype) elif fid == 8: if ftype == TType.STRING: self.saved_by = iprot.readString(); else: iprot.skip(ftype) elif fid == 9: if ftype == TType.I64: self.workspace_id = iprot.readI64(); else: iprot.skip(ftype) elif fid == 10: if ftype == TType.STRING: self.workspace_name = iprot.readString(); else: iprot.skip(ftype) elif fid == 11: if ftype == TType.STRING: self.object_checksum = iprot.readString(); else: iprot.skip(ftype) elif fid == 12: if ftype == TType.I64: self.object_size = iprot.readI64(); else: iprot.skip(ftype) elif fid == 13: if ftype == TType.MAP: self.object_metadata = {} (_ktype1, _vtype2, _size0 ) = iprot.readMapBegin() for _i4 in xrange(_size0): _key5 = iprot.readString(); _val6 = iprot.readString(); self.object_metadata[_key5] 
= _val6 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ObjectInfo') if self.object_id is not None: oprot.writeFieldBegin('object_id', TType.I64, 1) oprot.writeI64(self.object_id) oprot.writeFieldEnd() if self.object_name is not None: oprot.writeFieldBegin('object_name', TType.STRING, 2) oprot.writeString(self.object_name) oprot.writeFieldEnd() if self.object_reference is not None: oprot.writeFieldBegin('object_reference', TType.STRING, 3) oprot.writeString(self.object_reference) oprot.writeFieldEnd() if self.object_reference_versioned is not None: oprot.writeFieldBegin('object_reference_versioned', TType.STRING, 4) oprot.writeString(self.object_reference_versioned) oprot.writeFieldEnd() if self.type_string is not None: oprot.writeFieldBegin('type_string', TType.STRING, 5) oprot.writeString(self.type_string) oprot.writeFieldEnd() if self.save_date is not None: oprot.writeFieldBegin('save_date', TType.STRING, 6) oprot.writeString(self.save_date) oprot.writeFieldEnd() if self.version is not None: oprot.writeFieldBegin('version', TType.I64, 7) oprot.writeI64(self.version) oprot.writeFieldEnd() if self.saved_by is not None: oprot.writeFieldBegin('saved_by', TType.STRING, 8) oprot.writeString(self.saved_by) oprot.writeFieldEnd() if self.workspace_id is not None: oprot.writeFieldBegin('workspace_id', TType.I64, 9) oprot.writeI64(self.workspace_id) oprot.writeFieldEnd() if self.workspace_name is not None: oprot.writeFieldBegin('workspace_name', TType.STRING, 10) oprot.writeString(self.workspace_name) oprot.writeFieldEnd() if self.object_checksum is not None: oprot.writeFieldBegin('object_checksum', TType.STRING, 11) 
oprot.writeString(self.object_checksum) oprot.writeFieldEnd() if self.object_size is not None: oprot.writeFieldBegin('object_size', TType.I64, 12) oprot.writeI64(self.object_size) oprot.writeFieldEnd() if self.object_metadata is not None: oprot.writeFieldBegin('object_metadata', TType.MAP, 13) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.object_metadata)) for kiter7,viter8 in self.object_metadata.items(): oprot.writeString(kiter7) oprot.writeString(viter8) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.object_id) value = (value * 31) ^ hash(self.object_name) value = (value * 31) ^ hash(self.object_reference) value = (value * 31) ^ hash(self.object_reference_versioned) value = (value * 31) ^ hash(self.type_string) value = (value * 31) ^ hash(self.save_date) value = (value * 31) ^ hash(self.version) value = (value * 31) ^ hash(self.saved_by) value = (value * 31) ^ hash(self.workspace_id) value = (value * 31) ^ hash(self.workspace_name) value = (value * 31) ^ hash(self.object_checksum) value = (value * 31) ^ hash(self.object_size) value = (value * 31) ^ hash(self.object_metadata) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class ExternalDataUnit: """ Attributes: - resource_name - resource_url - resource_version - resource_release_date - data_url - data_id - description """ thrift_spec = ( None, # 0 (1, TType.STRING, 'resource_name', None, None, ), # 1 (2, TType.STRING, 'resource_url', None, None, ), # 2 (3, TType.STRING, 'resource_version', None, None, ), # 3 (4, TType.STRING, 'resource_release_date', None, None, ), # 4 (5, TType.STRING, 'data_url', None, 
None, ), # 5 (6, TType.STRING, 'data_id', None, None, ), # 6 (7, TType.STRING, 'description', None, None, ), # 7 ) def __init__(self, resource_name=None, resource_url=None, resource_version=None, resource_release_date=None, data_url=None, data_id=None, description=None,): self.resource_name = resource_name self.resource_url = resource_url self.resource_version = resource_version self.resource_release_date = resource_release_date self.data_url = data_url self.data_id = data_id self.description = description def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.resource_name = iprot.readString(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.resource_url = iprot.readString(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.resource_version = iprot.readString(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.resource_release_date = iprot.readString(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.data_url = iprot.readString(); else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.data_id = iprot.readString(); else: iprot.skip(ftype) elif fid == 7: if ftype == TType.STRING: self.description = iprot.readString(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return 
oprot.writeStructBegin('ExternalDataUnit') if self.resource_name is not None: oprot.writeFieldBegin('resource_name', TType.STRING, 1) oprot.writeString(self.resource_name) oprot.writeFieldEnd() if self.resource_url is not None: oprot.writeFieldBegin('resource_url', TType.STRING, 2) oprot.writeString(self.resource_url) oprot.writeFieldEnd() if self.resource_version is not None: oprot.writeFieldBegin('resource_version', TType.STRING, 3) oprot.writeString(self.resource_version) oprot.writeFieldEnd() if self.resource_release_date is not None: oprot.writeFieldBegin('resource_release_date', TType.STRING, 4) oprot.writeString(self.resource_release_date) oprot.writeFieldEnd() if self.data_url is not None: oprot.writeFieldBegin('data_url', TType.STRING, 5) oprot.writeString(self.data_url) oprot.writeFieldEnd() if self.data_id is not None: oprot.writeFieldBegin('data_id', TType.STRING, 6) oprot.writeString(self.data_id) oprot.writeFieldEnd() if self.description is not None: oprot.writeFieldBegin('description', TType.STRING, 7) oprot.writeString(self.description) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.resource_name) value = (value * 31) ^ hash(self.resource_url) value = (value * 31) ^ hash(self.resource_version) value = (value * 31) ^ hash(self.resource_release_date) value = (value * 31) ^ hash(self.data_url) value = (value * 31) ^ hash(self.data_id) value = (value * 31) ^ hash(self.description) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class ObjectProvenanceAction: """ Attributes: - time - service_name - service_version - service_method - method_parameters - script_name - script_version 
- script_command_line - input_object_references - validated_object_references - intermediate_input_ids - intermediate_output_ids - external_data - description """ thrift_spec = ( None, # 0 (1, TType.STRING, 'time', None, None, ), # 1 (2, TType.STRING, 'service_name', None, None, ), # 2 (3, TType.STRING, 'service_version', None, None, ), # 3 (4, TType.STRING, 'service_method', None, None, ), # 4 (5, TType.LIST, 'method_parameters', (TType.STRING,None), None, ),
except Exception as e: raise UnmarshallError(resp, e.message) def GetAvatars(self, username, headers=None, query_params=None, content_type="application/json"): """ List all avatars for the user It is method for GET /users/{username}/avatar """ uri = self.client.base_url + "/users/" + username + "/avatar" resp = self.client.get(uri, None, headers, query_params, content_type) try: if resp.status_code == 200: return APIResponse(data=Avatar(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def CreateAvatarFromLink(self, data, username, headers=None, query_params=None, content_type="application/json"): """ Create a new avatar with the specified label from a link It is method for POST /users/{username}/avatar """ uri = self.client.base_url + "/users/" + username + "/avatar" resp = self.client.post(uri, data, headers, query_params, content_type) try: if resp.status_code == 201: return APIResponse(data=Avatar(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def DeleteUserBankAccount(self, username, label, headers=None, query_params=None, content_type="application/json"): """ Delete a BankAccount It is method for DELETE /users/{username}/banks/{label} """ uri = self.client.base_url + "/users/" + username + "/banks/" + label return self.client.delete(uri, None, headers, query_params, content_type) def GetUserBankAccountByLabel( self, username, label, headers=None, query_params=None, content_type="application/json" ): """ 
Get the details of a bank account It is method for GET /users/{username}/banks/{label} """ uri = self.client.base_url + "/users/" + username + "/banks/" + label resp = self.client.get(uri, None, headers, query_params, content_type) try: if resp.status_code == 200: return APIResponse(data=BankAccount(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def UpdateUserBankAccount( self, data, username, label, headers=None, query_params=None, content_type="application/json" ): """ Update an existing bankaccount and label. It is method for PUT /users/{username}/banks/{label} """ uri = self.client.base_url + "/users/" + username + "/banks/" + label resp = self.client.put(uri, data, headers, query_params, content_type) try: if resp.status_code == 200: return APIResponse(data=BankAccount(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def GetUserBankAccounts(self, username, headers=None, query_params=None, content_type="application/json"): """ List of the user his bank accounts. 
It is method for GET /users/{username}/banks """ uri = self.client.base_url + "/users/" + username + "/banks" resp = self.client.get(uri, None, headers, query_params, content_type) try: if resp.status_code == 200: resps = [] for elem in resp.json(): resps.append(BankAccount(elem)) return APIResponse(data=resps, response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def CreateUserBankAccount(self, data, username, headers=None, query_params=None, content_type="application/json"): """ Create new bank account It is method for POST /users/{username}/banks """ uri = self.client.base_url + "/users/" + username + "/banks" resp = self.client.post(uri, data, headers, query_params, content_type) try: if resp.status_code == 201: return APIResponse(data=BankAccount(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def GetUserContracts(self, username, headers=None, query_params=None, content_type="application/json"): """ Get the contracts where the user is 1 of the parties. Order descending by date. 
It is method for GET /users/{username}/contracts """ uri = self.client.base_url + "/users/" + username + "/contracts" resp = self.client.get(uri, None, headers, query_params, content_type) try: if resp.status_code == 200: resps = [] for elem in resp.json(): resps.append(Contract(elem)) return APIResponse(data=resps, response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def CreateUserContract(self, data, username, headers=None, query_params=None, content_type="application/json"): """ Create a new contract. It is method for POST /users/{username}/contracts """ uri = self.client.base_url + "/users/" + username + "/contracts" resp = self.client.post(uri, data, headers, query_params, content_type) try: if resp.status_code == 201: return APIResponse(data=Contract(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def DeleteDigitalAssetAddress( self, label, username, headers=None, query_params=None, content_type="application/json" ): """ Removes an address It is method for DELETE /users/{username}/digitalwallet/{label} """ uri = self.client.base_url + "/users/" + username + "/digitalwallet/" + label return self.client.delete(uri, None, headers, query_params, content_type) def GetDigitalAssetAddressByLabel( self, label, username, headers=None, query_params=None, content_type="application/json" ): """ Get the details of a digital wallet address. 
It is method for GET /users/{username}/digitalwallet/{label} """ uri = self.client.base_url + "/users/" + username + "/digitalwallet/" + label resp = self.client.get(uri, None, headers, query_params, content_type) try: if resp.status_code == 200: return APIResponse(data=DigitalAssetAddress(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def UpdateDigitalAssetAddress( self, data, label, username, headers=None, query_params=None, content_type="application/json" ): """ Update the label and/or value of an existing address. It is method for PUT /users/{username}/digitalwallet/{label} """ uri = self.client.base_url + "/users/" + username + "/digitalwallet/" + label return self.client.put(uri, data, headers, query_params, content_type) def GetDigitalWallet(self, username, headers=None, query_params=None, content_type="application/json"): """ List all of the user his digital wallets. 
It is method for GET /users/{username}/digitalwallet """ uri = self.client.base_url + "/users/" + username + "/digitalwallet" resp = self.client.get(uri, None, headers, query_params, content_type) try: if resp.status_code == 200: resps = [] for elem in resp.json(): resps.append(DigitalAssetAddress(elem)) return APIResponse(data=resps, response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def RegisterNewDigitalAssetAddress( self, data, username, headers=None, query_params=None, content_type="application/json" ): """ Register a new digital asset address It is method for POST /users/{username}/digitalwallet """ uri = self.client.base_url + "/users/" + username + "/digitalwallet" resp = self.client.post(uri, data, headers, query_params, content_type) try: if resp.status_code == 201: return APIResponse(data=DigitalAssetAddress(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def ValidateEmailAddress( self, data, label, username, headers=None, query_params=None, content_type="application/json" ): """ Sends validation email to email address It is method for POST /users/{username}/emailaddresses/{label}/validate """ uri = self.client.base_url + "/users/" + username + "/emailaddresses/" + label + "/validate" return self.client.post(uri, data, headers, query_params, content_type) def DeleteEmailAddress(self, label, username, headers=None, query_params=None, content_type="application/json"): """ Removes an email address It is method for 
DELETE /users/{username}/emailaddresses/{label} """ uri = self.client.base_url + "/users/" + username + "/emailaddresses/" + label return self.client.delete(uri, None, headers, query_params, content_type) def UpdateEmailAddress( self, data, label, username, headers=None, query_params=None, content_type="application/json" ): """ Updates the label and/or value of an email address It is method for PUT /users/{username}/emailaddresses/{label} """ uri = self.client.base_url + "/users/" + username + "/emailaddresses/" + label return self.client.put(uri, data, headers, query_params, content_type) def GetEmailAddresses(self, username, headers=None, query_params=None, content_type="application/json"): """ Get a list of the user his email addresses. It is method for GET /users/{username}/emailaddresses """ uri = self.client.base_url + "/users/" + username + "/emailaddresses" resp = self.client.get(uri, None, headers, query_params, content_type) try: if resp.status_code == 200: resps = [] for elem in resp.json(): resps.append(EmailAddress(elem)) return APIResponse(data=resps, response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def RegisterNewEmailAddress(self, data, username, headers=None, query_params=None, content_type="application/json"): """ Register a new email address It is method for POST /users/{username}/emailaddresses """ uri = self.client.base_url + "/users/" + username + "/emailaddresses" resp = self.client.post(uri, data, headers, query_params, content_type) try: if resp.status_code == 201: return APIResponse(data=EmailAddress(resp.json()), response=resp) message = "unknown status code={}".format(resp.status_code) raise UnhandledAPIError(response=resp, code=resp.status_code, message=message) except 
ValueError as msg: raise UnmarshallError(resp, msg) except UnhandledAPIError as uae: raise uae except Exception as e: raise UnmarshallError(resp, e.message) def DeleteFacebookAccount(self, username, headers=None, query_params=None, content_type="application/json"): """ Delete the associated facebook account
ndarray(shape, self.dtype, offset=offset, strides=strides, buffer=self) def __setitem__(self, key, value): # Get info for view offset, shape, strides = self._index_helper(key) # Is this easy? if not shape: self._data[offset] = value return # Create view to set data to view = ndarray(shape, self.dtype, offset=offset, strides=strides, buffer=self) # Get data to set as a list (because getting slices from ctype # arrays yield lists anyway). The list is our "contiguous array" if isinstance(value, (float, int)): value_list = [value] * view.size elif isinstance(value, (tuple, list)): value_list = value else: if not isinstance(value, ndarray): value = array(value, copy=False) value_list = value._toflatlist() # Check if size match if view.size != len(value_list): raise ValueError('Number of elements in source does not match ' 'number of elements in target.') # Assign data in most efficient way that we can. This code # looks for the largest semi-contiguous block: the block that # we can access as a 1D array with a stepsize. 
subviews = [view] value_index = 0 count = 0 while subviews: subview = subviews.pop(0) step = _get_step(subview) if step: block = value_list[value_index:value_index+subview.size] s = slice(subview._offset, subview._offset + subview.size * step, step) view._data[s] = block value_index += subview.size count += 1 else: for i in range(subview.shape[0]): subviews.append(subview[i]) assert value_index == len(value_list) def __float__(self): if self.size == 1: return float(self.data[self._offset]) else: raise TypeError('Only length-1 arrays can be converted to scalar') def __int__(self): if self.size == 1: return int(self.data[self._offset]) else: raise TypeError('Only length-1 arrays can be converted to scalar') def __repr__(self): # If more than 100 elements, show short repr if self.size > 100: shapestr = 'x'.join([str(i) for i in self.shape]) return '<ndarray %s %s at 0x%x>' % (shapestr, self.dtype, id(self)) # Otherwise, try to show in nice way def _repr_r(s, axis, offset): axisindent = min(2, max(0, (self.ndim - axis - 1))) if axis < len(self.shape): s += '[' for k_index, k in enumerate(xrange(self.shape[axis])): if k_index > 0: s += ('\n ' + ' ' * axis) * axisindent offset_ = offset + k * self._strides[axis] // self.itemsize s = _repr_r(s, axis+1, offset_) if k_index < self.shape[axis] - 1: s += ', ' s += ']' else: r = repr(self.data[offset]) if '.' in r: r = ' ' + r if r.endswith('.0'): r = r[:-1] s += r return s s = _repr_r('', 0, self._offset) if self.dtype != 'float64' and self.dtype != 'int32': return "array(" + s + ", dtype='%s')" % self.dtype else: return "array(" + s + ")" def __eq__(self, other): if other.__module__.split('.')[0] == 'numpy': return other == self else: out = empty(self.shape, 'bool') out[:] = [i1==i2 for (i1, i2) in zip(self.flat, other.flat)] return out ## Private helper functions def _index_helper(self, key): # Indexing spec is located at: # http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html # Promote to tuple. 
if not isinstance(key, tuple): key = (key,) axis = 0 shape = [] strides = [] offset = self._offset for k in key: axissize = self._shape[axis] if isinstance(k, int): if k >= axissize: raise IndexError('index %i is out of bounds for axis %i ' 'with size %s' % (k, axis, axissize)) offset += k * self._strides[axis] // self.itemsize axis += 1 elif isinstance(k, slice): start, stop, step = k.indices(self.shape[axis]) shape.append(_ceildiv(stop - start, step)) strides.append(step * self._strides[axis]) offset += start * self._strides[axis] // self.itemsize axis += 1 elif k is Ellipsis: raise TypeError("ellipsis are not supported.") elif k is None: shape.append(1) stride = 1 for s in self._strides[axis:]: stride *= s strides.append(stride) else: raise TypeError("key elements must be instaces of int or slice.") shape.extend(self.shape[axis:]) strides.extend(self._strides[axis:]) return offset, tuple(shape), tuple(strides) def _toflatlist(self): value_list = [] subviews = [self] count = 0 while subviews: subview = subviews.pop(0) step = _get_step(subview) if step: s = slice(subview._offset, subview._offset + subview.size * step, step) value_list += self._data[s] count += 1 else: for i in range(subview.shape[0]): subviews.append(subview[i]) return value_list ## Properties @property def ndim(self): return len(self._shape) @property def size(self): return _size_for_shape(self._shape) @property def nbytes(self): return _size_for_shape(self._shape) * self.itemsize def _get_shape(self): return self._shape def _set_shape(self, newshape): if newshape == self.shape: return if self.size != _size_for_shape(newshape): raise ValueError('Total size of new array must be unchanged') if _get_step(self) == 1: # Contiguous, hooray! self._shape = tuple(newshape) self._strides = _strides_for_shape(self._shape, self.itemsize) return # Else, try harder ... This code supports adding /removing # singleton dimensions. 
Although it may sometimes be possible # to split a dimension in two if the contiguous blocks allow # this, we don't bother with such complex cases for now. # Squeeze shape / strides N = self.ndim shape = [self.shape[i] for i in range(N) if self.shape[i] > 1] strides = [self.strides[i] for i in range(N) if self.shape[i] > 1] # Check if squeezed shapes match newshape_ = [newshape[i] for i in range(len(newshape)) if newshape[i] > 1] if newshape_ != shape: raise AttributeError('incompatible shape for non-contiguous array') # Modify to make this data work in loop strides.append(strides[-1]) shape.append(1) # Form new strides i = -1 newstrides = [] try: for s in reversed(newshape): if s == 1: newstrides.append(strides[i] * shape[i]) else: i -= 1 newstrides.append(strides[i]) except IndexError: # Fail raise AttributeError('incompatible shape for non-contiguous array') else: # Success newstrides.reverse() self._shape = tuple(newshape) self._strides = tuple(newstrides) shape = property(_get_shape, _set_shape) # Python 2.5 compat (e.g. Jython) @property def strides(self): return self._strides @property def dtype(self): return self._dtype @property def itemsize(self): return self._itemsize @property def base(self): return self._base @property def data(self): return self._data @property def flat(self): subviews = [self] count = 0 while subviews: subview = subviews.pop(0) step = _get_step(subview) if step: s = slice(subview._offset, subview._offset + subview.size * step, step) for i in self._data[s]: yield i else: for i in range(subview.shape[0]): subviews.append(subview[i]) @property def T(self): if self.ndim < 2: return self else: return self.transpose() @property def flags(self): c_cont = _get_step(self) == 1 return dict(C_CONTIGUOUS=c_cont, F_CONTIGUOUS=(c_cont and self.ndim < 2), OWNDATA=(self._base is None), WRITEABLE=True, # todo: fix this ALIGNED=c_cont, # todo: different from contiguous? 
UPDATEIFCOPY=False, # We don't support this feature ) ## Methods - managemenet def fill(self, value): assert isinstance(value, (int, float)) self[:] = value def clip(self, a_min, a_max, out=None): if out is None: out = empty(self.shape, self.dtype) L = self._toflatlist() L = [min(a_max, max(a_min, x)) for x in L] out[:] = L return out def copy(self): out = empty(self.shape, self.dtype) out[:] = self return out def flatten(self): out = empty((self.size,), self.dtype) out[:] = self return out def ravel(self): return self.reshape((self.size, )) def repeat(self, repeats, axis=None): if axis: raise (TypeError, "axis argument is not supported") out = empty((self.size * repeats,), self.dtype) for i in range(repeats): out[i*self.size:(i+1)*self.size] = self return out def reshape(self, newshape): out = self.view() try: out.shape = newshape except AttributeError: out = self.copy() out.shape = newshape return out def transpose(self): # Numpy returns a view, but we cannot do that since we do not # support Fortran ordering ndim = self.ndim if ndim < 2: return self.view() shape = self.shape[::-1] out = empty(shape, self.dtype) # if ndim == 2: for i in xrange(self.shape[0]): out[:, i] = self[i, :] elif ndim == 3: for i in xrange(self.shape[0]): for j in xrange(self.shape[1]): out[:, j, i] = self[i, j, :] else: raise ValueError('Tinynumpy supports transpose up to ndim=3') return out def astype(self, dtype): out = empty(self.shape, dtype) out[:] = self def view(self, dtype=None, type=None): if dtype is None: dtype = self.dtype if dtype == self.dtype: return ndarray(self.shape, dtype, buffer=self, offset=self._offset, strides=self.strides) elif self.ndim == 1: itemsize = int(_convert_dtype(dtype, 'short')[-1]) size = self.nbytes // itemsize offsetinbytes = self._offset * self.itemsize offset = offsetinbytes // itemsize return ndarray((size, ), dtype, buffer=self, offset=offset) else: raise ValueError('new type not compatible with array.') ## Methods -
return _wrap(min(self, key=func)) def find(self, func): """ Finds the first element of the sequence that satisfies func. If no such element exists, then return None. >>> seq(["abc", "ab", "bc"]).find(lambda x: len(x) == 2) 'ab' :param func: function to find with :return: first element to satisfy func or None """ for element in self: if func(element): return element return None def flatten(self): """ Flattens a sequence of sequences to a single sequence of elements. >>> seq([[1, 2], [3, 4], [5, 6]]) [1, 2, 3, 4, 5, 6] :return: flattened sequence """ return self._transform(transformations.flatten_t()) def flat_map(self, func): """ Applies func to each element of the sequence, which themselves should be sequences. Then appends each element of each sequence to a final result >>> seq([[1, 2], [3, 4], [5, 6]]).flat_map(lambda x: x) [1, 2, 3, 4, 5, 6] >>> seq(["a", "bc", "def"]).flat_map(list) ['a', 'b', 'c', 'd', 'e', 'f'] >>> seq([[1], [2], [3]]).flat_map(lambda x: x * 2) [1, 1, 2, 2, 3, 3] :param func: function to apply to each sequence in the sequence :return: application of func to elements followed by flattening """ return self._transform(transformations.flat_map_t(func)) def group_by(self, func): """ Group elements into a list of (Key, Value) tuples where func creates the key and maps to values matching that key. >>> seq(["abc", "ab", "z", "f", "qw"]).group_by(len) [(1, ['z', 'f']), (2, ['ab', 'qw']), (3, ['abc'])] :param func: group by result of this function :return: grouped sequence """ return self._transform(transformations.group_by_t(func)) def group_by_key(self): """ Group sequence of (Key, Value) elements by Key. >>> seq([('a', 1), ('b', 2), ('b', 3), ('b', 4), ('c', 3), ('c', 0)]).group_by_key() [('a', [1]), ('c', [3, 0]), ('b', [2, 3, 4])] :return: sequence grouped by key """ return self._transform(transformations.group_by_key_t()) def reduce_by_key(self, func): """ Reduces a sequence of (Key, Value) using func on each sequence of values. 
>>> seq([('a', 1), ('b', 2), ('b', 3), ('b', 4), ('c', 3), ('c', 0)]) \ .reduce_by_key(lambda x, y: x + y) [('a', 1), ('c', 3), ('b', 9)] :param func: reduce each list of values using two parameter, associative func :return: Sequence of tuples where the value is reduced with func """ return self._transform(transformations.reduce_by_key_t(func)) def count_by_key(self): """ Reduces a sequence of (Key, Value) by counting each key >>> seq([('a', 1), ('b', 2), ('b', 3), ('b', 4), ('c', 3), ('c', 0)]).count_by_key() [('a', 1), ('b', 3), ('c', 2)] :return: Sequence of tuples where value is the count of each key """ return self._transform(transformations.count_by_key_t()) def count_by_value(self): """ Reduces a sequence of items by counting each unique item >>> seq(['a', 'a', 'a', 'b', 'b', 'c', 'd']).count_by_value() [('a', 3), ('b', 2), ('c', 1), ('d', 1)] :return: Sequence of tuples where value is the count of each key """ return self._transform(transformations.count_by_value_t()) def reduce(self, func, *initial): """ Reduce sequence of elements using func. API mirrors functools.reduce >>> seq([1, 2, 3]).reduce(lambda x, y: x + y) 6 :param func: two parameter, associative reduce function :param initial: single optional argument acting as initial value :return: reduced value using func """ if len(initial) == 0: return _wrap(reduce(func, self)) elif len(initial) == 1: return _wrap(reduce(func, self, initial[0])) else: raise ValueError('reduce takes exactly one optional parameter for initial value') def accumulate(self, func=add): """ Accumulate sequence of elements using func. 
API mirrors itertools.accumulate >>> seq([1, 2, 3]).accumulate(lambda x, y: x + y) [1, 3, 6] >>> seq(['a', 'b', 'c']).accumulate() ['a', 'ab', 'abc'] :param func: two parameter, associative accumulate function :return: accumulated values using func in sequence """ return self._transform(transformations.accumulate_t(func)) def make_string(self, separator): """ Concatenate the elements of the sequence into a string separated by separator. >>> seq([1, 2, 3]).make_string("@") '1@2@3' :param separator: string separating elements in string :return: concatenated string separated by separator """ return separator.join(str(e) for e in self) def product(self, projection=None): """ Takes product of elements in sequence. >>> seq([1, 2, 3, 4]).product() 24 >>> seq([]).product() 1 >>> seq([(1, 2), (1, 3), (1, 4)]).product(lambda x: x[0]) 1 :param projection: function to project on the sequence before taking the product :return: product of elements in sequence """ if self.empty(): if projection: return projection(1) else: return 1 if self.size() == 1: if projection: return projection(self.first()) else: return self.first() if projection: return self.map(projection).reduce(mul) else: return self.reduce(mul) def sum(self, projection=None): """ Takes sum of elements in sequence. 
>>> seq([1, 2, 3, 4]).sum() 10 >>> seq([(1, 2), (1, 3), (1, 4)]).sum(lambda x: x[0]) 3 :param projection: function to project on the sequence before taking the sum :return: sum of elements in sequence """ if projection: return sum(self.map(projection)) else: return sum(self) def average(self, projection=None): """ Takes the average of elements in the sequence >>> seq([1, 2]).average() 1.5 >>> seq([('a', 1), ('b', 2)]).average(lambda x: x[1]) :param projection: function to project on the sequence before taking the average :return: average of elements in the sequence """ length = self.size() if projection: return sum(self.map(projection)) / length else: return sum(self) / length def aggregate(self, *args): """ Aggregates the sequence by specified arguments. Its behavior varies depending on if one, two, or three arguments are passed. Assuming the type of the sequence is A: One Argument: argument specifies a function of the type f(current: B, next: A => result: B. current represents results computed so far, and next is the next element to aggregate into current in order to return result. Two Argument: the first argument is the seed value for the aggregation. The second argument is the same as for the one argument case. Three Argument: the first two arguments are the same as for one and two argument calls. The additional third parameter is a function applied to the result of the aggregation before returning the value. 
:param args: options for how to execute the aggregation :return: aggregated value """ seed = None result_lambda = identity if len(args) == 1: func = args[0] elif len(args) == 2: seed = args[0] func = args[1] elif len(args) == 3: seed = args[0] func = args[1] result_lambda = args[2] else: raise ValueError('aggregate takes 1-3 arguments, {0} were given'.format(len(args))) if len(args) == 1: return result_lambda(self.drop(1).fold_left(self.first(), func)) else: return result_lambda(self.fold_left(seed, func)) def fold_left(self, zero_value, func): """ Assuming that the sequence elements are of type A, folds from left to right starting with the seed value given by zero_value (of type A) using a function of type func(current: B, next: A) => B. current represents the folded value so far and next is the next element from the sequence to fold into current. >>> seq('a', 'b', 'c').fold_left(['start'], lambda current, next: current + [next])) ['start', 'a', 'b', 'c'] :param zero_value: zero value to reduce into :param func: Two parameter function as described by function docs :return: value from folding values with func into zero_value from left to right. """ result = zero_value for element in self: result = func(result, element) return _wrap(result) def fold_right(self, zero_value, func): """ Assuming that the sequence elements are of type A, folds from right to left starting with the seed value given by zero_value (of type A) using a function of type func(next: A, current: B) => B. current represents the folded value so far and next is the next element from the sequence to fold into current. 
>>> seq('a', 'b', 'c').fold_left(['start'], lambda next, current: current + [next]) ['start', 'c', 'b', a'] :param zero_value: zero value to reduce into :param func: Two parameter function as described by function docs :return: value from folding values with func into zero_value from right to left """ result = zero_value for element in self.reverse(): result = func(element, result) return _wrap(result) def zip(self, sequence): """ Zips the stored sequence with the given sequence. >>> seq([1, 2, 3]).zip([4, 5, 6]) [(1, 4), (2, 5), (3, 6)] :param sequence: second sequence to zip :return: stored sequence zipped with given sequence """ return self._transform(transformations.zip_t(sequence)) def zip_with_index(self, start=0): """ Zips the sequence to its index, with the index being the second element of each tuple. >>> seq(['a', 'b', 'c']).zip_with_index() [('a',
# NOTE(review): dropped the stray "<gh_stars>1-10" dataset artifact that
# preceded the imports in the original -- it is not valid Python.
"""CovidOff "View Data" portal.

Streamlit front-end that reads crowd-sourced COVID resource sheets
(plasma requirements/donors, oxygen requirements/suppliers, hospital
beds) from Google Sheets and lets the user browse the full table or
filter it by district and/or blood group.
"""
import streamlit as st
import numpy as np
import pandas as pd
from gspread_dataframe import get_as_dataframe, set_with_dataframe
import gspread
from oauth2client.service_account import ServiceAccountCredentials

# Google-API authorization; the service-account key file must sit next
# to the app.
scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    'covidoff-ecef33b9fe0b.json', scope)
gc = gspread.authorize(credentials)

# District choices (West Bengal).  The original inlined this tuple in five
# places and was missing the comma after "MURSHIDABAD", so Python's implicit
# string concatenation silently fused it with "NADIA" into one bogus
# "MURSHIDABADNADIA" entry -- fixed here, once.
DISTRICTS = (
    "Select District from the Dropdown",
    "24 PARAGANAS NORTH", "24 PARAGANAS SOUTH", "Alipurduar", "BANKURA",
    "BIRBHUM", "COOCHBEHAR", "DARJEELING", "DINAJPUR DAKSHIN",
    "DINAJPUR UTTAR", "HOOGHLY", "HOWRAH", "JALPAIGURI", "Jhargram",
    "KALIMPONG", "KOLKATA", "MALDAH", "MEDINIPUR EAST", "MEDINIPUR WEST",
    "MURSHIDABAD", "NADIA", "PASCHIM BARDHAMAN", "PURBA BARDHAMAN",
    "PURULIA", "Not Listed")

# Upper-cased variant used by the oxygen-supplier sheet.  The "<NAME>"
# placeholders in the original were anonymization residue; they are restored
# here from the sibling district lists (DINAJPUR / BARDHAMAN entries) --
# TODO(review): confirm against the live sheet.
DISTRICTS_UPPER = (
    "Select District from the Dropdown",
    "24 PARAGANAS NORTH", "24 PARAGANAS SOUTH", "ALIPURDUAR", "BANKURA",
    "BIRBHUM", "COOCHBEHAR", "DARJEELING", "DINAJPUR DAKSHIN",
    "DINAJPUR UTTAR", "HOOGHLY", "HOWRAH", "JALPAIGURI", "Jhargram",
    "KALIMPONG", "KOLKATA", "MALDAH", "MEDINIPUR EAST", "MEDINIPUR WEST",
    "MURSHIDABAD", "NADIA", "PASCHIM BARDHAMAN", "PURBA BARDHAMAN",
    "PURULIA", "NOT LISTED")

BLOOD_GROUPS = ("Select Blood Group",
                "A+", "A-", "B+", "B-", "AB+", "AB-", "O+", "O-")


def select_district(options=DISTRICTS):
    """Dropdown for a district, falling back to a free-text box for
    "Not Listed" (case-insensitive, so the upper-cased list works too)."""
    district = st.selectbox("Select District", options)
    if district.title() == "Not Listed":
        district = st.text_input('Enter Your District Name')
    return district


def load_sheet(url, renames):
    """Fetch sheet1 of *url* as a DataFrame, drop rows without a phone
    number, and rename the raw sheet columns to display names.

    The original called ``df.fillna(0)`` and discarded the result (a
    no-op); the filled frame is now actually returned.
    """
    worksheet = gc.open_by_url(url).sheet1
    df = get_as_dataframe(worksheet)
    df = df[df['phone'].notna()]
    df.rename(columns=renames, inplace=True)
    return df.fillna(0)


def matching_rows(df, criteria):
    """Positional indices of rows matching every column == value pair in
    *criteria*, newest (last) row first -- mirrors the original's
    reverse-order scan loops.  Avoids shadowing the ``list`` builtin."""
    return [i for i in range(len(df.index) - 1, -1, -1)
            if all(df.at[df.index[i], col] == val
                   for col, val in criteria.items())]


def choose_view():
    """The full-vs-filtered radio widget shared by every section."""
    return st.radio('Choose your preferred type',
                    ['Choose Option', 'View Full Data', 'View Filtered Data'])


st.write("# **CovidOff View Data Portal**")
st.write("This is the Portal for all the Data Resources. These resources are filled by general people. Verification at utmostlevel is not guaranteed. Select your required service from the below dropdown")

selected = st.selectbox(
    "Select Service",
    ("Choose from the DropDown", "View Plasma Requirements",
     "View Plasma Donors", "View Oxygen Requirements",
     "View Oxygen Suppliers/Availability", "Search for Bed Availability",
     "Search for Remdesivr Distributors", "Other Resources"))

if selected == "Choose from the DropDown":
    pass

if selected == "View Plasma Requirements":
    df2 = load_sheet(
        'https://docs.google.com/spreadsheets/d/1kcj_u4j9269CcdQGXuLzdU-WzrtWdR5vdOEwN7-46JM/edit#gid=0',
        {'timestamp': 'Timestamp', 'name': 'Name / Contact Person',
         'phone': 'Contact Number', 'bloodGroup': 'Blood Group',
         'age': 'Age', 'gender': 'Gender',
         'Cities': 'City / Area / Location', 'Category': 'State / Territory',
         'selectDistrict': 'District', 'email': 'Email Id'})
    st.write("## **Showing Plasma Requirements**")
    page = choose_view()
    if page == "Choose Option":
        st.info("👆 You have to choose any of the above two Options.")
    elif page == "View Full Data":
        st.write(df2.iloc[::-1].iloc[:, 1:11])
    elif page == "View Filtered Data":
        st.write(
            "Search the Plasma Requirements by District or Blood Group or Combined")
        want_bg = st.checkbox('Blood Group')
        want_district = st.checkbox('District')
        if want_bg and want_district:
            district = select_district()
            bg = st.selectbox("Enter Blood Group", BLOOD_GROUPS)
            # The original iterated over undefined `sheet.max_row` here
            # (NameError at runtime); scan the frame like the
            # single-filter branches instead.
            rows = matching_rows(df2, {'Blood Group': bg,
                                       'District': district})
            st.write(df2.iloc[rows, 1:11])
        elif want_bg:
            bg = st.selectbox("Enter Blood Group", BLOOD_GROUPS)
            st.write(df2.iloc[matching_rows(df2, {'Blood Group': bg}), 1:11])
        elif want_district:
            district = select_district()
            st.write(df2.iloc[matching_rows(df2, {'District': district}), 1:11])

if selected == "View Plasma Donors":
    df2 = load_sheet(
        'https://docs.google.com/spreadsheets/d/1RR3Iu0fyzzdGkhUsOReijAIRaL1q0f6a20sHeqvQgmY/edit#gid=0',
        {'timestamp': 'Timestamp', 'name': 'Name / Contact Person',
         'phone': 'Contact Number', 'bloodGroup': 'Blood Group',
         'age': 'Age', 'gender': 'Gender',
         'Cities': 'City / Area / Location', 'Category2': 'State / Territory',
         'selectDistrict': 'District', 'email': 'Email Id'})
    st.write("## **Showing Plasma Donors**")
    page = choose_view()
    if page == "Choose Option":
        st.info("👆 You have to choose any of the above two Options.")
    elif page == "View Full Data":
        # NOTE: original showed columns 1:10 here (vs 1:11 elsewhere).
        st.write(df2.iloc[::-1].iloc[:, 1:10])
    elif page == "View Filtered Data":
        st.write(
            "Search the Plasma Donors by District or Blood Group or Combined")
        want_bg = st.checkbox('Blood Group')
        want_district = st.checkbox('District')
        if want_bg and want_district:
            district = select_district()
            bg = st.selectbox("Enter Blood Group", BLOOD_GROUPS)
            # Same undefined-`sheet` NameError fixed as in the
            # requirements section above.
            rows = matching_rows(df2, {'Blood Group': bg,
                                       'District': district})
            st.write(df2.iloc[rows, 1:11])
        elif want_bg:
            bg = st.selectbox("Enter Blood Group", BLOOD_GROUPS)
            st.write(df2.iloc[matching_rows(df2, {'Blood Group': bg}), 1:11])
        elif want_district:
            district = select_district()
            st.write(df2.iloc[matching_rows(df2, {'District': district}), 1:11])

if selected == "View Oxygen Requirements":
    df2 = load_sheet(
        'https://docs.google.com/spreadsheets/d/11E1NjRZxsP7jS6MPZ_JnEnKFopwwTFyJOZRk-lKZEoc/edit#gid=0',
        {'timestamp': 'Timestamp', 'name': 'Name / Contact Person',
         'phone': 'Contact Number', 'Cities': 'City / Area / Location',
         'Category': 'State / Territory', 'selectDistrict': 'District',
         'email': 'Email Id'})
    st.write("## **Showing Oxygen Requirements**")
    page = choose_view()
    if page == "Choose Option":
        st.info("👆 You have to choose any of the above two Options.")
    elif page == "View Full Data":
        st.write(df2.iloc[::-1].iloc[:, 1:7])
    elif page == "View Filtered Data":
        district = select_district()
        st.write(df2.iloc[matching_rows(df2, {'District': district}), 1:7])

if selected == "View Oxygen Suppliers/Availability":
    df2 = load_sheet(
        'https://docs.google.com/spreadsheets/d/1CP-vN1dU_rfn9BvcntAT15aFKIybyXGvZuXFMsFeGBk/edit#gid=0',
        {'timestamp': 'Timestamp', 'name': 'Name / Contact Person',
         'phone': 'Contact Number', 'Cities': 'City / Area / Location',
         'Category2': 'State / Territory', 'selectDistrict': 'District',
         'email': 'Email Id'})
    st.write("## **Showing Oxygen Suppliers**")
    page = choose_view()
    if page == "Choose Option":
        st.info("👆 You have to choose any of the above two Options.")
    elif page == "View Full Data":
        st.write(df2.iloc[::-1].iloc[:, 1:7])
    elif page == "View Filtered Data":
        district = select_district(DISTRICTS_UPPER)
        st.write(df2.iloc[matching_rows(df2, {'District': district}), 1:7])

if selected == "Search for Bed Availability":
    df2 = load_sheet(
        'https://docs.google.com/spreadsheets/d/1nuMaTQSXx6KcBisg3mgwMl5xh5gq8ClWI_6iYZdfC1Y/edit#gid=0',
        {'timestamp': 'Timestamp', 'name': 'Name / Contact Person',
         'phone': 'Contact Number', 'beds': 'Available Beds',
         'updateon': 'Data As Updated On', 'hospital': 'Hospital Name',
         'pincode': 'Pincode', 'Category': 'State / Territory',
         'Cities': 'City / Area / Location', 'selectDistrict': 'District'})
    st.write("## **Showing Bed Availability**")
    page = choose_view()
    if page == "Choose Option":
        st.info("👆 You have to choose any of the above two Options.")
    elif page == "View Full Data":
        st.write(df2.iloc[::-1].iloc[:, 1:11])
    elif page == "View Filtered Data":
        district = select_district()
        # NOTE(review): the source file was truncated mid-statement at this
        # point; the original presumably continued with the same
        # district-filtered st.write as the other sections, e.g.:
        #   st.write(df2.iloc[matching_rows(df2, {'District': district}), 1:11])
        # -- confirm against the upstream repository before enabling.
now...') if args.create_batch: # ------------------------------------------------------------ # # Create batch files and exit batch(args.config_file, args.create_batch, args.batch_dir) # ------------------------------------------------------------ # else: # ------------------------------------------------------------ # # Read Configuration files config_dict = read_config(args.config_file, default_config=default_config) options = config_dict.pop('OPTIONS') global_atts = config_dict.pop('GLOBAL_ATTRIBUTES') if not options['regular_grid']: domain_dict = config_dict.pop('DOMAIN') else: domain_dict = None # set aside fields dict fields = config_dict vic2nc(options, global_atts, domain_dict, fields) # ------------------------------------------------------------ # return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def vic2nc(options, global_atts, domain_dict, fields): """ Convert ascii VIC files to netCDF format""" # determine run mode if (options['memory_mode'] == 'standard') \ and (options['chunksize'] in ['all', 'All', 'ALL', 0]): memory_mode = 'big_memory' else: memory_mode = options['memory_mode'] print("\n-------------------------------") print("Configuration File Options") print("-------------OPTIONS-------------") for pair in options.items(): print("{0}: {1}".format(*pair)) print('Fields: {0}'.format(", ".join(fields.keys()))) if domain_dict: print("-------------DOMAIN--------------") for pair in domain_dict.items(): print("{0}: {1}".format(*pair)) print("--------GLOBAL_ATTRIBUTES--------") for pair in global_atts.items(): print("{0}: {1}".format(*pair)) print("--------RUN MODE--------") print('Memory Mode: {0}'.format(memory_mode)) if memory_mode == 'standard': print('Chunksize={0}'.format(options['chunksize'])) print("---------------------------------\n") # ---------------------------------------------------------------- # # 
---------------------------------------------------------------- # # Make output directory if not os.path.exists(options['out_directory']): os.makedirs(options['out_directory']) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Make pairs (i.e. find inds) files = glob(options['input_files']) points = get_file_coords(files) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Get target grid information if domain_dict: domain = read_domain(domain_dict) target_grid_file = path.split(domain_dict['filename'])[1] global_atts['target_grid_file'] = target_grid_file else: # must be a regular grid, build from file names domain = calc_grid(points.get_lats(), points.get_lons()) target_grid_file = None domain_dict = {'y_x_dims': ['lat', 'lon']} # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Get grid index locations points = get_grid_inds(domain, points) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Get timestamps if options['input_file_format'].lower() == 'ascii': if ('bin_start_date' in options and 'bin_end_date' in options and 'bin_dt_sec' in options): vic_datelist, vic_ordtime = make_dates( options['bin_start_date'], options['bin_end_date'], options['bin_dt_sec'], calendar=options['calendar']) else: vic_datelist = get_dates(files[0]) vic_ordtime = date2num(vic_datelist, TIMEUNITS, calendar=options['calendar']) elif options['input_file_format'].lower() in ['binary', 'netcdf']: vic_datelist, vic_ordtime = make_dates(options['bin_start_date'], options['bin_end_date'], options['bin_dt_sec'], calendar=options['calendar']) else: raise ValueError('Unknown input file format: {}. 
Valid options are \ ascii or binary'.format(options['input_file_format'])) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Determine time segmentation if options['start_date']: start_date = datetime.strptime(options['start_date'], TIMESTAMPFORM) if start_date < vic_datelist[0]: print("WARNING: Start date in configuration file is before " "first date in file.") start_date = vic_datelist[0] print('WARNING: New start date is {0}'.format(start_date)) else: start_date = vic_datelist[0] if options['end_date']: end_date = datetime.strptime(options['end_date'], TIMESTAMPFORM) if end_date > vic_datelist[-1]: print("WARNING: End date in configuration file is after " "last date in file.") end_date = vic_datelist[-1] print('WARNING: New end date is {0}'.format(end_date)) else: end_date = vic_datelist[-1] # Ordinal Time start_ord = date2num(start_date, TIMEUNITS, calendar=options['calendar']) end_ord = date2num(end_date, TIMEUNITS, calendar=options['calendar']) print("netCDF Start Date: {0}".format(start_date)) print("netCDF End Date: {0}".format(end_date)) segment_dates = [] if options['time_segment'] == 'day': # calendar insensitive num_segments = np.ceil(end_ord - start_ord) if start_date.hour == 0: segment_dates = num2date(np.arange(start_ord, end_ord + 1, 1), TIMEUNITS, calendar=options['calendar']) else: # allow start at time other than 0 temp = [start_ord].append(np.arange(np.ceil(start_ord), end_ord + 1, 1)) segment_dates = num2date(temp, TIMEUNITS, calendar=options['calendar']) elif options['time_segment'] == 'month': num_segments = (end_date.year - start_date.year) * 12 \ + end_date.month - start_date.month + 1 month = start_date.month year = start_date.year for i in pyrange(num_segments + 1): segment_dates.append(datetime(year, month, 1)) month += 1 if month == 13: month = 1 year += 1 elif options['time_segment'] == 'year': num_segments = end_date.year - start_date.year + 1 
year = start_date.year for i in pyrange(num_segments + 1): segment_dates.append(datetime(year, 1, 1)) year += 1 elif options['time_segment'] == 'decade': num_segments = (end_date.year - start_date.year) / 10 + 1 year = start_date.year for i in pyrange(num_segments + 1): segment_dates.append(datetime(year, 1, 1)) year += 10 elif options['time_segment'] == 'all': num_segments = 1 segment_dates = [start_date, end_date] else: raise ValueError('Unknown timesegment options \ {0}'.format(options['time_segment'])) print("Number of files: {0}".format(len(segment_dates) - 1)) assert len(segment_dates) == num_segments + 1 # Make sure the first and last dates are start/end_date segment_dates[0] = start_date segment_dates[-1] = end_date + timedelta(minutes=1) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Setup Segments segments = deque() for num in pyrange(num_segments): # Segment time bounds t0 = segment_dates[num] t1 = segment_dates[num + 1] # Get segment inds i0 = bisect_left(vic_datelist, t0) i1 = bisect_left(vic_datelist, t1) # Make segment filename (with path) if options['time_segment'] == 'day': filename = "{0}.{1}.nc".format(options['out_file_prefix'], t0.strftime('%Y-%m-%d')) elif options['time_segment'] == 'month': filename = "{0}.{1}.nc".format(options['out_file_prefix'], t0.strftime('%Y-%m')) elif options['time_segment'] == 'year': filename = "{0}.{1}.nc".format(options['out_file_prefix'], t0.strftime('%Y')) elif options['time_segment'] == 'all': filename = "{0}.{1}-{2}.nc".format(options['out_file_prefix'], t0.strftime('%Y%m%d'), t1.strftime('%Y%m%d')) filename = path.join(options['out_directory'], filename) # Setup segment and initialize netcdf segment = Segment(num, i0, i1, options['out_file_format'], filename, memory_mode=memory_mode) segment.nc_globals(**global_atts) segment.nc_time(t0, t1, vic_ordtime, options['calendar']) 
segment.nc_dimensions(snow_bands=options['snow_bands'], veg_tiles=options['veg_tiles'], soil_layers=options['soil_layers']) segment.nc_domain(domain) segment.nc_fields(fields, domain_dict['y_x_dims'], options['precision']) print(repr(segment)) segments.append(segment) # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # # Get column numbers and names (will help speed up reading) names = [] usecols = [] dtypes = [] bin_dtypes = [] bin_mults = [] if options['precision'] == 'double': prec = NC_DOUBLE else: prec = NC_FLOAT for name, field in fields.items(): if not np.isscalar(field['column']): # multiple levels for i, col in enumerate(field['column']): names.append(name + str(i)) usecols.append(col) if 'type' in field: if type(field['type']) == list: dtypes.extend(field['type']) else: dtypes.extend([field['type']] * len(field['column'])) else: dtypes.append([prec] * len(field['column'])) if options['input_file_format'].lower() == 'binary': if 'bin_dtype' in field: if type(field['bin_dtype']) == list: bin_dtypes.extend(field['bin_dtype']) else: bin_dtypes.extend([field['bin_dtype']] * len(field['column'])) else: raise ValueError('bin_dtype not in field: {}'.format(name)) if 'bin_mult' in field: if type(field['bin_mult']) == list: bin_mults.extend(field['bin_mult']) else: bin_mults.extend([field['bin_mult']] * len(field['column'])) else: bin_mults.extend([1.0] * len(field['column'])) else: # no levels names.append(name) usecols.append(field['column']) if 'type' in field: dtypes.append(field['type']) else: dtypes.append(prec) if options['input_file_format'].lower() == 'binary': if 'bin_dtype' in field: bin_dtypes.append(field['bin_dtype']) else: raise ValueError('bin_dtype not in field: {}'.format(name)) if 'bin_mult' in field: bin_mults.append(field['bin_mult']) else: bin_mults.append(1.0) print('setting point attributes (fileformat, names, usecols, and dtypes)') # pandas.read_table does 
not 'honor' the order of the columns in usecols # it simply uses them in ascending order. So the names need to be sorted # the same way. For example, if the columns in the VIC file are: # 3: prcp; 4: evap; 5: runoff; 6; baseflow; 7: sm1; 8: sm2; 9: sm3; 10: swe # and this is parsed from the configuration file as # usecols = [3, 4, 5, 6, 10, 7, 8, 9] # names=['prcp', 'evap', 'runoff', 'baseflow', 'swe', 'sm1', 'sm2', 'sm3'] # then without sorting, the netcdf file will have the wrong variables: # nc_swe will contain sm1, nc_sm1 will contain sm2, nc_sm2: sm3 and # nc_swe: sm3 # the following will ensure that the names are sorted in increasing column # order. Note that sorted(usecols) is not strictly necessary, since # apparently that is done in read_table, but it keeps the names and columns # in the same order names = [x for (y, x) in sorted(pyzip(usecols, names))] usecols = sorted(usecols) points.set_names(names) points.set_usecols(usecols) points.set_dtypes(dtypes) # set binary attributes if options['input_file_format'].lower() == 'binary': points.set_bin_dtypes(bin_dtypes) points.set_bin_mults(bin_mults) points.set_fileformat(options['input_file_format']) print('done') # ---------------------------------------------------------------- # # ---------------------------------------------------------------- # if memory_mode == 'big_memory': # ------------------------------------------------------------ # # run in big memory mode for i, segment in enumerate(segments): segments[i].allocate() while points: point = points.popleft() point.open() point.read() point.close() for segment in segments: segment.nc_add_data_to_array(point) for segment in segments: segment.nc_write_data_from_array() segment.nc_close() # ------------------------------------------------------------ # elif memory_mode == 'standard': # ------------------------------------------------------------ # # Open VIC files and put data into netcdfs chunk = Plist() while points: point = points.popleft() 
point.open() point.read() point.close() chunk.append(point) if len(chunk) > int(options['chunksize']) or len(points) == 0: for segment in segments: segment.nc_add_data_standard(chunk) chunk = Plist() del point # ------------------------------------------------------------ # # ------------------------------------------------------------ # # Close the netcdf files for segment in segments: segment.nc_close() # ------------------------------------------------------------ # elif memory_mode == 'original': # ------------------------------------------------------------ # # Run in original memory mode (a.k.a. vic2nc.c mode) # Open all files for point in points: point.open() while segments: segment = segments.popleft() segment.allocate() count = segment.count for point in points: point.read(count) segment.nc_add_data_to_array(point) segment.nc_write_data_from_array() segment.nc_close() for point in points: point.close() # ------------------------------------------------------------ # return # -------------------------------------------------------------------- # # -------------------------------------------------------------------- # def get_file_coords(files): """ Get list of Point objects """ points = Plist() for i, filename in enumerate(files): # fname = path.split(f)[1][-16:] # just look at last 16 characters f = filename[-22:] # just look at last 16 characters lat, lon = list(map(float, findall(r"[-+]?\d*\.\d+|\d+", f)))[-2:] points.append(Point(lat=lat, lon=lon, filename=filename)) return points # -------------------------------------------------------------------- # # --------------------------------------------------------------------
#!/usr/bin/python # -*- coding: utf-8 -* import os import glob import sys import numpy as np import getpass from ftplib import FTP import shutil import subprocess as sp import multiprocessing as mp sys.path.append(os.path.join(os.path.dirname(__file__),"../projects/tools")) import msh import executable_paths as exe import tempfile from functools import wraps import string import random from scipy.spatial.distance import cdist import time #Functions to group files by name def condition1(ref, tmp): #Return 1 if the number splitted between dots are the same return ref != tmp and ref.split(".")[1] == tmp.split(".")[1] def condition2(ref, tmp): #Return 1 if the first 3 letters are the same return ref != tmp and ref[:3] == tmp[:3] def group(files, conditionFunc): groups = [] while len(files)>0: group = [files[0]] for other in files: if conditionFunc(files[0], other): group.append(other) groups.append(group) for g in group: files.remove(g) return groups #Functions to copy files and change their names def readCSV(filename): good = [] with open(filename) as f: LINES = [l.strip().split(",") for l in f.readlines()] for i,l in enumerate(LINES): #A fermer = on ne traite pas if "fermer" in l[4] or "NON" in l[4]: good.append(0) #inférieur à 24 elif i<25: good.append(0) else: good.append(1) return good def newName(f): names = [ ["OsB","bone"], ["SkullB","bone"], ["MandB","mand"], ["BTeeB","btee"], ["HTeeB","htee"], ["Mass","mass"], ["FatB","face"] ] for n in names: if n[0] in f: return n[1] #Functions to load and process .obj and .stl def obj2Mesh(file): with open(file, "r") as f: LINES = f.readlines() mesh = msh.Mesh() mesh.verts = np.array([ [float(x) for x in l.split()[1:]] for l in LINES if l[0]=="v" ]) mesh.tris = np.array([ [int(x)-1 for x in l.split()[1:]] for l in LINES if l[0]=="f" ]) mesh.verts = np.insert(mesh.verts,3,0,axis=1) mesh.tris = np.insert(mesh.tris,3,0,axis=1) mesh.computeBBox() return mesh def stlToMesh(f): try: tmp = f[:-4] + ".obj" os.system("LC_ALL=C 
meshlabserver -i " + f + " -o " + tmp + " -s " + mshScript + " > /dev/null 2>&1" ) mesh = obj2Mesh(tmp) mesh.write(f[:-4] + ".mesh") os.remove(tmp) os.remove(f) return 1 except: return -1 def cutMeshInHalf(mesh): half1 = msh.Mesh() half2 = msh.Mesh() half1.verts = mesh.verts half2.verts = mesh.verts mid = (np.max(mesh.verts[:,0]) + np.min(mesh.verts[:,0]))/2. vertsMask = [v[0]<mid for v in mesh.verts] mask = [1 for i in range(len(mesh.tris))] for i,t in enumerate(mesh.tris): for v in t[:3]: mask[i] = vertsMask[v] half1.tris = np.array([t for i,t in enumerate(mesh.tris) if mask[i]]) half2.tris = np.array([t for i,t in enumerate(mesh.tris) if not mask[i]]) del mesh half1.discardUnused() half2.discardUnused() half2.verts[:,0] = 1-half2.verts[:,0] half2.computeBBox() half1.applyMatrix(half1.toUnitMatrix()) half2.applyMatrix(half2.toUnitMatrix()) return half1, half2 def nearest_neighbor(src, dst): all_dists = cdist(src, dst, 'euclidean') indices = all_dists.argmin(axis=1) distances = all_dists[np.arange(all_dists.shape[0]), indices] return distances, indices def adapt_box_to(f, maxNb=20000): shutil.copyfile(templates["box"],"box.mesh") cube = msh.Mesh("box.mesh") mesh = msh.Mesh(f) step = 1 if len(mesh.verts)<maxNb else int(len(mesh.verts)/maxNb)+1 dists, _ = nearest_neighbor(cube.verts[:,:3], mesh.verts[::step,:3]) cube.scalars = np.array(dists) cube.scaleSol(0.001, 0.5, absolute=True) cube.write("box.1.mesh") cube.writeSol("box.1.sol") err = os.system("mmg3d_O3 box.1.mesh -hgrad 1.5 > /dev/null 2>&1") if err: raise FacileError("mmg3d failure") #Decorator class FacileError(Exception): pass def debug(): def true_decorator(f): @wraps(f) def wrapped(*args, **kwargs): t = time.time() r=None print f.__name__ + " : "+'\033[94m'+"RUNNING"+'\033[0m'+" on " + str(*args) try: tmpdir = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(8)]) try: os.makedirs(tmpdir) except: print "ERROR generating temporary directory" return None os.chdir(tmpdir) r = 
f(*args, **kwargs) except Exception as e: print f.__name__ + " : "+'\033[91m'+"FAILURE"+'\033[0m'+" on " + str(*args) + ": " + type(e).__name__ + ": " + str(e) + ", in " + str(int(time.time() - t)) + " s" pass else: print f.__name__ + " : "+'\033[92m'+"SUCCESS"+'\033[0m'+" on " + str(*args) + ", in " + str(int(time.time() - t)) + " s" finally: os.chdir("..") shutil.rmtree(tmpdir) return r return wrapped return true_decorator dryRun = True # Run just for fun oneStep = False # If only wish to process one step def run(func, liste, parallel=True, maxi=64): if len(liste)>0: num = min( maxi, min(len(liste), mp.cpu_count()-1 )) if parallel else 1 print '\033[95m' + "## EXECUTING '" + func.__name__ + "' on " + str(len(liste)) + " cases on " + str(num) + " process(es)." + '\033[0m' if not dryRun: res = [] if parallel: pool = mp.Pool(processes=num) res = pool.map(func, liste ) else: res = [] for l in liste: res.append(func(l)) if oneStep: print '\033[95m' + "ONESTEP -> EXITING..." + '\033[0m' sys.exit() return res else: return 0 else: print '\033[95m' + "## 0 arguments to execute '" + func.__name__ + "' on, skipping..." + '\033[0m' #Functions @debug() def ftpCopy(f): ftp = FTP(IPadress, ftpUsr, ftpPwd) ftp.cwd(ftpDir) num = f.split(".")[1].zfill(3) localFile = os.path.join(directories["raw"], num + "_" + newName(f) + "." 
+ f.split(".")[-1]) if not num + "_" + newName(f) + ".mesh" in "".join(os.listdir(directories["raw"])): if not os.path.isfile(localFile): with open(localFile, 'wb') as ff: ftp.retrbinary('RETR %s' % f, ff.write) @debug() def convertToMesh(f): if "stl" in f: if not stlToMesh(f): raise FacileError("conversion failure") @debug() def cleanMesh(f): mesh = msh.Mesh() mesh.get_infos(f) nV = mesh.numItems[0] nT = mesh.numItems[1] if nV>nT: mesh = msh.Mesh(f) mesh.discardDuplicateVertices() mesh.discardUnused() mesh.write(f) @debug() def scale(g): faceFile = [f for f in g if "face" in f][0] boneFiles = [f for f in g if f!=faceFile and "mass" not in f] bone = msh.Mesh(os.path.join(directories["raw"],boneFiles[0])) if len(boneFiles)>1: for f in boneFiles[1:]: bone.fondre(msh.Mesh(os.path.join(directories["raw"],f))) center = bone.center scale = 0.0035 for f in g: mesh = msh.Mesh(os.path.join(directories["raw"],f)) mesh.verts[:,:3] -= center mesh.verts[:,:3] *= scale mesh.verts[:,:3] += [0.5,0.5,0.5] mesh.write(os.path.join(directories["scaled"],f)) @debug() def remesh(f, hausd=0.0025): err = os.system(exe.mmgs + " " + os.path.join(directories["scaled"],f) + " -nr -nreg -hausd " + str(hausd) + " -o " + os.path.join(directories["remeshed"],f) + " > /dev/null 2>&1") if err: raise FacileError("mmgs failure") return 0 @debug() def merge(g): newBone = os.path.join(directories["merged"], g[0][:3] + "_bone.mesh") newFace = os.path.join(directories["merged"], g[0][:3] + "_face.mesh") newMass = os.path.join(directories["merged"], g[0][:3] + "_mass.mesh") faceFile = [f for f in g if "face" in f][0] face = msh.Mesh(os.path.join(directories["remeshed"], faceFile)) boneFiles = [f for f in g if f!=faceFile and "mass" not in f] if len(boneFiles)==0: return 1 bone = msh.Mesh(os.path.join(directories["remeshed"],boneFiles[0])) if len(boneFiles)>1: for f in boneFiles[1:]: bone.fondre(msh.Mesh(os.path.join(directories["remeshed"],f))) if "mass" in "".join(g): mass = 
msh.Mesh(os.path.join(directories["remeshed"], [f for f in g if "mass" in f][0])) mass.write(newMass) bone.write(newBone) face.write(newFace) @debug() def align(g): boneFile = [f for f in g if "bone" in f][0] faceFile = [f for f in g if "face" in f][0] massFile = [f for f in g if "mass" in f][0] if "mass" in "".join(g) else "" num = boneFile.split("/")[-1][:3] err = os.system(exe.align + " -i " + boneFile + " " + templates["bone"] + " -d 0.1 -o 0.95 > "+num+".txt")#/dev/null 2>&1") if err: raise FacileError("alignement failure") bone = msh.Mesh(boneFile) bone.applyMatrix(matFile = "mat_Super4PCS.txt") bone.applyMatrix(matFile = "mat_ICP.txt") bone.write(os.path.join(directories["aligned"], num+"_bone.mesh")) err = os.system(exe.pythonICP + " -s " + os.path.join(directories["aligned"], num+"_bone.mesh") + " -t " + templates["bone"] + " -m mat_pyICP.txt >> " + num + ".txt") if err: pass # Cannot run python alignement... else: bone.applyMatrix(matFile = "mat_pyICP.txt") bone.write(os.path.join(directories["aligned"], num+"_bone.mesh")) face = msh.Mesh(faceFile) face.applyMatrix(matFile = "mat_Super4PCS.txt") face.applyMatrix(matFile = "mat_ICP.txt") if not err: face.applyMatrix(matFile = "mat_pyICP.txt") face.write(os.path.join(directories["aligned"], num+"_face.mesh")) if ".mesh" in massFile: mass = msh.Mesh(massFile) mass.applyMatrix(matFile = "mat_Super4PCS.txt") mass.applyMatrix(matFile = "mat_ICP.txt") if not err: mass.applyMatrix(matFile = "mat_pyICP.txt") mass.write(os.path.join(directories["aligned"], num+"_mass.mesh")) return 0 @debug() def warp(f): num = f.split("/")[-1][:3] os.system("cp " + templates["sphere"] + " ./sphere.mesh") err = os.system( exe.warping + " " + f + " -p -nit 150 -load 40 > warping.txt" ) if err: raise FacileError("Warping failure") """ try: mesh = msh.Mesh(f) mesh.tris = np.array([ t for t in mesh.tris if np.max(t)<=len(mesh.verts) ]) mesh.discardUnused() mesh.write(f) err = os.system( exe.warping + " " + f + " -p -nit 150 -load 40 > 
warping.txt" ) if err: raise FacileError("cleaned warping failure") except: raise FacileError("warping failure") """ warped = msh.Mesh("sphere.d.mesh") ext_ref = 2 warped.tris = warped.tris[warped.tris[:,-1] != ext_ref] warped.tets = np.array([]) warped.discardUnused() warped.write(os.path.join(directories["warped"],f.split("/")[-1].split(".")[0] + ".warped.mesh")) return 0 @debug() def signedDistance(f): adapt = True if adapt: adapt_box_to(f) else: cube=msh.Mesh(cube=[0,1,0,1,0,1]) cube.write("box.mesh") err = os.system( exe.tetgen + " -pgANEF box.mesh > /dev/null 2>&1") if err: raise FacileError('tetgen failure') err = os.system( exe.mmg3d + " box.1.mesh -hausd 0.04 -hmax 0.04 > /dev/null 2>&1" ) if err: raise FacileError('mmg3d failure') err = os.system( exe.mshdist + " -ncpu 16 -noscale box.1.o.mesh " + f + " > /dev/null 2>&1") if err: raise FacileError('mshdist failure') name = f.split("/")[-1].split(".")[0] os.system("mv box.1.o.mesh " + os.path.join(directories["signed"], name + ".mesh")) os.system("mv box.1.o.sol " + os.path.join(directories["signed"], name + ".sol")) return 0 @debug() def cut(f): mesh=msh.Mesh(os.path.join(directories["raw"], f)) MAT = mesh.toUnitMatrix() mesh.applyMatrix(MAT) newF = os.path.join(directories["muscles"], f[:-5] + ".scaled.mesh") mesh.write(newF) err = remesh(newF, hausd=0.001) mesh =
import libjevois as jevois
import cv2 as cv
import numpy as np
import sys

## Object detection and recognition using OpenCV Deep Neural Networks (DNN)
#
# This module runs an object detection deep neural network using the OpenCV DNN
# library. Detection networks analyze a whole scene and produce a number of
# bounding boxes around detected objects, together with identity labels
# and confidence scores for each detected box.
#
# This module supports detection networks implemented in TensorFlow, Caffe,
# Darknet, Torch, etc as supported by the OpenCV DNN module.
#
# Included with the standard JeVois distribution are:
#
# - OpenCV Face Detector, Caffe model
# - MobileNet + SSD trained on Pascal VOC (20 object classes), Caffe model
# - MobileNet + SSD trained on Coco (80 object classes), TensorFlow model
# - MobileNet v2 + SSD trained on Coco (80 object classes), TensorFlow model
# - Darknet Tiny YOLO v3 trained on Coco (80 object classes), Darknet model
# - Darknet Tiny YOLO v2 trained on Pascal VOC (20 object classes), Darknet model
#
# See the module's constructor (__init__) code and select a value for \b model to switch network.
#
# Sometimes it will make mistakes! The performance of yolov3-tiny is about 33.1% correct (mean average
# precision) on the COCO test set. The OpenCV Face Detector is quite fast and robust!
#
# This module is adapted from the sample OpenCV code:
# https://github.com/opencv/opencv/blob/master/samples/dnn/object_detection.py
#
# More pre-trained models are available on github in opencv_extra
#
# @author <NAME>
# @videomapping YUYV 640 502 20.0 YUYV 640 480 20.0 JeVois PyDetectionDNN
# @email <EMAIL>
# @address 880 W 1st St Suite 807, Los Angeles CA 90012, USA
# @copyright Copyright (C) 2018 by <NAME>
# @mainurl http://jevois.org
# @supporturl http://jevois.org
# @otherurl http://jevois.org
# @license GPL v3
# @distribution Unrestricted
# @restrictions None
# @ingroup modules
class GarbageTracker:
    # ####################################################################################################
    ## Constructor: configure thresholds, pick a detection model, load the network.
    def __init__(self):
        self.confThreshold = 0.5 # Confidence threshold (0..1), higher for stricter detection confidence.
        self.nmsThreshold = 0.4  # Non-maximum suppression threshold (0..1), higher to remove more duplicate boxes.
        self.inpWidth = 160      # Resized image width passed to network
        self.inpHeight = 120     # Resized image height passed to network
        self.scale = 2/255       # Value scaling factor applied to input pixels
        self.mean = [127.5, 127.5, 127.5] # Mean BGR value subtracted from input image
        self.rgb = True          # True if model expects RGB inputs, otherwise it expects BGR
        self.bbox = None         # Currently tracked box, or None when not tracking
        self.tracker = cv.TrackerKCF_create() # KCF tracker reused across frames

        # Select one of the models:
        #model = 'Face'              # OpenCV Face Detector, Caffe model
        #model = 'MobileNetV2SSD'    # MobileNet v2 + SSD trained on Coco (80 object classes), TensorFlow model
        #model = 'MobileNetSSD'      # MobileNet + SSD trained on Pascal VOC (20 object classes), Caffe model
        model = 'MobileNetSSDcoco'   # MobileNet + SSD trained on Coco (80 object classes), TensorFlow model
        #model = 'YOLOv3'            # Darknet Tiny YOLO v3 trained on Coco (80 object classes), Darknet model
        #model = 'YOLOv2'            # Darknet Tiny YOLO v2 trained on Pascal VOC (20 object classes), Darknet model

        # You should not have to edit anything beyond this point.
        backend = cv.dnn.DNN_BACKEND_DEFAULT
        target = cv.dnn.DNN_TARGET_CPU
        self.classes = None
        classnames = None
        # Per-model file locations and input conventions:
        if (model == 'MobileNetSSD'):
            classnames = '/jevois/share/darknet/yolo/data/voc.names'
            modelname = '/jevois/share/opencv-dnn/detection/MobileNetSSD_deploy.caffemodel'
            configname = '/jevois/share/opencv-dnn/detection/MobileNetSSD_deploy.prototxt'
            self.rgb = False
        elif (model == 'MobileNetV2SSD'):
            classnames = '/jevois/share/darknet/yolo/data/coco.names'
            modelname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v2_coco_2018_03_29.pb'
            configname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v2_coco_2018_03_29.pbtxt'
        elif (model == 'MobileNetSSDcoco'):
            classnames = '/jevois/share/darknet/yolo/data/coconew.names'
            modelname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v1_coco_2017_11_17.pb'
            configname = '/jevois/share/opencv-dnn/detection/ssd_mobilenet_v1_coco_2017_11_17.pbtxt'
            self.rgb = False
            self.nmsThreshold = 0.1
        elif (model == 'YOLOv3'):
            classnames = '/jevois/share/darknet/yolo/data/coco.names'
            modelname = '/jevois/share/darknet/yolo/weights/yolov3-tiny.weights'
            configname = '/jevois/share/darknet/yolo/cfg/yolov3-tiny.cfg'
        elif (model == 'YOLOv2'):
            classnames = '/jevois/share/darknet/yolo/data/voc.names'
            modelname = '/jevois/share/darknet/yolo/weights/yolov2-tiny-voc.weights'
            configname = '/jevois/share/darknet/yolo/cfg/yolov2-tiny-voc.cfg'
            self.inpWidth = 320
            self.inpHeight = 240
        else:
            classnames = '/jevois/share/opencv-dnn/detection/opencv_face_detector.classes'
            modelname = '/jevois/share/opencv-dnn/detection/opencv_face_detector.caffemodel'
            configname = '/jevois/share/opencv-dnn/detection/opencv_face_detector.prototxt'
            self.scale = 1.0
            self.mean = [104.0, 177.0, 123.0]
            self.rgb = False

        # Load names of classes
        if classnames:
            with open(classnames, 'rt') as f:
                self.classes = f.read().rstrip('\n').split('\n')

        # Load a network
        self.net = cv.dnn.readNet(modelname, configname)
        self.net.setPreferableBackend(backend)
        self.net.setPreferableTarget(target)
        self.timer = jevois.Timer('Neural detection', 10, jevois.LOG_DEBUG)
        self.model = model
        # Class names treated as "garbage" when filtering detections.
        garbageclasses = ["shoe", "hat", "eye glasses", "frisbee",
                          "bottle", "plate", "wine glass", "cup", "fork", "spoon", "bowl",
                          "banana", "apple", "sandwich", "orange", "broccoli", "carrot",
                          "fruit", "hotdog", "pizza", "donut", "cake",
                          "vase", "scissors", "toothbrush", "cardboard", "napkin",
                          "net", "paper", "plastic", "straw"]
        self.garbageclasses = garbageclasses

    # ####################################################################################################
    ## Get names of the network's output layers.
    # NOTE(review): reads layer names from self.net but unconnected-out indices
    # from the 'net' parameter; callers presumably pass self.net -- confirm.
    # The i[0] indexing assumes the pre-4.x OpenCV return shape of
    # getUnconnectedOutLayers (list of 1-element arrays).
    def getOutputsNames(self, net):
        layersNames = self.net.getLayerNames()
        return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # ####################################################################################################
    ## Analyze and draw boxes, object names, and confidence scores.
    # frame: BGR image drawn into in place; outs: raw network output blobs.
    # (Truncated by the chunk boundary -- the 'DetectionOutput' branch body is
    # not visible here.)
    def postprocess(self, frame, outs):
        frameHeight = frame.shape[0]
        frameWidth = frame.shape[1]
        out_center_x, out_center_y = frameWidth/2, frameHeight/2

        def track(classId, conf, box):
            # Track the last box on the list
            if self.bbox is None:
                left = box[0]
                top = box[1]
                width = box[2]
                height = box[3]
                # NOTE(review): this stores (x1, y1, x2, y2) but TrackerKCF.init
                # and the update() branch below treat the box as (x, y, w, h) --
                # looks inconsistent; confirm against cv.TrackerKCF docs.
                self.bbox = (left, top, left + width, top + height)
                ok = self.tracker.init(frame, self.bbox)
                drawPred(classId, conf, left, top, left + width, top + height, color=(255, 255, 255))
            else:
                ok, self.bbox = self.tracker.update(frame)
                if ok:
                    p1 = (int(self.bbox[0]), int(self.bbox[1]))
                    p2 = (int(self.bbox[0] + self.bbox[2]), int(self.bbox[1] + self.bbox[3]))
                    drawPred(classId, conf, p1[0], p1[1], p2[0], p2[1], color=(255, 0, 0))
                else:
                    # Lost the target: forget the box so the next detection re-seeds it.
                    self.bbox = None

        def drawPred(classId, conf, left, top, right, bottom, color=(0,255,0)):
            # Draw a bounding box.
            cv.rectangle(frame, (left, top), (right, bottom), color, 2)
            label = '%.2f' % (conf * 100)
            # Print a label of class.
            if self.classes:
                if (classId >= len(self.classes)):
                    label = 'Oooops id=%d: %s' % (classId, label)
                else:
                    label = '%s: %s' % (self.classes[classId], label)
            labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.4, 1)
            top = max(top, labelSize[1])
            cv.rectangle(frame, (left, top - labelSize[1]-2), (left + labelSize[0], top + baseLine), (255, 255, 255), cv.FILLED)
            cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.4, (0, 0, 0))

        def tellRobot(out_center_x, out_center_y, serial_format="XY"):
            # Send a serial command steering the robot toward the tracked box,
            # or "stop" when nothing is tracked.
            if self.bbox is None:
                jevois.sendSerial("stop")
            else:
                box_center_x, box_center_y = self.bbox[0] + self.bbox[2]/2, self.bbox[1] + self.bbox[3]/2
                if serial_format == "XY":
                    # Magnitude of the offset between frame center and box center
                    # (direction is not encoded -- both sides reduce to abs()).
                    if out_center_x < box_center_x:
                        move_x = box_center_x - out_center_x
                    elif box_center_x < out_center_x:
                        move_x = out_center_x - box_center_x
                    elif box_center_x == out_center_x:
                        move_x = 0
                    if out_center_y < box_center_y:
                        move_y = box_center_y - out_center_y
                    elif box_center_y < out_center_y:
                        move_y = out_center_y - box_center_y
                    elif box_center_y == out_center_y:
                        move_y = 0
                    # NOTE(review): this raises any offset below 100 up to 100
                    # (a floor, not a cap) -- confirm the robot expects that.
                    if move_x < 100:
                        move_x = 100
                    if move_y < 100:
                        move_y = 100
                    jevois.sendSerial("move {} {}".format(int(move_x), int(move_y)))
                else:
                    jevois.sendSerial("Invalid Serial Format")

        layerNames = self.net.getLayerNames()
        lastLayerId = self.net.getLayerId(layerNames[-1])
        lastLayer = self.net.getLayer(lastLayerId)
        classIds = []
        confidences = []
        boxes = []
        if self.net.getLayer(0).outputNameToIndex('im_info') != -1:  # Faster-RCNN or R-FCN
            # Network produces output blob with a shape 1x1xNx7 where N is a number of
            # detections and an every detection is a vector of values
            # [batchId, classId, confidence, left, top, right, bottom]
            for out in outs:
                for detection in out[0, 0]:
                    classId = int(detection[1]) - 1
                    confidence = detection[2]
                    # Keep only confident detections whose class is in the garbage list.
                    is_garbage = self.classes[classId] in self.garbageclasses
                    if (confidence > self.confThreshold) and (is_garbage):
                        left = int(detection[3])
                        top = int(detection[4])
                        right = int(detection[5])
                        bottom = int(detection[6])
                        width = right - left + 1
                        height = bottom - top + 1
                        classIds.append(classId) # Skip background label
                        confidences.append(float(confidence))
                        boxes.append([left, top, width, height])
        elif lastLayer.type == 'DetectionOutput':
            # Network produces output blob with a shape 1x1xNx7 where N is a number of
            # detections and an every detection is a vector of values
            # [batchId, classId, confidence, left, top, right, bottom]
            for out in outs:
                for detection in out[0, 0]:
0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 68, "name": "powerpath", "FCPaths": [ { "wwn": "21000024FF36D406", "hostSpeed": 0 }, { "wwn": "21000024FF36D407", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 69, "name": "power_v3", "FCPaths": [ { "wwn": "20809CE37435D845", "hostSpeed": 0 }, { "wwn": "20909CE37435D845", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 89, "name": "vplex_meta_important", "FCPaths": [ { "wwn": "5000144280292012", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144280292010", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144290292012", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014429029E910", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014429029E912", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014428029E912", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014428029E910", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144290292010", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144290292012", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144290292010", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014429029E912", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014429029E910", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144280292012", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "5000144280292010", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, 
"hostSpeed": 0 }, { "wwn": "500014428029E912", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "500014428029E910", "portPos": { "node": 0, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 91, "name": "Dorado5000_51.45", "FCPaths": [ { "wwn": "200080D4A58EA53A", "hostSpeed": 0 }, { "wwn": "201080D4A58EA53A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 98, "name": "AIX6.1_LN", "descriptors": { "os": "AIX" }, "FCPaths": [ { "wwn": "10000000C9781C57", "hostSpeed": 0 }, { "wwn": "10000000C9781853", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 5, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 115, "name": "huhuihost", "descriptors": { "os": "SuSE" }, "FCPaths": [ { "wwn": "2100000E1E1A9B30", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 121, "name": "Dorado5000V3_F3", "descriptors": { "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "201880D4A58EA53A", "hostSpeed": 0 }, { "wwn": "200380D4A58EA53A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 122, "name": "DYP_RHEL", "descriptors": { "IPAddr": "172.16.31.10", "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "10000090FA76D446", "hostSpeed": 0 }, { "wwn": "10000090FA76D447", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 123, "name": "DYP_Dorado6000", "FCPaths": [ { "wwn": "2618346AC212FB94", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 124, "name": "tool_rhel6.8", "FCPaths": [ { "wwn": "21000024FF543687", "hostSpeed": 0 }, { "wwn": "21000024FF543686", "hostSpeed": 0 } ], 
"iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 125, "name": "OceanStor6800", "FCPaths": [ { "wwn": "2430E0979656725A", "hostSpeed": 0 }, { "wwn": "2208E0979656725A", "hostSpeed": 0 }, { "wwn": "2218E0979656725A", "hostSpeed": 0 }, { "wwn": "2428E0979656725A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 126, "name": "fyc_test", "FCPaths": [ { "wwn": "21000024FF41DE7E", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 127, "name": "huhui", "descriptors": { "os": "SuSE" }, "FCPaths": [ { "wwn": "500601610864241E", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 132, "name": "ESX172.16.58.3", "descriptors": { "os": "ESX 4.x/5.x" }, "FCPaths": [ { "wwn": "21000024FF2F3266", "hostSpeed": 0 }, { "wwn": "21000024FF2F3267", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 8, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 133, "name": "ESX89PT_suse_172.16.31.10", "descriptors": { "os": "SuSE" }, "FCPaths": [ { "wwn": "21000024FF36F1ED", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 134, "name": "SVC", "descriptors": { "os": "Exanet" }, "FCPaths": [ { "wwn": "500507680110EF7C", "hostSpeed": 0 }, { "wwn": "500507680120EF7C", "hostSpeed": 0 }, { "wwn": "500507680120EF3E", "hostSpeed": 0 }, { "wwn": "500507680110EF3E", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 3, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 135, "name": "NSS_172.16.31.10", "descriptors": { "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "21000024FF0DC381", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 137, "name": 
"D185_172.16.31.10", "descriptors": { "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "29A11603042D0306", "hostSpeed": 0 }, { "wwn": "28D01603042D0306", "hostSpeed": 0 }, { "wwn": "2903010203040509", "hostSpeed": 0 }, { "wwn": "2802010203040509", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 139, "name": "Dorado3000V6", "FCPaths": [ { "wwn": "2019CC64A68314D3", "hostSpeed": 0 }, { "wwn": "2009CC64A68314D3", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 141, "name": "172.16.31.10T2", "FCPaths": [ { "wwn": "10000090FA50C4DF", "hostSpeed": 0 }, { "wwn": "10000090FA50C4DE", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 142, "name": "172.16.31.10T1", "FCPaths": [], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 144, "name": "C61_51.10.58.190", "descriptors": { "os": "Red Hat Enterprise Linux" }, "FCPaths": [ { "wwn": "2210112224901223", "hostSpeed": 0 }, { "wwn": "2200112224901223", "hostSpeed": 0 }, { "wwn": "2230112224901223", "hostSpeed": 0 }, { "wwn": "2220112224901223", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 145, "name": "172.16.31.10", "FCPaths": [ { "wwn": "21000024FF754606", "hostSpeed": 0 }, { "wwn": "21000024FF1A99E1", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 146, "name": "ZTY_win2012", "descriptors": { "os": "Windows 2012" }, "FCPaths": [ { "wwn": "21000024FF40272B", "portPos": { "node": 1, "slot": 1, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "21000024FF40272A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 147, "name": "DoradoV6_183", 
"FCPaths": [ { "wwn": "240B121314151617", "hostSpeed": 0 }, { "wwn": "2409121314151617", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 148, "name": "rhev_125", "descriptors": { "os": "Windows 2012" }, "FCPaths": [ { "wwn": "21000024FF4BC1B7", "hostSpeed": 0 }, { "wwn": "21000024FF4BC1B6", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 150, "name": "windows2012_68", "descriptors": { "os": "Windows 2012" }, "FCPaths": [ { "wwn": "2101001B32B0667A", "hostSpeed": 0 }, { "wwn": "2100001B3290667A", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 151, "name": "Dorado5000V6_80", "FCPaths": [ { "wwn": "2001183D5E0F5131", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "2011183D5E0F5131", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 1, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 152, "name": "windows2012_60", "descriptors": { "os": "Windows 2012" }, "FCPaths": [ { "wwn": "21000024FF53B4BC", "hostSpeed": 0 }, { "wwn": "21000024FF53B4BD", "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 2, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 153, "name": "aix_192.168.3.11", "descriptors": { "os": "AIX" }, "FCPaths": [ { "wwn": "10000000C975804C", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 }, { "wwn": "10000000C9765E79", "portPos": { "node": 1, "slot": 0, "cardPort": 2 }, "hostSpeed": 0 } ], "iSCSIPaths": [], "persona": 5, "initiatorChapEnabled": False, "targetChapEnabled": False }, { "id": 154, "name": "Dorado5500_V6_109", "descriptors": { "IPAddr": "192.168.127.12", "os": "Windows 2012" }, "FCPaths": [ { "wwn": "221818022D189653", "portPos": { "node": 1, "slot": 0,
# NOTE: this chunk is Python 2 (print statements, tuple parameters,
# 'except X, e', iteritems). The line below is the truncated tail of
# trace_mem's signature; its 'def trace_mem (rep, tag, m, ...' header lies
# before this chunk.
= True, resolve_addrs = False):
    # Walk the model along the trace for 'tag', printing every memory access
    # evaluated in model m; returns the accumulated trace list.
    p = rep.p
    ns = walk_model (rep, tag, m)
    trace = []
    for (n, vc) in ns:
        if (n, vc) not in rep.arc_pc_envs:
            # this n_vc has a pre-state, but has not been emitted.
            # no point trying to evaluate its expressions, the
            # solve won't have seen them yet.
            continue
        n_nm = rep.node_count_name ((n, vc))
        node = p.nodes[n]
        # Collect the expressions this node evaluates, per node kind.
        if node.kind == 'Call':
            exprs = list (node.args)
        elif node.kind == 'Basic':
            exprs = [expr for (_, expr) in node.upds]
        elif node.kind == 'Cond':
            exprs = [node.cond]
        env = rep.node_pc_envs[(tag, n, vc)][1]
        accs = list (set ([acc for expr in exprs
            for acc in expr.get_mem_accesses ()]))
        for (kind, addr, v, mem) in accs:
            addr_s = solver.smt_expr (addr, env, rep.solv)
            v_s = solver.smt_expr (v, env, rep.solv)
            addr = eval_str (addr, env, rep.solv, m)
            v = eval_str (v, env, rep.solv, m)
            m_nm = m_var_name (mem)
            print '%s: %s @ <%s> -- %s -- %s' % (kind, m_nm, addr, v, n_nm)
            if simplify:
                addr_s = simplify_sexp (addr_s, rep, m)
                v_s = simplify_sexp (v_s, rep, m)
            if verbose:
                print '\t %s -- %s' % (addr_s, v_s)
            if symbs:
                # Resolve the concrete address against symbol/section tables.
                addr_n = str_to_num (addr)
                (hit_symbs, secs) = find_symbol (addr_n, output = False)
                ss = hit_symbs + secs
                if ss:
                    print '\t [%s]' % ', '.join (ss)
        if resolve_addrs:
            accs = [(kind, solver.to_smt_expr (addr, env, rep.solv),
                solver.to_smt_expr (v, env, rep.solv), mem)
                for (kind, addr, v, mem) in accs]
        trace.extend ([(kind, addr, v, mem, n, vc)
            for (kind, addr, v, mem) in accs])
        if node.kind == 'Call':
            msg = '<function call to %s at %s>' % (node.fname, n_nm)
            print msg
            trace.append (msg)
    return trace

def simplify_sexp (smt_xp, rep, m, flatten = True):
    # Simplify an s-expression under model m: resolve 'ite' nodes by
    # evaluating their condition, recursing into tuples.
    if type (smt_xp) == str:
        smt_xp = solver.parse_s_expression (smt_xp)
    if smt_xp[0] == 'ite':
        (_, c, x, y) = smt_xp
        if eval_model_bool (m, c):
            return simplify_sexp (x, rep, m, flatten)
        else:
            return simplify_sexp (y, rep, m, flatten)
    if type (smt_xp) == tuple:
        smt_xp = tuple ([simplify_sexp (x, rep, m, False)
            for x in smt_xp])
    if flatten:
        return solver.flat_s_expression (smt_xp)
    else:
        return smt_xp

def trace_mems (rep, m, verbose = False, symbs = True, tags = None):
    # Print the memory trace for every tag (pairing tags when available).
    if tags == None:
        if rep.p.pairing:
            tags = reversed (rep.p.pairing.tags)
        else:
            tags = rep.p.tags ()
    for tag in tags:
        print '%s mem trace:' % tag
        trace_mem (rep, tag, m, verbose = verbose, symbs = symbs)

def trace_mems_diff (rep, m, tags = ['ASM', 'C']):
    # Compare memory updates between the two tagged traces; returns
    # (all-equal?, mismatched addresses, C updates, ASM updates).
    asms = trace_mem (rep, tags[0], m, resolve_addrs = True)
    cs = trace_mem (rep, tags[1], m, resolve_addrs = True)
    ev = lambda expr: eval_str (expr, {}, None, m)
    c_upds = [(ev (addr), ev (v)) for (kind, addr, v, mem, _, _) in cs
        if kind == 'MemUpdate']
    asm_upds = [(ev (addr), ev (v)) for (kind, addr, v, mem, _, _) in asms
        if kind == 'MemUpdate' and 'mem' in m_var_name (mem)]
    c_upd_d = dict (c_upds)
    asm_upd_d = dict (asm_upds)
    addr_ord = [addr for (addr, _) in asm_upds] + [addr for (addr, _) in c_upds
        if addr not in asm_upd_d]
    mism = [addr for addr in addr_ord
        if c_upd_d.get (addr) != asm_upd_d.get (addr)]
    return (c_upd_d == asm_upd_d, mism, c_upds, asm_upds)

def get_pv_type (pv):
    # Decompose a PValid/PArrayValid expression into (op name, type, offset).
    assert pv.is_op (['PValid', 'PArrayValid'])
    typ_v = pv.vals[1]
    assert typ_v.kind == 'Type'
    typ = typ_v.val
    if pv.is_op ('PArrayValid'):
        return ('PArrayValid', typ, pv.vals[3])
    else:
        return ('PValid', typ, None)

def guess_pv (p, n, addr_expr):
    # Guess which pointer-validity hypothesis on n's (unique) predecessor
    # covers the variables of addr_expr.
    vs = syntax.get_expr_var_set (addr_expr)
    [pred] = p.preds[n]
    pvs = []
    def vis (expr):
        if expr.is_op (['PValid', 'PArrayValid']):
            pvs.append (expr)
    p.nodes[pred].cond.visit (vis)
    match_pvs = [pv for pv in pvs
        if set.union (* [syntax.get_expr_var_set (v) for v in pv.vals[2:]]) == vs]
    if len (match_pvs) > 1:
        match_pvs = [pv for pv in match_pvs if pv.is_op ('PArrayValid')]
    pv = match_pvs[0]
    return pv

def eval_pv_type (rep, (n, vc), m, data):
    # Evaluate a PArrayValid offset in model m; PValid data passes through.
    if data[0] == 'PValid':
        return data
    else:
        (nm, typ, offs) = data
        offs = rep.to_smt_expr (offs, (n, vc))
        offs = search.eval_model_expr (m, rep.solv, offs)
        return (nm, typ, offs)

def trace_suspicious_mem (rep, m, tag = 'C'):
    # Report addresses accessed via multiple distinct expressions whose
    # guessed pointer-validity types disagree (possible aliasing issues).
    cs = trace_mem (rep, tag, m)
    data = [(addr, search.eval_model_expr (m, rep.solv,
            rep.to_smt_expr (addr, (n, vc))), (n, vc))
        for (kind, addr, v, mem, n, vc) in cs]
    addr_sets = {}
    for (addr, addr_v, _) in data:
        addr_sets.setdefault (addr_v, set ())
        addr_sets[addr_v].add (addr)
    dup_addrs = set ([addr_v for addr_v in addr_sets
        if len (addr_sets[addr_v]) > 1])
    data = [(addr, addr_v, guess_pv (rep.p, n, addr), (n, vc))
        for (addr, addr_v, (n, vc)) in data
        if addr_v in dup_addrs]
    data = [(addr, addr_v, eval_pv_type (rep, (n, vc), m, get_pv_type (pv)),
            rep.to_smt_expr (pv, (n, vc)), n)
        for (addr, addr_v, pv, (n, vc)) in data]
    dup_addr_types = set ([addr_v for addr_v in dup_addrs
        if len (set ([t for (_, addr_v2, t, _, _) in data
            if addr_v2 == addr_v])) > 1])
    res = [(addr_v, [(t, pv, n) for (_, addr_v2, t, pv, n) in data
            if addr_v2 == addr_v])
        for addr_v in dup_addr_types]
    for (addr_v, insts) in res:
        print 'Address %s' % addr_v
        for (t, pv, n) in insts:
            print ' -- accessed with type %s at %s' % (t, n)
            print ' (covered by %s)' % pv
    return res

def trace_var (rep, tag, m, v):
    # Print the value of expression v at each point of the walked trace,
    # reporting only when it changes; returns the change trace.
    p = rep.p
    ns = walk_model (rep, tag, m)
    vds = rep.p.compute_var_dependencies ()
    trace = []
    vs = syntax.get_expr_var_set (v)
    def fetch ((n, vc)):
        # None when v's variables are not all live here or evaluation fails.
        if n in vds and [(nm, typ) for (nm, typ) in vs
                if (nm, typ) not in vds[n]]:
            return None
        try:
            (_, env) = rep.get_node_pc_env ((n, vc), tag)
            s = solver.smt_expr (v, env, rep.solv)
            s_x = solver.parse_s_expression (s)
            ev = search.eval_model (m, s_x)
            return (s, solver.smt_expr (ev, {}, None))
        except solver.EnvMiss, e:
            return None
        except AssertionError, e:
            return None
    val = None
    for (n, vc) in ns:
        n_nm = rep.node_count_name ((n, vc))
        val2 = fetch ((n, vc))
        if val2 != val:
            if val2 == None:
                print 'at %s: undefined' % n_nm
            else:
                print 'at %s:\t\t%s:\t\t%s' % (n_nm, val2[0], val2[1])
            val = val2
            trace.append (((n, vc), val))
        if n not in p.nodes:
            break
        node = p.nodes[n]
        if node.kind == 'Call':
            msg = '<function call to %s at %s>' % (node.fname,
                rep.node_count_name ((n, vc)))
            print msg
            trace.append (msg)
    return trace

def trace_deriv_ops (rep, m, tag):
    # Print evaluated arguments of derived word ops along the trace.
    n_vcs = walk_model (rep, tag, m)
    derivs = set (('CountTrailingZeroes', 'CountLeadingZeroes', 'WordReverse'))
    def get_derivs (node):
        dvs = set ()
        def visit (expr):
            if expr.is_op (derivs):
                dvs.add (expr)
        node.visit (lambda x: (), visit)
        return dvs
    for (n, vc) in n_vcs:
        if n not in rep.p.nodes:
            continue
        dvs = get_derivs (rep.p.nodes[n])
        if not dvs:
            continue
        print '%s:' % (rep.node_count_name ((n, vc)))
        for dv in dvs:
            [x] = dv.vals
            x = rep.to_smt_expr (x, (n, vc))
            x = eval_str (x, {}, rep.solv, m)
            print '\t%s: %s' % (dv.name, x)

def check_pairings ():
    # Sanity-print every C/ASM pairing and build its input equalities.
    for p in pairings.itervalues ():
        print p['C'], p['ASM']
        as_args = functions[p['ASM']].inputs
        c_args = functions[p['C']].inputs
        print as_args, c_args
        logic.mk_fun_inp_eqs (as_args, c_args, True)

def loop_var_deps (p):
    # For each loop, list the variables classified as LoopVariable.
    return [(n, [v for v in p.var_deps[n]
            if p.var_deps[n][v] == 'LoopVariable'])
        for n in p.loop_data]

def find_symbol (n, output = True):
    # Locate address n in the symbol and section tables; optionally print.
    from target_objects import symbols, sections
    symbs = []
    secs = []
    if output:
        def p (s):
            print s
    else:
        p = lambda s: ()
    for (s, (addr, size, _)) in symbols.iteritems ():
        if addr <= n and n < addr + size:
            symbs.append (s)
            p ('%x in %s (%x - %x)' % (n, s, addr, addr + size - 1))
    for (s, (start, end)) in sections.iteritems ():
        if start <= n and n <= end:
            secs.append (s)
            p ('%x in section %s (%x - %x)' % (n, s, start, end))
    return (symbs, secs)

def assembly_point (p, n):
    # Map node n back to a 4-byte-aligned assembly address hint, or None.
    (_, hints) = p.node_tags[n]
    if type (hints) != tuple or not logic.is_int (hints[1]):
        return None
    while p.node_tags[n][1][1] % 4 != 0:
        [n] = p.preds[n]
    return p.node_tags[n][1][1]

def assembly_points (p, ns):
    # assembly_point over a node list, dropping the Nones.
    ns = [assembly_point (p, n) for n in ns]
    ns = [n for n in ns if n != None]
    return ns

def disassembly_lines (addrs):
    # Grep the kernel disassembly dump for lines at the given addresses.
    # NOTE(review): the file handle is never closed -- relies on GC.
    f = open ('%s/kernel.elf.txt' % target_objects.target_dir)
    addr_set = set (['%x' % addr for addr in addrs])
    ss = [l.strip () for l in f if ':' in l
        and l.split(':', 1)[0] in addr_set]
    return ss

def disassembly (p, n):
    # Print disassembly for node n (or an iterable of nodes).
    if hasattr (n, '__iter__'):
        ns = set (n)
    else:
        ns = [n]
    addrs = sorted (set ([assembly_point (p, n) for n in ns]) - set ([None]))
    print 'asm %s' % ', '.join (['0x%x' % addr for addr in addrs])
    for s in disassembly_lines (addrs):
        print s

def disassembly_loop (p, n):
    # Print the disassembly of the loop containing n plus its entry points.
    head = p.loop_id (n)
    loop = p.loop_body (n)
    ns = sorted (set (assembly_points (p, loop)))
    entries = assembly_points (p, [n for n in p.preds[head]
        if n not in loop])
    print 'Loop: [%s]' % ', '.join (['%x' % addr for addr in ns])
    for s in disassembly_lines (ns):
        print s
    print 'entry from %s' % ', '.join (['%x' % addr for addr in entries])
    for s in disassembly_lines (entries):
        print s

def try_interpret_hyp (rep, hyp):
    # None when the hypothesis interprets and converts to SMT cleanly,
    # otherwise a ('Broken Hyp', hyp) marker.
    try:
        expr = rep.interpret_hyp (hyp)
        solver.smt_expr (expr, {}, rep.solv)
        return None
    except:
        return ('Broken Hyp', hyp)

def check_checks ():
    # Re-interpret every hypothesis of the last proof's checks; returns the
    # broken ones.
    p = problem.last_problem[0]
    rep = rep_graph.mk_graph_slice (p)
    proof = search.last_proof[0]
    checks = check.proof_checks (p, proof)
    all_hyps = set ([hyp for (_, hyp, _) in checks]
        + [hyp for (hyps, _, _) in checks for hyp in hyps])
    results = [try_interpret_hyp (rep, hyp) for hyp in all_hyps]
    return [r[1] for r in results if r]

def proof_failed_groups (p = None, proof = None):
    # Re-test each proof check group and collect the failing ones.
    # (Truncated by the chunk boundary below -- the final statement has no
    # right-hand side in the visible source.)
    if p == None:
        p = problem.last_problem[0]
    if proof == None:
        proof = search.last_proof[0]
    checks = check.proof_checks (p, proof)
    groups = check.proof_check_groups (checks)
    failed = []
    for group in groups:
        rep = rep_graph.mk_graph_slice (p)
        (res, el) = check.test_hyp_group (rep, group)
        if not res:
            failed.append (group)
            print 'Failed element: %s' % el
    failed_nms = set ([s for group in failed
# NOTE(review): the five components below are the tail of a time-series-capable
# plugin class whose 'class ...' header lies before this chunk -- presumably an
# areadetector plugin *_V23 class; confirm against the full file.
    ts_acquiring = Cpt(EpicsSignal, "TSAcquiring", string=True, doc="0='Done' 1='Acquiring'")
    ts_control = Cpt(EpicsSignal, "TSControl", string=True, doc="0=Erase/Start 1=Start 2=Stop 3=Read")
    ts_current_point = Cpt(EpicsSignal, "TSCurrentPoint")
    ts_num_points = Cpt(EpicsSignal, "TSNumPoints")
    ts_read = Cpt(EpicsSignal, "TSRead")


# Version ladder for the ROI statistics plugin: each class mixes the matching
# PluginBase version with the previous ROIStatPlugin version; empty bodies mean
# "no new PVs at this version".
class ROIStatPlugin_V25(PluginBase_V25, ROIStatPlugin_V23, version=(2, 5), version_of=ROIStatPlugin):
    ...


class ROIStatPlugin_V26(PluginBase_V26, ROIStatPlugin_V25, version=(2, 6), version_of=ROIStatPlugin):
    ...


class ROIStatPlugin_V31(PluginBase_V31, ROIStatPlugin_V26, version=(3, 1), version_of=ROIStatPlugin):
    ...


class ROIStatPlugin_V33(PluginBase_V33, ROIStatPlugin_V31, version=(3, 3), version_of=ROIStatPlugin):
    ...


class ROIStatPlugin_V34(PluginBase_V34, ROIStatPlugin_V33, version=(3, 4), version_of=ROIStatPlugin):
    ...


# --- NDROIStatN ---


class ROIStatNPlugin(Device, version_type='ADCore'):
    "Serves as a base class for other versions"
    ...


class ROIStatNPlugin_V22(ROIStatNPlugin, version=(2, 2), version_of=ROIStatNPlugin):
    # Per-ROI statistics PVs introduced in ADCore R2-2.
    bgd_width = Cpt(SignalWithRBV, "BgdWidth")
    max_value = Cpt(EpicsSignalRO, "MaxValue_RBV")
    mean_value = Cpt(EpicsSignalRO, "MeanValue_RBV")
    min_value = Cpt(EpicsSignalRO, "MinValue_RBV")
    name_ = Cpt(EpicsSignal, "Name", string=True)  # trailing _ avoids clashing with Device.name
    net = Cpt(EpicsSignalRO, "Net_RBV")
    reset = Cpt(EpicsSignal, "Reset", string=True, doc="")
    total = Cpt(EpicsSignalRO, "Total_RBV")
    use = Cpt(SignalWithRBV, "Use", string=True, doc="0='No' 1='Yes'")

    max_size = DDC_EpicsSignalRO(
        ("x", "MaxSizeX_RBV"),
        ("y", "MaxSizeY_RBV"),
        doc="max_size"
    )
    min_ = DDC_SignalWithRBV(
        ("x", "MinX"),
        ("y", "MinY"),
        doc="min"
    )
    size = DDC_SignalWithRBV(
        ("x", "SizeX"),
        ("y", "SizeY"),
        doc="size"
    )


class ROIStatNPlugin_V23(ROIStatNPlugin_V22, version=(2, 3), version_of=ROIStatNPlugin):
    # Time-series statistics PVs added in R2-3.
    ts_max_value = Cpt(EpicsSignal, "TSMaxValue")
    ts_mean_value = Cpt(EpicsSignal, "TSMeanValue")
    ts_min_value = Cpt(EpicsSignal, "TSMinValue")
    ts_net = Cpt(EpicsSignal, "TSNet")
    ts_total = Cpt(EpicsSignal, "TSTotal")


class ROIStatNPlugin_V25(ROIStatNPlugin_V23, version=(2, 5), version_of=ROIStatNPlugin):
    ts_timestamp = Cpt(EpicsSignal, "TSTimestamp")


# --- NDStats ---


class StatsPlugin_V20(PluginBase_V20, StatsPlugin, version=(2, 0), version_of=StatsPlugin):
    ...


class StatsPlugin_V22(PluginBase_V22, StatsPlugin_V20, version=(2, 2), version_of=StatsPlugin):
    # R2-2 switched many read-only statistics PVs to setpoint+readback pairs.
    hist_entropy = Cpt(SignalWithRBV, "HistEntropy")
    max_value = Cpt(SignalWithRBV, "MaxValue")
    mean_value = Cpt(SignalWithRBV, "MeanValue")
    min_value = Cpt(SignalWithRBV, "MinValue")
    net = Cpt(SignalWithRBV, "Net")
    reset = Cpt(EpicsSignal, "Reset")
    resets = DDC_EpicsSignal(("reset1", "Reset1"), doc="reset")
    sigma_value = Cpt(EpicsSignal, "SigmaValue")
    sigma_readout = Cpt(EpicsSignalRO, "Sigma_RBV")
    sigma_xy = Cpt(SignalWithRBV, "SigmaXY")
    total = Cpt(SignalWithRBV, "Total")

    max_ = DDC_SignalWithRBV(
        ("x", "MaxX"),
        ("y", "MaxY"),
        doc="max"
    )
    min_ = DDC_SignalWithRBV(
        ("x", "MinX"),
        ("y", "MinY"),
        doc="min"
    )
    sigma = DDC_SignalWithRBV(
        ("x", "SigmaX"),
        ("y", "SigmaY"),
        doc="sigma"
    )

    # Changed type to SignalWithRBV in R2-2:
    centroid = DDC_SignalWithRBV(
        ('x', 'CentroidX'),
        ('y', 'CentroidY'),
        doc='The centroid XY'
    )
    color_mode = Cpt(SignalWithRBV, 'ColorMode')
    data_type = Cpt(SignalWithRBV, 'DataType', string=True)


class StatsPlugin_V25(PluginBase_V25, StatsPlugin_V22, version=(2, 5), version_of=StatsPlugin):
    ts_timestamp = Cpt(EpicsSignal, "TSTimestamp")


class StatsPlugin_V26(PluginBase_V26, StatsPlugin_V25, version=(2, 6), version_of=StatsPlugin):
    # R2-6 adds shape statistics (eccentricity, orientation, kurtosis, skew)
    # and widens 'resets' to two reset PVs.
    centroid_total = Cpt(SignalWithRBV, "CentroidTotal")
    eccentricity = Cpt(SignalWithRBV, "Eccentricity")
    hist_above = Cpt(SignalWithRBV, "HistAbove")
    hist_below = Cpt(SignalWithRBV, "HistBelow")
    orientation = Cpt(SignalWithRBV, "Orientation")
    resets = DDC_EpicsSignal(
        ("reset1", "Reset1"),
        ("reset2", "Reset2"),
        doc="reset"
    )
    ts_centroid_total = Cpt(EpicsSignal, "TSCentroidTotal")
    ts_eccentricity = Cpt(EpicsSignal, "TSEccentricity")
    ts_orientation = Cpt(EpicsSignal, "TSOrientation")
    kurtosis = DDC_SignalWithRBV(
        ("x", "KurtosisX"),
        ("y", "KurtosisY"),
        doc="kurtosis"
    )
    skew = DDC_SignalWithRBV(
        ("x", "SkewX"),
        ("y", "SkewY"),
        doc="skew"
    )
    ts_kurtosis = DDC_EpicsSignal(
        ("x", "TSKurtosisX"),
        ("y", "TSKurtosisY"),
        doc="ts_kurtosis"
    )
    ts_skew = DDC_EpicsSignal(
        ("x", "TSSkewX"),
        ("y", "TSSkewY"),
        doc="ts_skew"
    )


class StatsPlugin_V31(PluginBase_V31, StatsPlugin_V26, version=(3, 1), version_of=StatsPlugin):
    ...


class StatsPlugin_V32(StatsPlugin_V31, version=(3, 2), version_of=StatsPlugin):
    histogram_x = Cpt(EpicsSignalRO, "HistogramX_RBV")


class StatsPlugin_V33(PluginBase_V33, StatsPlugin_V32, version=(3, 3), version_of=StatsPlugin):
    # R3-3 removed the per-plugin time-series control PVs (moved to the
    # NDTimeSeries plugin); setting the components to None deletes them.
    ts_acquiring = None  # REMOVED
    ts_control = None  # REMOVED
    ts_current_point = None  # REMOVED
    ts_num_points = None  # REMOVED
    ts_read = None  # REMOVED
    # NOTE(review): attribute is named ts_sigma_x yet bundles both TSSigmaX and
    # TSSigmaY (doc="ts_sigma") -- possibly a misnamed attribute; confirm
    # against the upstream ophyd definition before renaming (API change).
    ts_sigma_x = DDC_EpicsSignal(
        ("ts_sigma_x", "TSSigmaX"),
        ("ts_sigma_y", "TSSigmaY"),
        doc="ts_sigma")


class StatsPlugin_V34(PluginBase_V34, StatsPlugin_V33, version=(3, 4), version_of=StatsPlugin):
    ...


# --- NDFileTIFF ---
# TIFF file writer plugin: pure version ladder, no version adds new PVs here.


class TIFFPlugin_V20(FilePlugin_V20, TIFFPlugin, version=(2, 0), version_of=TIFFPlugin):
    ...


class TIFFPlugin_V21(FilePlugin_V21, TIFFPlugin_V20, version=(2, 1), version_of=TIFFPlugin):
    ...


class TIFFPlugin_V22(FilePlugin_V22, TIFFPlugin_V21, version=(2, 2), version_of=TIFFPlugin):
    ...


class TIFFPlugin_V25(FilePlugin_V25, TIFFPlugin_V22, version=(2, 5), version_of=TIFFPlugin):
    ...


class TIFFPlugin_V26(FilePlugin_V26, TIFFPlugin_V25, version=(2, 6), version_of=TIFFPlugin):
    ...


class TIFFPlugin_V31(FilePlugin_V31, TIFFPlugin_V26, version=(3, 1), version_of=TIFFPlugin):
    ...


class TIFFPlugin_V33(FilePlugin_V33, TIFFPlugin_V31, version=(3, 3), version_of=TIFFPlugin):
    ...


class TIFFPlugin_V34(FilePlugin_V34, TIFFPlugin_V33, version=(3, 4), version_of=TIFFPlugin):
    ...
# --- NDTransform --- class TransformPlugin_V20(PluginBase_V20, TransformPlugin, version=(2, 0), version_of=TransformPlugin): array_size = DDC_SignalWithRBV( ("array_size0", "ArraySize0"), ("array_size1", "ArraySize1"), ("array_size2", "ArraySize2"), doc='Array size', ) class TransformPlugin_V21(TransformPlugin_V20, version=(2, 1), version_of=TransformPlugin): name_ = None # REMOVED origin_location = None # REMOVED t1_max_size = None # REMOVED DDC t2_max_size = None # REMOVED DDC t3_max_size = None # REMOVED DDC t4_max_size = None # REMOVED DDC types = None # REMOVED DDC width = None # Removed array_size portions height = None # Removed array_size portions depth = None # Removed array_size portions type_ = Cpt(EpicsSignal, 'Type', string=True, doc="0=None 1=Rot90 2=Rot180 3=Rot270 4=Mirror 5=Rot90Mirror 6=Rot180Mirror 7=Rot270Mirror") array_size = DDC_EpicsSignalRO( ("array_size0", "ArraySize0_RBV"), ("array_size1", "ArraySize1_RBV"), ("array_size2", "ArraySize2_RBV"), doc='Array size', ) class TransformPlugin_V22(PluginBase_V22, TransformPlugin_V21, version=(2, 2), version_of=TransformPlugin): ... class TransformPlugin_V25(PluginBase_V25, TransformPlugin_V22, version=(2, 5), version_of=TransformPlugin): ... class TransformPlugin_V26(PluginBase_V26, TransformPlugin_V25, version=(2, 6), version_of=TransformPlugin): ... class TransformPlugin_V31(PluginBase_V31, TransformPlugin_V26, version=(3, 1), version_of=TransformPlugin): ... class TransformPlugin_V33(PluginBase_V33, TransformPlugin_V31, version=(3, 3), version_of=TransformPlugin): ... class TransformPlugin_V34(PluginBase_V34, TransformPlugin_V33, version=(3, 4), version_of=TransformPlugin): ... 
# --- NDPva --- @register_plugin class PvaPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" _default_suffix = 'Pva1:' _suffix_re = r'Pva\d:' _plugin_type = 'NDPluginPva' class PvaPlugin_V25(PluginBase_V25, PvaPlugin, version=(2, 5), version_of=PvaPlugin): pv_name = Cpt(EpicsSignalRO, "PvName_RBV") class PvaPlugin_V26(PluginBase_V26, PvaPlugin_V25, version=(2, 6), version_of=PvaPlugin): ... class PvaPlugin_V31(PluginBase_V31, PvaPlugin_V26, version=(3, 1), version_of=PvaPlugin): ... class PvaPlugin_V33(PluginBase_V33, PvaPlugin_V31, version=(3, 3), version_of=PvaPlugin): ... class PvaPlugin_V34(PluginBase_V34, PvaPlugin_V33, version=(3, 4), version_of=PvaPlugin): ... # --- NDFFT --- @register_plugin class FFTPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" ... _default_suffix = 'FFT1:' _suffix_re = r'FFT\d:' _plugin_type = 'NDPluginFFT' class FFTPlugin_V25(PluginBase_V25, FFTPlugin, version=(2, 5), version_of=FFTPlugin): fft_abs_value = Cpt(EpicsSignal, "FFTAbsValue") fft_direction = Cpt(SignalWithRBV, "FFTDirection", string=True, doc="0='Time to freq.' 1='Freq. to time'") fft_freq_axis = Cpt(EpicsSignal, "FFTFreqAxis") fft_imaginary = Cpt(EpicsSignal, "FFTImaginary") fft_num_average = Cpt(SignalWithRBV, "FFTNumAverage") fft_num_averaged = Cpt(EpicsSignal, "FFTNumAveraged") fft_real = Cpt(EpicsSignal, "FFTReal") fft_reset_average = Cpt(EpicsSignal, "FFTResetAverage", string=True, doc="0='Done' 1='Reset'") fft_suppress_dc = Cpt(SignalWithRBV, "FFTSuppressDC", string=True, doc="0='Disable' 1='Enable'") fft_time_axis = Cpt(EpicsSignal, "FFTTimeAxis") fft_time_per_point = Cpt(SignalWithRBV, "FFTTimePerPoint") fft_time_per_point_link = Cpt(EpicsSignal, "FFTTimePerPointLink") fft_time_series = Cpt(EpicsSignal, "FFTTimeSeries") name_ = Cpt(EpicsSignal, "Name", string=True) class FFTPlugin_V26(PluginBase_V26, FFTPlugin_V25, version=(2, 6), version_of=FFTPlugin): ... 
class FFTPlugin_V31(PluginBase_V31, FFTPlugin_V26, version=(3, 1), version_of=FFTPlugin): ... class FFTPlugin_V33(PluginBase_V33, FFTPlugin_V31, version=(3, 3), version_of=FFTPlugin): ... class FFTPlugin_V34(PluginBase_V34, FFTPlugin_V33, version=(3, 4), version_of=FFTPlugin): ... # --- NDScatter --- @register_plugin class ScatterPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" _default_suffix = 'Scatter1:' _suffix_re = r'Scatter\d:' _plugin_type = 'NDPluginScatter' class ScatterPlugin_V31(PluginBase_V31, ScatterPlugin, version=(3, 1), version_of=ScatterPlugin): scatter_method = Cpt(SignalWithRBV, "ScatterMethod", string=True, doc="0='Round robin'") class ScatterPlugin_V32(ScatterPlugin_V31, version=(3, 2), version_of=ScatterPlugin): ... class ScatterPlugin_V33(PluginBase_V33, ScatterPlugin_V32, version=(3, 3), version_of=ScatterPlugin): ... class ScatterPlugin_V34(PluginBase_V34, ScatterPlugin_V33, version=(3, 4), version_of=ScatterPlugin): ... # --- NDPosPlugin --- @register_plugin class PosPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" _default_suffix = 'Pos1:' _suffix_re = r'Pos\d:' _plugin_type = 'NDPosPlugin' class PosPluginPlugin_V25(PluginBase_V25, PosPlugin, version=(2, 5), version_of=PosPlugin): delete = Cpt(EpicsSignal, "Delete", string=True, doc="") duplicate = Cpt(SignalWithRBV, "Duplicate") expected_id = Cpt(EpicsSignalRO, "ExpectedID_RBV") file_valid = Cpt(EpicsSignalRO, "FileValid_RBV", string=True, doc="0='No' 1='Yes'") filename = Cpt(SignalWithRBV, "Filename") id_difference = Cpt(SignalWithRBV, "IDDifference") id_name = Cpt(SignalWithRBV, "IDName", string=True) id_start = Cpt(SignalWithRBV, "IDStart") index = Cpt(EpicsSignalRO, "Index_RBV") missing = Cpt(SignalWithRBV, "Missing") mode = Cpt(SignalWithRBV, "Mode", string=True, doc="0='Discard' 1='Keep'") position_ = Cpt(EpicsSignalRO, "Position_RBV", string=True) qty = Cpt(EpicsSignalRO, "Qty_RBV") reset = Cpt(EpicsSignal, 
"Reset", string=True, doc="") running = Cpt(SignalWithRBV, "Running") class PosPluginPlugin_V26(PluginBase_V26, PosPluginPlugin_V25, version=(2, 6), version_of=PosPlugin): ... class PosPluginPlugin_V31(PluginBase_V31, PosPluginPlugin_V26, version=(3, 1), version_of=PosPlugin): ... class PosPluginPlugin_V33(PluginBase_V33, PosPluginPlugin_V31, version=(3, 3), version_of=PosPlugin): ... class PosPluginPlugin_V34(PluginBase_V34, PosPluginPlugin_V33, version=(3, 4), version_of=PosPlugin): ... # --- NDCircularBuff --- @register_plugin class CircularBuffPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" _default_suffix = 'CB1:' _suffix_re = r'CB\d:' _plugin_type = 'NDPluginCircularBuff' class CircularBuffPlugin_V22(PluginBase_V22, CircularBuffPlugin, version=(2, 2), version_of=CircularBuffPlugin): actual_trigger_count = Cpt(EpicsSignalRO, "ActualTriggerCount_RBV") capture = Cpt(SignalWithRBV, "Capture") current_qty = Cpt(EpicsSignalRO, "CurrentQty_RBV") post_count = Cpt(SignalWithRBV, "PostCount") post_trigger_qty = Cpt(EpicsSignalRO, "PostTriggerQty_RBV") pre_count = Cpt(SignalWithRBV, "PreCount") preset_trigger_count = Cpt(SignalWithRBV, "PresetTriggerCount") status_message = Cpt(EpicsSignal, "StatusMessage", string=True) trigger_ = Cpt(SignalWithRBV, "Trigger") trigger_a = Cpt(SignalWithRBV, "TriggerA", string=True) trigger_a_val = Cpt(EpicsSignal, "TriggerAVal") trigger_b = Cpt(SignalWithRBV, "TriggerB", string=True) trigger_b_val = Cpt(EpicsSignal, "TriggerBVal") trigger_calc = Cpt(SignalWithRBV, "TriggerCalc") trigger_calc_val = Cpt(EpicsSignal, "TriggerCalcVal") array_size_xyz = DDC_EpicsSignalRO( ("array_size_x", "ArraySizeX_RBV"), ("array_size_y", "ArraySizeY_RBV"), ("array_size_z", "ArraySizeZ_RBV"), ) class CircularBuffPlugin_V25( PluginBase_V25, CircularBuffPlugin_V22, version=(2, 5), version_of=CircularBuffPlugin ): ... 
class CircularBuffPlugin_V26( PluginBase_V26, CircularBuffPlugin_V25, version=(2, 6), version_of=CircularBuffPlugin ): ... class CircularBuffPlugin_V31( PluginBase_V31, CircularBuffPlugin_V26, version=(3, 1), version_of=CircularBuffPlugin ): ... class CircularBuffPlugin_V33( PluginBase_V33, CircularBuffPlugin_V31, version=(3, 3), version_of=CircularBuffPlugin ): ... class CircularBuffPlugin_V34( PluginBase_V34, CircularBuffPlugin_V33, version=(3, 4), version_of=CircularBuffPlugin ): flush_on_soft_trigger = Cpt( SignalWithRBV, "FlushOnSoftTrg", string=True, doc="0='OnNewImage' 1='Immediately'" ) # --- NDAttributeN --- class AttributeNPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" ... class AttributeNPlugin_V22(AttributeNPlugin, version=(2, 2), version_of=AttributeNPlugin): attribute_name = Cpt(SignalWithRBV, "AttrName") ts_array_value = Cpt(EpicsSignal, "TSArrayValue") value_sum = Cpt(EpicsSignalRO, "ValueSum_RBV") value = Cpt(EpicsSignalRO, "Value_RBV") class AttributeNPlugin_V26(AttributeNPlugin_V22, version=(2, 6), version_of=AttributeNPlugin): ... # --- NDAttrPlot --- class AttrPlotPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" _plugin_type = 'NDAttrPlot' class AttrPlotPlugin_V31(PluginBase_V31, AttrPlotPlugin, version=(3, 1), version_of=AttrPlotPlugin): npts = Cpt(EpicsSignal, "NPts") reset = Cpt(EpicsSignal, "Reset") class AttrPlotPlugin_V33(PluginBase_V33, AttrPlotPlugin_V31, version=(3, 3), version_of=AttrPlotPlugin): ... class AttrPlotPlugin_V34(PluginBase_V34, AttrPlotPlugin_V33, version=(3, 4), version_of=AttrPlotPlugin): ... # --- NDTimeSeriesN --- class TimeSeriesNPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" ... 
class TimeSeriesNPlugin_V25(TimeSeriesNPlugin, version=(2, 5), version_of=TimeSeriesNPlugin): name_ = Cpt(EpicsSignal, "Name", string=True) time_series = Cpt(EpicsSignal, "TimeSeries") # --- NDTimeSeries --- @register_plugin class TimeSeriesPlugin(Device, version_type='ADCore'): "Serves as a base class for other versions" _plugin_type = 'NDPluginTimeSeries' class TimeSeriesPlugin_V25(PluginBase_V25, TimeSeriesPlugin, version=(2, 5), version_of=TimeSeriesPlugin): ts_acquire = Cpt(EpicsSignal, "TSAcquire") ts_acquire_mode = Cpt( SignalWithRBV, "TSAcquireMode", string=True, doc="0='Fixed length' 1='Circ. buffer'" ) ts_acquiring = Cpt(EpicsSignal, "TSAcquiring", string=True, doc="0='Done' 1='Acquiring'")
aws, return the IP address for you "free" of charge # as part of the instance information for the VM. This might be returned # only after the VM creation has been completed. # # This function is genericly called after the VM has been found to be # running, to either simply verify that we have a valid IP address in # the first case above, or to ask the CSP for it and then verify it # in the second case. # # public IP value will be in args.vm_id # # This function can do other cross-checks to validate other setups like # checking if the SSH key-name returned from the CSP is the same as we # sent it. Checks like this are optional, but highly desirable. # # Returns: 0 success # 1 fails, invalid IP or can't get it # def GetIPSetupCorrectly(self, args): ''' called after 'running' status to get IP. Does nothing for Alibaba ''' debug(1, "ip: %s keyname: \"%s\"" % (args.vm_ip, args.key_name)) # Very CSP specific - may be picked up in CreateVM args.vm_ip = "1-2-3-4-imaginary.fake.com" # main responsibilty of this function return 0 ############################################################################## # CSP specific Network Security Group Functions # # ShowSecurityGroups Displays NSG (network security groups) in region # ExistingSecurityGroup Does NSG exist? # CreateSecurityGroup Creates a NSG from a name, and adds rules # DeleteSecurityGroup Deletes a NSG ############################################################################## ############################################################################## # ShowSecurityGroups # # This function shows basic information about your account's security groups # for your region. # # Intended to be informative only, as each CSP will probably supply different # type of information. 
# # Returns: 0 one or more Netwroks Security Groups found in region # 1 error, or no NSG's defined in region # def ShowSecurityGroups(self, args): ''' Displays all current security groups ''' # dummy list of groups to have something to display output = [] output.append({ "GroupId":"sg_dummy_1", "GroupName":"NSG_Dummy1", "Description":"Desc of Dummy1" }) output.append({ "GroupId":"sg_dummy_2", "GroupName":"NSG_Dummy2", "Description":"Desc of Dummy2" }) output.append({ "GroupId":"sg_dummy_3", "GroupName":"NSG_Dummy3", "Description":"Desc of Dummy3" }) # Have a list of security groups. display them items = len(output) for idx in range(0, items): print "%2d %-12s \"%s\" \"%s\"" % (idx, output[idx]["GroupId"], output[idx]["GroupName"], output[idx]["Description"]) if (items == 0): return 1 # no NSG's found else: return 0 # 1 or more NSG's found ############################################################################## # ExistingSecurityGroup # # Given a name of a security group in args.nsg_name, this function sees # if it currently exists on the CSP # # This entire application is written assuming that once a security group is # created, it doesn't need to really change much for the lifetime of the # universe. Therefor we don't delete them unless specificly asked for # # The purpose of this function is to decide if we need to create a Network # Security Group, or to return the id of that existing group in args.nsg_id # # Returns: 0 if security group args.nsg_name currently exists and is valid # 1 need to create a group # def ExistingSecurityGroup(self, args): ''' Does the security group name currently exist ? get it if it does''' trace(2, "\"%s\"" % (args.nsg_name)) if (args.nsg_name == "" or args.nsg_name == None or args.nsg_name == "None"): error("NetworkSecurityGroup name is \"%s\"" % args.nsg_name) return 1 args.nsg_id=None # if set, we know it exists. 
trace(2, "Did not find security group: \"%s\"" % args.nsg_name) return 1 ############################################################################## # CreateSecurityGroup # # Creates a full network security group by the name of args.nsg_name, saves the # value in args.nsg_id # # Any additional rules required for the security group to set up ssh, ssl and # ping are added to the group here before it is returned. # # If the CSP has object-taging feature, the new security group should be # tagged with a unique name so it can be identified later. # # IMPORTANT: if you can create a rule to make the VM pingable (a good thing # for initial development), be sure to call following in ArgOptions # so that the ping feature will be used when needed by this app # # "parser.set_defaults(pingable=1)" # def CreateSecurityGroup(self, args): ''' creates security group. saves it in args.nsg_id ''' trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id)) # CSP_Specific_CreateNSG(args.nsg_name, ...) rc = 0 time.sleep(1) # TEMPLATE DEVELOPMENT CODE - remove this sleep! if (rc != 0): # check for return code error ("Problems creating VM \"%s\"" % args.vm_name) return rc # get the NSG id of the new security group args.nsg_id = "sg_FakeNSGID" debug(1, "args.nsg_id <--- %s" % args.nsg_id) # tag the NSG id if needed (CSP specific) # CSP_Specific_TagGroup(args.nsg_id, args.nsg_name) # Security rules -- make a list of ingress and outgress rules - easy to change # slow, but this code is rarely used. understandability is more important # note unlike aws/alibaba ingress/outgress both in same rule set - as "Direction" field # Rule priority, between 100 (highest priority) and 4096 (lowest priority). Must be unique for each rule in the collection # TBD: rule for pinging? -- for aws/alibaba - this is a 'icmp' rule. 
def DeleteSecurityGroup(self, args):
    ''' deletes the security group '''
    # Deletes the NSG identified by args.nsg_id and clears that value.
    # If the CSP does not cascade-delete attached rules, they would also have
    # to be removed individually here.
    trace(2, "\"%s\" %s" % (args.nsg_name, args.nsg_id))

    if args.nsg_id is None:
        # BUGFIX: error() is called with a single %-formatted string everywhere
        # else in this file; the old call passed the name as a stray second
        # argument, so the NSG name was never substituted into the message.
        error("NSG %s already deleted" % args.nsg_name)
        return 1

    # CSP_Specific_DeleteNSG(args.nsg_id)
    rc = 0
    time.sleep(1)           # TEMPLATE DEVELOPMENT CODE - remove this sleep!

    args.nsg_id = None      # remove id from args
    return rc
<reponame>ashantanu/CMC """ Training MoCo and Instance Discrimination InsDis: Unsupervised feature learning via non-parametric instance discrimination MoCo: Momentum Contrast for Unsupervised Visual Representation Learning """ from __future__ import print_function import os import sys import time import torch import torch.backends.cudnn as cudnn import argparse import socket import tensorboard_logger as tb_logger from torchvision import transforms, datasets from util import adjust_learning_rate, AverageMeter from models.resnet import InsResNet50 from models.resnet import InsResNet12 from NCE.NCEAverage import MemoryInsDis from NCE.NCEAverage import MemoryMoCo from NCE.NCECriterion import NCECriterion from NCE.NCECriterion import NCESoftmaxLoss from dataset import ImageFolderInstance try: from apex import amp, optimizers except ImportError: pass """ TODO: python 3.6 ModuleNotFoundError """ def parse_option(): hostname = socket.gethostname() parser = argparse.ArgumentParser('argument for training') parser.add_argument('--print_freq', type=int, default=10, help='print frequency') parser.add_argument('--tb_freq', type=int, default=500, help='tb frequency') parser.add_argument('--save_freq', type=int, default=10, help='save frequency') parser.add_argument('--batch_size', type=int, default=128, help='batch_size') parser.add_argument('--num_workers', type=int, default=18, help='num of workers to use') parser.add_argument('--epochs', type=int, default=240, help='number of training epochs') parser.add_argument('--prefix', type=str, default='', help='prefix for folder name') # optimization parser.add_argument('--learning_rate', type=float, default=0.03, help='learning rate') parser.add_argument('--lr_decay_epochs', type=str, default='120,160,200', help='where to decay lr, can be a list') parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate') parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam') 
parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam') parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay') parser.add_argument('--momentum', type=float, default=0.9, help='momentum') # crop parser.add_argument('--crop', type=float, default=0.2, help='minimum crop') # dataset parser.add_argument('--dataset', type=str, default='imagenet100', choices=[ 'miniimagenet','imagenet100', 'imagenet']) parser.add_argument('--data_folder', type=str, default='../miniimagenettools/processed_images/') # resume parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') # augmentation setting parser.add_argument('--aug', type=str, default='CJ', choices=['NULL', 'CJ']) # warm up parser.add_argument('--warm', action='store_true', help='add warm-up setting') parser.add_argument('--amp', action='store_true', help='using mixed precision') parser.add_argument('--opt_level', type=str, default='O2', choices=['O1', 'O2']) # model definition parser.add_argument('--model', type=str, default='resnet50', choices=['resnet12', 'resnet50', 'resnet50x2', 'resnet50x4']) # loss function parser.add_argument('--softmax', action='store_true', help='using softmax contrastive loss rather than NCE') parser.add_argument('--nce_k', type=int, default=16384) parser.add_argument('--nce_t', type=float, default=0.07) parser.add_argument('--nce_m', type=float, default=0.5) # memory setting parser.add_argument('--moco', action='store_true', help='using MoCo (otherwise Instance Discrimination)') parser.add_argument('--alpha', type=float, default=0.999, help='exponential moving average weight') # GPU setting parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.') opt = parser.parse_args() # set the path according to the environment opt.model_path = './{}_{}_models'.format(opt.prefix, opt.dataset) opt.tb_path = './{}_{}_tensorboard'.format(opt.prefix, opt.dataset) if opt.dataset 
== 'imagenet': if 'alexnet' not in opt.model: opt.crop = 0.08 iterations = opt.lr_decay_epochs.split(',') opt.lr_decay_epochs = list([]) for it in iterations: opt.lr_decay_epochs.append(int(it)) opt.method = 'softmax' if opt.softmax else 'nce' prefix = '84_MoCo{}'.format(opt.alpha) if opt.moco else 'InsDis' opt.model_name = '{}_{}_{}_{}_lr_{}_decay_{}_bsz_{}_crop_{}'.format(prefix, opt.method, opt.nce_k, opt.model, opt.learning_rate, opt.weight_decay, opt.batch_size, opt.crop) if opt.warm: opt.model_name = '{}_warm'.format(opt.model_name) if opt.amp: opt.model_name = '{}_amp_{}'.format(opt.model_name, opt.opt_level) opt.model_name = '{}_aug_{}'.format(opt.model_name, opt.aug) opt.model_folder = os.path.join(opt.model_path, opt.model_name) if not os.path.isdir(opt.model_folder): os.makedirs(opt.model_folder) opt.tb_folder = os.path.join(opt.tb_path, opt.model_name) if not os.path.isdir(opt.tb_folder): os.makedirs(opt.tb_folder) return opt def moment_update(model, model_ema, m): """ model_ema = m * model_ema + (1 - m) model """ for p1, p2 in zip(model.parameters(), model_ema.parameters()): p2.data.mul_(m).add_(1-m, p1.detach().data) def get_shuffle_ids(bsz): """generate shuffle ids for ShuffleBN""" forward_inds = torch.randperm(bsz).long().cuda() backward_inds = torch.zeros(bsz).long().cuda() value = torch.arange(bsz).long().cuda() backward_inds.index_copy_(0, forward_inds, value) return forward_inds, backward_inds def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def main(): args = parse_option() if args.gpu is not None: print("Use GPU: {} for training".format(args.gpu)) # set the data loader data_folder = os.path.join(args.data_folder, 'train') image_size = 84 mean = [120.39586422 / 255.0, 115.59361427 / 255.0, 104.54012653 / 255.0] std = [70.68188272 / 255.0, 68.27635443 / 255.0, 72.54505529 / 255.0] normalize = transforms.Normalize(mean=mean, std=std) if args.aug == 'NULL': train_transform = transforms.Compose([ 
transforms.RandomResizedCrop(image_size, scale=(args.crop, 1.)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ]) elif args.aug == 'CJ': train_transform = transforms.Compose([ transforms.RandomResizedCrop(image_size, scale=(args.crop, 1.)), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ]) else: raise NotImplemented('augmentation not supported: {}'.format(args.aug)) train_dataset = ImageFolderInstance(data_folder, transform=train_transform, two_crop=args.moco) print(len(train_dataset)) train_sampler = None train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.num_workers, pin_memory=True, sampler=train_sampler) # create model and optimizer n_data = len(train_dataset) if args.model == 'resnet12': model = InsResNet12() if args.moco: model_ema = InsResNet12() elif args.model == 'resnet50': model = InsResNet50() if args.moco: model_ema = InsResNet50() elif args.model == 'resnet50x2': model = InsResNet50(width=2) if args.moco: model_ema = InsResNet50(width=2) elif args.model == 'resnet50x4': model = InsResNet50(width=4) if args.moco: model_ema = InsResNet50(width=4) else: raise NotImplementedError('model not supported {}'.format(args.model)) print("Number of Params = ",count_parameters(model)) # print(model) # copy weights from `model' to `model_ema' if args.moco: moment_update(model, model_ema, 0) # set the contrast memory and criterion if args.moco: contrast = MemoryMoCo(128, n_data, args.nce_k, args.nce_t, args.softmax).cuda(args.gpu) print("Params for MemoryMoCo - ", args.nce_k, args.nce_t) else: contrast = MemoryInsDis(128, n_data, args.nce_k, args.nce_t, args.nce_m, args.softmax).cuda(args.gpu) criterion = NCESoftmaxLoss() if args.softmax else NCECriterion(n_data) criterion = criterion.cuda(args.gpu) model = model.cuda() if args.moco: model_ema = 
    # NOTE(review): this chunk resumes mid-way through a training entry point
    # whose `def` line is outside the visible source; the leading indentation
    # (4 spaces) is assumed — TODO confirm against the full file.
    model_ema.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    cudnn.benchmark = True

    if args.amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level)
        if args.moco:
            # Momentum encoder is never stepped directly, so its optimizer is a
            # zero-lr placeholder required only for amp.initialize().
            optimizer_ema = torch.optim.SGD(model_ema.parameters(),
                                            lr=0,
                                            momentum=0,
                                            weight_decay=0)
            model_ema, optimizer_ema = amp.initialize(model_ema, optimizer_ema, opt_level=args.opt_level)

    # optionally resume from a checkpoint
    args.start_epoch = 1
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            # Load onto CPU first to avoid spiking GPU memory.
            checkpoint = torch.load(args.resume, map_location='cpu')
            # checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch'] + 1
            model.load_state_dict(checkpoint['model'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            contrast.load_state_dict(checkpoint['contrast'])
            if args.moco:
                model_ema.load_state_dict(checkpoint['model_ema'])
            if args.amp and checkpoint['opt'].amp:
                print('==> resuming amp state_dict')
                amp.load_state_dict(checkpoint['amp'])
            print("=> loaded successfully '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            del checkpoint
            torch.cuda.empty_cache()
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # tensorboard
    logger = tb_logger.Logger(logdir=args.tb_folder, flush_secs=2)

    # routine
    for epoch in range(args.start_epoch, args.epochs + 1):

        adjust_learning_rate(epoch, args, optimizer)
        print("==> training...")

        time1 = time.time()
        if args.moco:
            loss, prob = train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, args)
        else:
            loss, prob = train_ins(epoch, train_loader, model, contrast, criterion, optimizer, args)
        time2 = time.time()
        print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))

        # tensorboard logger
        logger.log_value('ins_loss', loss, epoch)
        logger.log_value('ins_prob', prob, epoch)
        logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)

        # save model
        # NOTE(review): this save_freq block and the one further down write
        # the same 'ckpt_epoch_{epoch}.pth' file twice per save epoch —
        # looks like duplicated code; confirm before removing either copy.
        if epoch % args.save_freq == 0:
            print('==> Saving...')
            state = {
                'opt': args,
                'model': model.state_dict(),
                'contrast': contrast.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
            }
            if args.moco:
                state['model_ema'] = model_ema.state_dict()
            if args.amp:
                state['amp'] = amp.state_dict()
            save_file = os.path.join(args.model_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
            torch.save(state, save_file)
            # help release GPU memory
            del state

        # saving the model
        print('==> Saving...')
        state = {
            'opt': args,
            'model': model.state_dict(),
            'contrast': contrast.state_dict(),
            'optimizer': optimizer.state_dict(),
            'epoch': epoch,
        }
        if args.moco:
            state['model_ema'] = model_ema.state_dict()
        if args.amp:
            state['amp'] = amp.state_dict()
        save_file = os.path.join(args.model_folder, 'current.pth')
        torch.save(state, save_file)
        if epoch % args.save_freq == 0:
            save_file = os.path.join(args.model_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
            torch.save(state, save_file)
        # help release GPU memory
        del state
        torch.cuda.empty_cache()


def train_ins(epoch, train_loader, model, contrast, criterion, optimizer, opt):
    """
    one epoch training for instance discrimination
    """
    model.train()

    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    prob_meter = AverageMeter()

    end = time.time()
    for idx, (inputs, _, index) in enumerate(train_loader):
        data_time.update(time.time() - end)

        bsz = inputs.size(0)
        inputs = inputs.float()
        if opt.gpu is not None:
            inputs = inputs.cuda(opt.gpu, non_blocking=True)
        else:
            inputs = inputs.cuda()
        # NOTE(review): index is moved with opt.gpu even on the
        # opt.gpu-is-None path above — confirm this is intended.
        index = index.cuda(opt.gpu, non_blocking=True)

        # ===================forward=====================
        feat = model(inputs)
        out = contrast(feat, index)

        loss = criterion(out)
        # out[:, 0] presumably holds the positive-pair score — TODO confirm
        # against the contrast module.
        prob = out[:, 0].mean()

        # ===================backward=====================
        optimizer.zero_grad()
        if opt.amp:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()

        # ===================meters=====================
        loss_meter.update(loss.item(), bsz)
        prob_meter.update(prob.item(), bsz)

        torch.cuda.synchronize()
        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if (idx + 1) % opt.print_freq == 0:
            print('Train: [{0}][{1}/{2}]\t'
                  'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'loss {loss.val:.3f} ({loss.avg:.3f})\t'
                  'prob {prob.val:.3f} ({prob.avg:.3f})'.format(
                epoch, idx + 1, len(train_loader), batch_time=batch_time,
                data_time=data_time, loss=loss_meter, prob=prob_meter))
            print(out.shape)
            sys.stdout.flush()

    return loss_meter.avg, prob_meter.avg


def train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, opt):
    """
    one epoch training for instance discrimination
    """
    model.train()
    model_ema.eval()

    def set_bn_train(m):
        # Keep BatchNorm layers of the momentum encoder in train mode so
        # they use batch statistics (standard MoCo practice).
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            m.train()
    model_ema.apply(set_bn_train)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    loss_meter = AverageMeter()
    prob_meter = AverageMeter()

    end = time.time()
    for idx, (inputs, _, index) in enumerate(train_loader):
        # if idx<1865:##CHANGED
        # continue##CHANGED
        data_time.update(time.time() - end)

        bsz = inputs.size(0)
        inputs = inputs.float()
        if opt.gpu is not None:
            inputs = inputs.cuda(opt.gpu, non_blocking=True)
        else:
            inputs = inputs.cuda()
        index = index.cuda(opt.gpu, non_blocking=True)

        # ===================forward=====================
        # inputs is two 3-channel views stacked on the channel dim.
        x1, x2 = torch.split(inputs, [3, 3], dim=1)

        # ids for ShuffleBN
        shuffle_ids, reverse_ids = get_shuffle_ids(bsz)

        feat_q = model(x1)
        with torch.no_grad():
            x2 = x2[shuffle_ids]
            feat_k = model_ema(x2)
            feat_k = feat_k[reverse_ids]

        # out = contrast(feat_q.transpose(1,0), feat_k.transpose(1,0))
        out = contrast(feat_q, feat_k)

        loss = criterion(out)
        prob = out[:, 0].mean()

        # ===================backward=====================
        optimizer.zero_grad()
        if opt.amp:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()

        # ===================meters=====================
        loss_meter.update(loss.item(), bsz)
        prob_meter.update(prob.item(), bsz)

        moment_update(model, model_ema, opt.alpha)

        torch.cuda.synchronize()
        batch_time.update(time.time() - end)
        end = time.time()

        # print info
        if (idx + 1) % opt.print_freq == 0:
            print('Train: [{0}][{1}/{2}]\t'
                  'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'loss {loss.val:.3f} ({loss.avg:.3f})\t'
                  'prob {prob.val:.3f} ({prob.avg:.3f})'.format(
                epoch, idx + 1,
                # NOTE(review): source chunk is truncated here mid-call.
# Microsoft Azure Linux Agent
#
# Copyright Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#

import datetime
import glob
import json
import operator
import os
import random
import re
import shutil
import signal
import stat
import sys
import tempfile
import time
import traceback
import zipfile

import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.fileutil as fileutil
import azurelinuxagent.common.version as version

from azurelinuxagent.common.cgroupconfigurator import CGroupConfigurator
from azurelinuxagent.common.errorstate import ErrorState, ERROR_STATE_DELTA_INSTALL
from azurelinuxagent.common.event import add_event, WALAEventOperation, elapsed_milliseconds, report_event
from azurelinuxagent.common.exception import ExtensionError, ProtocolError, ProtocolNotFoundError, \
    ExtensionDownloadError, ExtensionOperationError, ExtensionErrorCodes
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.protocol import get_protocol_util
from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \
    ExtensionStatus, \
    ExtensionSubStatus, \
    VMStatus, ExtHandler, \
    get_properties, \
    set_properties
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
from azurelinuxagent.common.utils.processutil import read_output
from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION, GOAL_STATE_AGENT_VERSION, \
    DISTRO_NAME, DISTRO_VERSION, PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO

# HandlerEnvironment.json schema version
HANDLER_ENVIRONMENT_VERSION = 1.0

EXTENSION_STATUS_ERROR = 'error'
EXTENSION_STATUS_SUCCESS = 'success'
VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning']
EXTENSION_TERMINAL_STATUSES = ['error', 'success']

VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"]

# NOTE(review): pattern is not a raw string, so "\d" and "\." rely on
# Python leaving unknown escapes alone; an r"" prefix would be safer.
HANDLER_PATTERN = "^([^-]+)-(\d+(?:\.\d+)*)"
HANDLER_NAME_PATTERN = re.compile(HANDLER_PATTERN + "$", re.IGNORECASE)
HANDLER_PKG_EXT = ".zip"
HANDLER_PKG_PATTERN = re.compile(HANDLER_PATTERN + r"\.zip$", re.IGNORECASE)

DEFAULT_EXT_TIMEOUT_MINUTES = 90

AGENT_STATUS_FILE = "waagent_status.json"

NUMBER_OF_DOWNLOAD_RETRIES = 5


def get_traceback(e):
    # Return the traceback for exception e in a py2/py3-portable way.
    if sys.version_info[0] == 3:
        return e.__traceback__
    elif sys.version_info[0] == 2:
        ex_type, ex, tb = sys.exc_info()
        return tb


def validate_has_key(obj, key, fullname):
    # Raise ExtensionError if a required key is missing from a status dict.
    if key not in obj:
        raise ExtensionError("Missing: {0}".format(fullname))


def validate_in_range(val, valid_range, name):
    # Raise ExtensionError if val is not one of the allowed values.
    if val not in valid_range:
        raise ExtensionError("Invalid {0}: {1}".format(name, val))


def parse_formatted_message(formatted_message):
    # Extract the 'message' text from a formattedMessage dict; None passes through.
    if formatted_message is None:
        return None
    validate_has_key(formatted_message, 'lang', 'formattedMessage/lang')
    validate_has_key(formatted_message, 'message', 'formattedMessage/message')
    return formatted_message.get('message')


def parse_ext_substatus(substatus):
    # Check extension sub status format
    validate_has_key(substatus, 'status', 'substatus/status')
    validate_in_range(substatus['status'], VALID_EXTENSION_STATUS,
                      'substatus/status')
    status = ExtensionSubStatus()
    status.name = substatus.get('name')
    status.status = substatus.get('status')
    status.code = substatus.get('code', 0)
    formatted_message = substatus.get('formattedMessage')
    status.message = parse_formatted_message(formatted_message)
    return status


def parse_ext_status(ext_status, data):
    # Populate ext_status in place from the extension's reported status list.
    # NOTE(review): "len(data) is None" is always False (len returns an int);
    # this was presumably meant to guard the empty list — confirm and fix.
    if data is None or len(data) is None:
        return
    # Currently, only the first status will be reported
    data = data[0]
    # Check extension status format
    validate_has_key(data, 'status', 'status')
    status_data = data['status']
    validate_has_key(status_data, 'status', 'status/status')

    status = status_data['status']
    if status not in VALID_EXTENSION_STATUS:
        status = EXTENSION_STATUS_ERROR

    applied_time = status_data.get('configurationAppliedTime')
    ext_status.configurationAppliedTime = applied_time
    ext_status.operation = status_data.get('operation')
    ext_status.status = status
    ext_status.code = status_data.get('code', 0)
    formatted_message = status_data.get('formattedMessage')
    ext_status.message = parse_formatted_message(formatted_message)
    substatus_list = status_data.get('substatus', [])
    # some extensions incorrectly report an empty substatus with a null value
    if substatus_list is None:
        substatus_list = []
    for substatus in substatus_list:
        if substatus is not None:
            ext_status.substatusList.append(parse_ext_substatus(substatus))


def migrate_handler_state():
    """
    Migrate handler state and status (if they exist) from an agent-owned directory into the
    handler-owned config directory

    Notes:
     - The v2.0.x branch wrote all handler-related state into the handler-owned config
       directory (e.g., /var/lib/waagent/Microsoft.Azure.Extensions.LinuxAsm-2.0.1/config).
     - The v2.1.x branch original moved that state into an agent-owned handler
       state directory (e.g., /var/lib/waagent/handler_state).
     - This move can cause v2.1.x agents to multiply invoke a handler's install
       command. It also makes clean-up more difficult since the agent must
       remove the state as well as the handler directory.
    """
    handler_state_path = os.path.join(conf.get_lib_dir(), "handler_state")
    if not os.path.isdir(handler_state_path):
        return

    for handler_path in glob.iglob(os.path.join(handler_state_path, "*")):
        handler = os.path.basename(handler_path)
        handler_config_path = os.path.join(conf.get_lib_dir(), handler, "config")
        if os.path.isdir(handler_config_path):
            for file in ("State", "Status"):
                from_path = os.path.join(handler_state_path, handler, file.lower())
                to_path = os.path.join(handler_config_path, "Handler" + file)
                if os.path.isfile(from_path) and not os.path.isfile(to_path):
                    try:
                        shutil.move(from_path, to_path)
                    except Exception as e:
                        # Best-effort: log and continue with the next file.
                        logger.warn(
                            "Exception occurred migrating {0} {1} file: {2}",
                            handler,
                            file,
                            str(e))

    try:
        shutil.rmtree(handler_state_path)
    except Exception as e:
        logger.warn("Exception occurred removing {0}: {1}", handler_state_path, str(e))
    return


class ExtHandlerState(object):
    # Lifecycle states persisted for an extension handler.
    NotInstalled = "NotInstalled"
    Installed = "Installed"
    Enabled = "Enabled"
    Failed = "Failed"


def get_exthandlers_handler():
    return ExtHandlersHandler()


class ExtHandlersHandler(object):
    def __init__(self):
        self.protocol_util = get_protocol_util()
        self.protocol = None
        self.ext_handlers = None
        self.last_etag = None
        self.log_report = False
        self.log_etag = True
        self.log_process = False

        self.report_status_error_state = ErrorState()
        self.get_artifact_error_state = ErrorState(min_timedelta=ERROR_STATE_DELTA_INSTALL)

    def run(self):
        # One processing pass: fetch the goal state, apply handler changes,
        # report status, and clean up outdated handlers.
        self.ext_handlers, etag = None, None
        try:
            self.protocol = self.protocol_util.get_protocol()
            self.ext_handlers, etag = self.protocol.get_ext_handlers()
            self.get_artifact_error_state.reset()
        except Exception as e:
            msg = u"Exception retrieving extension handlers: {0}".format(ustr(e))
            detailed_msg = '{0} {1}'.format(msg, traceback.extract_tb(get_traceback(e)))

            self.get_artifact_error_state.incr()

            if self.get_artifact_error_state.is_triggered():
                add_event(AGENT_NAME,
                          version=CURRENT_VERSION,
                          op=WALAEventOperation.GetArtifactExtended,
                          is_success=False,
                          message="Failed to get extension artifact for over "
                                  "{0}: {1}".format(self.get_artifact_error_state.min_timedelta, msg))
                self.get_artifact_error_state.reset()
            else:
                logger.warn(msg)

            add_event(AGENT_NAME,
                      version=CURRENT_VERSION,
                      op=WALAEventOperation.ExtensionProcessing,
                      is_success=False,
                      message=detailed_msg)
            return

        try:
            msg = u"Handle extensions updates for incarnation {0}".format(etag)
            logger.verbose(msg)
            # Log status report success on new config
            self.log_report = True

            self.handle_ext_handlers(etag)
            self.last_etag = etag

            self.report_ext_handlers_status()
            self.cleanup_outdated_handlers()
        except Exception as e:
            msg = u"Exception processing extension handlers: {0}".format(ustr(e))
            detailed_msg = '{0} {1}'.format(msg, traceback.extract_tb(get_traceback(e)))
            logger.warn(msg)
            add_event(AGENT_NAME,
                      version=CURRENT_VERSION,
                      op=WALAEventOperation.ExtensionProcessing,
                      is_success=False,
                      message=detailed_msg)
            return

    def cleanup_outdated_handlers(self):
        handlers = []
        pkgs = []

        # Build a collection of uninstalled handlers and orphaned packages
        # Note:
        # -- An orphaned package is one without a corresponding handler
        #    directory
        for item in os.listdir(conf.get_lib_dir()):
            path = os.path.join(conf.get_lib_dir(), item)

            if version.is_agent_package(path) or version.is_agent_path(path):
                continue

            if os.path.isdir(path):
                if re.match(HANDLER_NAME_PATTERN, item) is None:
                    continue
                try:
                    eh = ExtHandler()

                    # Directory names look like "<name>-<version>".
                    separator = item.rfind('-')

                    eh.name = item[0:separator]
                    eh.properties.version = str(FlexibleVersion(item[separator + 1:]))

                    handler = ExtHandlerInstance(eh, self.protocol)
                except Exception:
                    continue
                if handler.get_handler_state() != ExtHandlerState.NotInstalled:
                    continue
                handlers.append(handler)

            elif os.path.isfile(path) and \
                    not os.path.isdir(path[0:-len(HANDLER_PKG_EXT)]):
                if not re.match(HANDLER_PKG_PATTERN, item):
                    continue
                pkgs.append(path)

        # Then, remove the orphaned packages
        for pkg in pkgs:
            try:
                os.remove(pkg)
                logger.verbose("Removed orphaned extension package {0}".format(pkg))
            except OSError as e:
                logger.warn("Failed to remove orphaned package {0}: {1}".format(pkg, e.strerror))

        # Finally, remove the directories and packages of the
        # uninstalled handlers
        for handler in handlers:
            handler.remove_ext_handler()
            pkg = os.path.join(conf.get_lib_dir(), handler.get_full_name() + HANDLER_PKG_EXT)
            if os.path.isfile(pkg):
                try:
                    os.remove(pkg)
                    logger.verbose("Removed extension package {0}".format(pkg))
                except OSError as e:
                    logger.warn("Failed to remove extension package {0}: {1}".format(pkg, e.strerror))

    def handle_ext_handlers(self, etag=None):
        # Apply each handler in dependency order, waiting for lower dependency
        # levels to complete before starting higher ones.
        if not conf.get_extensions_enabled():
            logger.verbose("Extension handling is disabled")
            return

        if self.ext_handlers.extHandlers is None or \
                len(self.ext_handlers.extHandlers) == 0:
            logger.verbose("No extension handler config found")
            return

        if conf.get_enable_overprovisioning():
            if not self.protocol.supports_overprovisioning():
                logger.verbose("Overprovisioning is enabled but protocol does not support it.")
            else:
                artifacts_profile = self.protocol.get_artifacts_profile()
                if artifacts_profile and artifacts_profile.is_on_hold():
                    logger.info("Extension handling is on hold")
                    return

        wait_until = datetime.datetime.utcnow() + datetime.timedelta(minutes=DEFAULT_EXT_TIMEOUT_MINUTES)
        max_dep_level = max([handler.sort_key() for handler in self.ext_handlers.extHandlers])

        self.ext_handlers.extHandlers.sort(key=operator.methodcaller('sort_key'))
        for ext_handler in self.ext_handlers.extHandlers:
            self.handle_ext_handler(ext_handler, etag)

            # Wait for the extension installation until it is handled.
            # This is done for the install and enable. Not for the uninstallation.
            # If handled successfully, proceed with the current handler.
            # Otherwise, skip the rest of the extension installation.
            dep_level = ext_handler.sort_key()
            if dep_level >= 0 and dep_level < max_dep_level:
                if not self.wait_for_handler_successful_completion(ext_handler, wait_until):
                    logger.warn("An extension failed or timed out, will skip processing the rest of the extensions")
                    break

    def wait_for_handler_successful_completion(self, ext_handler, wait_until):
        '''
        Check the status of the extension being handled. Wait until it has a terminal state or times out.
        Return True if it is handled successfully. False if not.
        '''
        handler_i = ExtHandlerInstance(ext_handler, self.protocol)
        for ext in ext_handler.properties.extensions:
            ext_completed, status = handler_i.is_ext_handling_complete(ext)

            # Keep polling for the extension status until it becomes success or times out
            while not ext_completed and datetime.datetime.utcnow() <= wait_until:
                time.sleep(5)
                ext_completed, status = handler_i.is_ext_handling_complete(ext)

            # In case of timeout or terminal error state, we log it and return false
            # so that the extensions waiting on this one can be skipped processing
            if datetime.datetime.utcnow() > wait_until:
                msg = "Extension {0} did not reach a terminal state within the allowed timeout. Last status was {1}".format(
                    ext.name, status)
                logger.warn(msg)
                add_event(AGENT_NAME,
                          version=CURRENT_VERSION,
                          op=WALAEventOperation.ExtensionProcessing,
                          is_success=False,
                          message=msg)
                return False

            if status != EXTENSION_STATUS_SUCCESS:
                msg = "Extension {0} did not succeed. Status was {1}".format(ext.name, status)
                logger.warn(msg)
                add_event(AGENT_NAME,
                          version=CURRENT_VERSION,
                          op=WALAEventOperation.ExtensionProcessing,
                          is_success=False,
                          message=msg)
                return False

        return True

    def handle_ext_handler(self, ext_handler, etag):
        # Dispatch a single handler to enable/disable/... based on its goal state.
        ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol)

        try:
            state = ext_handler.properties.state
            if ext_handler_i.decide_version(target_state=state) is None:
                version = ext_handler_i.ext_handler.properties.version
                name = ext_handler_i.ext_handler.name
                err_msg = "Unable to find version {0} in manifest for extension {1}".format(version, name)
                ext_handler_i.set_operation(WALAEventOperation.Download)
                ext_handler_i.set_handler_status(message=ustr(err_msg), code=-1)
                ext_handler_i.report_event(message=ustr(err_msg), is_success=False)
                return

            self.get_artifact_error_state.reset()
            if not ext_handler_i.is_upgrade and self.last_etag == etag:
                if self.log_etag:
                    ext_handler_i.logger.verbose("Version {0} is current for etag {1}",
                                                 ext_handler_i.pkg.version,
                                                 etag)
                    self.log_etag = False
                return

            self.log_etag = True

            ext_handler_i.logger.info("Target handler state: {0}", state)
            if state == u"enabled":
                self.handle_enable(ext_handler_i)
            elif state == u"disabled":
                self.handle_disable(ext_handler_i)
            elif
            # NOTE(review): source chunk is truncated here mid-statement;
            # the remaining branches of this dispatch are outside the
            # visible source.
            # NOTE(review): this chunk resumes mid-way through an asyncio
            # BaseEventLoop.create_connection-style coroutine; the dangling
            # "None:" below is the tail of a truncated "if sock is not None:"
            # guard whose start is outside the visible source.
            None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            f1 = self.getaddrinfo(
                host, port, family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags)
            fs = [f1]
            if local_addr is not None:
                f2 = self.getaddrinfo(
                    *local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto, flags=flags)
                fs.append(f2)
            else:
                f2 = None

            yield from tasks.wait(fs, loop=self)

            infos = f1.result()
            if not infos:
                raise OSError('getaddrinfo() returned empty list')
            if f2 is not None:
                laddr_infos = f2.result()
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')

            # Try each resolved address until one connects; collect failures.
            exceptions = []
            for family, type, proto, cname, address in infos:
                try:
                    sock = socket.socket(family=family, type=type, proto=proto)
                    sock.setblocking(False)
                    if f2 is not None:
                        for _, _, _, _, laddr in laddr_infos:
                            try:
                                sock.bind(laddr)
                                break
                            except OSError as exc:
                                exc = OSError(
                                    exc.errno, 'error while '
                                    'attempting to bind on address '
                                    '{!r}: {}'.format(
                                        laddr, exc.strerror.lower()))
                                exceptions.append(exc)
                        else:
                            # No local address bound; try the next candidate.
                            sock.close()
                            sock = None
                            continue
                    yield from self.sock_connect(sock, address)
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    # Deliberate bare except: close the socket on ANY error
                    # (including cancellation) and re-raise unchanged.
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                if len(exceptions) == 1:
                    raise exceptions[0]
                else:
                    # If they all have the same str(), raise one.
                    model = str(exceptions[0])
                    if all(str(exc) == model for exc in exceptions):
                        raise exceptions[0]
                    # Raise a combined exception so the user can see all
                    # the various error messages.
                    raise OSError('Multiple exceptions: {}'.format(
                        ', '.join(str(exc) for exc in exceptions)))
        elif sock is None:
            raise ValueError(
                'host and port was not specified and no sock specified')

        sock.setblocking(False)

        transport, protocol = yield from self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname)
        return transport, protocol

    @tasks.coroutine
    def _create_connection_transport(self, sock, protocol_factory, ssl,
                                     server_hostname):
        # Wrap an already-connected socket in a (possibly SSL) transport and
        # wait for the protocol's connection_made to complete.
        protocol = protocol_factory()
        waiter = futures.Future(loop=self)
        if ssl:
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=False, server_hostname=server_hostname)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)

        yield from waiter
        return transport, protocol

    @tasks.coroutine
    def create_datagram_endpoint(self, protocol_factory,
                                 local_addr=None, remote_addr=None, *,
                                 family=0, proto=0, flags=0):
        """Create datagram connection."""
        if not (local_addr or remote_addr):
            if family == 0:
                raise ValueError('unexpected address family')
            addr_pairs_info = (((family, proto), (None, None)),)
        else:
            # join addresss by (family, protocol)
            addr_infos = collections.OrderedDict()
            for idx, addr in ((0, local_addr), (1, remote_addr)):
                if addr is not None:
                    assert isinstance(addr, tuple) and len(addr) == 2, (
                        '2-tuple is expected')

                    infos = yield from self.getaddrinfo(
                        *addr, family=family, type=socket.SOCK_DGRAM,
                        proto=proto, flags=flags)
                    if not infos:
                        raise OSError('getaddrinfo() returned empty list')

                    for fam, _, pro, _, address in infos:
                        key = (fam, pro)
                        if key not in addr_infos:
                            addr_infos[key] = [None, None]
                        addr_infos[key][idx] = address

            # each addr has to have info for each (family, proto) pair
            addr_pairs_info = [
                (key, addr_pair) for key, addr_pair in addr_infos.items()
                if not ((local_addr and addr_pair[0] is None) or
                        (remote_addr and addr_pair[1] is None))]

            if not addr_pairs_info:
                raise ValueError('can not get address information')

        exceptions = []

        for ((family, proto),
             (local_address, remote_address)) in addr_pairs_info:
            sock = None
            r_addr = None
            try:
                sock = socket.socket(
                    family=family, type=socket.SOCK_DGRAM, proto=proto)
                sock.setsockopt(
                    socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                sock.setblocking(False)

                if local_addr:
                    sock.bind(local_address)
                if remote_addr:
                    yield from self.sock_connect(sock, remote_address)
                    r_addr = remote_address
            except OSError as exc:
                if sock is not None:
                    sock.close()
                exceptions.append(exc)
            except:
                # Deliberate bare except: close the socket then re-raise.
                if sock is not None:
                    sock.close()
                raise
            else:
                break
        else:
            raise exceptions[0]

        protocol = protocol_factory()
        transport = self._make_datagram_transport(sock, protocol, r_addr)
        return transport, protocol

    @tasks.coroutine
    def create_server(self, protocol_factory, host=None, port=None,
                      *,
                      family=socket.AF_UNSPEC,
                      flags=socket.AI_PASSIVE,
                      sock=None,
                      backlog=100,
                      ssl=None,
                      reuse_address=None):
        """XXX"""
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')
        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            AF_INET6 = getattr(socket, 'AF_INET6', 0)
            if reuse_address is None:
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                host = None

            infos = yield from self.getaddrinfo(
                host, port, family=family,
                type=socket.SOCK_STREAM, proto=0, flags=flags)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')

            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
                                        True)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower()))
                completed = True
            finally:
                # Close every socket we opened if any bind failed.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            sockets = [sock]

        server = Server(self, sockets)
        for sock in sockets:
            sock.listen(backlog)
            sock.setblocking(False)
            self._start_serving(protocol_factory, sock, ssl, server)
        return server

    @tasks.coroutine
    def connect_read_pipe(self, protocol_factory, pipe):
        protocol = protocol_factory()
        waiter = futures.Future(loop=self)
        transport = self._make_read_pipe_transport(pipe, protocol, waiter)
        yield from waiter
        return transport, protocol

    @tasks.coroutine
    def connect_write_pipe(self, protocol_factory, pipe):
        protocol = protocol_factory()
        waiter = futures.Future(loop=self)
        transport = self._make_write_pipe_transport(pipe, protocol, waiter)
        yield from waiter
        return transport, protocol

    @tasks.coroutine
    def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         universal_newlines=False, shell=True, bufsize=0,
                         **kwargs):
        # The universal_newlines/shell/bufsize parameters exist only for
        # subprocess.Popen signature compatibility; only defaults are allowed.
        if not isinstance(cmd, (bytes, str)):
            raise ValueError("cmd must be a string")
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if not shell:
            raise ValueError("shell must be True")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        protocol = protocol_factory()
        transport = yield from self._make_subprocess_transport(
            protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
        return transport, protocol

    @tasks.coroutine
    def subprocess_exec(self, protocol_factory, program, *args,
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE, universal_newlines=False,
                        shell=False, bufsize=0, **kwargs):
        if universal_newlines:
            raise ValueError("universal_newlines must be False")
        if shell:
            raise ValueError("shell must be False")
        if bufsize != 0:
            raise ValueError("bufsize must be 0")
        popen_args = (program,) + args
        for arg in popen_args:
            if not isinstance(arg, (str, bytes)):
                raise TypeError("program arguments must be "
                                "a bytes or text string, not %s"
                                % type(arg).__name__)
        protocol = protocol_factory()
        transport = yield from self._make_subprocess_transport(
            protocol, popen_args, False, stdin, stdout, stderr,
            bufsize, **kwargs)
        return transport, protocol

    def set_exception_handler(self, handler):
        """Set handler as the new event loop exception handler.

        If handler is None, the default exception handler will
        be set.

        If handler is a callable object, it should have a
        matching signature to '(loop, context)', where 'loop'
        will be a reference to the active event loop, 'context'
        will be a dict object (see `call_exception_handler()`
        documentation for details about context).
        """
        if handler is not None and not callable(handler):
            raise TypeError('A callable object or None is expected, '
                            'got {!r}'.format(handler))
        self._exception_handler = handler

    def default_exception_handler(self, context):
        """Default exception handler.

        This is called when an exception occurs and no exception
        handler is set, and can be called by a custom exception
        handler that wants to defer to the default behavior.

        context parameter has the same meaning as in
        `call_exception_handler()`.
        """
        message = context.get('message')
        if not message:
            message = 'Unhandled exception in event loop'

        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = False

        log_lines = [message]
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            log_lines.append('{}: {!r}'.format(key, context[key]))

        logger.error('\n'.join(log_lines), exc_info=exc_info)

    def call_exception_handler(self, context):
        """Call the current event loop exception handler.

        context is a dict object containing the following keys
        (new keys maybe introduced later):
        - 'message': Error message;
        - 'exception' (optional): Exception object;
        - 'future' (optional): Future instance;
        - 'handle' (optional): Handle instance;
        - 'protocol' (optional): Protocol instance;
        - 'transport' (optional): Transport instance;
        - 'socket' (optional): Socket instance.

        Note: this method should not be overloaded in subclassed
        event loops.  For any custom exception handling, use
        `set_exception_handler()` method.
        """
        if self._exception_handler is None:
            try:
                self.default_exception_handler(context)
            except Exception:
                # Second protection layer for unexpected errors
                # in the default implementation, as well as for subclassed
                # event loops with overloaded "default_exception_handler".
                logger.error('Exception in default exception handler',
                             exc_info=True)
        else:
            try:
                self._exception_handler(self, context)
            except Exception as exc:
                # Exception in the user set custom exception handler.
                try:
                    # Let's try default handler.
                    self.default_exception_handler({
                        'message': 'Unhandled error in exception handler',
                        'exception': exc,
                        # NOTE(review): source chunk is truncated here
                        # mid-dict-literal.
# NOTE(review): this chunk resumes inside the docstring of a getGraphList()
# helper whose `def` line is outside the visible source; the fragment below
# is that docstring's tail plus the remainder of the function body.
them as a list"""
    if not l:
        outer = True
    else:
        outer = False
    l.append(gg)
    if gg.subgraph_list:
        for g in gg.subgraph_list:
            getGraphList(g,l)
    if outer:
        return l


class EndOfGraphElement:
    # Sentinel appended to the element list to mark the end of a subgraph.
    def __init__(self):
        pass


def getAllGraphElements(graph, l=[]):
    """Return all nodes and edges, including elements in subgraphs"""
    # NOTE(review): mutable default l=[] is shared across calls — the
    # accumulated list persists between top-level invocations; confirm
    # callers always pass a fresh list or fix with l=None.
    if not l:
        outer = True
        l.append(graph)
    else:
        outer = False
    for element in graph.sorted_graph_elements:
        if isinstance(element, pydot.Node):
            l.append(element)
        elif isinstance(element,pydot.Edge):
            l.append(element)
        elif isinstance(element, pydot.Graph):
            l.append(element)
            # Recurse into the subgraph; its elements follow it in the list.
            getAllGraphElements(element,l)
        else:
            log.warning('Unknown graph element')
    if outer:
        return l
    else:
        l.append(EndOfGraphElement())


class DotConvBase:
    """Dot2TeX converter base"""
    def __init__(self, options = {}):
        # NOTE(review): mutable default options={} — safe only if never
        # mutated; confirm.
        self.color = ""
        self.template = options.get('template','')
        self.textencoding = options.get('encoding',DEFAULT_TEXTENCODING)
        self.templatevars = {}
        self.body = ""
        if options.get('templatefile',''):
            self.loadTemplate(options['templatefile'])
        self.options = options
        if options.get('texpreproc',False) or options.get('autosize',False):
            self.dopreproc = True
        else:
            self.dopreproc = False

    def loadTemplate(self, templatefile):
        # Best-effort read; silently keeps the previous template on failure.
        try:
            self.template = open(templatefile).read()
        except:
            pass

    def convertFile(self, filename):
        """Load dot file and convert"""
        pass

    # The methods below are backend hooks overridden by concrete converters;
    # the base implementations emit nothing.
    def startFig(self):
        return ""

    def endFig(self):
        return ""

    def drawEllipse(self, drawop, style = None):
        return ""

    def drawBezier(self, drawop, style = None):
        return ""

    def drawPolygon(self, drawop, style = None):
        return ""

    def drawPolyLine(self, drawop, style = None):
        return ""

    def drawText(self, drawop, style = None):
        return ""

    def outputNodeComment(self, node):
        return "  %% Node: %s\n" % node.name

    def outputEdgeComment(self, edge):
        src = edge.get_source()
        dst = edge.get_destination()
        if self.directedgraph:
            edge = '->'
        else:
            edge = '--'
        return "  %% Edge: %s %s %s\n" % (src, edge, dst)

    def setColor(self, node):
        return ""

    def setStyle(self, node):
        return ""

    def drawEdge(self, edge):
        return ""

    def startNode(self, node):
        return ""

    def endNode(self,node):
        return ""

    def startGraph(self, graph):
        return ""

    def endGraph(self,graph):
        return ""

    def startEdge(self):
        return ""

    def endEdge(self):
        return ""

    def filterStyles(self, style):
        return style

    def convertColor(self, drawopcolor,pgf=False):
        """Convert color to a format usable by LaTeX and XColor"""
        # Graphviz uses the following color formats:
        #   "#%2x%2x%2x"    Red-Green-Blue (RGB)
        #   "#%2x%2x%2x%2x" Red-Green-Blue-Alpha (RGBA)
        #   H[, ]+S[, ]+V   Hue-Saturation-Value (HSV) 0.0 <= H,S,V <= 1.0
        #   string          color name

        # Is the format RBG(A)?
        if drawopcolor.startswith('#'):
            t = list(chunks(drawopcolor[1:],2))
            # parallell lines not yet supported
            if len(t) > 6:
                t = t[0:3]
            rgb = [(round((int(n,16)/255.0),2)) for n in t]
            if pgf:
                colstr = "{rgb}{%s,%s,%s}" % tuple(rgb[0:3])
                opacity = "1"
                if len(rgb)==4:
                    opacity = rgb[3]
                return (colstr, opacity)
            else:
                return "[rgb]{%s,%s,%s}" % tuple(rgb[0:3])
        elif (len(drawopcolor.split(' '))==3) or (len(drawopcolor.split(','))==3):
            # are the values space or comma separated?
            hsb = drawopcolor.split(',')
            if not len(hsb) == 3:
                hsb = drawopcolor.split(' ')
            if pgf:
                return "{hsb}{%s,%s,%s}" % tuple(hsb)
            else:
                return "[hsb]{%s,%s,%s}" % tuple(hsb)
        else:
            # Named color: normalize spelling for xcolor compatibility.
            drawopcolor = drawopcolor.replace('grey','gray')
            drawopcolor = drawopcolor.replace('_','')
            drawopcolor = drawopcolor.replace(' ','')
            return drawopcolor

    def doDrawString(self, drawstring, drawobj):
        """Parse and draw drawsting

        Just a wrapper around doDrawOp.
        """
        drawoperations,stat = parseDrawString(drawstring)
        return self.doDrawOp(drawoperations, drawobj,stat)

    def doDrawOp(self, drawoperations, drawobj,stat):
        """Excecute the operations in drawoperations"""
        s = ""
        for drawop in drawoperations:
            op = drawop[0]
            style = getattr(drawobj, 'style',None)
            # styles are not passed to the draw operations in the
            # duplicate mode
            if style and not self.options.get('duplicate', False):
                # map Graphviz styles to backend styles
                style = self.filterStyles(style)
                styles = [self.styles.get(key.strip(),key.strip()) \
                          for key in style.split(',') if key]
                style = ','.join(styles)
            else:
                style = None

            if op in ['e','E']:
                s += self.drawEllipse(drawop, style)
            elif op in ['p','P']:
                s += self.drawPolygon(drawop, style)
            elif op == 'L':
                s += self.drawPolyLine(drawop, style)
            elif op in ['C','c']:
                s += self.setColor(drawop)
            elif op == 'S':
                s += self.setStyle(drawop)
            elif op in ['B']:
                s += self.drawBezier(drawop, style)
            elif op in ['T']:
                # Need to decide what to do with the text
                # Note that graphviz removes the \ character from the draw
                # string. Use \\ instead
                # Todo: Use text from node|edge.label or name
                # Todo: What about multiline labels?
                text = drawop[5]
##                label = getattr(drawobj,'label','\N')
##                multiline = False
##                if label:
##                    if label.find(r'\n') >= 0:
##                        multiline = True
##                    #print label
##                else:
##                    label = "\N"
##                if not multiline and label <> '\N':
##                    text = drawobj.label
                texmode = self.options.get('texmode','verbatim')
                if getattr(drawobj,'texmode', ''):
                    texmode = drawobj.texmode
                if getattr(drawobj,'texlbl', ''):
                    # the texlbl overrides everything
                    text = drawobj.texlbl
                elif texmode == 'verbatim':
                    # verbatim mode
                    text = escapeTeXChars(text)
                    pass
                elif texmode == 'math':
                    # math mode
                    text = "$%s$" % text
                drawop[5] = text
                if self.options.get('alignstr',''):
                    drawop.append(self.options.get('alignstr'))
                if stat['T'] == 1 and \
                        self.options.get('valignmode','center')=='center':
                    # do this for single line only
                    # Todo: Make this optional
                    pos = getattr(drawobj,'lp',None) or \
                          getattr(drawobj,'pos',None)
                    if pos:
                        coord = pos.split(',')
                        if len(coord)==2:
                            drawop[1] = coord[0]
                            drawop[2] = coord[1]
                        pass
                lblstyle = getattr(drawobj,'lblstyle',None)
                exstyle = getattr(drawobj,'exstyle','')
                if exstyle:
                    if lblstyle:
                        lblstyle += ',' +exstyle
                    else:
                        lblstyle = exstyle
                s += self.drawText(drawop,lblstyle)
        return s

    def doNodes(self):
        s = ""
        for node in self.nodes:
            self.currentnode = node
            dstring = getattr(node,'_draw_',"")
            lstring = getattr(node,'_ldraw_',"")
            drawstring = dstring+" "+lstring

            if not drawstring.strip():
                continue
            # detect node type
            shape = node.shape
            if not shape:
                shape = 'ellipse' # default

            # extract size information
            x,y = node.pos.split(',')

            # width and height are in inches.
            # NOTE(review): source chunk is truncated here; the rest of
            # doNodes is outside the visible source.
Convert to bp units
            # NOTE(review): w and h are computed but not used in the visible
            # code of this loop — confirm before removing.
            w = float(node.width)*in2bp
            h = float(node.height)*in2bp
            s += self.outputNodeComment(node)
            s += self.startNode(node)
            #drawoperations = parseDrawString(drawstring)
            s += self.doDrawString(drawstring, node)
            s += self.endNode(node)
        self.body += s

    def getEdgePoints(self, edge):
        # Split the edge's xdot 'pos' attribute into control points and
        # derive the arrow style from the optional 's,x,y' (start/tail) and
        # 'e,x,y' (end/head) markers.
        points = edge.pos.split(' ')
        # check direction
        arrowstyle = '--'
        i = 0
        if points[i].startswith('s'):
            # Replace the 's' marker with its bare "x,y" coordinates.
            p = points[0].split(',')
            tmp = "%s,%s" % (p[1], p[2])
            if points[1].startswith('e'):
                points[2] = tmp
            else:
                points[1] = tmp
            del points[0]
            arrowstyle = '<-'
            i += 1
        if points[0].startswith('e'):
            # The 'e' marker comes first in the pos string; move its
            # coordinates to the end of the point list.
            p = points[0].split(',')
            points.pop()
            points.append("%s,%s" % (p[1], p[2]))
            del points[0]
            arrowstyle = '->'
            i += 1
        if i > 1:
            # Both markers present: double-headed arrow.
            arrowstyle = '<->'
        return arrowstyle, points

    def doEdges(self):
        # Render all edges into the output body, defaulting to black.
        s = ""
        s += self.setColor(('cC', "black"))
        for edge in self.edges:
            # Gather every xdot draw attribute attached to the edge
            # (body, head/tail arrows, and their labels).
            dstring = getattr(edge, '_draw_', "")
            lstring = getattr(edge, '_ldraw_', "")
            hstring = getattr(edge, '_hdraw_', "")
            tstring = getattr(edge, '_tdraw_', "")
            tlstring = getattr(edge, '_tldraw_', "")
            hlstring = getattr(edge, '_hldraw_', "")
            # Note that the order of the draw strings should be the same
            # as in the xdot output.
drawstring = dstring + " " + hstring + " " + tstring \ + " " + lstring + " " + tlstring + " " + hlstring drawop,stat = parseDrawString(drawstring); if not drawstring.strip(): continue s += self.outputEdgeComment(edge) if self.options.get('duplicate', False): s += self.startEdge() s += self.doDrawOp(drawop, edge,stat) s += self.endEdge() else: s += self.drawEdge(edge) s += self.doDrawString(lstring+" "+tlstring+" "+hlstring, edge) self.body += s def doGraph(self): dstring = getattr(self.graph,'_draw_',"") lstring = getattr(self.graph,'_ldraw_',"") # print lstring # Avoid filling background of graphs with white if dstring.startswith('c 5 -white C 5 -white') \ and not getattr(self.graph,'style'): dstring = '' if getattr(self.graph,'_draw_',None): # bug dstring = "c 5 -black " + dstring #self.graph._draw_ pass drawstring = dstring+" "+lstring if drawstring.strip(): s = self.startGraph(self.graph) g = self.doDrawString(drawstring, self.graph) e = self.endGraph(self.graph) if g.strip(): self.body += s +g + e def setOptions(self): # process options # Warning! If graph attribute is true and command line option is false, # the graph attribute will be used. Command line option should have # precedence. self.options['alignstr'] = self.options.get('alignstr','') \ or getattr(self.maingraph,'d2talignstr','') # Todo: bad! self.options['valignmode'] = getattr(self.maingraph,'d2tvalignmode','')\ or self.options.get('valignmode','center') def convert(self, dotdata): # parse data processed by dot. log.debug('Start conversion') try: try: maingraph = parseDotData(dotdata) except: log.info('Failed first attempt to parse graph') if not self.dopreproc: log.info('Could not parse input dotdata directly. 
' 'Trying to create xdot data.') try: tmpdata = createXdot(dotdata,self.options.get('prog','dot')) log.debug('xdotdata:\n'+tmpdata) maingraph = parseDotData(tmpdata) except: raise if not self.dopreproc and not hasattr(maingraph,'xdotversion'): # Older versions of Graphviz does not include the xdotversion # attribute if not (dotdata.find('_draw_') > 0 or dotdata.find('_ldraw_') > 0): # need to convert to xdot format # Warning. Pydot will not include custom attributes log.debug('Trying to create xdotdata') tmpdata = createXdot(dotdata,self.options.get('prog','dot')) log.debug('xdotdata:\n'+tmpdata) if tmpdata == None or not tmpdata.strip(): log.error('Failed to create xdotdata. Is Graphviz installed?') sys.exit(1) maingraph = parseDotData(tmpdata) else: # old version pass self.maingraph = maingraph self.pencolor
Brain instance containing the plot. """ from .._wxgui import get_app get_app(jumpstart=True) from ._brain_object import Brain, get_source_dim if isinstance(src, SourceSpace): if cmap is not None or vmin is not None or vmax is not None: raise TypeError("When plotting SourceSpace, cmap, vmin and vmax " "can not be specified (got %s)" % ', '.join((cmap, vmin, vmax))) ndvar = None source = src subject = source.subject elif isinstance(src, str): subject = src subjects_dir = mne.utils.get_subjects_dir(subjects_dir, True) ndvar = None source = None mask = False if hemi is None: hemi = 'split' else: ndvar = brain_data(src) if ndvar.has_case: ndvar = ndvar.summary() source = get_source_dim(ndvar) subject = source.subject # check that ndvar has the right dimensions if ndvar.ndim == 2 and not ndvar.has_dim('time') or ndvar.ndim > 2: raise ValueError("NDVar should have dimesions source and " "optionally time, got %r" % (ndvar,)) if title is None and name is None and ndvar.name is not None: title = "%s - %s" % (source.subject, ndvar.name) if hemi is None: if source.lh_n and source.rh_n: hemi = 'split' elif source.lh_n: hemi = 'lh' elif not source.rh_n: raise ValueError('No data') else: hemi = 'rh' elif source is None: pass elif (hemi == 'lh' and source.rh_n) or (hemi == 'rh' and source.lh_n): if ndvar is None: source = source[source._array_index(hemi)] else: ndvar = ndvar.sub(**{source.name: hemi}) source = ndvar.get_dim(source.name) if subjects_dir is None: subjects_dir = source.subjects_dir brain = Brain(subject, hemi, surf, title, cortex, views=views, w=w, h=h, axw=axw, axh=axh, foreground=foreground, background=background, subjects_dir=subjects_dir, name=name, pos=pos, source_space=source) if ndvar is not None: if ndvar.x.dtype.kind in 'ui': brain.add_ndvar_annotation(ndvar, cmap, False) else: brain.add_ndvar(ndvar, cmap, vmin, vmax, smoothing_steps, colorbar, time_label) if mask is not False: color = (0, 0, 0, 0.5) if mask is True else mask brain.add_mask(source, color, 
smoothing_steps, None, subjects_dir) if parallel: brain.set_parallel_view(scale=True) return brain @deprecated("0.25", brain) def surfer_brain(*args, **kwargs): pass def _voxel_brain(data, lut, vmin, vmax): """Plot spheres for volume source space Parameters ---------- data : NDVar Data to plot. lut : array Color LUT. vmin, vmax : scalar Data range. """ if data.dimnames != ('source',): raise ValueError("Can only plot 1 dimensional source space NDVars") from mayavi import mlab x, y, z = data.source.coordinates.T figure = mlab.figure() mlab.points3d(x, y, z, scale_factor=0.002, opacity=0.5) pts = mlab.points3d(x, y, z, data.x, vmin=vmin, vmax=vmax) pts.module_manager.scalar_lut_manager.lut.table = lut return figure ################################################################################ # Bin-Tables ############ # Top-level functions for fmtxt image tables and classes for Eelfigures. # - _x_bin_table_ims() wrap 'x' brain plot function # - _bin_table_ims() creates ims given a brain plot function class ImageTable(ColorBarMixin, EelFigure): # Initialize in two steps # # 1) Initialize class to generate layout # 2) Use ._res_h and ._res_w to generate images # 3) Finalize by calling ._add_ims() # def __init__(self, n_rows, n_columns, title=None, margins=None, *args, **kwargs): layout = ImLayout(n_rows * n_columns, 4/3, 2, margins, {'bottom': 0.5}, title, *args, nrow=n_rows, ncol=n_columns, autoscale=True, **kwargs) EelFigure.__init__(self, None, layout) self._n_rows = n_rows self._n_columns = n_columns self._res_w = int(round(layout.axw * layout.dpi)) self._res_h = int(round(layout.axh * layout.dpi)) def _add_ims(self, ims, column_header, cmap_params, cmap_data): for row, column in product(range(self._n_rows), range(self._n_columns)): ax = self._axes[row * self._n_columns + column] ax.imshow(ims[row][column]) # column header (time labels) if column_header: y = 0.25 / self._layout.h for i, label in enumerate(column_header): x = (0.5 + i) / self._layout.ncol 
self.figure.text(x, y, label, va='center', ha='center') ColorBarMixin.__init__(self, lambda: cmap_params, cmap_data) self._show() def _fill_toolbar(self, tb): ColorBarMixin._fill_toolbar(self, tb) def add_row_titles(self, titles, x=0.1, y=0, **kwargs): """Add a title for each row of images Parameters ---------- titles : sequence of str Titles, from top to bottom. x : scalar Horizontal distance from left of the figure. y : scalar Vertical distance from the top of the axes. ... Matplotlib text parameters. """ if len(titles) > self._n_rows: raise ValueError(f"titles={titles}: {len(titles)} titles for {self._n_rows} rows") y_top = self._layout.margins['top'] - y y_offset = self._layout.margins['hspace'] + self._layout.axh x_ = x / self._layout.w for i, label in enumerate(titles): y_ = 1 - (y_top + i * y_offset) / self._layout.h self.figure.text(x_, y_, label, **kwargs) self.draw() class _BinTable(EelFigure, ColorBarMixin): """Super-class""" def __init__(self, ndvar, tstart, tstop, tstep, im_func, surf, views, hemi, summary, title, foreground=None, background=None, parallel=True, smoothing_steps=None, mask=True, margins=None, *args, **kwargs): if isinstance(views, str): views = (views,) data = ndvar.bin(tstep, tstart, tstop, summary) n_columns = len(data.time) n_hemis = (data.source.lh_n > 0) + (data.source.rh_n > 0) n_rows = len(views) * n_hemis layout = ImLayout(n_rows * n_columns, 4/3, 2, margins, {'bottom': 0.5}, title, *args, nrow=n_rows, ncol=n_columns, **kwargs) EelFigure.__init__(self, None, layout) res_w = int(layout.axw * layout.dpi) res_h = int(layout.axh * layout.dpi) ims, header, cmap_params = im_func(data, surf, views, hemi, axw=res_w, axh=res_h, foreground=foreground, background=background, parallel=parallel, smoothing_steps=smoothing_steps, mask=mask) for row in range(n_rows): for column in range(n_columns): ax = self._axes[row * n_columns + column] ax.imshow(ims[row][column]) # time labels y = 0.25 / layout.h for i, label in enumerate(header): x = (0.5 
+ i) / layout.ncol self.figure.text(x, y, label, va='center', ha='center') ColorBarMixin.__init__(self, lambda: cmap_params, data) self._show() def _fill_toolbar(self, tb): ColorBarMixin._fill_toolbar(self, tb) class BinTable(_BinTable): """DSPM plot bin-table""" def __init__(self, ndvar, tstart=None, tstop=None, tstep=0.1, fmin=13, fmax=22, fmid=None, surf='smoothwm', views=('lat', 'med'), hemi=None, summary='sum', title=None, *args, **kwargs): im_func = partial(_dspm_bin_table_ims, fmin, fmax, fmid) _BinTable.__init__(self, ndvar, tstart, tstop, tstep, im_func, surf, views, hemi, summary, title, *args, **kwargs) class ClusterBinTable(_BinTable): """Data plotted on brain for different time bins and views Parameters ---------- ndvar : NDVar (time x source) Data to be plotted. tstart : None | scalar Time point of the start of the first bin (inclusive; None to use the first time point in ndvar). tstop : None | scalar End of the last bin (exclusive; None to end with the last time point in ndvar). tstep : scalar Size of each bin (in seconds). surf : 'inflated' | 'pial' | 'smoothwm' | 'sphere' | 'white' Freesurfer surface to use as brain geometry. views : list of str Views to display (for each hemisphere, lh first). Options are: 'rostral', 'parietal', 'frontal', 'ventral', 'lateral', 'caudal', 'medial', 'dorsal'. hemi : 'lh' | 'rh' | 'both' Which hemispheres to plot (default based on data). summary : str How to summarize data in each time bin. Can be the name of a numpy function that takes an axis parameter (e.g., 'sum', 'mean', 'max') or 'extrema' which selects the value with the maximum absolute value. Default is sum. vmax : scalar != 0 Maximum value in the colormap. Default is the maximum value in the cluster. title : str Figure title. 
""" def __init__(self, ndvar, tstart=None, tstop=None, tstep=0.1, surf='smoothwm', views=('lat', 'med'), hemi=None, summary='sum', vmax=None, title=None, *args, **kwargs): im_func = partial(_cluster_bin_table_ims, vmax) _BinTable.__init__(self, ndvar, tstart, tstop, tstep, im_func, surf, views, hemi, summary, title, *args, **kwargs) def dspm_bin_table(ndvar, fmin=2, fmax=8, fmid=None, tstart=None, tstop=None, tstep=0.1, surf='smoothwm', views=('lat', 'med'), hemi=None, summary='extrema', axw=300, axh=250, *args, **kwargs): """Create a table with images for time bins Parameters ---------- ndvar : NDVar (time x source) Data to be plotted. fmin, fmax : scalar >= 0 Start- and end-point for the color gradient for positive values. The gradient for negative values goes from -fmin to -fmax. Values between -fmin and fmin are transparent. fmid : None | scalar Midpoint for the color gradient. If fmid is None (default) it is set half way between fmin and fmax. tstart : None | scalar Time point of the start of the first bin (inclusive; None to use the first time point in ndvar). tstop : None | scalar End of the last bin (exclusive; None to end with the last time point in ndvar). tstep : scalar Size of each bin (in seconds). surf : 'inflated' | 'pial' | 'smoothwm' | 'sphere' | 'white' Freesurfer surface to use as brain geometry. views : list of str Views to display (for each hemisphere, lh first). Options are: 'rostral', 'parietal', 'frontal', 'ventral', 'lateral', 'caudal', 'medial', 'dorsal'. hemi : 'lh' | 'rh' | 'both' Which hemispheres to plot (default based on data). summary : str How to summarize data in each time bin. Can be the name of a numpy function that takes an axis parameter (e.g., 'sum', 'mean', 'max') or 'extrema' which selects the value with the
<filename>cloudsearch2/layer1.py # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # import boto from boto.compat import json from boto.connection import AWSQueryConnection from boto.regioninfo import RegionInfo from boto.exception import JSONResponseError from boto.cloudsearch2 import exceptions class CloudSearchConnection(AWSQueryConnection): """ Amazon CloudSearch Configuration Service You use the Amazon CloudSearch configuration service to create, configure, and manage search domains. Configuration service requests are submitted using the AWS Query protocol. AWS Query requests are HTTP or HTTPS requests submitted via HTTP GET or POST with a query parameter named Action. The endpoint for configuration service requests is region- specific: cloudsearch. region .amazonaws.com. For example, cloudsearch.us-east-1.amazonaws.com. For a current list of supported regions and endpoints, see `Regions and Endpoints`_. 
""" APIVersion = "2013-01-01" DefaultRegionName = "us-east-1" DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com" ResponseError = JSONResponseError _faults = { "InvalidTypeException": exceptions.InvalidTypeException, "LimitExceededException": exceptions.LimitExceededException, "InternalException": exceptions.InternalException, "DisabledOperationException": exceptions.DisabledOperationException, "ResourceNotFoundException": exceptions.ResourceNotFoundException, "BaseException": exceptions.BaseException, } def __init__(self, **kwargs): region = kwargs.pop('region', None) if not region: region = RegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) if 'host' not in kwargs or kwargs['host'] is None: kwargs['host'] = region.endpoint sign_request = kwargs.pop('sign_request', False) self.sign_request = sign_request super(CloudSearchConnection, self).__init__(**kwargs) self.region = region def _required_auth_capability(self): return ['hmac-v4'] def build_suggesters(self, domain_name): """ Indexes the search suggestions. :type domain_name: string :param domain_name: A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). """ params = {'DomainName': domain_name, } return self._make_request( action='BuildSuggesters', verb='POST', path='/', params=params) def create_domain(self, domain_name): """ Creates a new search domain. For more information, see `Creating a Search Domain`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string :param domain_name: A name for the domain you are creating. Allowed characters are a-z (lower-case letters), 0-9, and hyphen (-). Domain names must start with a letter or number and be at least 3 and no more than 28 characters long. 
""" params = {'DomainName': domain_name, } return self._make_request( action='CreateDomain', verb='POST', path='/', params=params) def define_analysis_scheme(self, domain_name, analysis_scheme): """ Configures an analysis scheme that can be applied to a `text` or `text-array` field to define language-specific text processing options. For more information, see `Configuring Analysis Schemes`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string :param domain_name: A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). :type analysis_scheme: dict :param analysis_scheme: Configuration information for an analysis scheme. Each analysis scheme has a unique name and specifies the language of the text to be processed. The following options can be configured for an analysis scheme: `Synonyms`, `Stopwords`, `StemmingDictionary`, and `AlgorithmicStemming`. """ params = {'DomainName': domain_name, } self.build_complex_param(params, 'AnalysisScheme', analysis_scheme) return self._make_request( action='DefineAnalysisScheme', verb='POST', path='/', params=params) def define_expression(self, domain_name, expression): """ Configures an `Expression` for the search domain. Used to create new expressions and modify existing ones. If the expression exists, the new configuration replaces the old one. For more information, see `Configuring Expressions`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string :param domain_name: A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). 
:type expression: dict :param expression: A named expression that can be evaluated at search time. Can be used to sort the search results, define other expressions, or return computed information in the search results. """ params = {'DomainName': domain_name, } self.build_complex_param(params, 'Expression', expression) return self._make_request( action='DefineExpression', verb='POST', path='/', params=params) def define_index_field(self, domain_name, index_field): """ Configures an `IndexField` for the search domain. Used to create new fields and modify existing ones. You must specify the name of the domain you are configuring and an index field configuration. The index field configuration specifies a unique name, the index field type, and the options you want to configure for the field. The options you can specify depend on the `IndexFieldType`. If the field exists, the new configuration replaces the old one. For more information, see `Configuring Index Fields`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string :param domain_name: A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). :type index_field: dict :param index_field: The index field and field options you want to configure. """ params = {'DomainName': domain_name, } self.build_complex_param(params, 'IndexField', index_field) return self._make_request( action='DefineIndexField', verb='POST', path='/', params=params) def define_suggester(self, domain_name, suggester): """ Configures a suggester for a domain. A suggester enables you to display possible matches before users finish typing their queries. When you configure a suggester, you must specify the name of the text field you want to search for possible matches and a unique name for the suggester. 
For more information, see `Getting Search Suggestions`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string :param domain_name: A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). :type suggester: dict :param suggester: Configuration information for a search suggester. Each suggester has a unique name and specifies the text field you want to use for suggestions. The following options can be configured for a suggester: `FuzzyMatching`, `SortExpression`. """ params = {'DomainName': domain_name, } self.build_complex_param(params, 'Suggester', suggester) return self._make_request( action='DefineSuggester', verb='POST', path='/', params=params) def delete_analysis_scheme(self, domain_name, analysis_scheme_name): """ Deletes an analysis scheme. For more information, see `Configuring Analysis Schemes`_ in the Amazon CloudSearch Developer Guide . :type domain_name: string :param domain_name: A string that represents the name of a domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen). :type analysis_scheme_name: string :param analysis_scheme_name: The name of the analysis scheme you want to delete. """ params = { 'DomainName': domain_name, 'AnalysisSchemeName': analysis_scheme_name, } return self._make_request( action='DeleteAnalysisScheme', verb='POST', path='/', params=params) def delete_domain(self, domain_name): """ Permanently deletes a search domain and all of its data. Once a domain has been deleted, it cannot be recovered. For more information, see `Deleting a Search Domain`_ in the Amazon CloudSearch Developer Guide . 
:type domain_name: string :param domain_name: The name of the domain you want to permanently delete. """ params = {'DomainName':
import numpy as np
import pandas as pd
import joblib
import tensorflow as tf
import sys
import functools
import os
import tensorflow.keras.backend as K
from matplotlib import pyplot as plt
# from IPython.display import clear_output
from scipy.stats import gaussian_kde, binned_statistic as binstat
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import ShuffleSplit, GroupShuffleSplit
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, median_absolute_error
from tensorflow.keras.losses import Loss
from scipy.spatial.distance import jensenshannon as js


class HuberLoss(Loss):
    """
    Custom TensorFlow Loss subclass implementing the Huber loss.
    """

    def __init__(self, threshold: float = 1):
        """
        :param threshold: float
            The Huber threshold between L1 and L2 losses.
        """
        super().__init__()
        self.threshold = threshold

    def call(self, y_true, y_pred):
        # Quadratic (L2) penalty for residuals within the threshold,
        # linear (L1) penalty beyond it; tf.where selects element-wise.
        error = y_true - y_pred
        is_small_error = tf.abs(error) <= self.threshold
        small_error_loss = tf.square(error) / 2
        big_error_loss = self.threshold * (tf.abs(error) - (0.5 * self.threshold))
        return tf.where(is_small_error, small_error_loss, big_error_loss)


def root_mean_squared_error(y, y_pred, sample_weight=None):
    """
    Compute the root mean squared error metric.
    """
    # sklearn returns the (optionally sample-weighted) MSE; take its root.
    value = mean_squared_error(y, y_pred, sample_weight=sample_weight)
    return np.sqrt(value)


def process_input_parameters(pars, min_folds_cv=5):
    """
    Check the consistency of the input parameters and make modifications if necessary.

    :param pars: argparse.Namespace
        An argparse namespace object containing the input parameters.
    :param min_folds_cv: int
        The minimum number of folds required for K-fold cross-validation.
    :return: pars, argparse.Namespace
        The processed version of the input namespace object.
""" if len(pars.lcdir) > 1: assert len(pars.wavebands) == len(pars.lcdir), "The number of items in lcdir must either be 1 or match " \ "the number of items in wavebands." assert len(pars.wavebands) == len(pars.lcfile_suffices), \ "The number of items in wavebands and lcfile_suffices must match." if not os.path.isdir(os.path.join(pars.rootdir, pars.outdir)): os.mkdir(os.path.join(pars.rootdir, pars.outdir)) pars.hparam_grid = np.array(pars.hpars) # Check if only the CPU is to be used: if pars.cpu: os.environ["CUDA_VISIBLE_DEVICES"] = "" # Join the list elements of pars.subset into a long string: if pars.subset: pars.subset = ' '.join(pars.subset) # Check the number of meta input features: if pars.meta_input is None: pars.n_meta = 0 else: pars.n_meta = len(pars.meta_input) if pars.nn_type == 'cnn': pars.n_channels = len(pars.wavebands) else: pars.n_channels = 2 * len(pars.wavebands) if pars.weighing_by_density: print("Density weighing is ON with cutoff {}".format(pars.weighing_by_density)) else: print("Density weighing is OFF.") print("Number of input channels: {}".format(pars.n_channels)) print("Number of meta features: {}".format(pars.n_meta)) if pars.train: pars.predict = False # We want to train a regression model. if pars.pick_fold is not None: for ii in pars.pick_fold: print(type(ii)) assert isinstance(ii, int) and 0 < ii <= pars.k_fold, \ "pick_fold must be > 0 AND <= k_fold integer" assert pars.k_fold >= min_folds_cv, \ "pick_fold requires k_fold >= {}".format(min_folds_cv) pars.refit = False if not pars.cross_validate: assert len(pars.hparam_grid) == 1, "Cannot do grid-search of hyper-parameters if cross_validate is False." pars.refit = True if pars.explicit_test_frac: assert pars.refit or pars.ensemble, \ "For the evaluation of the model on the test set, 'refit' or 'ensemble' must be set." 
if pars.optimize_lr: pars.n_epochs = 100 pars.decay = 0.0 pars.save_model = False pars.cross_validate = False pars.refit = True return pars def read_dataset(filename: str, columns: list = None, subset_expr: str = None, input_feature_names: list = None, trim_quantiles: list = None, qlo: float = 0.25, qhi: float = 0.75, plothist: bool = False, histfig: str = "hist.png", dropna_cols: list = None, comment: str = '#', dtype=None): """ Loads, trims, and exports dataset to numpy arrays. :param filename: str The name of the input file. :param columns: list of strings Passed to the usecols parameter of pandas.read_csv() :param subset_expr: str Expression for subsetting the input data, passed as the first parameter of pandas.DataFrame.query() :param input_feature_names: list of strings An optional subset of the usecols parameter, including the names of the columns to be returned as features. If None, all columns in usecols will be returned. :param trim_quantiles: list An optional subset of the usecols parameter, including the names of the columns to be threshold-rejected beyond the quantiles specified by qlo and qhi. If None, no quantile-trimming will be performed. :param qlo: float Lower quantile for threshold rejection. :param qhi: float Upper quantile for threshold rejection. :param plothist: bool If True, the histograms of the columns in usecols will be plotted before and, if performed, after quantile trimming. :param histfig: str The name of the output histogram figure file if plothist is True. 
:param dropna_cols: :param comment: :param dtype: :return: """ with open(filename) as f: header = f.readline() cols = header.strip('#').split() df = pd.read_csv(filename, names=cols, header=None, sep="\s+", usecols=columns, comment=comment, dtype=dtype) if dropna_cols is not None: df.dropna(inplace=True, subset=dropna_cols) ndata = len(df) print(df.head()) print("----------\n{} lines read from {}\n".format(ndata, filename)) df_orig = df # Apply threshold rejections: if subset_expr is not None: df = df.query(subset_expr) ndata = len(df) print("{} lines after threshold rejections\n".format(ndata)) # plot histogram for each column in original dataset if plothist: fig, ax = plt.subplots(figsize=(20, 10)) fig.clf() _ = pd.DataFrame.hist(df, bins=int(np.ceil(np.cbrt(ndata) * 2)), figsize=(20, 10), grid=False, color='red', ax=ax) plt.savefig(histfig) # omit data beyond specific quantiles [qlo, qhi] if trim_quantiles is not None: dfq = df[trim_quantiles] quantiles = pd.DataFrame.quantile(dfq, q=[qlo, qhi], axis=0, numeric_only=True, interpolation='linear') print("Values at [{},{}] quantiles to be applied for data trimming:".format(qlo, qhi)) print(quantiles.sum) mask = (dfq > dfq.quantile(qlo)) & (dfq < dfq.quantile(qhi)) # print(mask) mask = mask.all(axis=1) # print(mask.shape) df = pd.DataFrame.dropna(df[mask]) ndata = len(df) print("\n{} lines remained after quantile rejection.\n".format(ndata)) # plot histogram for each column in trimmed dataset if plothist: fig, ax = plt.subplots(figsize=(20, 10)) _ = pd.DataFrame.hist(df, bins=int(np.ceil(np.cbrt(ndata) * 2)), figsize=(20, 10), grid=False, color='green', ax=ax) fig.savefig("hist_trim.png", format="png") if input_feature_names is not None: return df.loc[:, input_feature_names], df_orig else: return df, df_orig def read_time_series_for_rnn(name_list, source_dir, nts, input_wavebands, ts_file_suffix, rootdir="", periods=None, max_phase=1.0, phase_shift=None, nbins=None): print("Reading time series...", file=sys.stderr) 
n_data = len(name_list) scaler = StandardScaler(copy=True, with_mean=True, with_std=False) X_list = list() times_dict = dict() mags_dict = dict() phases_dict = dict() if nbins is not None: print("Light curves will be binned to max. {0} points in [0, {1:.1f}].".format(nbins, max_phase)) for iband, waveband in enumerate(input_wavebands): X = np.zeros((n_data, nts, 2)) # Input shape required by an RNN: (batch_size, time_steps, features) phases = list() times = list() mags = list() if len(source_dir) > 1: directory = source_dir[iband] else: directory = source_dir[0] for ii, name in enumerate(name_list): print('Reading data for {}\r'.format(name), end="", file=sys.stderr) pp, mm = np.genfromtxt(os.path.join(rootdir, directory, name + ts_file_suffix[iband]), unpack=True, comments='#') phasemask = (pp < max_phase) pp = pp[phasemask] mm = mm[phasemask] if phase_shift is not None: pp = get_phases(1.0, pp, shift=phase_shift, all_positive=True) inds = np.argsort(pp) pp = pp[inds] mm = mm[inds] if nbins is not None: pp, mm = binlc(pp, mm, nbins=nbins, max_y=max_phase) if periods is not None: tt = pp * periods[ii] else: tt = pp # here we only subtract the mean: mm = scaler.fit_transform(mm.reshape(-1, 1)).flatten() times.append(tt) mags.append(mm) phases.append(pp) times_padded = pad_sequences(times, maxlen=nts, dtype='float64', padding='post', truncating='post', value=-1) mags_padded = pad_sequences(mags, maxlen=nts, dtype='float64', padding='post', truncating='post', value=-1) X[:, :, 0] = times_padded X[:, :, 1] = mags_padded X_list.append(X) times_dict[waveband] = times mags_dict[waveband] = mags phases_dict[waveband] = phases # Create final data matrix for the time series: X = np.concatenate(X_list, axis=2) print("") return X, times_dict, mags_dict, phases_dict def read_time_series_for_cnn(name_list, source_dir, nts, input_wavebands, ts_file_suffix, nuse=1, rootdir="", n_aug=None): nmags = int(nts / nuse) n_data = len(name_list) if n_aug is not None: assert 
isinstance(n_aug, int) and n_aug > 0, \ "n_aug must be a positive integer" dict_x_ts = dict() for waveband in input_wavebands: dict_x_ts[waveband] = np.zeros((n_data, nmags)) if n_aug is not None: dict_x_ts[waveband] = np.zeros((n_data * n_aug, nmags)) groups = np.zeros((n_data * n_aug)) dict_x_ts_scaled = dict() print("Reading time series...", file=sys.stderr) for ii, name in enumerate(name_list): print('Reading data for {}\r'.format(name), end="", file=sys.stderr) for iband, waveband in enumerate(input_wavebands): if len(source_dir) > 1: directory = source_dir[iband] else: directory = source_dir[0] if n_aug is None: phases, timeseries = np.genfromtxt(os.path.join(directory, name + ts_file_suffix[iband]), unpack=True, comments='#') phases = phases[0:nts] timeseries = timeseries[0:nts] dict_x_ts[waveband][ii][:] = timeseries[nuse - 1::nuse] groups = None else: tsinput = np.genfromtxt(os.path.join(directory, name + ts_file_suffix[iband]), unpack=False, comments='#') # check if there are n_aug+1 columns in the data matrix assert tsinput.shape[1] == n_aug + 1, \ "data matrix in " + os.path.join(directory, name + ts_file_suffix[iband]) + " has wrong shape" phases = tsinput[0:nts, 0] for jj in range(n_aug): timeseries = tsinput[0:nts, jj + 1] dict_x_ts[waveband][jj + ii * n_aug][:]
the user.""" raise NotImplementedError def urllink(self, page, url): """Display a URL to the user.""" raise NotImplementedError def itemized_list(self, page, title, items): """Display an itemized list.""" raise NotImplementedError def step(self, page, text, func): """Add a step of a multi-step operation. This will indicate when it's starting and when it's complete. """ raise NotImplementedError def error(self, text, force_wait=False, done_func=None): """Display a block of error text to the user.""" raise NotImplementedError class ConsoleUI(UIToolkit): """A UI toolkit that simply prints to the console.""" def __init__(self): """Initialize the UI toolkit.""" super(UIToolkit, self).__init__() self.header_wrapper = textwrap.TextWrapper(initial_indent="* ", subsequent_indent=" ") indent_str = " " * 4 self.text_wrapper = textwrap.TextWrapper(initial_indent=indent_str, subsequent_indent=indent_str, break_long_words=False) self.error_wrapper = textwrap.TextWrapper(initial_indent="[!] ", subsequent_indent=" ", break_long_words=False) def page(self, text, allow_back=True, is_visible_func=None, on_show_func=None): """Add a new "page" to display to the user. In the console UI, we only care if we need to display or ask questions for this page. Our representation of a page in this case is simply a boolean value. If False, nothing associated with this page will be displayed to the user. """ visible = not is_visible_func or is_visible_func() if not visible: return False if on_show_func: on_show_func() print() print() print(self.header_wrapper.fill(text)) return True def prompt_input(self, page, prompt, default=None, password=<PASSWORD>, yes_no=False, optional=False, normalize_func=None, save_obj=None, save_var=None): """Prompt the user for some text. 
This may contain a default value.""" assert save_obj assert save_var if not page: return if yes_no: if default: prompt = '%s [Y/n]' % prompt else: prompt = '%s [y/N]' % prompt default = False elif default: self.text(page, "The default is %s" % default) prompt = "%s [%s]" % (prompt, default) elif optional: prompt = '%s (optional)' % prompt print() prompt += ": " value = None while not value: if password: temp_value = getpass.getpass(force_str(prompt)) if save_var.startswith('reenter'): if not self.confirm_reentry(save_obj, save_var, temp_value): self.error("Passwords must match.") continue value = temp_value else: value = input(prompt) if not value: if default: value = default elif optional: break if yes_no: if isinstance(value, bool): # This came from the 'default' value. norm_value = value else: assert isinstance(value, six.string_types) norm_value = value.lower() if norm_value not in (True, False, 'y', 'n', 'yes', 'no'): self.error('Must specify one of Y/y/yes or N/n/no.') value = None continue else: value = norm_value in (True, 'y', 'yes') break elif not value: self.error("You must answer this question.") if normalize_func: value = normalize_func(value) setattr(save_obj, save_var, value) def confirm_reentry(self, obj, reenter_var, value): """Confirm whether a re-entered piece of data matches. This is used to ensure that secrets and passwords are what the user intended to type. 
""" first_var = reenter_var.replace('reenter_', '') first_entry = getattr(site, first_var) return first_entry == value def prompt_choice(self, page, prompt, choices, save_obj=None, save_var=None): """Prompt the user for an item amongst a list of choices.""" assert save_obj assert save_var if not page: return self.text(page, "You can type either the name or the number " "from the list below.") valid_choices = [] i = 0 for choice in choices: description = '' enabled = True if isinstance(choice, six.string_types): text = choice elif len(choice) == 2: text, enabled = choice else: text, description, enabled = choice if enabled: self.text(page, "(%d) %s %s\n" % (i + 1, text, description), leading_newline=(i == 0)) valid_choices.append(text) i += 1 print() prompt += ": " choice = None while not choice: choice = input(prompt) if choice not in valid_choices: try: i = int(choice) - 1 if 0 <= i < len(valid_choices): choice = valid_choices[i] break except ValueError: pass self.error("'%s' is not a valid option." % choice) choice = None setattr(save_obj, save_var, choice) def text(self, page, text, leading_newline=True, wrap=True): """Display a block of text to the user. This will wrap the block to fit on the user's screen. """ if not page: return if leading_newline: print() if wrap: print(self.text_wrapper.fill(text)) else: print(' %s' % text) def disclaimer(self, page, text): """Display a disclaimer to the user.""" self.text(page, 'NOTE: %s' % text) def urllink(self, page, url): """Display a URL to the user.""" self.text(page, url, wrap=False) def itemized_list(self, page, title, items): """Display an itemized list.""" if title: self.text(page, "%s:" % title) for item in items: self.text(page, " * %s" % item, False) def step(self, page, text, func): """Add a step of a multi-step operation. This will indicate when it's starting and when it's complete. """ sys.stdout.write("%s ... 
" % text) func() print("OK") def error(self, text, force_wait=False, done_func=None): """Display a block of error text to the user.""" print() for text_block in text.split('\n'): print(self.error_wrapper.fill(text_block)) if force_wait: print() input('Press Enter to continue') if done_func: done_func() class Command(object): """An abstract command.""" needs_ui = False def add_options(self, parser): """Add any command-specific options to the parser.""" pass def run(self): """Run the command.""" pass class InstallCommand(Command): """Installer command. This command installs a new Review Board site tree and generates web server configuration files. This will ask several questions about the site before performing the installation. """ needs_ui = True def add_options(self, parser): """Add any command-specific options to the parser.""" is_windows = platform.system() == "Windows" group = OptionGroup(parser, "'install' command", self.__doc__.strip()) group.add_option('--advanced', action='store_true', dest='advanced', default=False, help='provide more advanced configuration options') group.add_option("--copy-media", action="store_true", dest="copy_media", default=is_windows, help="copy media files instead of symlinking") group.add_option("--noinput", action="store_true", default=False, help="run non-interactively using configuration " "provided in command-line options") group.add_option('--opt-out-support-data', action='store_false', default=True, dest='send_support_usage_stats', help='opt out of sending data and stats for ' 'improved user and admin support') group.add_option("--company", help="the name of the company or organization that " "owns the server") group.add_option("--domain-name", help="fully-qualified host name of the site, " "excluding the http://, port or path") group.add_option("--site-root", default="/", help="path to the site relative to the domain name") group.add_option("--static-url", default="static/", help="the URL containing the static (shipped) " 
"media files") group.add_option("--media-url", default="media/", help="the URL containing the uploaded media files") group.add_option("--db-type", help="database type (mysql, postgresql or sqlite3)") group.add_option("--db-name", default="reviewboard", help="database name (not for sqlite3)") group.add_option("--db-host", default="localhost", help="database host (not for sqlite3)") group.add_option("--db-user", help="database user (not for sqlite3)") group.add_option("--db-pass", help="password for the database user " "(not for sqlite3)") group.add_option("--cache-type", default='memcached', help="cache server type (memcached or file)") group.add_option("--cache-info", default='localhost:11211', help="cache identifier (memcached connection string " "or file cache directory)") group.add_option("--web-server-type", default='apache', help="web server (apache or lighttpd)") group.add_option("--web-server-port", help="port that the web server should listen on", default='80') group.add_option("--python-loader", default='wsgi', help="python loader for apache (fastcgi or wsgi)") group.add_option("--admin-user", default="admin", help="the site administrator's username") group.add_option("--admin-password", help="the site administrator's password") group.add_option("--admin-email", help="the site administrator's e-mail address") # UNIX-specific arguments if not is_windows: group.add_option("--sitelist", default=SITELIST_FILE_UNIX, help="the path to a file storing a list of " "installed sites") parser.add_option_group(group) def run(self): """Run the command.""" if not self.check_permissions(): return site.__dict__.update(options.__dict__) self.print_introduction() if self.print_missing_dependencies(): # There were required dependencies missing. Don't show any more # pages. 
return if not options.noinput: self.ask_domain() self.ask_site_root() if options.advanced: self.ask_shipped_media_url() self.ask_uploaded_media_url() self.ask_database_type() self.ask_database_name() self.ask_database_host() self.ask_database_login() if options.advanced: self.ask_cache_type() self.ask_cache_info() if options.advanced: self.ask_web_server_type() self.ask_python_loader() self.ask_admin_user() self.ask_support_data() # Do not ask for sitelist file, it should not be common. self.show_install_status() self.show_finished() self.show_get_more() def normalize_root_url_path(self, path): """Convert user-specified root URL paths to a normal format.""" if not path.endswith("/"): path += "/" if not path.startswith("/"): path = "/" + path return path def normalize_media_url_path(self, path): """Convert user-specified media URLs to a normal format.""" if not path.endswith("/"): path += "/" if path.startswith("/"): path = path[1:] return path def check_permissions(self): """Check that permissions are usable. If not, this will show an error to the user. """ # Make sure we can create the directory first. try: # TODO: Do some chown tests too. if os.path.exists(site.install_dir): # Remove it first, to see if we own it and to handle the # case where the directory is empty as a result of a # previously canceled install. os.rmdir(site.install_dir) os.mkdir(site.install_dir) # Don't leave a mess. We'll actually do this at the end. os.rmdir(site.install_dir) return True except OSError: # Likely a permission error. ui.error("Unable to create the %s directory. Make sure " "you're running as an administrator and that the " "directory does not contain any files." % site.install_dir, done_func=lambda: sys.exit(1)) return False def print_introduction(self): """Print an introduction to the site installer.""" page = ui.page("Welcome to the
the index depth frame down the stack. If fException is True use the exception stack (traceback). """ if fException: tb = get_traceback(base_frame, self) if tb is None: raise NoExceptionFound while tb.tb_next is not None: tb = tb.tb_next f = tb.tb_frame else: f = base_frame while f is not None: if not g_fDebug and f.f_code.co_name == 'rpdb2_import_wrapper': f = f.f_back continue if index <= 0: break f = f.f_back index -= 1 if (index < 0) or (f is None): raise InvalidFrame if (self.m_uef_lineno is not None) and (f.f_back is None): lineno = self.m_uef_lineno else: lineno = f.f_lineno if fException: tb = get_traceback(base_frame, self) while tb is not None: if tb.tb_frame == f: lineno = tb.tb_lineno break tb = tb.tb_next return (f, lineno) def get_locals_copy(self, frame_index, fException, fReadOnly): """ Get globals and locals of frame. A copy scheme is used for locals to work around a bug in Python 2.3 and 2.4 that prevents modifying the local dictionary. """ try: base_frame = self.frame_acquire() (f, lineno) = self.get_frame(base_frame, frame_index, fException) if fReadOnly: gc = copy.copy(f.f_globals) else: gc = f.f_globals try: (lc, olc) = self.m_locals_copy[f] except KeyError: if f.f_code.co_name in [MODULE_SCOPE, MODULE_SCOPE2]: lc = gc olc = gc else: lc = copy.copy(f.f_locals) olc = copy.copy(lc) if not fReadOnly: self.m_locals_copy[f] = (lc, olc) self.set_local_trace(f) return (gc, lc, olc) finally: f = None base_frame = None self.frame_release() def update_locals_copy(self): """ Update copy of locals with changes in locals. """ lct = self.m_locals_copy.get(self.m_frame, None) if lct is None: return (lc, base) = lct cr = copy.copy(self.m_frame.f_locals) for k in cr: if not k in base: lc[k] = cr[k] continue if not cr[k] is base[k]: lc[k] = cr[k] def update_locals(self): """ Update locals with changes from copy of locals. 
""" lct = self.m_locals_copy.pop(self.m_frame, None) if lct is None: return self.m_frame.f_locals.update(lct[0]) def __eval_breakpoint(self, frame, bp): """ Return True if the breakpoint is hit. """ if not bp.m_fEnabled: return False if bp.m_expr == '': return True try: if frame in self.m_locals_copy: l = self.m_locals_copy[frame][0] v = eval(bp.m_code, frame.f_globals, l) else: v = eval(bp.m_code, frame.f_globals, frame.f_locals) return (v != False) except: return False def set_local_trace(self, frame, fsignal_exception = False): """ Set trace callback of frame. Specialized trace methods are selected here to save switching time during actual tracing. """ if not self.m_core.m_ftrace: frame.f_trace = self.trace_dispatch_stop return if fsignal_exception: frame.f_trace = self.trace_dispatch_signal return code_context = self.m_core.get_code_context(frame) if self.m_core.is_break(self, frame): frame.f_trace = self.trace_dispatch_break elif code_context.m_fExceptionTrap or (frame.f_back is None): frame.f_trace = self.trace_dispatch_trap elif frame.f_code.co_name in self.m_bp_manager.m_break_points_by_function: frame.f_trace = self.trace_dispatch elif frame in self.m_locals_copy: frame.f_trace = self.trace_dispatch elif frame == self.m_core.m_return_frame: frame.f_trace = self.trace_dispatch else: del frame.f_trace def set_tracers(self, fsignal_exception = False): """ Set trace callbacks for all frames in stack. """ try: try: f = self.frame_acquire() while f is not None: self.set_local_trace(f, fsignal_exception) f = f.f_back except ThreadDone: f = None finally: f = None self.frame_release() def trace_dispatch_stop(self, frame, event, arg): """ Disable tracing for this thread. """ if frame in self.m_locals_copy: self.update_locals() sys.settrace(None) sys.setprofile(None) return None def trace_dispatch_break(self, frame, event, arg): """ Trace method for breaking a thread. 
""" if event not in ['line', 'return', 'exception']: return frame.f_trace if event == 'exception': self.set_exc_info(arg) self.m_event = event if frame in self.m_locals_copy: self.update_locals_copy() self.m_core._break(self, frame, event, arg) if frame in self.m_locals_copy: self.update_locals() self.set_local_trace(frame) return frame.f_trace def trace_dispatch_call(self, frame, event, arg): """ Initial trace method for thread. """ if not self.m_core.m_ftrace: return self.trace_dispatch_stop(frame, event, arg) self.m_depth += 1 if self.m_depth > g_recursionlimit: sys.setprofile(self.profile_recursion) self.m_frame = frame try: self.m_code_context = self.m_core.m_code_contexts[frame.f_code] except KeyError: self.m_code_context = self.m_core.get_code_context(frame) if self.m_core.m_fBreak or (self.m_core.m_step_tid == self.m_thread_id): self.m_event = event self.m_core._break(self, frame, event, arg) if frame in self.m_locals_copy: self.update_locals() self.set_local_trace(frame) return frame.f_trace if not frame.f_code.co_name in self.m_bp_manager.m_break_points_by_function: return None bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None) if bp is not None and self.__eval_breakpoint(frame, bp): self.m_event = event self.m_core._break(self, frame, event, arg) if frame in self.m_locals_copy: self.update_locals() self.set_local_trace(frame) return frame.f_trace return self.trace_dispatch def trace_dispatch(self, frame, event, arg): """ General trace method for thread. 
""" if (event == 'line'): if frame in self.m_locals_copy: self.update_locals_copy() bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None) if bp is not None and self.__eval_breakpoint(frame, bp): self.m_event = event self.m_core._break(self, frame, event, arg) if frame in self.m_locals_copy: self.update_locals() self.set_local_trace(frame) return frame.f_trace if event == 'return': if frame in self.m_locals_copy: self.update_locals_copy() if frame == self.m_core.m_return_frame: self.m_event = event self.m_core._break(self, frame, event, arg) if frame in self.m_locals_copy: self.update_locals() return None if event == 'exception': if frame in self.m_locals_copy: self.update_locals() self.set_local_trace(frame) if not is_py3k() and not frame.f_exc_traceback is arg[2]: (frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback) = arg return frame.f_trace return frame.f_trace def trace_dispatch_trap(self, frame, event, arg): """ Trace method used for frames in which unhandled exceptions should be caught. 
""" if (event == 'line'): self.m_event = event if frame in self.m_locals_copy: self.update_locals_copy() bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None) if bp is not None and self.__eval_breakpoint(frame, bp): self.m_core._break(self, frame, event, arg) if frame in self.m_locals_copy: self.update_locals() self.set_local_trace(frame) return frame.f_trace if event == 'return': last_event = self.m_event self.m_event = event if frame in self.m_locals_copy: self.update_locals_copy() if frame == self.m_core.m_return_frame: self.m_core._break(self, frame, event, arg) if frame in self.m_locals_copy: self.update_locals() if last_event == 'exception': self.m_event = last_event return None if event == 'exception': self.m_event = event if self.m_code_context.m_fExceptionTrap and self.m_core.m_ftrap: self.set_exc_info(arg) self.m_fUnhandledException = True self.m_core._break(self, frame, event, arg) if frame in self.m_locals_copy: self.update_locals() return frame.f_trace self.m_ue_lineno = frame.f_lineno if frame in self.m_locals_copy: self.update_locals() self.set_local_trace(frame) if is_py3k(): self.set_exc_info(arg) elif not frame.f_exc_traceback is arg[2]: (frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback) = arg return frame.f_trace return frame.f_trace def trace_dispatch_signal(self, frame, event, arg): #print_debug('*** trace_dispatch_signal %s, %s, %s' % (frame.f_lineno, event, repr(arg))) self.set_exc_info(arg) self.set_tracers() self.set_depth(frame) sys.setprofile(self.profile) return self.trace_dispatch_trap(frame, event, arg) def set_exc_info(self, arg): """ Set exception information. 
""" if arg == None: return if is_py3k(): self.m_exc_info = arg return (t, v, tb) = arg while tb is not None: f = tb.tb_frame f.f_exc_type = t f.f_exc_value = v f.f_exc_traceback = tb tb = tb.tb_next def get_exc_info(self): return self.m_exc_info def reset_exc_info(self): self.m_exc_info = None def is_breakpoint(self): """ Calc if current line is hit by breakpoint. """ bp = self.m_code_context.m_file_breakpoints.get(self.m_frame.f_lineno, None) if bp is not None and self.__eval_breakpoint(self.m_frame, bp): return True return False def get_breakpoint(self): """ Return current line breakpoint if any. """ return self.m_code_context.m_file_breakpoints.get(self.m_frame.f_lineno, None) class CDebuggerCore: """ Base class for the debugger. Handles basic debugger functionality. """ def __init__(self, fembedded = False): self.m_ftrace = True self.m_current_ctx = None self.m_f_first_to_break = True self.m_f_break_on_init = False self.m_builtins_hack = None self.m_timer_embedded_giveup = None self.m_threads_lock = threading.Condition() self.m_threads = {} self.m_event_dispatcher = CEventDispatcher() self.m_state_manager = CStateManager(STATE_RUNNING, self.m_event_dispatcher) self.m_ffork_into_child = False self.m_ffork_auto = False self.m_fsynchronicity = True self.m_ftrap = True self.m_fUnhandledException = False self.m_fBreak = False self.m_lastest_event = None self.m_step_tid = None self.m_next_frame = None self.m_return_frame = None self.m_saved_step = (None, None, None) self.m_saved_next = None self.m_bp_manager = CBreakPointsManager() self.m_code_contexts = {None: None} self.m_fembedded = fembedded self.m_embedded_event = threading.Event() self.m_embedded_sync_t0 = 0 self.m_embedded_sync_t1 = 0 self.m_heartbeats = {0: time.time() + 3600} def shutdown(self): self.m_event_dispatcher.shutdown() self.m_state_manager.shutdown() def is_embedded(self): return self.m_fembedded def send_fork_switch(self, sync_n): """ Notify client that debuggee is forking and that it should try 
to reconnect to the child. """ print_debug('Sending fork switch event') event = CEventForkSwitch(sync_n) self.m_event_dispatcher.fire_event(event) def send_exec_switch(self, sync_n): """ Notify client that debuggee is doing an exec and that it should try to reconnect (in case the exec failed). """ print_debug('Sending exec switch event') event = CEventExecSwitch(sync_n) self.m_event_dispatcher.fire_event(event) def send_event_exit(self): """ Notify client that the debuggee is shutting down. """
key_expiration=2, key_algs=sig_keys, enc_keys=enc_keys) self.write_openid_keys(self.oxauth_openid_jwks_fn, jwks) def generate_base64_string(self, lines, num_spaces): if not lines: return None plain_text = ''.join(lines) plain_b64encoded_text = base64.encodestring(plain_text.encode('utf-8')).decode('utf-8').strip() if num_spaces > 0: plain_b64encoded_text = self.reindent(plain_b64encoded_text, num_spaces) return plain_b64encoded_text def genRandomString(self, N): return ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(N)) def generate_scim_configuration(self): self.scim_rs_client_jks_pass = self.getPW() self.scim_rs_client_jks_pass_encoded = self.obscure(self.scim_rs_client_jks_pass) self.scim_rs_client_jwks = self.gen_openid_jwks_jks_keys(self.scim_rs_client_jks_fn, self.scim_rs_client_jks_pass) self.templateRenderingDict['scim_rs_client_base64_jwks'] = self.generate_base64_string(self.scim_rs_client_jwks, 1) self.scim_rp_client_jwks = self.gen_openid_jwks_jks_keys(self.scim_rp_client_jks_fn, self.scim_rp_client_jks_pass) self.templateRenderingDict['scim_rp_client_base64_jwks'] = self.generate_base64_string(self.scim_rp_client_jwks, 1) def generate_oxtrust_api_configuration(self): self.api_rs_client_jks_pass_encoded = self.obscure(self.api_rs_client_jks_pass) self.api_rs_client_jwks = self.gen_openid_jwks_jks_keys(self.api_rs_client_jks_fn, self.api_rs_client_jks_pass) self.templateRenderingDict['api_rs_client_base64_jwks'] = self.generate_base64_string(self.api_rs_client_jwks, 1) self.api_rp_client_jks_pass_encoded = self.obscure(self.api_rp_client_jks_pass) self.api_rp_client_jwks = self.gen_openid_jwks_jks_keys(self.api_rp_client_jks_fn, self.api_rp_client_jks_pass) self.templateRenderingDict['api_rp_client_base64_jwks'] = self.generate_base64_string(self.api_rp_client_jwks, 1) def getDefaultOption(self, val): return 'Yes' if val else 'No' def getPrompt(self, prompt, defaultValue=None): try: if 
defaultValue: user_input = input("%s [%s] : " % (prompt, defaultValue)).strip() if user_input == '': return defaultValue else: return user_input else: while True: user_input = input("%s : " % prompt).strip() if user_input != '': return user_input except KeyboardInterrupt: sys.exit() except: return None def getPW(self, size=12, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase, special=''): if not special: random_password = [random.choice(chars) for _ in range(size)] else: ndigit = random.randint(1, 3) nspecial = random.randint(1, 2) ncletter = random.randint(2, 5) nsletter = size - ndigit - nspecial - ncletter random_password = [] for n, rc in ((ndigit, string.digits), (nspecial, special), (ncletter, string.ascii_uppercase), (nsletter, string.ascii_lowercase)): random_password += [random.choice(rc) for _ in range(n)] random.shuffle(random_password) return ''.join(random_password) def install_gluu_base(self): self.logIt("Installing Gluu base...") self.generate_oxtrust_api_configuration() self.generate_scim_configuration() self.ldapCertFn = self.opendj_cert_fn self.ldapTrustStoreFn = self.opendj_p12_fn self.encoded_ldapTrustStorePass = self.encoded_opendj_p12_pass if self.installSaml: self.oxTrustConfigGeneration = "true" else: self.oxTrustConfigGeneration = "false" def load_certificate_text(self, filePath): self.logIt("Load certificate %s" % filePath) f = open(filePath) certificate_text = f.read() f.close() certificate_text = certificate_text.replace('-----BEGIN CERTIFICATE-----', '').replace('-----END CERTIFICATE-----', '').strip() return certificate_text def set_jetty_param(self, jettyServiceName, jetty_param, jetty_val): self.logIt("Seeting jetty parameter {0}={1} for service {2}".format(jetty_param, jetty_val, jettyServiceName)) service_fn = os.path.join(self.jetty_base, jettyServiceName, 'start.ini') start_ini = self.readFile(service_fn) start_ini_list = start_ini.splitlines() param_ln = jetty_param + '=' + jetty_val for i, l in 
enumerate(start_ini_list[:]): if jetty_param in l and l[0]=='#': start_ini_list[i] = param_ln break elif l.strip().startswith(jetty_param): start_ini_list[i] = param_ln break else: start_ini_list.append(param_ln) self.writeFile(service_fn, '\n'.join(start_ini_list)) def install_oxauth(self): self.logIt("Copying oxauth.war into jetty webapps folder...") jettyServiceName = 'oxauth' self.installJettyService(self.jetty_app_configuration[jettyServiceName], True) jettyServiceWebapps = '%s/%s/webapps' % (self.jetty_base, jettyServiceName) self.copyFile('%s/oxauth.war' % self.distGluuFolder, jettyServiceWebapps) def install_oxtrust(self): self.logIt("Copying identity.war into jetty webapps folder...") jettyServiceName = 'identity' self.installJettyService(self.jetty_app_configuration[jettyServiceName], True) jettyServiceWebapps = '%s/%s/webapps' % (self.jetty_base, jettyServiceName) self.copyFile('%s/identity.war' % self.distGluuFolder, jettyServiceWebapps) # don't send header to server self.set_jetty_param(jettyServiceName, 'jetty.httpConfig.sendServerVersion', 'false') def install_scim_server(self): self.logIt("Copying scim.war into jetty webapps folder...") jettyServiceName = 'scim' self.installJettyService(self.jetty_app_configuration[jettyServiceName], True) jettyServiceWebapps = '%s/%s/webapps' % (self.jetty_base, jettyServiceName) self.copyFile('%s/scim.war' % self.distGluuFolder, jettyServiceWebapps) # don't send header to server self.set_jetty_param(jettyServiceName, 'jetty.httpConfig.sendServerVersion', 'false') def install_saml(self): if self.installSaml: self.logIt("Install SAML Shibboleth IDP v3...") # Put latest SAML templates identityWar = 'identity.war' self.createDirs('%s/conf/shibboleth3' % self.gluuBaseFolder) self.createDirs('%s/identity/conf/shibboleth3/idp' % self.jetty_base) self.createDirs('%s/identity/conf/shibboleth3/sp' % self.jetty_base) # unpack IDP3 JAR with static configs self.run([self.cmd_jar, 'xf', self.distGluuFolder + 
'/shibboleth-idp.jar'], '/opt') self.removeDirs('/opt/META-INF') if self.mappingLocations['user'] == 'couchbase': self.templateRenderingDict['idp_attribute_resolver_ldap.search_filter'] = '(&(|(lower(uid)=$requestContext.principalName)(mail=$requestContext.principalName))(objectClass=gluuPerson))' # Process templates self.renderTemplateInOut(self.idp3_configuration_properties, self.staticIDP3FolderConf, self.idp3ConfFolder) self.renderTemplateInOut(self.idp3_configuration_ldap_properties, self.staticIDP3FolderConf, self.idp3ConfFolder) self.renderTemplateInOut(self.idp3_configuration_saml_nameid, self.staticIDP3FolderConf, self.idp3ConfFolder) self.renderTemplateInOut(self.idp3_configuration_services, self.staticIDP3FolderConf, self.idp3ConfFolder) self.renderTemplateInOut(self.idp3_configuration_password_authn, self.staticIDP3FolderConf + '/authn', self.idp3ConfFolder + '/authn') # load certificates to update metadata self.templateRenderingDict['idp3EncryptionCertificateText'] = self.load_certificate_text(self.certFolder + '/idp-encryption.crt') self.templateRenderingDict['idp3SigningCertificateText'] = self.load_certificate_text(self.certFolder + '/idp-signing.crt') # update IDP3 metadata self.renderTemplateInOut(self.idp3_metadata, self.staticIDP3FolderMetadata, self.idp3MetadataFolder) self.idpWarFullPath = '%s/idp.war' % self.distGluuFolder jettyIdpServiceName = 'idp' jettyIdpServiceWebapps = '%s/%s/webapps' % (self.jetty_base, jettyIdpServiceName) self.installJettyService(self.jetty_app_configuration[jettyIdpServiceName], True, True) self.copyFile('%s/idp.war' % self.distGluuFolder, jettyIdpServiceWebapps) # Prepare libraries needed to for command line IDP3 utilities self.install_saml_libraries() # generate new keystore with AES symmetric key # there is one throuble with Shibboleth IDP 3.x - it doesn't load keystore from /etc/certs. It accepts %{idp.home}/credentials/sealer.jks %{idp.home}/credentials/sealer.kver path format only. 
cmd = [self.cmd_java,'-classpath', '"{}"'.format(os.path.join(self.idp3Folder,'webapp/WEB-INF/lib/*')), 'net.shibboleth.utilities.java.support.security.BasicKeystoreKeyStrategyTool', '--storefile', os.path.join(self.idp3Folder,'credentials/sealer.jks'), '--versionfile', os.path.join(self.idp3Folder, 'credentials/sealer.kver'), '--alias secret', '--storepass', self.shibJksPass] self.run(' '.join(cmd), shell=True) # chown -R jetty:jetty /opt/shibboleth-idp # self.run([self.cmd_chown,'-R', 'jetty:jetty', self.idp3Folder], '/opt') self.run([self.cmd_chown, '-R', 'jetty:jetty', jettyIdpServiceWebapps], '/opt') if self.persistence_type == 'couchbase': self.saml_couchbase_settings() elif self.persistence_type == 'hybrid': couchbase_mappings = self.getMappingType('couchbase') if 'user' in couchbase_mappings: self.saml_couchbase_settings() def install_saml_libraries(self): # Unpack oxauth.war to get bcprov-jdk16.jar idpWar = 'idp.war' distIdpPath = '%s/idp.war' % self.distGluuFolder tmpIdpDir = '%s/tmp/tmp_idp' % self.distFolder self.logIt("Unpacking %s..." 
% idpWar) self.removeDirs(tmpIdpDir) self.createDirs(tmpIdpDir) self.run([self.cmd_jar, 'xf', distIdpPath], tmpIdpDir) # Copy libraries into webapp idp3WebappLibFolder = "%s/WEB-INF/lib" % self.idp3WebappFolder self.createDirs(idp3WebappLibFolder) self.copyTree('%s/WEB-INF/lib' % tmpIdpDir, idp3WebappLibFolder) self.removeDirs(tmpIdpDir) def saml_couchbase_settings(self): # Add couchbase bean to global.xml couchbase_bean_xml_fn = '%s/couchbase/couchbase_bean.xml' % self.staticFolder global_xml_fn = '%s/global.xml' % self.idp3ConfFolder couchbase_bean_xml = self.readFile(couchbase_bean_xml_fn) global_xml = self.readFile(global_xml_fn) global_xml = global_xml.replace('</beans>', couchbase_bean_xml+'\n\n</beans>') self.writeFile(global_xml_fn, global_xml) # Add datasource.properties to idp.properties idp3_configuration_properties_fn = os.path.join(self.idp3ConfFolder, self.idp3_configuration_properties) with open(idp3_configuration_properties_fn) as r: idp3_properties = r.readlines() for i,l in enumerate(idp3_properties[:]): if l.strip().startswith('idp.additionalProperties'): idp3_properties[i] = l.strip() + ', /conf/datasource.properties\n' new_idp3_props = ''.join(idp3_properties) self.writeFile(idp3_configuration_properties_fn, new_idp3_props) data_source_properties = os.path.join(self.outputFolder, self.data_source_properties) self.copyFile(data_source_properties, self.idp3ConfFolder) def install_oxauth_rp(self): oxAuthRPWar = 'oxauth-rp.war' distOxAuthRpPath = '%s/%s' % (self.distGluuFolder, oxAuthRPWar) self.logIt("Copying oxauth-rp.war into jetty webapps folder...") jettyServiceName = 'oxauth-rp' self.installJettyService(self.jetty_app_configuration[jettyServiceName]) jettyServiceWebapps = '%s/%s/webapps' % (self.jetty_base, jettyServiceName) self.copyFile('%s/oxauth-rp.war' % self.distGluuFolder, jettyServiceWebapps) def generate_passport_configuration(self): self.passport_rs_client_jks_pass = self.getPW() self.passport_rs_client_jks_pass_encoded = 
self.obscure(self.passport_rs_client_jks_pass) if not self.passport_rs_client_id: self.passport_rs_client_id = '1501.' + str(uuid.uuid4()) if not self.passport_rp_client_id: self.passport_rp_client_id = '1502.' + str(uuid.uuid4()) if not self.passport_rp_ii_client_id: self.passport_rp_ii_client_id = '1503.' + str(uuid.uuid4()) if not self.passport_resource_id: self.passport_resource_id = '1504.' + str(uuid.uuid4()) self.renderTemplate(self.passport_oxtrust_config_fn) def install_passport(self): self.logIt("Installing Passport...") self.passport_rs_client_jwks = self.gen_openid_jwks_jks_keys(self.passport_rs_client_jks_fn, self.passport_rs_client_jks_pass) self.templateRenderingDict['passport_rs_client_base64_jwks'] = self.generate_base64_string(self.passport_rs_client_jwks, 1) self.passport_rp_client_jwks = self.gen_openid_jwks_jks_keys(self.passport_rp_client_jks_fn, self.passport_rp_client_jks_pass) self.templateRenderingDict['passport_rp_client_base64_jwks'] = self.generate_base64_string(self.passport_rp_client_jwks, 1) self.logIt("Rendering Passport templates") self.renderTemplate(self.passport_central_config_json) self.templateRenderingDict['passport_central_config_base64'] = self.generate_base64_ldap_file(self.passport_central_config_json) self.renderTemplate(self.ldif_passport_config) self.renderTemplate(self.ldif_passport) self.renderTemplate(self.ldif_passport_clients) if self.mappingLocations['default'] == 'ldap': self.import_ldif_opendj([self.ldif_passport, self.ldif_passport_config, self.ldif_passport_clients]) else: self.import_ldif_couchebase([self.ldif_passport, self.ldif_passport_config, self.ldif_passport_clients]) self.logIt("Preparing passport service base folders") self.run([self.cmd_mkdir, '-p', self.gluu_passport_base]) # Extract package passportArchive = 'passport.tgz' try: self.logIt("Extracting %s into %s" % (passportArchive, self.gluu_passport_base)) self.run(['tar', '--strip', '1', '-xzf', '%s/%s' % (self.distGluuFolder, passportArchive), 
'-C', self.gluu_passport_base, '--no-xattrs', '--no-same-owner', '--no-same-permissions']) except: self.logIt("Error encountered while extracting archive %s" % passportArchive) self.logIt(traceback.format_exc(), True) passport_modules_archive = os.path.join(self.distGluuFolder, 'passport-%s-node_modules.tar.gz' % self.githubBranchName) modules_target_dir = os.path.join(self.gluu_passport_base, 'node_modules') self.run([self.cmd_mkdir, '-p', modules_target_dir]) if os.path.exists(passport_modules_archive): self.logIt("Extracting passport node modules") self.run(['tar', '--strip', '1', '-xzf', passport_modules_archive, '-C', modules_target_dir, '--no-xattrs', '--no-same-owner', '--no-same-permissions']) else: # Install dependencies try: self.logIt("Running npm install in %s" % self.gluu_passport_base) nodeEnv = os.environ.copy() nodeEnv['PATH'] = '%s/bin:' % self.node_home + nodeEnv['PATH'] self.run(['npm', 'install', '-P'], self.gluu_passport_base, nodeEnv, True) except: self.logIt("Error encountered running npm install in %s" % self.gluu_passport_base) self.logIt(traceback.format_exc(), True) # Create logs folder self.run([self.cmd_mkdir, '-p', '%s/server/logs' % self.gluu_passport_base]) #create empty log file log_file = os.path.join(self.gluu_passport_base, 'server/logs/start.log') open(log_file,'w') self.run([self.cmd_chown, '-R', 'node:node', self.gluu_passport_base]) self.logIt("Preparing Passport OpenID RP certificate...") passport_rp_client_jwks_json = json.loads(''.join(self.passport_rp_client_jwks)) for jwks_key in passport_rp_client_jwks_json["keys"]: if jwks_key["alg"] == self.passport_rp_client_cert_alg: self.passport_rp_client_cert_alias = jwks_key["kid"] break self.export_openid_key(self.passport_rp_client_jks_fn, self.passport_rp_client_jks_pass, self.passport_rp_client_cert_alias, self.passport_rp_client_cert_fn) self.renderTemplateInOut(self.passport_config, self.templateFolder, self.configFolder) # Install passport system service script 
self.installNodeService('passport') # enable service at startup self.enable_service_at_start('passport') def install_gluu_components(self): if self.wrends_install: self.pbar.progress("ldap", "Installing Gluu components: LDAP", False) self.install_ldap_server() if self.cb_install: self.pbar.progress("couchbase", "Installing Gluu components: Couchbase", False) self.install_couchbase_server() if self.installHttpd: self.pbar.progress("httpd", "Installing Gluu components: HTTPD", False) self.configure_httpd() if self.installOxAuth: self.pbar.progress("oxauth", "Installing Gluu components: OxAuth", False) self.install_oxauth() if self.installOxTrust: self.pbar.progress("oxtrust", "Installing Gluu components: oxTrust", False) self.install_oxtrust() if self.installScimServer: self.pbar.progress("oxtrust", "Installing Gluu components: Scim Server", False) self.install_scim_server() if self.installSaml: self.pbar.progress("saml", "Installing Gluu components: saml", False) self.install_saml() if self.installOxAuthRP: self.pbar.progress("oxauthrp", "Installing Gluu components: OxAuthRP", False) self.install_oxauth_rp() if self.installPassport: self.pbar.progress("passport", "Installing Gluu components: Passport", False) self.install_passport() if self.installOxd: self.pbar.progress("oxd", "Installing Gluu components: oxd", False) self.install_oxd() if self.installCasa: self.pbar.progress("casa", "Installing Gluu components: Casa", False) self.install_casa() self.install_gluu_radius_base() def isIP(self, address): try: socket.inet_aton(address) return True except socket.error: return False def check_email(self, email): return re.match('^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,})$', email, re.IGNORECASE) def checkPassword(self, pwd): return re.search('^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*\W)[a-zA-Z0-9\S]{6,}$', pwd) def ldap_encode(self, password): salt = os.urandom(4) sha = hashlib.sha1(password.encode('utf-8')) sha.update(salt) digest_ = sha.digest() 
b64encoded = base64.b64encode(digest_+salt).decode('utf-8') encrypted_password = <PASSWORD>) return encrypted_password def createUser(self, userName, homeDir, shell='/bin/bash'): try: useradd = '/usr/sbin/useradd' cmd = [useradd, '--system', '--user-group', '--shell', shell, userName] if homeDir: cmd.insert(-1, '--create-home') cmd.insert(-1, '--home-dir') cmd.insert(-1, homeDir) else: cmd.insert(-1, '--no-create-home') self.run(cmd) if homeDir: self.logOSChanges("User %s with homedir %s was created" % (userName, homeDir)) else: self.logOSChanges("User %s without homedir was created" % (userName)) except: self.logIt("Error adding user", True) self.logIt(traceback.format_exc(), True) def createGroup(self, groupName): try: groupadd = '/usr/sbin/groupadd' self.run([groupadd, groupName]) self.logOSChanges("Group %s was created" % (groupName)) except: self.logIt("Error adding group", True) self.logIt(traceback.format_exc(), True) def addUserToGroup(self, groupName, userName): try: usermod = '/usr/sbin/usermod' self.run([usermod, '-a', '-G', groupName, userName]) self.logOSChanges("User %s was added to group %s" % (userName,groupName))
# <NAME> # new version of hex_walker_driver, first created in fall2019 project1. # moved some contents from here to hex_util or hex_walker_constants files for cleanliness and organization. # adds interpolation & threading improvments to the Leg object driver, some of the Hex_Walker driver, more improvements pending. # also improved organization of Leg object by grouping variables into lists. # also improved code reuse by making Rotator object a subclass of Leb object. import time import threading from typing import List from posedata_arms import * from posedata_walker import * from posedata_leg import * from hex_walker_constants import * from hex_util import * import frame_thread as ft from pwm_wrapper import Pwm_Wrapper #Extraneous HW_MOVE_DEBUG = 0 #toggle 0/1 to turn debug prints on/off LEG_THREAD_DEBUG = False USE_THREADING = True # TODO: make the new walk cycle not shit, then actually use it USE_NEW_WALK_CYCLE = False # Leg_Position class defined in posedata_leg.py # Hex_Walker_Position class defined in posedata_walker.py # Arms_Position class defined in posedata_arms.py # A Leg object is a group of 3 servos that are controlled as one unit. It is used for the 6 legs # and both arms of Feynman bot, and the subclass Rotator is used for the waist. The servos in the # leg can be directly and immediately set or gradually/linearly transition to the destination pose # depending on the function used. They can be individually set via angle, PWM value, or percentage. # To set all at once, the Leg_Position object must be used. When initialized it is given the I2C # address of the PWM hat it is connected to, and the PWM channels on that hat the individual servos # connect to. All servos in a Leg must be on the same PWM hat. 
class Leg(object): # list of functions: # __init__ # print_self # angle_to_pwm # pwm_to_angle # percent_to_angle # set_servo_percent # set_servo_pwm # set_servo_angle # set_servo_angle_thread # set_leg_position # set_leg_position_thread # abort # do_set_servo_angle def __init__(self, pwm: Pwm_Wrapper, channels: List[int], leg_num: int): # unique ID, not actually used for much, just stores the leg_num self.uid = leg_num # this can be either the new-style PWM wrapper or the old-style actual pwm object, works just the same self.pwm = pwm # running/idle flags: normal Events can only wait for a rising edge, if I want to wait for a falling edge, i need to # set up a complementary system like this. also they're really being used as flags, not as "events", but whatever. self.running_flag = threading.Event() self.idle_flag = threading.Event() self.idle_flag.set() # i want setting one/clearing other to be an atomic operation so it should have a lock object just in case self._state_flag_lock = threading.Lock() # the list of frames that the leg thread is consuming as the leg object is adding onto self.frame_queue = [] # locking object to ensure no collisions happen around the frame queue self._frame_queue_lock = threading.Lock() # locking object to ensure no collisions happen around self.curr_servo_angle/self.curr_servo_pwm # might not be necessary but couldn't hurt, technically both the leg thread and leg object can write into them self._curr_pos_lock = threading.Lock() # create and launch the thread for this leg # note: this MUST be daemon type because the thread is designed to run forever... the only way to stop it is by stopping its parent, which means it must be a daemon! 
# it should be able to access all of this leg's other member variables and functions # threadname = "framethread" + str(leg_num) self.framethread = threading.Thread(name="framethread_" + str(leg_num), target=ft.Frame_Thread_Func, args=(self, LEG_THREAD_DEBUG), daemon=True) # just to be safe, don't start the thread until the end of __init__ # set the channels to use for the PWM object, comes in as a list self.pwm_channels = channels # now, assign the correct constants to limit PWM values self.SERVO_PWM_LIMITS = [[0,1],[0,1],[0,1]] if leg_num == LEG_0: # leg: out in, up down, right left self.SERVO_PWM_LIMITS[TIP_SERVO] = [c_0_TIP_SERVO_OUT, c_0_TIP_SERVO_IN] self.SERVO_PWM_LIMITS[MID_SERVO] = [c_0_MID_SERVO_UP, c_0_MID_SERVO_DOWN] self.SERVO_PWM_LIMITS[ROT_SERVO] = [c_0_ROT_SERVO_RIGHT, c_0_ROT_SERVO_LEFT] elif leg_num == LEG_1: self.SERVO_PWM_LIMITS[TIP_SERVO] = [c_1_TIP_SERVO_OUT, c_1_TIP_SERVO_IN] self.SERVO_PWM_LIMITS[MID_SERVO] = [c_1_MID_SERVO_UP, c_1_MID_SERVO_DOWN] self.SERVO_PWM_LIMITS[ROT_SERVO] = [c_1_ROT_SERVO_RIGHT, c_1_ROT_SERVO_LEFT] elif leg_num == LEG_2: self.SERVO_PWM_LIMITS[TIP_SERVO] = [c_2_TIP_SERVO_OUT, c_2_TIP_SERVO_IN] self.SERVO_PWM_LIMITS[MID_SERVO] = [c_2_MID_SERVO_UP, c_2_MID_SERVO_DOWN] self.SERVO_PWM_LIMITS[ROT_SERVO] = [c_2_ROT_SERVO_RIGHT, c_2_ROT_SERVO_LEFT] elif leg_num == LEG_3: self.SERVO_PWM_LIMITS[TIP_SERVO] = [c_3_TIP_SERVO_OUT, c_3_TIP_SERVO_IN] self.SERVO_PWM_LIMITS[MID_SERVO] = [c_3_MID_SERVO_UP, c_3_MID_SERVO_DOWN] self.SERVO_PWM_LIMITS[ROT_SERVO] = [c_3_ROT_SERVO_RIGHT, c_3_ROT_SERVO_LEFT] elif leg_num == LEG_4: self.SERVO_PWM_LIMITS[TIP_SERVO] = [c_4_TIP_SERVO_OUT, c_4_TIP_SERVO_IN] self.SERVO_PWM_LIMITS[MID_SERVO] = [c_4_MID_SERVO_UP, c_4_MID_SERVO_DOWN] self.SERVO_PWM_LIMITS[ROT_SERVO] = [c_4_ROT_SERVO_RIGHT, c_4_ROT_SERVO_LEFT] elif leg_num == LEG_5: self.SERVO_PWM_LIMITS[TIP_SERVO] = [c_5_TIP_SERVO_OUT, c_5_TIP_SERVO_IN] self.SERVO_PWM_LIMITS[MID_SERVO] = [c_5_MID_SERVO_UP, c_5_MID_SERVO_DOWN] 
self.SERVO_PWM_LIMITS[ROT_SERVO] = [c_5_ROT_SERVO_RIGHT, c_5_ROT_SERVO_LEFT] elif leg_num == ARM_R: # arm: out in, out in, up down self.SERVO_PWM_LIMITS[TIP_SERVO] = [c_R_ARM_TIP_SERVO_OUT, c_R_ARM_TIP_SERVO_IN] self.SERVO_PWM_LIMITS[MID_SERVO] = [c_R_ARM_MID_SERVO_OUT, c_R_ARM_MID_SERVO_IN] self.SERVO_PWM_LIMITS[ROT_SERVO] = [c_R_ARM_ROT_SERVO_UP, c_R_ARM_ROT_SERVO_DOWN] elif leg_num == ARM_L: self.SERVO_PWM_LIMITS[TIP_SERVO] = [c_L_ARM_TIP_SERVO_OUT, c_L_ARM_TIP_SERVO_IN] self.SERVO_PWM_LIMITS[MID_SERVO] = [c_L_ARM_MID_SERVO_OUT, c_L_ARM_MID_SERVO_IN] self.SERVO_PWM_LIMITS[ROT_SERVO] = [c_L_ARM_ROT_SERVO_UP, c_L_ARM_ROT_SERVO_DOWN] elif leg_num == WAIST: # waist: left right self.SERVO_PWM_LIMITS[WAIST_SERVO] = [c_WAIST_SERVO_PWM_LEFT, c_WAIST_SERVO_PWM_RIGHT] self.SERVO_ANGLE_LIMITS = [[0,1],[0,1],[0,1]] if leg_num == ARM_L or leg_num == ARM_R: # arm: out in, out in, up down self.SERVO_ANGLE_LIMITS[TIP_SERVO] = [ARM_TIP_SERVO_OUT_ANGLE, ARM_TIP_SERVO_IN_ANGLE] self.SERVO_ANGLE_LIMITS[MID_SERVO] = [ARM_MID_SERVO_OUT_ANGLE, ARM_MID_SERVO_IN_ANGLE] self.SERVO_ANGLE_LIMITS[ROT_SERVO] = [ARM_ROT_SERVO_UP_ANGLE, ARM_ROT_SERVO_DOWN_ANGLE] elif leg_num == WAIST: # waist: left right self.SERVO_ANGLE_LIMITS[WAIST_SERVO] = [WAIST_SERVO_LEFT_ANGLE, WAIST_SERVO_RIGHT_ANGLE] else: # leg: out in, up down, right left self.SERVO_ANGLE_LIMITS[TIP_SERVO] = [LEG_TIP_SERVO_UP_ANGLE, LEG_TIP_SERVO_DOWN_ANGLE] self.SERVO_ANGLE_LIMITS[MID_SERVO] = [LEG_MID_SERVO_UP_ANGLE, LEG_MID_SERVO_DOWN_ANGLE] self.SERVO_ANGLE_LIMITS[ROT_SERVO] = [LEG_ROT_SERVO_RIGHT_ANGLE, LEG_ROT_SERVO_LEFT_ANGLE] # declare these member variables, immediately have value overwritten... 
self.curr_servo_angle = [-1.0, -1.0, -1.0] self.curr_servo_pwm = [-1, -1, -1] # ...this code should overwrite the "-1"s with sensible values on bootup # NEEDS to use the non-thread versions if leg_num == ARM_L or leg_num == ARM_R: # default position is with arms fully extended self.set_leg_position(ARMS_ARM_TABLE["STRAIGHT_OUT"]) elif leg_num == WAIST: self.set_servo_angle(90, WAIST_SERVO) else: # default position is 90-degree crouch # self.set_leg_position(LEG_MISC_TABLE["STRAIGHT_OUT"]) self.set_leg_position(LEG_MISC_TABLE["INIT"]) # start the thread self.framethread.start() def print_self(self): print("leg uid : " + str(self.uid) + " ===========================") print("on channels : tip/mid/rot = " + str(self.pwm_channels)) print("servo PWMs: tip/mid/rot = " + str(self.curr_servo_pwm)) print("servo angles: tip/mid/rot = " + str(self.curr_servo_angle)) print("frame queue size: " + str(len(self.frame_queue))) # conversion functions: use linear mapping from input to output def angle_to_pwm(self, angle: float, servo: int) -> int: if servo < 0 or servo > 2: print("ERR#1: INVALID SERVO INDEX! valid values are 0 to 2") print("leg="+str(self.uid)+", servo="+str(servo)+", angle="+str(angle)) return INV_PARAM r = linear_map(self.SERVO_ANGLE_LIMITS[servo][0], self.SERVO_PWM_LIMITS[servo][0], self.SERVO_ANGLE_LIMITS[servo][1], self.SERVO_PWM_LIMITS[servo][1], angle) return round(r) def pwm_to_angle(self, pwm: int, servo: int) -> float: if servo < 0 or servo > 2: print("ERR#2: INVALID SERVO INDEX! valid values are 0 to 2") print("leg="+str(self.uid)+", servo="+str(servo)+", pwm="+str(pwm)) return INV_PARAM return linear_map(self.SERVO_PWM_LIMITS[servo][0], self.SERVO_ANGLE_LIMITS[servo][0], self.SERVO_PWM_LIMITS[servo][1], self.SERVO_ANGLE_LIMITS[servo][1], pwm) def percent_to_angle(self, percent: float, servo: int) -> float: # maps 0-100 to each servo's min and max angle values if servo < 0 or servo > 2: print("ERR#3: INVALID SERVO INDEX! 
valid values are 0 to 2") print("leg="+str(self.uid)+", servo="+str(servo)+", percent="+str(percent)) return INV_PARAM return linear_map(100, self.SERVO_ANGLE_LIMITS[servo][0], 0, self.SERVO_ANGLE_LIMITS[servo][1], percent) # convert-then-set functions: def set_servo_percent(self, percent: float, servo: int): # convert and pass off to set_servo_angle self.set_servo_angle(self.percent_to_angle(percent, servo), servo) def set_servo_pwm(self, pwm: int, servo: int): # convert and pass off to set_servo_angle self.set_servo_angle(self.angle_to_pwm(pwm, servo), servo) # the old-fashioned "do the thing" command: clamps value to safety limits, ensures it won't collide with any thread operations, and calls do_set_servo_angle def set_servo_angle(self, angle: float, servo: int): if servo < 0 or servo > 2: # ensure servo index is valid print("ERR#4: INVALID SERVO INDEX! valid values are 0 to 2") print("leg="+str(self.uid)+", servo="+str(servo)+", angle="+str(angle)) return INV_PARAM # wait until running_flag is clear (idle_flag is set) # this ensures that it won't conflict with the thread if it is running # you SHOULDN'T be using both the thread and the direct-set method, but better to be safe than sorry self.idle_flag.wait() # safety checking for each servo safe_angle = bidirectional_clamp(angle, self.SERVO_ANGLE_LIMITS[servo][0], self.SERVO_ANGLE_LIMITS[servo][1]) return self.do_set_servo_angle(safe_angle, servo) # creates a temporary "leg position" object to give to the leg_position_thread function # changes the given servo to the given position over the given time # OTHER MOTORS (on this leg) CANNOT CHANGE DURING THIS TIME, to change multiple motors at a time use set_leg_position_thread def set_servo_angle_thread(self, angle: float, servo: int, durr: float): if servo < 0 or servo > 2: # ensure servo index is valid print("ERR#5: INVALID SERVO INDEX! 
valid values are 0 to 2") print("leg="+str(self.uid)+", servo="+str(servo)+", angle="+str(angle)) return INV_PARAM # explicitly make a copy of current angles v = list(self.curr_servo_angle) # modify one entry of the copy v[servo] = angle # init the Leg_Position from the list L = Leg_Position(v[ROT_SERVO], v[MID_SERVO], v[TIP_SERVO]) self.set_leg_position_thread(L, durr) # uses the "leg_position" objects, immediate set (no threading) def set_leg_position(self, leg_position: Leg_Position): for s in GROUP_ALL_SERVOS: self.set_servo_angle(leg_position.list[s], s) # safety clamp (in angle space) # interpolate (in angle space) # adds frames to the frame queue (with lock) # sets the "running" flag unconditionally (note: no harm in setting an already set flag) # * thread will jump in with "do_set_servo_angle" when it is the correct time def set_leg_position_thread(self, leg_position: Leg_Position, durr: float): # assemble dest from the leg position dest = [0, 0, 0] # safety checking for each motor for s in GROUP_ALL_SERVOS: dest[s] = bidirectional_clamp(leg_position.list[s], self.SERVO_ANGLE_LIMITS[s][0], self.SERVO_ANGLE_LIMITS[s][1]) # if there is a queued interpolation frame, interpolate from the final frame in the queue to the desired pose. # otherwise, interpolate from current position. curr = [] with self._frame_queue_lock: if len(self.frame_queue) > 0: # be sure it is
tw = textwrap.TextWrapper(break_long_words=False, break_on_hyphens=False, width=width, initial_indent=prefix, subsequent_indent=indent) result = '\n'.join(tw.wrap(text.strip())) # XXX: Remove the dummy prefix. if indent_only: result = result[len(indent):] return result def max_name(names): if len(names) == 0: return 0 return max(len(name) for name in names) def update_params_map(parent, ret_map, width=62): """Updates `ret_map` with name:desc key-value pairs extracted from Doxygen XML node `parent`. """ params = collections.OrderedDict() for node in parent.childNodes: if node.nodeType == node.TEXT_NODE: continue name_node = find_first(node, 'parametername') if name_node.getAttribute('direction') == 'out': continue name = get_text(name_node) if name in param_exclude: continue params[name.strip()] = node max_name_len = max_name(params.keys()) + 8 # `ret_map` is a name:desc map. for name, node in params.items(): desc = '' desc_node = get_child(node, 'parameterdescription') if desc_node: desc = fmt_node_as_vimhelp( desc_node, width=width, indent=(' ' * max_name_len)) ret_map[name] = desc return ret_map def render_node(n, text, prefix='', indent='', width=62): """Renders a node as Vim help text, recursively traversing all descendants.""" global fmt_vimhelp global has_seen_preformatted def ind(s): return s if fmt_vimhelp else '' text = '' # space_preceding = (len(text) > 0 and ' ' == text[-1][-1]) # text += (int(not space_preceding) * ' ') if n.nodeName == 'preformatted': o = get_text(n, preformatted=True) ensure_nl = '' if o[-1] == '\n' else '\n' text += '>{}{}\n<'.format(ensure_nl, o) elif is_inline(n): text = doc_wrap(get_text(n), indent=indent, width=width) elif n.nodeName == 'verbatim': # TODO: currently we don't use this. The "[verbatim]" hint is there as # a reminder that we must decide how to format this if we do use it. 
text += ' [verbatim] {}'.format(get_text(n)) elif n.nodeName == 'listitem': for c in n.childNodes: result = render_node( c, text, indent=indent + (' ' * len(prefix)), width=width ) if is_blank(result): continue text += indent + prefix + result elif n.nodeName in ('para', 'heading'): for c in n.childNodes: if (is_inline(c) and '' != get_text(c).strip() and text and ' ' != text[-1]): text += ' ' text += render_node(c, text, indent=indent, width=width) elif n.nodeName == 'itemizedlist': for c in n.childNodes: text += '{}\n'.format(render_node(c, text, prefix='• ', indent=indent, width=width)) elif n.nodeName == 'orderedlist': i = 1 for c in n.childNodes: if is_blank(get_text(c)): text += '\n' continue text += '{}\n'.format(render_node(c, text, prefix='{}. '.format(i), indent=indent, width=width)) i = i + 1 elif n.nodeName == 'simplesect' and 'note' == n.getAttribute('kind'): text += '\nNote:\n ' for c in n.childNodes: text += render_node(c, text, indent=' ', width=width) text += '\n' elif n.nodeName == 'simplesect' and 'warning' == n.getAttribute('kind'): text += 'Warning:\n ' for c in n.childNodes: text += render_node(c, text, indent=' ', width=width) text += '\n' elif (n.nodeName == 'simplesect' and n.getAttribute('kind') in ('return', 'see')): text += ind(' ') for c in n.childNodes: text += render_node(c, text, indent=' ', width=width) elif n.nodeName == 'computeroutput': return get_text(n) else: raise RuntimeError('unhandled node type: {}\n{}'.format( n.nodeName, n.toprettyxml(indent=' ', newl='\n'))) return text def para_as_map(parent, indent='', width=62): """Extracts a Doxygen XML <para> node to a map. Keys: 'text': Text from this <para> element 'params': <parameterlist> map 'return': List of @return strings 'seealso': List of @see strings 'xrefs': ? """ chunks = { 'text': '', 'params': collections.OrderedDict(), 'return': [], 'seealso': [], 'xrefs': [] } # Ordered dict of ordered lists. 
groups = collections.OrderedDict([ ('params', []), ('return', []), ('seealso', []), ('xrefs', []), ]) # Gather nodes into groups. Mostly this is because we want "parameterlist" # nodes to appear together. text = '' kind = '' last = '' if is_inline(parent): # Flatten inline text from a tree of non-block nodes. text = doc_wrap(render_node(parent, ""), indent=indent, width=width) else: prev = None # Previous node for child in parent.childNodes: if child.nodeName == 'parameterlist': groups['params'].append(child) elif child.nodeName == 'xrefsect': groups['xrefs'].append(child) elif child.nodeName == 'simplesect': last = kind kind = child.getAttribute('kind') if kind == 'return' or (kind == 'note' and last == 'return'): groups['return'].append(child) elif kind == 'see': groups['seealso'].append(child) elif kind in ('note', 'warning'): text += render_node(child, text, indent=indent, width=width) else: raise RuntimeError('unhandled simplesect: {}\n{}'.format( child.nodeName, child.toprettyxml(indent=' ', newl='\n'))) else: if (prev is not None and is_inline(self_or_child(prev)) and is_inline(self_or_child(child)) and '' != get_text(self_or_child(child)).strip() and text and ' ' != text[-1]): text += ' ' text += render_node(child, text, indent=indent, width=width) prev = child chunks['text'] += text # Generate map from the gathered items. if len(groups['params']) > 0: for child in groups['params']: update_params_map(child, ret_map=chunks['params'], width=width) for child in groups['return']: chunks['return'].append(render_node( child, '', indent=indent, width=width)) for child in groups['seealso']: chunks['seealso'].append(render_node( child, '', indent=indent, width=width)) for child in groups['xrefs']: # XXX: Add a space (or any char) to `title` here, otherwise xrefs # ("Deprecated" section) acts very weird... 
title = get_text(get_child(child, 'xreftitle')) + ' ' xrefs.add(title) xrefdesc = get_text(get_child(child, 'xrefdescription')) chunks['xrefs'].append(doc_wrap(xrefdesc, prefix='{}: '.format(title), width=width) + '\n') return chunks def fmt_node_as_vimhelp(parent, width=62, indent=''): """Renders (nested) Doxygen <para> nodes as Vim :help text. NB: Blank lines in a docstring manifest as <para> tags. """ rendered_blocks = [] def fmt_param_doc(m): """Renders a params map as Vim :help text.""" max_name_len = max_name(m.keys()) + 4 out = '' for name, desc in m.items(): name = ' {}'.format('{{{}}}'.format(name).ljust(max_name_len)) out += '{}{}\n'.format(name, desc) return out.rstrip() def has_nonexcluded_params(m): """Returns true if any of the given params has at least one non-excluded item.""" if fmt_param_doc(m) != '': return True for child in parent.childNodes: para = para_as_map(child, indent, width) # Generate text from the gathered items. chunks = [para['text']] if len(para['params']) > 0 and has_nonexcluded_params(para['params']): chunks.append('\nParameters: ~') chunks.append(fmt_param_doc(para['params'])) if len(para['return']) > 0: chunks.append('\nReturn: ~') for s in para['return']: chunks.append(s) if len(para['seealso']) > 0: chunks.append('\nSee also: ~') for s in para['seealso']: chunks.append(s) for s in para['xrefs']: chunks.append(s) rendered_blocks.append(clean_lines('\n'.join(chunks).strip())) rendered_blocks.append('') return clean_lines('\n'.join(rendered_blocks).strip()) def extract_from_xml(filename, target, width): """Extracts Doxygen info as maps without formatting the text. Returns two maps: 1. Functions 2. Deprecated functions The `fmt_vimhelp` global controls some special cases for use by fmt_doxygen_xml_as_vimhelp(). (TODO: ugly :) """ global xrefs global fmt_vimhelp xrefs.clear() fns = {} # Map of func_name:docstring. deprecated_fns = {} # Map of func_name:docstring. 
dom = minidom.parse(filename) compoundname = get_text(dom.getElementsByTagName('compoundname')[0]) for member in dom.getElementsByTagName('memberdef'): if member.getAttribute('static') == 'yes' or \ member.getAttribute('kind') != 'function' or \ member.getAttribute('prot') == 'private' or \ get_text(get_child(member, 'name')).startswith('_'): continue loc = find_first(member, 'location') if 'private' in loc.getAttribute('file'): continue return_type = get_text(get_child(member, 'type')) if return_type == '': continue if return_type.startswith(('ArrayOf', 'DictionaryOf')): parts = return_type.strip('_').split('_') return_type = '{}({})'.format(parts[0], ', '.join(parts[1:])) name = get_text(get_child(member, 'name')) annotations = get_text(get_child(member, 'argsstring')) if annotations and ')' in annotations: annotations = annotations.rsplit(')', 1)[-1].strip() # XXX: (doxygen 1.8.11) 'argsstring' only includes attributes of # non-void functions. Special-case void functions here. if name == 'nvim_get_mode' and len(annotations) == 0: annotations += 'FUNC_API_FAST' annotations = filter(None, map(lambda x: annotation_map.get(x), annotations.split())) params = [] type_length = 0 for param in iter_children(member, 'param'): param_type = get_text(get_child(param, 'type')).strip() param_name = '' declname = get_child(param, 'declname') if declname: param_name = get_text(declname).strip() elif CONFIG[target]['mode'] == 'lua': # XXX: this is what lua2dox gives us... param_name = param_type param_type = '' if param_name in param_exclude: continue if fmt_vimhelp and param_type.endswith('*'): param_type = param_type.strip('* ') param_name = '*' + param_name type_length = max(type_length, len(param_type)) params.append((param_type, param_name)) # Handle Object Oriented style functions here. 
# We make sure they have "self" in the parameters, # and a parent function if return_type.startswith('function') \ and len(return_type.split(' ')) >= 2 \ and any(x[1] == 'self' for x in params): split_return = return_type.split(' ') name = f'{split_return[1]}:{name}' c_args = [] for param_type, param_name in params: c_args.append((' ' if fmt_vimhelp else '') + ( '%s %s' % (param_type.ljust(type_length), param_name)).strip()) if not fmt_vimhelp: pass else: fstem = '?' if '.' in compoundname: fstem = compoundname.split('.')[0] fstem = CONFIG[target]['module_override'].get(fstem, fstem) vimtag = CONFIG[target]['fn_helptag_fmt'](fstem, name) prefix = '%s(' % name suffix = '%s)' % ', '.join('{%s}' % a[1] for a in params if a[0] not in ('void', 'Error')) if not fmt_vimhelp: c_decl = '%s %s(%s);' % (return_type, name, ', '.join(c_args)) signature = prefix + suffix else: c_decl = textwrap.indent('%s %s(\n%s\n);' % (return_type, name, ',\n'.join(c_args)), ' ') # Minimum 8 chars between signature and vimtag lhs = (width - 8) - len(vimtag) if len(prefix) + len(suffix) > lhs: signature = vimtag.rjust(width) + '\n' signature
import os import copy import time import glob import re import datetime import argparse import csv import math from pathlib import Path from scipy.stats import gaussian_kde from XAI_utils.tp_fp import * from XAI_utils.metrics import * # XAI related imports import matplotlib.pyplot as plt import matplotlib.patches as patches import numpy as np import pandas as pd def parse_config(): parser = argparse.ArgumentParser(description='arg parser') parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment') parser.add_argument('--XC_path', type=str, default=None, required=True, help='the folder where all XC values are saved') parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader') args = parser.parse_args() return args def evaluate_metric(score_arr, label_arr, save_path, file_name, cols, score_id, thresh, cls_names, cls_labels): """ :param score_arr: array of the score being evaluated :param label_arr: array indicating TP/FP boxes :param save_path: :param file_name: :param cols: :param score_id: name of the score being evaluated (e.g., class score, XC, etc.) :param thresh: XC threshold :param cls_names: list of class names specific to a dataset :param cls_labels: class labels for the boxes (e.g., car, pedestrian, cyclist, etc.) 
:return: """ # plot_roc(score_arr, label_arr, save_path=save_path, thresh=thresh, measure=score_id) # plot_pr(score_arr, label_arr, save_path=save_path, thresh=thresh, measure=score_id) # plot_pr(score_arr, label_arr, save_path=save_path, thresh=thresh, measure=score_id, flip=True) if score_id == 'far_attr' or score_id == 'PAP_norm': score_arr = -1*score_arr eval_dict = get_summary_statistics(score_arr, label_arr, thresh, score_id, "all") eval_file = os.path.join(save_path, file_name) print("{} objects in total".format(len(score_arr))) cls_eval_dicts = [] class_wise_score = [] class_wise_label = [] with open(eval_file, 'w') as evalfile: writer = csv.DictWriter(evalfile, fieldnames=cols) writer.writeheader() writer.writerow(eval_dict) for cls in range(3): cls_name = cls_names[cls] cls_file_name = "{}_{}".format(cls_name, file_name) positions = np.where(cls_labels == cls) cls_score_arr = score_arr[positions] cls_tp_fp_arr = label_arr[positions] class_wise_score.append(cls_score_arr) class_wise_label.append(cls_tp_fp_arr) print("{} {} objects".format(len(cls_score_arr), cls_name)) # plot_roc(cls_score_arr, cls_tp_fp_arr, save_path=save_path, thresh=thresh, measure=score_id, cls_name=cls_name) # plot_pr(cls_score_arr, cls_tp_fp_arr, save_path=save_path, thresh=thresh, measure=score_id, cls_name=cls_name) # plot_pr(cls_score_arr, cls_tp_fp_arr, save_path=save_path, thresh=thresh, measure=score_id, cls_name=cls_name, # flip=True) cls_eval_dict = get_summary_statistics(cls_score_arr, cls_tp_fp_arr, thresh, score_id, cls_name) cls_eval_dicts.append(eval_dict) # eval_file = os.path.join(save_path, cls_file_name) writer.writerow(cls_eval_dict) plot_multi_roc(cls_names, score_arr, label_arr, class_wise_score, class_wise_label, save_path=save_path, thresh=thresh, measure=score_id) plot_multi_pr(cls_names, score_arr, label_arr, class_wise_score, class_wise_label, save_path=save_path, thresh=thresh, measure=score_id) plot_multi_pr(cls_names, score_arr, label_arr, 
class_wise_score, class_wise_label, save_path=save_path, thresh=thresh, measure=score_id, flip=True) return eval_dict, cls_eval_dicts def wsum_experiment(all_cls_score_arr, XC_arr, label_arr, save_path, file_name, cols, score_id, thresh, cls_names, cls_labels): """ :param cls_score_arr: array of the class scores :param label_arr: array indicating TP/FP boxes :param save_path: :param file_name: :param cols: :param score_id: name of the score being evaluated (e.g., class score, XC, etc.) :param thresh: XC threshold :param cls_names: list of class names specific to a dataset :param cls_labels: class labels for the boxes (e.g., car, pedestrian, cyclist, etc.) :return: """ eval_file = os.path.join(save_path, file_name) min_fpr_95 = 100 max_aupr = 0 max_aupr_opposite = 0 max_auroc = 0 min_fpr_95_cls = [100, 100, 100] max_aupr_cls = [0, 0, 0] max_aupr_opposite_cls = [0, 0, 0] max_auroc_cls = [0, 0, 0] min_fpr_95_w = -1 max_aupr_w = -1 max_aupr_opposite_w = -1 max_auroc_w = -1 min_fpr_95_cls_w = [-1, -1, -1] max_aupr_cls_w = [-1, -1, -1] max_aupr_opposite_cls_w = [-1, -1, -1] max_auroc_cls_w = [-1, -1, -1] with open(eval_file, 'w') as evalfile: writer = csv.DictWriter(evalfile, fieldnames=cols) writer.writeheader() for i in range(0, 101, 1): w_XC = i * 0.01 w_cls_score = 1.00 - w_XC score_arr = np.multiply(XC_arr, w_XC) + np.multiply(all_cls_score_arr, w_cls_score) eval_dict = get_summary_statistics_wsum(score_arr, label_arr, thresh, score_id, "all", w_XC, w_cls_score) # print("{} objects in total".format(len(score_arr))) if eval_dict['fpr_at_95_tpr'] < min_fpr_95: min_fpr_95 = eval_dict['fpr_at_95_tpr'] min_fpr_95_w = w_XC if eval_dict['auroc'] > max_auroc: max_auroc = eval_dict['auroc'] max_auroc_w = w_XC if eval_dict['aupr_in'] > max_aupr: max_aupr = eval_dict['aupr_in'] max_aupr_w = w_XC if eval_dict['aupr_out'] > max_aupr_opposite: max_aupr_opposite = eval_dict['aupr_out'] max_aupr_opposite_w = w_XC cls_eval_dicts = [] writer.writerow(eval_dict) for cls in range(3): 
cls_name = cls_names[cls] positions = np.where(cls_labels == cls) cls_score_arr = score_arr[positions] cls_tp_fp_arr = label_arr[positions] cls_eval_dict = get_summary_statistics_wsum(cls_score_arr, cls_tp_fp_arr, thresh, score_id, cls_name, w_XC, w_cls_score) if cls_eval_dict['fpr_at_95_tpr'] < min_fpr_95_cls[cls]: min_fpr_95_cls[cls] = cls_eval_dict['fpr_at_95_tpr'] min_fpr_95_cls_w[cls] = w_XC if cls_eval_dict['auroc'] > max_auroc_cls[cls]: max_auroc_cls[cls] = cls_eval_dict['auroc'] max_auroc_cls_w[cls] = w_XC if cls_eval_dict['aupr_in'] > max_aupr_cls[cls]: max_aupr_cls[cls] = cls_eval_dict['aupr_in'] max_aupr_cls_w[cls] = w_XC if cls_eval_dict['aupr_out'] > max_aupr_opposite_cls[cls]: max_aupr_opposite_cls[cls] = cls_eval_dict['aupr_out'] max_aupr_opposite_cls_w[cls] = w_XC cls_eval_dicts.append(eval_dict) writer.writerow(cls_eval_dict) result_dict = { 'min_fpr_95': min_fpr_95, 'max_aupr': max_aupr, 'max_aupr_opposite': max_aupr_opposite, 'max_auroc': max_auroc, 'min_fpr_95_cls': min_fpr_95_cls, 'max_aupr_cls': max_aupr_cls, 'max_aupr_opposite_cls': max_aupr_opposite_cls, 'max_auroc_cls': max_auroc_cls, 'min_fpr_95_w': min_fpr_95_w, 'max_aupr_w': max_aupr_w, 'max_aupr_opposite_w': max_aupr_opposite_w, 'max_auroc_w': max_auroc_w, 'min_fpr_95_cls_w': min_fpr_95_cls_w, 'max_aupr_cls_w': max_aupr_cls_w, 'max_aupr_opposite_cls_w': max_aupr_opposite_cls_w, 'max_auroc_cls_w': max_auroc_cls_w } for entry in result_dict: print("{}: {}".format(entry, result_dict[entry])) def main(): """ important variables: :return: """ use_XQ = False #TODO: change the class and label terms dist_n_pts = False # If True, we are evaluating dist and pts as well XC_only = False # If False, then evaluate the top class score as well, otherwise, skip top class score skip_xc = False # If False, evaluate the XC score scatter_plot = False legacy_file = True # If True, skip far_attr and pap w_sum_explore = False dataset_name = "KITTI" cls_name_list = [] xc_term = 'XQ' score_term = "pred_score" # 
options: class_score pred_score label_term = "pred_label" # options: class_label pred_label pts_term = 'pts' # options: pts pts_in_box dist_term = 'dist' # options: dist dist_to_ego if not use_XQ: xc_term = 'xc' if dataset_name == "KITTI": cls_name_list = ['Car', 'Pedestrian', 'Cyclist'] elif dataset_name == "CADC": cls_name_list = ['Car', 'Pedestrian', 'Truck'] elif dataset_name == "Waymo": cls_name_list = ['Vehicle', 'Pedestrian', 'Cyclist'] use_margin = True XAI_sum = False XAI_cnt = not XAI_sum ignore_thresh_list = [0.0] # ignore_thresh_list = [0.0, 0.0333, 0.0667, 0.1, 0.1333, 0.1667, 0.2] start_time = time.time() high_rez = True args = parse_config() # get the date and time to create a folder for the specific time when this script is run now = datetime.datetime.now() dt_string = now.strftime("%b_%d_%Y_%H_%M_%S") # get current working directory cwd = os.getcwd() rez_string = 'LowResolution' if high_rez: rez_string = 'HighResolution' # create directory to store results just for this run, include the method in folder name XC_path = args.XC_path XC_folder = XC_path.split("XAI_results/", 1)[1] print("XC_folder: {}".format(XC_folder)) metric_result_path = os.path.join(cwd, 'XAI_results/{}_metrics_analysis_{}'.format(XC_folder, dt_string)) print('\nmetric_result_path: {}'.format(metric_result_path)) metric_res_path_str = str(metric_result_path) os.mkdir(metric_result_path) os.chmod(metric_res_path_str, 0o777) print('\n ok so far') try: """ 1. Read in XC from the TP file 2. Read in XC from the FP file 3. 
Concatenate into one array, with TP labeled 1 and FP labeled 0 """ # print("start trying") XC_thresh_list = ['0.1'] # ['0.0', '0.0333', '0.0667', '0.1', '0.1333', '0.1667', '0.2'] for thresh in XC_thresh_list: print("\nthresh: {}".format(thresh)) XC_list = [] far_attr_list = [] PAP_list = [] score_list = [] TP_FP_label = [] cls_label_list = [] pts_list = [] dist_list = [] found = False tp_name = "tp_xq_thresh{}.csv".format(thresh) fp_name = "fp_xq_thresh{}.csv".format(thresh) if skip_xc: tp_name = "tp_pts.csv" fp_name = "fp_pts.csv" tp_data = None fp_data = None print('\n created file names to search') for root, dirs, files in os.walk(XC_path): print('processing files: ') for name in files: print(os.path.join(root, name)) if name == tp_name: found = True tp_data = pd.read_csv(os.path.join(root, name)) if not skip_xc: # print("type(tp_data['XQ']): {}".format(type(tp_data['XQ']))) XC_list.append(tp_data[xc_term]) if not legacy_file: far_attr_list.append(tp_data['far_attr']) PAP_list.append(tp_data['pap']) score_list.append(tp_data[score_term]) TP_FP_label.append(np.ones(len(tp_data[score_term]))) cls_label_list.append(tp_data[label_term]) if dist_n_pts: pts_list.append(tp_data[pts_term]) dist_list.append(-1 * tp_data[dist_term]) print("Number of TP instances for each class:") print("class 0: {}".format(np.count_nonzero(tp_data[label_term] == 0))) print("class 1: {}".format(np.count_nonzero(tp_data[label_term] == 1))) print("class 2: {}".format(np.count_nonzero(tp_data[label_term] == 2))) elif name == fp_name: found = True fp_data = pd.read_csv(os.path.join(root, name)) if not skip_xc: XC_list.append(fp_data[xc_term]) if not legacy_file: far_attr_list.append(fp_data['far_attr']) PAP_list.append(fp_data['pap']) score_list.append(fp_data[score_term]) TP_FP_label.append(np.zeros(len(fp_data[score_term]))) cls_label_list.append(fp_data[label_term]) if dist_n_pts: pts_list.append(fp_data[pts_term]) dist_list.append(-1 * fp_data[dist_term]) print("Number of FP instances for 
each class:") print("class 0: {}".format(np.count_nonzero(fp_data[label_term] == 0))) print("class 1: {}".format(np.count_nonzero(fp_data[label_term] == 1))) print("class 2: {}".format(np.count_nonzero(fp_data[label_term] == 2))) if found: if not skip_xc: XC_arr = np.concatenate(XC_list) print("len(XC_arr): {}".format(len(XC_arr))) score_arr = np.concatenate(score_list) TP_FP_arr = np.concatenate(TP_FP_label) cls_label_arr = np.concatenate(cls_label_list) print("len(TP_FP_arr): {}".format(len(TP_FP_arr))) eval_cols = ['XQ_thresh', 'measure', 'class', 'fpr_at_95_tpr', 'detection_error', 'auroc', 'aupr_out', 'aupr_in'] XC_eval_file = "XQ_eval_metrics_thresh{}.csv".format(thresh) far_attr_eval_file = "far_attr_eval_metrics_thresh{}.csv".format(thresh) PAP_eval_file = "PAP_eval_metrics_thresh{}.csv".format(thresh) pts_eval_file = "pts_eval_metrics_thresh{}.csv".format(thresh) dist_eval_file = "dist_eval_metrics_thresh{}.csv".format(thresh) if not skip_xc: XC_dict, XC_cls_dicts = evaluate_metric(XC_arr, TP_FP_arr, metric_result_path, XC_eval_file, eval_cols, 'XQ', thresh, cls_name_list, cls_label_arr) if not legacy_file: far_attr_arr = np.concatenate(far_attr_list) PAP_arr = np.concatenate(PAP_list) far_attr_dict, far_attr_cls_dicts = evaluate_metric(far_attr_arr, TP_FP_arr, metric_result_path, far_attr_eval_file, eval_cols, 'far_attr', thresh, cls_name_list, cls_label_arr) PAP_dict, PAP_cls_dicts = evaluate_metric(PAP_arr, TP_FP_arr, metric_result_path, PAP_eval_file, eval_cols, 'PAP', thresh, cls_name_list, cls_label_arr) if dist_n_pts: pts_arr = np.concatenate(pts_list) dist_arr = np.concatenate(dist_list) pts_dict, pts_cls_dicts = evaluate_metric( pts_arr, TP_FP_arr, metric_result_path, pts_eval_file, eval_cols, 'pts', thresh, cls_name_list, cls_label_arr) dist_dict, dist_cls_dicts = evaluate_metric( dist_arr, TP_FP_arr, metric_result_path, dist_eval_file, eval_cols, 'dist', thresh, cls_name_list, cls_label_arr) # cls_score_arr, XC_arr, label_arr, save_path, 
file_name, cols, score_id, thresh, cls_names, cls_labels if w_sum_explore: exp_cols = ['w_xq', 'w_cls_score', 'XQ_thresh', 'measure', 'class', 'fpr_at_95_tpr',
# NOTE(review): this chunk begins mid-way through DataStream.write(); the lines
# below are the tail of that method. The call whose argument list opens before
# this chunk closes on the first line; indentation here is reconstructed from a
# whitespace-mangled source — verify nesting against the original file.
                        os.path.join(filepath,filename))
                if len(lst) > 0 or ndtype:
                    if len(newst.ndarray[0]) > 0 or len(newst) > 1:
                        logger.info('write: writing %s' % filename)
                        #print("Here", num2date(newst.ndarray[0][0]), newst.ndarray)
                        success = writeFormat(newst, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,version=version,gin=gin,datatype=datatype, useg=useg,skipcompression=skipcompression,compression=compression, addflags=addflags,headonly=headonly,kind=kind)
                # Advance the coverage window to the next file interval.
                starttime = endtime
                endtime = endtime + cov
            t5 = datetime.utcnow()
            #print "write - written:", t5-t3
            #print "write - End:", t5
        else:
            # Single-file branch: write everything into one file.
            filename = filenamebegins + filenameends
            # remove any eventually existing null byte
            filename = filename.replace('\x00','')
            if debug:
                print ("Writing file:", filename)
            success = writeFormat(self, os.path.join(filepath,filename),format_type,mode=mode,keys=keys,absinfo=absinfo,fitfunc=fitfunc,fitdegree=fitdegree, knotstep=knotstep,meanh=meanh,meanf=meanf,deltaF=deltaF,diff=diff,baseparam=baseparam, year=year,extradays=extradays,skipcompression=skipcompression,compression=compression, addflags=addflags,headonly=headonly,kind=kind)
        return success

    def idf2xyz(self,**kwargs):
        """
        DEFINITION:
            Converts inclination, declination, intensity (idf) data to xyz
            (i,d in 0.00000 deg (or gon)), f in nT
            Working only for ndarrays

        PARAMETERS:
            optional keywords:
            unit        (string) can be deg or gon
        """
        unit = kwargs.get('unit')
        keys = kwargs.get('keys')
        if not len(self.ndarray[0]) > 0:
            # NOTE(review): only prints a warning; conversion proceeds regardless.
            print("idf2xyz: no data found")
        if not keys:
            keys = ['x','y','z']
        if not len(keys) == 3:
            print("idf2xyz: invalid keys provided")
        indx = KEYLIST.index(keys[0])
        indy = KEYLIST.index(keys[1])
        indz = KEYLIST.index(keys[2])
        # Angle factor: converts the stored angle unit into degrees.
        if unit == 'gon':
            ang_fac = 400./360.
        elif unit == 'rad':
            ang_fac = np.pi/180.
        else:
            ang_fac = 1.
        # d and i in radians; z column still holds F at this point.
        dc = self.ndarray[indy].astype(float)*np.pi/(180.*ang_fac)
        ic = self.ndarray[indx].astype(float)*np.pi/(180.*ang_fac)
        self.ndarray[indx] = self.ndarray[indz].astype(float)*np.cos(dc)*np.cos(ic)
        self.ndarray[indy] = self.ndarray[indz].astype(float)*np.sin(dc)*np.cos(ic)
        self.ndarray[indz] = self.ndarray[indz].astype(float)*np.sin(ic)
        self.header['col-x'] = 'X'
        self.header['col-y'] = 'Y'
        self.header['col-z'] = 'Z'
        self.header['unit-col-x'] = 'nT'
        self.header['unit-col-y'] = 'nT'
        self.header['unit-col-z'] = 'nT'
        self.header['DataComponents'] = self.header['DataComponents'].replace('IDF','XYZ')
        return self

    def xyz2idf(self,**kwargs):
        """
        DEFINITION:
            Converts x,y,z (all in nT) to inclination, declination, intensity (idf)
            (i,d in 0.00000 deg (or gon)), f in nT
            Working only for ndarrays

        PARAMETERS:
            optional keywords:
            unit        (string) can be deg or gon
        """
        keys = kwargs.get('keys')
        if not len(self.ndarray[0]) > 0:
            print("xyz2idf: no data found")
        if not keys:
            keys = ['x','y','z']
        if not len(keys) == 3:
            print("xyz2idf: invalid keys provided")
        indx = KEYLIST.index(keys[0])
        indy = KEYLIST.index(keys[1])
        indz = KEYLIST.index(keys[2])
        unit = kwargs.get('unit')
        if unit == 'gon':
            ang_fac = 400./360.
        elif unit == 'rad':
            ang_fac = np.pi/180.
        else:
            ang_fac = 1.
        # Horizontal intensity, then inclination/declination via atan2.
        h = np.sqrt(self.ndarray[indx].astype(float)**2 + self.ndarray[indy].astype(float)**2)
        i = (180.*ang_fac)/np.pi * np.arctan2(self.ndarray[indz].astype(float), h)
        d = (180.*ang_fac)/np.pi * np.arctan2(self.ndarray[indy].astype(float), self.ndarray[indx].astype(float))
        f = np.sqrt(self.ndarray[indx].astype(float)**2+self.ndarray[indy].astype(float)**2+self.ndarray[indz].astype(float)**2)
        self.ndarray[indx] = i
        self.ndarray[indy] = d
        self.ndarray[indz] = f
        self.header['col-x'] = 'I'
        self.header['col-y'] = 'D'
        self.header['col-z'] = 'F'
        self.header['unit-col-x'] = 'deg'
        self.header['unit-col-y'] = 'deg'
        self.header['unit-col-z'] = 'nT'
        self.header['DataComponents'] = self.header['DataComponents'].replace('XYZ','IDF')
        return self

    def xyz2hdz(self,**kwargs):
        """
        DEFINITION:
            Converts x,y,z (all in nT) to horizontal, declination, z (hdz)
            (d in 0.00000 deg (or gon)), h,z in nT
            Working only for ndarrays

        PARAMETERS:
            optional keywords:
            unit        (string) can be deg or gon
        """
        keys = kwargs.get('keys')
        if not len(self.ndarray[0]) > 0:
            print("xyz2hdz: no data found")
        if not keys:
            keys = ['x','y','z']
        if not len(keys) == 3:
            print("xyz2hdz: invalid keys provided")
        indx = KEYLIST.index(keys[0])
        indy = KEYLIST.index(keys[1])
        indz = KEYLIST.index(keys[2])
        unit = kwargs.get('unit')
        if unit == 'gon':
            ang_fac = 400./360.
        elif unit == 'rad':
            ang_fac = np.pi/180.
        else:
            ang_fac = 1.
        h = np.sqrt(self.ndarray[indx].astype(float)**2 + self.ndarray[indy].astype(float)**2)
        d = (180.*ang_fac) / np.pi * np.arctan2(self.ndarray[indy].astype(float), self.ndarray[indx].astype(float))
        self.ndarray[indx] = h
        self.ndarray[indy] = d
        #dH = dX*X/sqrt(X^2 + Y^2) + dY*Y/sqrt(X^2 + Y^2)
        #dD = 180/Pi*(dY*X/(X^2 + Y^2) - dX*Y/(X^2 + Y^2))
        self.header['col-x'] = 'H'
        self.header['col-y'] = 'D'
        self.header['unit-col-x'] = 'nT'
        self.header['unit-col-y'] = 'deg'
        self.header['DataComponents'] = self.header['DataComponents'].replace('XYZ','HDZ')
        return self

    def hdz2xyz(self,**kwargs):
        """
        DEFINITION:
            Converts h,d,z (h,z in nT, d in deg) to xyz
            Working only for ndarrays

        PARAMETERS:
            optional keywords:
            unit        (string) can be deg or gon
            keys        (list) list of three keys which hold h,d,z values
        """
        keys = kwargs.get('keys')
        if not len(self.ndarray[0]) > 0:
            print("hdz2xyz: no data found")
        if not keys:
            keys = ['x','y','z']
        if not len(keys) == 3:
            print("hdz2xyz: invalid keys provided")
        indx = KEYLIST.index(keys[0])
        indy = KEYLIST.index(keys[1])
        indz = KEYLIST.index(keys[2])
        unit = kwargs.get('unit')
        if unit == 'gon':
            ang_fac = 400./360.
        elif unit == 'rad':
            ang_fac = np.pi/180.
        else:
            ang_fac = 1.
        dc = self.ndarray[indy].astype(float)*np.pi/(180.*ang_fac)
        # Keep the original H column before overwriting X.
        prevxcol = self.ndarray[indx].astype(float)
        self.ndarray[indx] = prevxcol * (np.cos(dc))
        self.ndarray[indy] = prevxcol * (np.sin(dc))
        #self.ndarray[indx] = self.ndarray[indx].astype(float) /np.sqrt((np.tan(dc))**2 + 1)
        #self.ndarray[indy] = np.sqrt(self.ndarray[indx].astype(float)**2 - xtmp**2)
        #print self.ndarray[indy]
        #self.ndarray[indx] = xtmp
        self.header['col-x'] = 'X'
        self.header['col-y'] = 'Y'
        self.header['col-z'] = 'Z'
        self.header['unit-col-x'] = 'nT'
        self.header['unit-col-y'] = 'nT'
        self.header['unit-col-z'] = 'nT'
        self.header['DataComponents'] = self.header['DataComponents'].replace('HDZ','XYZ')
        # NOTE(review): unlike the sibling converters this returns a new
        # DataStream instead of self — confirm this asymmetry is intended.
        return DataStream(self,self.header,self.ndarray)


class PyMagLog(object):
    """
    Looging class for warning messages and analysis steps.
    logger and warnings are lists of strings.
    They contain full text information for file and screen output
    """
    # NOTE(review): mutable default arguments — the same list objects are
    # shared by every instance created without explicit arguments.
    def __init__(self, logger=[], warnings=[], process=[], proc_count=0):
        self.logger = logger
        self.warnings = warnings
        self.process = process
        self.proc_count = proc_count

    def __getitem__(self, key):
        # NOTE(review): returns the attribute literally named 'key', not the
        # attribute named by the argument — getattr(self, key) was likely meant.
        return self.key

    def addwarn(self, warnmsg):
        self.warnings.append(warnmsg)

    def addlog(self, logmsg):
        self.logger.append(logmsg)

    def addpro(self, promsg):
        self.process.append(promsg)

    def clearpro(self):
        # NOTE(review): binds a local name; self.process is never cleared.
        process = []

    def clearlog(self):
        # NOTE(review): binds a local name; self.logger is never cleared.
        logger = []

    def clearwarn(self):
        # NOTE(review): binds a local name; self.warnings is never cleared.
        warnings = []

    def addcount(self, num, maxnum):
        """
        creates an integer number relative to maxnum ranging from 0 to 100
        assuming num starting at zero
        """
        self.proc_count = int(np.round(num*100/maxnum))

    def clearcount(self):
        self.proc_count = 0

    def _removeduplicates(self,content):
        # Order is not preserved: set() drops duplicates arbitrarily.
        return list(set(content))

    """
    def sendLogByMail(self,loglist,**kwargs):
        smtpserver = kwargs.get('smtpserver')
        sender = kwargs.get('sender')
        user = kwargs.get('user')
        pwd = <PASSWORD>('<PASSWORD>')
        destination = kwargs.get('destination')
        subject = kwargs.get('subject')

        if not smtpserver:
            smtpserver = 'smtp.internet.at'
        if not sender:
            sender = '<EMAIL>'
        if not destination:
            destination = ['<EMAIL>']
        if not user:
            user = "FrauMusterfrau"
        if not pwd:
            pwd = "<PASSWORD>"
        if not subject:
            subject= 'MagPy Log from %s' % datetime.utcnow()

        # typical values for text_subtype are plain, html, xml
        text_subtype = 'plain'

        content = '\n'.join(''.join(line) for line in loglist)

        try:
            msg = MIMEText(content, text_subtype)
            msg['Subject']= subject
            msg['From'] = sender # some SMTP servers will do this automatically, not all
            smtp = SMTP()
            smtp.set_debuglevel(False)
            smtp.connect(smtpserver, 587)
            smtp.ehlo()
            smtp.starttls()
            smtp.ehlo()
            smtp.login(user, pwd)

            try:
                smtp.sendmail(sender, destination, msg.as_string())
            finally:
                smtp.close()

        except Exception as exc:
            raise ValueError( "mail failed; %s" % str(exc) ) # give a error message
    """

    def combineWarnLog(self,warning,log):
        # Merge deduplicated warnings and log lines under two section headers.
        comlst = ['Warning:']
        comlst.extend(self._removeduplicates(warning))
        comlst.extend(['Non-critical info:'])
        comlst.extend(self._removeduplicates(log))
        return comlst


class LineStruct(object):
    # One data record of a stream: time, field components, deltas,
    # temperatures, free variables, string columns, flag and comment.
    def __init__(self, time=float('nan'), x=float('nan'), y=float('nan'), z=float('nan'), f=float('nan'), dx=float('nan'), dy=float('nan'), dz=float('nan'), df=float('nan'), t1=float('nan'), t2=float('nan'), var1=float('nan'), var2=float('nan'), var3=float('nan'), var4=float('nan'), var5=float('nan'), str1='-', str2='-', str3='-', str4='-', flag='0000000000000000-', comment='-', typ="xyzf", sectime=float('nan')):
        #def __init__(self):
        #- at the end of flag is important to be recognized as string
        """
        self.time=float('nan')
        self.x=float('nan')
        self.y=float('nan')
        self.z=float('nan')
        self.f=float('nan')
        self.dx=float('nan')
        self.dy=float('nan')
        self.dz=float('nan')
        self.df=float('nan')
        self.t1=float('nan')
        self.t2=float('nan')
        self.var1=float('nan')
        self.var2=float('nan')
        self.var3=float('nan')
        self.var4=float('nan')
        self.var5=float('nan')
        self.str1=''
        self.str2=''
        self.str3=''
        self.str4=''
        self.flag='0000000000000000-'
        self.comment='-'
        self.typ="xyzf"
        self.sectime=float('nan')
        """
        self.time = time
        self.x = x
        self.y = y
        self.z = z
        self.f = f
        self.dx = dx
        self.dy = dy
        self.dz = dz
        self.df = df
        self.t1 = t1
        self.t2 = t2
        self.var1 = var1
        self.var2 = var2
        self.var3 = var3
        self.var4 = var4
        self.var5 = var5
        self.str1 = str1
        self.str2 = str2
        self.str3 = str3
        self.str4 = str4
        self.flag = flag
        self.comment = comment
        self.typ = typ
        self.sectime = sectime

    def __repr__(self):
        return repr((self.time, self.x, self.y, self.z, self.f, self.dx, self.dy, self.dz, self.df, self.t1, self.t2, self.var1, self.var2, self.var3, self.var4, self.var5, self.str1, self.str2, self.str3, self.str4, self.flag, self.comment, self.typ))

    def __getitem__(self, index):
        # Index into the record via the global KEYLIST column order.
        key = KEYLIST[index]
        return getattr(self, key)

    def __setitem__(self, index, value):
        key = KEYLIST[index]
        # NOTE(review): lower-cases the key on set but not on get — confirm
        # KEYLIST entries are already lower case.
        setattr(self, key.lower(), value)

    def idf2xyz(self,**kwargs):
        """
        keyword:
        unit: (string) can be deg or gon
        """
        unit = kwargs.get('unit')
        if unit == 'gon':
            ang_fac = 400./360.
        elif unit == 'rad':
            ang_fac = np.pi/180.
        else:
            ang_fac = 1.
        dc = self.y*np.pi/(180.*ang_fac)
        ic = self.x*np.pi/(180.*ang_fac)
        self.x = self.z*np.cos(dc)*np.cos(ic)
        self.y = self.z*np.sin(dc)*np.cos(ic)
        self.z = self.z*np.sin(ic)
        return self

    def xyz2idf(self,**kwargs):
        """
        keyword:
        unit: (string) can be deg or gon
        """
        unit = kwargs.get('unit')
        if unit == 'gon':
            ang_fac = 400./360.
        elif unit == 'rad':
            ang_fac = np.pi/180.
        else:
            ang_fac = 1.
        h = np.sqrt(self.x**2 + self.y**2)
        i = (180.*ang_fac)/np.pi * math.atan2(self.z, h)
        d = (180.*ang_fac)/np.pi * math.atan2(self.y, self.x)
        f = np.sqrt(self.x**2+self.y**2+self.z**2)
        self.x = i
        self.y = d
        self.z = f
        return self

    def xyz2hdz(self,**kwargs):
        """
        keyword:
        unit: (string) can be deg or gon
        """
        unit = kwargs.get('unit')
        if unit == 'gon':
            ang_fac = 400./360.
        elif unit == 'rad':
            ang_fac = np.pi/180.
        else:
            ang_fac = 1.
        h = np.sqrt(self.x**2 + self.y**2)
        d = (180.*ang_fac) / np.pi * math.atan2(self.y, self.x)
        self.x = h
        self.y = d
        #dH = dX*X/sqrt(X^2 + Y^2) + dY*Y/sqrt(X^2 + Y^2)
        #dD =
<filename>Stanford/10_BinarySearchTrees/red_black_tree.py
# red_black_tree.py
import red_black_node as rbn
from typing import Tuple


class RedBlackTree:
    """
    A class used to represent a red-black binary search tree.

    Attributes:

    Methods:
    insert(key)
        Inserts an element into the search tree.
    _rebalance(node)
        Rebalances the red-black search tree after a node is inserted.
    _case1(node)
        A helper function for rebalancing a specific case where the parent of the
        node and the node's sibling are red.
    _case2(node)
        A helper function for rebalancing a specific case where the parent of the
        node is red and the node's sibling is black or None.
    _left_rotation(key)
        Reorganizes a section of the search tree so the parent node, x, becomes the
        left child of it's original right child, y, and y becomes the parent of x.
    _right_rotation(key)
        Reorganizes a section of the search tree so the parent node, x, becomes the
        right child of its original left child, y, and y becomes the parent of x.
    delete_instance(key)
        Deletes an instance of the node from the search tree if the key exists. That
        is, if the node was inserted multiple timesinto the tree it only removes one
        instance. If delete_instance is called on a node with only one instance
        delete_instance will delete the node from the red-black search tree.
    delete(key)
        Deletes a node from the search tree if the key exists.
    _case3(node)
        A helper function for determining how to arrange the red-black tree after a
        node deletion. The node being deleted is black and either has no children
        or two children.
    traverse() -> list
        Prints the keys of the search tree in ascending order, for example,
        1, 2, 3, 4, ..., n.
    successor(key) -> key
        Provides the given key's closest node in value that is greater than the key
        if it exists in the search tree.
    predecessor(key) -> key
        Provides the given key's closest node in value that is less than the key if
        it exists in the search tree.
    max() -> key
        Provides the maximum value that exists in the search tree.
    min() -> key
        Provides the minimum vlaue that exists in the search tree.
    contains(key) -> Tuple[node, node]
        Checks if a value exists in the search tree.
    """

    def __init__(self):
        """
        Parameters:
        None
        """
        # Empty tree: root is None until the first insert.
        self.root = None

    def insert(self, key):
        """
        Inserts a node into the search tree.

        Parameters:
        key: The key of the node you wish to insert.
        """
        if key == None:
            return
        new_node = rbn.Node(key)
        # Check if there is nothing in the tree.
        if self.root == None:
            self.root = new_node
            # Paint it black.
            self.root.recolor()
            return
        # Find where the node should be inserted.
        found_node, parent = self.contains(new_node.key)
        if found_node != None:
            # Duplicate key: count another instance instead of adding a node.
            found_node.add_instance()
        elif new_node.key != parent.key:
            new_node.parent = parent
            if new_node.key < parent.key:
                parent.left = new_node
            else:  # new_node.key > parent.key
                parent.right = new_node
            # NOTE(review): indentation reconstructed from a mangled source —
            # rebalancing only runs when a new node was actually linked in.
            self._rebalance(new_node)

    def _rebalance(self, node):
        """
        Ensures the search tree remains balanced.

        Parameters:
        node: The node where rebalancing should start.
        """
        # Easy case: node's parent is black.
        if node != self.root and not node.parent.is_red:
            return
        # Now we have to keep propagating changes up the tree since
        # node's parent is red and there cannot be two reds in a
        # parent-child relationship.
        while node.parent.is_red and node != self.root:
            grandparent = node.parent.parent
            # Determine the rebalancing case
            if grandparent.right == None or grandparent.left == None:
                self._case2(node)
            elif grandparent.right.is_red and grandparent.left.is_red:
                self._case1(node)
            else:
                self._case2(node)
            # Have to reassign grandparent to to rebalancing
            grandparent = node.parent.parent
            # Do not continue, the root does not have a grandparent.
            if grandparent == self.root or grandparent == None:
                break
            else:
                node = node.parent
        # After propagating ensure the root of the tree remains black.
        if self.root.is_red:
            self.root.recolor()

    def _case1(self, node):
        """
        The parent of the node and the parent's sibling are red. Leave node as red.
        The grandparent of red must be black since the parent of node is originally
        red. Color the grandparent of node red and the grandparent's left and right
        children black.

        Parameters:
        node: The node originating the first case of node reorganization.
        """
        grandparent = node.parent.parent
        grandparent.recolor()
        grandparent.left.recolor()
        grandparent.right.recolor()

    def _case2(self, node):
        """
        The parent of the node is red and the parent's sibling is black or None.
        Rotate node's parent in the opposite direction of node so node occupies the
        original parent's position. Then recolor node and node's new parent.

        Parameters:
        node: The node originating the second case of node reorganization.
        """
        grandparent = node.parent.parent
        # Figure out which way to rotate.
        if node.parent == grandparent.right:
            if node == node.parent.right:
                # Outer (right-right) child: single rotation plus recoloring.
                self._left_rotation(grandparent)
                node.parent.recolor()
                node.parent.left.recolor()
            else:
                # Inner child: rotate it outward first, then recurse.
                self._right_rotation(node.parent)
                self._case2(node.right)
        else:
            if node == node.parent.left:
                # Outer (left-left) child: single rotation plus recoloring.
                self._right_rotation(grandparent)
                node.parent.recolor()
                node.parent.right.recolor()
            else:
                self._left_rotation(node.parent)
                self._case2(node.left)

    def _left_rotation(self, node):
        """
        Conducts a left rotation causing the given node to move left down the
        tree and brings its right child into the vacated position.

            A        left           C
           /\\      ---------->    /
          B  C     rotation       A
                   of A          /
                                B

        Parameters:
        node: The parent node to rotate out of position.
        """
        # Adjust the child pointers for the nodes due to the rotation.
        # The node's right child will become the node's parent with
        # a left rotation
        new_parent = node.right
        # Since the new_parent is greater than node, the new_parent's
        # left pointer will adjust to point to node and node's right
        # pointer must be adjusted to point to the soon-to-be orphaned
        # left node of new_parent.
        node.right = new_parent.left
        if new_parent.left != None:
            new_parent.left.parent = node
        # Adjust the parent pointers for the nodes due to the rotation.
        if node.parent == None:
            # Rotating at the root: new_parent becomes the new root.
            # NOTE(review): new_parent.parent is not reset to None on this
            # path — confirm rbn.Node defaults/maintains it correctly.
            self.root = new_parent
            # Paint it black
            if self.root.is_red:
                self.root.recolor()
        else:
            new_parent.parent = node.parent
            if node == node.parent.left:
                node.parent.left = new_parent
            else:
                node.parent.right = new_parent
        new_parent.left = node
        node.parent = new_parent

    def _right_rotation(self, node):
        """
        Conducts a right rotation causing the given node to move right down the
        tree and brings its left child into the vacated position.

            A        right        B
           /\\      ---------->    \\
          B  C     rotation         A
                   of A              \\
                                      C

        Parameters:
        node: The parent node to rotate out of position.
        """
        # Adjust the child pointers for the nodes due to the rotation.
        # The node's left child will become the node's parent with
        # a right rotation.
        new_parent = node.left
        # Since the new_parent is less than node, the new_parent's
        # right pointer will adjust to point to node and node's left
        # pointer must be adjusted to point to the soon-to-be orphaned
        # right node of new_parent.
        node.left = new_parent.right
        if new_parent.right != None:
            new_parent.right.parent = node
        # Adjust the parent pointers for the nodes due to the rotation.
        if node.parent == None:
            # Rotating at the root: new_parent becomes the new root.
            self.root = new_parent
            # Paint it black
            if self.root.is_red:
                self.root.recolor()
        else:
            new_parent.parent = node.parent
            if node == node.parent.left:
                node.parent.left = new_parent
            else:
                node.parent.right = new_parent
        new_parent.right = node
        node.parent = new_parent

    def delete_instance(self, key):
        """
        Deletes an instance of a node in the red-black search tree. That is, if
        there is more than one instance delete_instance decrements the number of
        instances of the node. If this method is called when only one instance
        exists the delete method gets called to completely remove the node from
        the search tree.

        Parameters:
        key: The key of the node you wish to delete an instance of.
        """
        node, _ = self.contains(key)
        if node == None:
            return
        else:
            node.remove_instance()
            if node.instances < 1:
                self.delete(key)
                # NOTE(review): nesting reconstructed from a mangled source;
                # nulling the root here would discard any children delete()
                # left behind — confirm against the original file.
                if node == self.root:
                    self.root = None

    def delete(self, key):
        """
        Completely removes a node from a red-black search tree regardless of the
        number of instances the node possesses.

        Parameters:
        key: The key of the node you wish to delete from the search tree.
        """
        node, parent = self.contains(key)
        if node == None:
            return
        # Case 1: node being deleted is red with no children
        if node.is_red
current_actual_value=(1,)) n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__), current_actual_value=self.info_MDP.action_space.n) elif(self.regressor_type == 'q_regressor'): output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__), current_actual_value=(self.info_MDP.action_space.n,)) n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__), current_actual_value=self.info_MDP.action_space.n) elif(self.regressor_type == 'generic_regressor'): output_shape = Categorical(hp_name='output_shape', obj_name='output_shape_'+str(self.model.__name__), current_actual_value=self.info_MDP.action_space.shape) #to have a generic regressor I must not specify n_actions n_actions = Categorical(hp_name='n_actions', obj_name='n_actions_'+str(self.model.__name__), current_actual_value=None) tmp_structured_algo_params, dict_to_add = self.model_specific_set_params(new_params=new_params, mdp_info=mdp_info, input_shape=input_shape, output_shape=output_shape, n_actions=n_actions) final_dict_of_params = {**tmp_structured_algo_params, **dict_to_add} self.algo_params = final_dict_of_params tmp_new_params = self.get_params() if(tmp_new_params is not None): self.algo_params_upon_instantiation = copy.deepcopy(tmp_new_params) else: self.logger.error(msg='There was an error getting the parameters!') return False return True else: self.logger.error(msg='Cannot set parameters: \'new_params\' is \'None\'!') return False class ModelGenerationMushroomOnlinePPO(ModelGenerationMushroomOnlineAC): """ This Class implements a specific online model generation algorithm: PPO. This Class wraps the PPO method implemented in MushroomRL. cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/algorithms/actor_critic/deep_actor_critic/ppo.py This Class inherits from the Class ModelGenerationMushroomOnlineAC. 
""" def __init__(self, eval_metric, obj_name, regressor_type='generic_regressor', seeder=2, algo_params=None, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process', deterministic_output_policy=True): """ Parameters ---------- algo_params: This is either None or a dictionary containing all the needed parameters. The default is None. If None then the following parameters will be used: 'policy': either BoltzmannTorchPolicy(beta=0.001) or GaussianTorchPolicy(std_0=1), 'network': one hidden layer, 16 neurons, 'input_shape': self.info_MDP.observation_space.shape, 'n_actions': None, 'output_shape': self.info_MDP.action_space.shape, 'actor_class': Adam, 'actor_lr': 3e-4, 'critic_class': Adam, 'critic_lr': 3e-4, 'loss': F.mse_loss, 'n_epochs_policy': 10, 'batch_size': 64, 'eps_ppo': 0.2, 'lam': 0.95, 'ent_coeff': 0, 'n_epochs': 10, 'n_steps': None, 'n_steps_per_fit': None, 'n_episodes': 500, 'n_episodes_per_fit': 50 regressor_type: This is a string and it can either be: 'action_regressor', 'q_regressor' or 'generic_regressor'. This is used to pick one of the 3 possible kind of regressor made available by MushroomRL. Note that if you want to use a 'q_regressor' then the picked regressor must be able to perform multi-target regression, as a single regressor is used for all actions. The default is 'generic_regressor'. deterministic_output_policy: If this is True then the output policy will be rendered deterministic else if False nothing will be done. Note that the policy is made deterministic only at the end of the learn() method. Non-Parameters Members ---------------------- fully_instantiated: This is True if the block is fully instantiated, False otherwise. It is mainly used to make sure that when we call the learn method the model generation blocks have been fully instantiated as they undergo two stage initialisation being info_MDP unknown at the beginning of the pipeline. 
info_MDP: This is a dictionary compliant with the parameters needed in input to all MushroomRL model generation algorithms. It containts the observation space, the action space, the MDP horizon and the MDP gamma. algo_object: This is the object containing the actual model generation algorithm. algo_params_upon_instantiation: This a copy of the original value of algo_params, namely the value of algo_params that the object got upon creation. This is needed for re-loading objects. model: This is used in set_params in the generic Class ModelGenerationMushroomOnline. With this member we avoid re-writing for each Class inheriting from the Class ModelGenerationMushroomOnline the set_params method. In this Class this member equals to PPO, which is the Class of MushroomRL implementing PPO. core: This is used to contain the Core object of MushroomRL needed to run online RL algorithms. The other parameters and non-parameters members are described in the Class Block. """ super().__init__(eval_metric=eval_metric, obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs, job_type=job_type) self.works_on_online_rl = True self.works_on_offline_rl = False self.works_on_box_action_space = True self.works_on_discrete_action_space = True self.works_on_box_observation_space = True self.works_on_discrete_observation_space = True self.regressor_type = regressor_type #this block has parameters and I may want to tune them: self.is_parametrised = True self.algo_params = algo_params self.deterministic_output_policy = deterministic_output_policy self.fully_instantiated = False self.info_MDP = None self.algo_object = None self.algo_params_upon_instantiation = copy.deepcopy(self.algo_params) self.model = PPO self.core = None #seeds torch torch.manual_seed(self.seeder) torch.cuda.manual_seed(self.seeder) if torch.cuda.is_available(): self.can_use_cuda = True else: self.can_use_cuda = False #this seeding is needed for the 
        # policy of MushroomRL. Indeed the evaluation at the start of the learn method is done
        # using the policy and in the method draw_action, np.random is called!
        np.random.seed(self.seeder)

    def _default_network(self):
        """
        This method creates a default Network with 2 hidden layers of 16 neurons
        each and ReLU activation functions.

        Returns
        -------
        Network: the Class wrapper representing the default network.
        """
        class Network(nn.Module):
            def __init__(self, input_shape, output_shape, **kwargs):
                super(Network, self).__init__()

                # assumes input_shape/output_shape are tuples where the last
                # (resp. first) entry is the feature count — TODO confirm with
                # the MushroomRL regressor contract.
                n_input = input_shape[-1]
                n_output = output_shape[0]

                # Two hidden layers of width 16, plus the output layer.
                self.hl0 = nn.Linear(n_input, 16)
                self.hl1 = nn.Linear(16, 16)
                self.hl2 = nn.Linear(16, n_output)

                # Xavier initialisation scaled for ReLU activations.
                nn.init.xavier_uniform_(self.hl0.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl1.weight, gain=nn.init.calculate_gain('relu'))
                nn.init.xavier_uniform_(self.hl2.weight, gain=nn.init.calculate_gain('relu'))

            def forward(self, state, **kwargs):
                h = F.relu(self.hl0(state.float()))
                h = F.relu(self.hl1(h))
                return self.hl2(h)

        # Return the Class itself (not an instance): MushroomRL instantiates it.
        return Network

    def full_block_instantiation(self, info_MDP):
        """
        Parameters
        ----------
        info_MDP: This is an object of Class mushroom_rl.environment.MDPInfo. It
                  contains the action and observation spaces, gamma and the
                  horizon of the MDP.

        Returns
        -------
        This method returns True if the algo_params were set successfully, and False otherwise.
""" self.info_MDP = info_MDP if(self.algo_params is None): network = Categorical(hp_name='network', obj_name='network_'+str(self.model.__name__), current_actual_value=self._default_network()) actor_class = Categorical(hp_name='actor_class', obj_name='actor_class_'+str(self.model.__name__), current_actual_value=optim.Adam) actor_lr = Real(hp_name='actor_lr', obj_name='actor_lr_'+str(self.model.__name__), current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) critic_class = Categorical(hp_name='critic_class', obj_name='critic_class_'+str(self.model.__name__), current_actual_value=optim.Adam) critic_lr = Real(hp_name='critic_lr', obj_name='critic_lr_'+str(self.model.__name__), current_actual_value=3e-4, range_of_values=[1e-5, 1e-3], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) loss = Categorical(hp_name='loss', obj_name='loss_'+str(self.model.__name__), current_actual_value=F.mse_loss) n_epochs_policy = Integer(hp_name='n_epochs_policy', obj_name='n_epochs_policy_'+str(self.model.__name__), current_actual_value=10, range_of_values=[1, 100], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) batch_size = Integer(hp_name='batch_size', obj_name='batch_size_'+str(self.model.__name__), current_actual_value=64, range_of_values=[8, 64], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) eps_ppo = Real(hp_name='eps_ppo', obj_name='eps_ppo_'+str(self.model.__name__), current_actual_value=0.2, range_of_values=[0.08,0.35], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) lam = Real(hp_name='lam', 
obj_name='lam_'+str(self.model.__name__), current_actual_value=0.95, range_of_values=[0.85, 0.99], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) ent_coeff = Real(hp_name='ent_coeff', obj_name='ent_coeff_'+str(self.model.__name__), current_actual_value=0, range_of_values=[0, 0.02], to_mutate=True, seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) n_epochs = Integer(hp_name='n_epochs', current_actual_value=10, range_of_values=[1,50], to_mutate=True, obj_name='n_epochs_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) n_steps = Integer(hp_name='n_steps', current_actual_value=None, to_mutate=False, obj_name='n_steps_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) n_steps_per_fit = Integer(hp_name='n_steps_per_fit', current_actual_value=None, to_mutate=False, obj_name='n_steps_per_fit_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) n_episodes = Integer(hp_name='n_episodes', current_actual_value=500, range_of_values=[10,1000], to_mutate=True, obj_name='n_episodes_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) n_episodes_per_fit = Integer(hp_name='n_episodes_per_fit', current_actual_value=50, range_of_values=[1,1000], to_mutate=True, obj_name='n_episodes_per_fit_'+str(self.model.__name__), seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) dict_of_params = {'actor_class': actor_class, 'actor_lr': actor_lr, 'network': network, 'critic_class': critic_class, 'critic_lr': critic_lr, 
'loss': loss, 'n_epochs_policy': n_epochs_policy, 'batch_size': batch_size, 'eps_ppo': eps_ppo, 'lam': lam, 'ent_coeff': ent_coeff, 'n_epochs': n_epochs, 'n_steps': n_steps, 'n_steps_per_fit': n_steps_per_fit, 'n_episodes': n_episodes, 'n_episodes_per_fit': n_episodes_per_fit } self.algo_params = dict_of_params is_set_param_success = self.set_params(new_params=self.algo_params) if(not is_set_param_success): err_msg = 'There was an error setting the parameters of a'+'\''+str(self.__class__.__name__)+'\' object!' self.logger.error(msg=err_msg) self.fully_instantiated = False self.is_learn_successful = False return False self.logger.info(msg='\''+str(self.__class__.__name__)+'\' object fully instantiated!') self.fully_instantiated = True return True def model_specific_set_params(self, new_params, mdp_info, input_shape, output_shape, n_actions): """ Parameters ---------- new_params: These are the new parameters to set in the RL algorithm. It is a flat dictionary containing objects of Class HyperParameter. mdp_info: This is an object of Class mushroom_rl.environment.MDPInfo: it contains the action space, the observation space and gamma and the horizon of the MDP. input_shape: The shape of the observation space. output_shape: The shape of the action space. n_actions: If the space is Discrete this is the number of actions. Returns ------- tmp_structured_algo_params: A structured dictionary containing the parameters that are strictly part of the RL algorithm. dict_to_add: A flat dictionary containing parameters needed in the method learn() that are not strictly part of the RL algorithm, like the number of epochs and the number of episodes. 
""" if(isinstance(self.info_MDP.action_space, Discrete)): #check if there is the beta parameter for the BoltzmannTorchPolicy if('beta' not in list(new_params.keys())): new_params['beta'] = Real(hp_name='beta', obj_name='beta_'+str(self.model.__name__), current_actual_value=0.001, range_of_values=[0.0001, 0.9], to_mutate=False, seeder=self.seeder, log_mode=self.log_mode, checkpoint_log_path=self.checkpoint_log_path, verbosity=self.verbosity) o_policy = BoltzmannTorchPolicy(network=new_params['network'].current_actual_value, input_shape=input_shape.current_actual_value, output_shape=output_shape.current_actual_value, beta=new_params['beta'].current_actual_value,
# PlayStation RSD export plug-in v1.00 # Written by Lameguy64/TheCodingBrony of Meido-Tek Productions. # # Notes: # - All polygons of the model must only be triangles (3 point polygons) or # quads (4 point polygons). # - Textures must be applied through the UV/Image Editor panel as materials are not # used since it is less convenient for editing/real-time preview. # - UV coords must not exceed the bounds of the texture. Wrapped/tiled textures are # not supported. # - All associated textures files while modelling must have a filename with no # spaces and less than 8 characters long (not including extension) due to RSDLINK # being a DOS program. Also, all associated texture files must be converted to # PlayStation TIM format with a separate tool and configured with TIMTOOL for the # textures to render correctly when the RSD file is converted into a TMD file. # # Known issues: # - Sometimes, exported textured quads might have their UV coords rotated by 180 # degress. This happens very rarely however and can be fixed easily by swapping # the UV coords of the glitched quad around. # # ##### BEGIN GPL LICENSE BLOCK ##### # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ##### END GPL LICENSE BLOCK ##### """ This script exports PlayStation SDK compatible RSD files from Blender. 
It supports normals, colors and textured polygons. Only one mesh can be exported at a time. """ import os import bpy from bpy.props import (CollectionProperty, StringProperty, BoolProperty, EnumProperty, FloatProperty, ) from bpy_extras.io_utils import (ImportHelper, ExportHelper, axis_conversion, ) bl_info = { "name": "PlayStation RSD", "author": "Jobert '<NAME> (Lameguy64/TheCodingBrony)", "blender": (2,6,9), "version": (1,0,0), "location": "File > Export", "description": "Export to PlayStation SDK compatible RSD model format", "category": "Import-Export" } class ExportRSD(bpy.types.Operator, ExportHelper): """Export a single object as a PlayStation SDK compatible RSD model file with """ \ """normals, colors and textured polygons mostly intact""" bl_idname = "export_mesh.rsd"; bl_label = "Export RSD"; filename_ext = ".rsd"; filter_glob = StringProperty(default="*.rsd;*.ply;*.mat", options={'HIDDEN'}) # Export options apply_modifiers = BoolProperty( name="Apply Modifiers", description="Apply Modifiers to the exported mesh", default=True, ) write_normals = BoolProperty( name="Normals", description="Export Normals for smooth and " "hard shaded faces, disabling this option will " "disable light source calculation for all polygons", default=True, ) optimize_normals = BoolProperty( name="Optimize Normals", description="Optimize vertex normals by discarding " "duplicate values, don't use this option " "if you plan to animate your model using MIME " "vertex animation with normals", default=True, ) colored_tpolys = BoolProperty( name="Colored Textured Faces", description="Export all textured faces as vertex colored, " "light source calculation on such faces will be " "disabled however due to library limitations", default=False, ) translucent_polys = BoolProperty( name="Translucent", description="Set all exported faces to be rendered translucently", default=False, ) global_scale = FloatProperty( name="Scale", min=0.01, max=1000.0, default=1.0, ) # The actual export routine 
def execute(self, context): # Get file names for the output files filepath = self.filepath filepath = bpy.path.ensure_ext(filepath, self.filename_ext) PLYfilepath = bpy.path.ensure_ext(filepath, '.ply') MATfilepath = bpy.path.ensure_ext(filepath, '.mat') # Perform some pre-processing prior to exporting scene = context.scene obj = context.active_object mesh = obj.to_mesh(scene, self.apply_modifiers, 'PREVIEW') if not mesh.tessfaces and mesh.polygons: mesh.calc_tessface() # Check if vertex colors and UV maps are present mesh_has_vcols = bool(mesh.tessface_vertex_colors) mesh_has_uvs = bool(mesh.tessface_uv_textures) # To simplify things later on if mesh_has_vcols: mesh_vcols = mesh.tessface_vertex_colors.active if mesh_vcols: mesh_vcols = mesh_vcols.data else: mesh_vcols = None if mesh_has_uvs: mesh_uvs = mesh.tessface_uv_textures.active if mesh_uvs: mesh_uvs = mesh.tessface_uv_textures[0].data else: mesh_uvs = None mesh_verts = mesh.vertices mesh_polys = mesh.polygons # Check faces to see if there are non 3 or 4 point polygons for p in mesh_polys: if (len(p.vertices)<3) or (len(p.vertices)>4): raise Exception("Error, polygons must only have 3 or 4 points") # Copy normals into a simplified array for convenience if self.write_normals: mesh_norms = [] for n in mesh_verts: addNorm = True # Optimize normals by eliminating duplicate entries if self.optimize_normals: if len(mesh_norms)>0: for ns in mesh_norms: if (n.normal.x == ns.x) and (n.normal.y == ns.y) and (n.normal.z == ns.z): addNorm = False break if addNorm: mesh_norms.append(n.normal) # Scan through all faces for their assigned textures if mesh_uvs: tex_table = [] tex_list = [] for uv in mesh_uvs: if uv.image is not None: addTex = True tex_baseNoExtName = bpy.path.basename(uv.image.filepath).rsplit('.')[0] tex_fileName = bpy.path.ensure_ext(tex_baseNoExtName, '.tim') if len(tex_list)>0: for c,t in enumerate(tex_list): if t==tex_fileName: tex_table.append(c+1) addTex = False break if addTex: 
tex_list.append(tex_fileName) tex_table.append(len(tex_list)) else: tex_table.append(0) else: tex_table = None tex_list = None # Prepare header comment strings rsd_header1 = "# Created by Blender %s - " \ "www.blender.org, source file: %r\n" % \ (bpy.app.version_string, os.path.basename(bpy.data.filepath)) rsd_header2 = "# Exported using PlayStation RSD plug-in by " \ "Jobert 'Lameguy' Villamor (Lameguy64/TheCodingBrony)\r\n" # Write PLY file (contains vertex, normal, and face indices) file = open(PLYfilepath, "w") fw = file.write # Write header fw("%s%s" % (rsd_header1, rsd_header2)) fw("@PLY940102\n") fw("# Vertex count, normal count, polygon count\n") if self.write_normals: fw("%d %d %d\n" % (len(mesh_verts), len(mesh_norms), len(mesh_polys))) else: fw("%d 1 %d\n" % (len(mesh_verts), len(mesh_polys))) # Write vertex coords fw("# Vertices\n") for v in mesh_verts: fw("%E %E %E\n" % (v.co.x, -v.co.z, v.co.y)) # Write normal coords fw("# Normals\n") if self.write_normals: for n in mesh_norms: fw("%E %E %E\n" % (n.x, -n.z, n.y)) else: fw("%E %E %E\n" % (0, 0, 0)) # Set global polygon flags poly_flags = 0 if self.write_normals==False: # No light source calc. 
poly_flags = 1 if self.translucent_polys: # Translucent poly_flags = poly_flags | (1<<2) # Write faces fw("# Polygons\n") for p in mesh_polys: normals_index = [ 0, 0, 0, 0 ] # Triangles if len(p.vertices)==3: fw("0 %d %d %d 0 " % (p.vertices[0], p.vertices[2], p.vertices[1])) if self.write_normals: if self.optimize_normals: for i,n in enumerate(mesh_norms): if (mesh_verts[p.vertices[0]].normal.x == n.x and mesh_verts[p.vertices[0]].normal.y == n.y and mesh_verts[p.vertices[0]].normal.z == n.z): normals_index[0] = i if (mesh_verts[p.vertices[1]].normal.x == n.x and mesh_verts[p.vertices[1]].normal.y == n.y and mesh_verts[p.vertices[1]].normal.z == n.z): normals_index[1] = i if (mesh_verts[p.vertices[2]].normal.x == n.x and mesh_verts[p.vertices[2]].normal.y == n.y and mesh_verts[p.vertices[2]].normal.z == n.z): normals_index[2] = i i += 1 else: normals_index[0] = p.vertices[0] normals_index[1] = p.vertices[1] normals_index[2] = p.vertices[2] fw("%d %d %d 0\n" % (normals_index[0], normals_index[2], normals_index[1])) else: fw("0 0 0 0\n") # Quads else: fw("1 %d %d %d %d " % (p.vertices[3], p.vertices[2], p.vertices[0], p.vertices[1])) if self.write_normals: if self.optimize_normals: for i,n in enumerate(mesh_norms): if (mesh_verts[p.vertices[0]].normal.x == n.x and mesh_verts[p.vertices[0]].normal.y == n.y and mesh_verts[p.vertices[0]].normal.z == n.z): normals_index[0] = i if (mesh_verts[p.vertices[1]].normal.x == n.x and mesh_verts[p.vertices[1]].normal.y == n.y and mesh_verts[p.vertices[1]].normal.z == n.z): normals_index[1] = i if (mesh_verts[p.vertices[2]].normal.x == n.x and mesh_verts[p.vertices[2]].normal.y == n.y and mesh_verts[p.vertices[2]].normal.z == n.z): normals_index[2] = i if (mesh_verts[p.vertices[3]].normal.x == n.x and mesh_verts[p.vertices[3]].normal.y == n.y and mesh_verts[p.vertices[3]].normal.z == n.z): normals_index[3] = i else: normals_index[0] = p.vertices[0] normals_index[1] = p.vertices[1] normals_index[2] = p.vertices[2] normals_index[3] 
= p.vertices[3] if p.use_smooth==False: normals_index[1] = normals_index[0] normals_index[2] = normals_index[0] normals_index[3] = normals_index[0] fw("%d %d %d %d\n" % (normals_index[3], normals_index[2], normals_index[0], normals_index[1])) else: fw("0 0 0 0\n") file.close # Create MAT file file = open(MATfilepath, "w") fw = file.write # Write header fw("%s%s" % (rsd_header1, rsd_header2)) fw("@MAT940801\n") fw("%d\n" % (len(mesh_polys)))
<reponame>SAP-samples/acl2020-commonsense # # SPDX-FileCopyrightText: 2020 SAP SE or an SAP affiliate company # # SPDX-License-Identifier: Apache-2.0 # # coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team., 2019 Intelligent Systems Lab, University of Oxford, SAP SE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import os import csv import json import logging import argparse import random import logging from tqdm import tqdm, trange import re import numpy as np import torch from torch.nn import CrossEntropyLoss from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from torch.nn import functional as F from transformers import BertTokenizer from transformers import BertPreTrainedModel, BertModel from transformers.modeling_bert import BertOnlyMLMHead #from transformers import BertAdam from transformers import AdamW, get_linear_schedule_with_warmup from transformers import PYTORCH_PRETRAINED_BERT_CACHE from torch import nn, optim from data_reader import InputExample,DataProcessor from scorer import scorer logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%m/%d/%Y %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) class 
BertForMaskedLM(BertPreTrainedModel): r""" **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the masked language modeling loss. Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``: Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs: **masked_lm_loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. **ltr_lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Next token prediction loss. **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)`` Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``) list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings) of shape ``(batch_size, sequence_length, hidden_size)``: Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
**attentions**: (`optional`, returned when ``config.output_attentions=True``) list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``: Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Examples:: tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertForMaskedLM.from_pretrained('bert-base-uncased') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None, encoder_hidden_states=None, encoder_attention_mask=None, lm_labels=None, ): outputs = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here # Although this may seem awkward, BertForMaskedLM supports two scenarios: # 1. If a tensor that contains the indices of masked labels is provided, # the cross-entropy is the MLM cross-entropy that measures the likelihood # of predictions for masked words. # 2. If `lm_labels` is provided we are in a causal scenario where we # try to predict the next token for each input in the decoder. 
if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1, reduction='none') # -1 index = padding token masked_lm_loss = loss_fct(prediction_scores.permute(0,2,1), masked_lm_labels) masked_lm_loss_normalized = torch.div(torch.mean(masked_lm_loss,1),(masked_lm_labels > -1).sum(dim=1,dtype=torch.float32)) masked_lm_loss_normalized[torch.isnan(masked_lm_loss_normalized)] = 0.0 outputs = (masked_lm_loss_normalized,) + outputs if lm_labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one prediction_scores = prediction_scores[:, :-1, :].contiguous() lm_labels = lm_labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss(ignore_index=-1) ltr_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), lm_labels.view(-1)) outputs = (ltr_lm_loss,) + outputs return outputs # (masked_lm_loss), (ltr_lm_loss), prediction_scores, (hidden_states), (attentions) class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids_1, input_ids_2, attention_mask_1, attention_mask_2, type_1, type_2, masked_lm_1, masked_lm_2, start, end_1, end_2, source_start_token_1, source_end_token_1, source_start_token_2, source_end_token_2): self.input_ids_1=input_ids_1 self.attention_mask_1=attention_mask_1 self.type_1=type_1 self.masked_lm_1=masked_lm_1 #These are only used for train examples self.input_ids_2=input_ids_2 self.attention_mask_2=attention_mask_2 self.type_2=type_2 self.masked_lm_2=masked_lm_2 self.start = start self.end_1 = end_1 self.end_2 = end_2 self.source_start_token_1 = source_start_token_1 self.source_end_token_1 = source_end_token_1 self.source_start_token_2 = source_start_token_2 self.source_end_token_2 = source_end_token_2 def convert_examples_to_features_train(examples, max_seq_len, tokenizer, mode='multimask'): """Loads a data file into a list of `InputBatch`s.""" features = [] count = [0,0] for (ex_index, example) in enumerate(examples): tokens_sent = 
tokenizer.tokenize(example.text_a) tokens_a = tokenizer.tokenize(example.candidate_a) tokens_b = tokenizer.tokenize(example.candidate_b) if len(tokens_a) == len(tokens_b): count[0]=count[0]+1 else: count[1]=count[1]+1 tokens_1, type_1, attention_mask_1, masked_lm_1 = [],[],[],[] tokens_2, type_2, attention_mask_2, masked_lm_2 = [],[],[],[] tokens_1.append("[CLS]") tokens_2.append("[CLS]") for token in tokens_sent: if token=="_": start = len(tokens_1) if mode == 'multimask': tokens_1.extend(["[MASK]" for _ in range(len(tokens_a))]) tokens_2.extend(["[MASK]" for _ in range(len(tokens_b))]) else: tokens_1.append("[MASK]") tokens_2.append("[MASK]") end_1 = len(tokens_1) end_2 = len(tokens_2) else: tokens_1.append(token) tokens_2.append(token) token_idx_1 = [] token_idx_2 = [] token_counter_1 = 0 token_counter_2 = 0 find_tokens_a = True find_tokens_b = True for idx, token in enumerate(tokens_a): if ( find_tokens_a and token.lower() == tokens_a[token_counter_1].lower()): token_idx_1.append(idx) token_counter_1 += 1 if ( len(token_idx_1) >= len(tokens_a) ): find_tokens_a = False elif find_tokens_a: token_idx_1 = [] token_counter_1 = 0 for idx, token in enumerate(tokens_b): if ( find_tokens_b and token.lower() == tokens_b[token_counter_2].lower()): token_idx_2.append(idx) token_counter_2 += 1 if ( len(token_idx_2) >= len(tokens_b) ): find_tokens_b = False elif find_tokens_b: token_idx_2 = [] token_counter_2 = 0 tokens_1 = tokens_1[:max_seq_len-1]#-1 because of [SEP] tokens_2 = tokens_2[:max_seq_len-1] if tokens_1[-1]!="[SEP]": tokens_1.append("[SEP]") if tokens_2[-1]!="[SEP]": tokens_2.append("[SEP]") type_1 = max_seq_len*[0]#We do not do any inference. 
type_2 = max_seq_len*[0]#These embeddings can thus be ignored attention_mask_1 = (len(tokens_1)*[1])+((max_seq_len-len(tokens_1))*[0]) attention_mask_2 = (len(tokens_2)*[1])+((max_seq_len-len(tokens_2))*[0]) #sentences input_ids_1 = tokenizer.convert_tokens_to_ids(tokens_1) input_ids_2 = tokenizer.convert_tokens_to_ids(tokens_2) #replacements input_ids_a = tokenizer.convert_tokens_to_ids(tokens_a) input_ids_b = tokenizer.convert_tokens_to_ids(tokens_b) for token in tokens_1: if token=="[MASK]": if len(input_ids_a)<=0: continue#broken case masked_lm_1.append(input_ids_a[0]) input_ids_a = input_ids_a[1:] else: masked_lm_1.append(-1) while len(masked_lm_1)<max_seq_len: masked_lm_1.append(-1) for token in tokens_2: if token=="[MASK]": if len(input_ids_b)<=0: continue#broken case masked_lm_2.append(input_ids_b[0]) input_ids_b = input_ids_b[1:] else: masked_lm_2.append(-1) while len(masked_lm_2)<max_seq_len: masked_lm_2.append(-1) # Zero-pad up to the sequence length. while len(input_ids_1) < max_seq_len: input_ids_1.append(0) while len(input_ids_2) < max_seq_len: input_ids_2.append(0) assert len(input_ids_1) == max_seq_len assert len(input_ids_2) == max_seq_len assert len(attention_mask_1) == max_seq_len assert len(attention_mask_2) == max_seq_len assert len(type_1) == max_seq_len assert len(type_2) == max_seq_len assert len(masked_lm_1) == max_seq_len assert len(masked_lm_2) == max_seq_len #if len(tokens_a) == len(tokens_b): features.append( InputFeatures(input_ids_1=input_ids_1, input_ids_2=input_ids_2, attention_mask_1=attention_mask_1, attention_mask_2=attention_mask_2, type_1=type_1, type_2=type_2, masked_lm_1=masked_lm_1, masked_lm_2=masked_lm_2, start=start, end_1=end_1, end_2=end_2, source_start_token_1=token_idx_1[0], source_end_token_1=token_idx_1[-1], source_start_token_2=token_idx_2[0], source_end_token_2=token_idx_2[-1])) logger.info('Ratio: '+str(count[0]/(count[0]+count[1]))) return features def convert_examples_to_features_evaluate(examples, max_seq_len, 
tokenizer): """Loads a data file into a list of `InputBatch`s.""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = tokenizer.tokenize(example.candidate_a) tokens_sent = tokenizer.tokenize(example.text_a) tokens_1, type_1, attention_mask_1, masked_lm_1 = [],[],[],[] tokens_1.append("[CLS]") for token in tokens_sent: if token=="_": tokens_1.extend(["[MASK]" for _ in range(len(tokens_a))]) else: tokens_1.append(token) tokens_1 = tokens_1[:max_seq_len-1]#-1 because of [SEP] if tokens_1[-1]!="[SEP]": tokens_1.append("[SEP]") type_1 = max_seq_len*[0] attention_mask_1 = (len(tokens_1)*[1])+((max_seq_len-len(tokens_1))*[0]) #sentences input_ids_1 = tokenizer.convert_tokens_to_ids(tokens_1) #replacements input_ids_a = tokenizer.convert_tokens_to_ids(tokens_a) for token in tokens_1: if token=="[MASK]": if len(input_ids_a)<=0: continue#broken case masked_lm_1.append(input_ids_a[0]) input_ids_a = input_ids_a[1:] else: masked_lm_1.append(-1) while len(masked_lm_1)<max_seq_len: masked_lm_1.append(-1) # Zero-pad up to the sequence length. 
while len(input_ids_1) < max_seq_len: input_ids_1.append(0) assert len(input_ids_1) == max_seq_len assert len(attention_mask_1) == max_seq_len assert len(type_1) == max_seq_len assert len(masked_lm_1) == max_seq_len features.append( InputFeatures(input_ids_1=input_ids_1, input_ids_2=None, attention_mask_1=attention_mask_1, attention_mask_2=None, type_1=type_1, type_2=None, masked_lm_1=masked_lm_1, masked_lm_2=None, start=None, end_1=None, end_2=None, source_start_token_1=None, source_end_token_1=None, source_start_token_2=None, source_end_token_2=None)) return features def test(processor, args, tokenizer, model, device, global_step = 0, tr_loss = 0, test_set = "wscr-test", verbose=False, output_file=None): eval_examples = processor.get_examples(args.data_dir,test_set) eval_features = convert_examples_to_features_evaluate( eval_examples, args.max_seq_length, tokenizer) if verbose: logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) all_input_ids_1 = torch.tensor([f.input_ids_1 for f
<reponame>sahadikr1/sahadikr1sasa import asyncio, discord, base64, binascii, re, math, shutil, tempfile, os from discord.ext import commands from Cogs import Nullify, DL def setup(bot): # Add the bot and deps settings = bot.get_cog("Settings") bot.add_cog(Encode(bot, settings)) class Encode(commands.Cog): # Init with the bot reference def __init__(self, bot, settings): self.bot = bot self.settings = settings self.regex = re.compile(r"(http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?") def suppressed(self, guild, msg): # Check if we're suppressing @here and @everyone mentions if self.settings.getServerStat(guild, "SuppressMentions"): return Nullify.clean(msg) else: return msg async def download(self, url): url = url.strip("<>") # Set up a temp directory dirpath = tempfile.mkdtemp() tempFileName = url.rsplit('/', 1)[-1] # Strip question mark tempFileName = tempFileName.split('?')[0] filePath = dirpath + "/" + tempFileName rImage = None try: rImage = await DL.async_dl(url) except: pass if not rImage: self.remove(dirpath) return None with open(filePath, 'wb') as f: f.write(rImage) # Check if the file exists if not os.path.exists(filePath): self.remove(dirpath) return None return filePath def remove(self, path): if not path == None and os.path.exists(path): shutil.rmtree(os.path.dirname(path), ignore_errors=True) # Helper methods def _to_bytes(self, in_string): return in_string.encode('utf-8') def _to_string(self, in_bytes): return in_bytes.decode('utf-8') # Check hex value def _check_hex(self, hex_string): # Remove 0x/0X hex_string = hex_string.replace("0x", "").replace("0X", "") hex_string = re.sub(r'[^0-9A-Fa-f]+', '', hex_string) return hex_string # To base64 methods def _ascii_to_base64(self, ascii_string): ascii_bytes = self._to_bytes(ascii_string) base_64 = base64.b64encode(ascii_bytes) return self._to_string(base_64) def _hex_to_base64(self, hex_string): hex_string = self._check_hex(hex_string) hex_s_bytes = 
self._to_bytes(hex_string) hex_bytes = binascii.unhexlify(hex_s_bytes) base64_bytes = base64.b64encode(hex_bytes) return self._to_string(base64_bytes) # To ascii methods def _hex_to_ascii(self, hex_string): hex_string = self._check_hex(hex_string) hex_bytes = self._to_bytes(hex_string) ascii_bytes = binascii.unhexlify(hex_bytes) return self._to_string(ascii_bytes) def _base64_to_ascii(self, base64_string): base64_bytes = self._to_bytes(base64_string) ascii_bytes = base64.b64decode(base64_bytes) return self._to_string(ascii_bytes) # To hex methods def _ascii_to_hex(self, ascii_string): ascii_bytes = self._to_bytes(ascii_string) hex_bytes = binascii.hexlify(ascii_bytes) return self._to_string(hex_bytes) def _base64_to_hex(self, base64_string): b64_string = self._to_bytes(base64_string) base64_bytes = base64.b64decode(b64_string) hex_bytes = binascii.hexlify(base64_bytes) return self._to_string(hex_bytes) def _rgb_to_hex(self, r, g, b): return '#%02x%02x%02x' % (r, g, b) def _hex_to_rgb(self, _hex): _hex = _hex.replace("#", "") l_hex = len(_hex) return tuple(int(_hex[i:i + l_hex // 3], 16) for i in range(0, l_hex, l_hex // 3)) def _cmyk_to_rgb(self, c, m, y, k): c, m, y, k = [float(x)/100.0 for x in tuple([c, m, y, k])] return tuple([round(255.0 - ((min(1.0, x * (1.0 - k) + k)) * 255.0)) for x in tuple([c, m, y])]) def _rgb_to_cmyk(self, r, g, b): c, m, y = [1 - x/255 for x in tuple([r, g, b])] min_cmy = min(c, m, y) return tuple([0,0,0,100]) if all(x == 0 for x in [r, g, b]) else tuple([round(x*100) for x in [(x - min_cmy) / (1 - min_cmy) for x in tuple([c, m, y])] + [min_cmy]]) @commands.command() async def color(self, ctx, *, value = None): """ View info on a rgb, hex or cmyk color and their values in other formats Example usage: color #3399cc color rgb(3, 4, 5) """ if not value: await ctx.send("Usage: `{}color [value]`".format(ctx.prefix)) return value = value.lower() if not any(value.startswith(x) for x in ["#", "rgb", "cmyk"]): await ctx.send("Invalid value 
color format, please choose from rgb, cmyk or hex") return error = False if value.startswith('rgb'): count = value.count('(') + value.count(')') + value.count(',') if count != 4: error = True number_list = value.lower().replace("rgb", "").replace("(", "").replace(")", "").replace(" ", "") try: r, g, b = map(int, number_list.split(',')) if (r < 0 or r > 255) or (g < 0 or g > 255) or (b < 0 or b > 255): error = True except: error = True if error: await ctx.send("Invalid RGB color format!") return _hex = self._rgb_to_hex(r,g,b) c, m, y, k = self._rgb_to_cmyk(r, g, b) embed_color = int("0x{}".format(_hex.replace("#", '')), 16) embed = discord.Embed(color=embed_color) embed.title = "Color {}".format(value.replace(" ", "")) embed.add_field(name="Hex", value=_hex) embed.add_field(name="CMYK", value="cmyk({}, {}, {}, {})".format(c, m, y, k)) elif value.startswith('#'): match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', value) if not match: await ctx.send("Invalid Hex color format!") return embed_color = int("0x{}".format(value.replace('#', '')), 16) embed = discord.Embed(color=embed_color) r, g, b = self._hex_to_rgb(value) c, m, y, k = self._rgb_to_cmyk(r, g, b) embed.title = "Color {}".format(value.replace(" ", "")) embed.add_field(name="RGB", value="rgb({}, {}, {})".format(r, g, b)) embed.add_field(name="CMYK", value="cmyk({}, {}, {}, {})".format(c, m, y, k)) elif value.startswith('cmyk'): count = value.count('(') + value.count(')') + value.count(',') if count != 5: error = True number_list = value.lower().replace("cmyk", "").replace("(", "").replace(")", "").replace(" ", "") try: c, m, y, k = map(int, number_list.split(',')) if (c < 0 or c > 255) or (m < 0 or m > 255) or (y < 0 or y > 255) or (k < 0 or k > 255): error = True except: error = True if error: await ctx.send("Invalid CMYK color format!") return r, g, b = self._cmyk_to_rgb(c, m, y, k) _hex = self._rgb_to_hex(r, g, b) embed_color = int("0x{}".format(_hex.replace("#", '')), 16) embed = 
discord.Embed(color=embed_color) embed.title = "Color {}".format(value.replace(" ", "")) embed.add_field(name="Hex", value=_hex) embed.add_field(name="RGB", value="rgb({}, {}, {})".format(r, g, b)) await ctx.send(embed=embed) def get_slide(self, start_addr = 0): # Setup our temp vars m1 = int("0x100000",16) m2 = int("0x200000",16) slide = int(math.ceil(( start_addr - m1 ) / m2)) return 0 if slide < 0 else slide def get_available(self, line_list = []): available = [] for line in line_list: line_split = [x for x in line.split(" ") if len(x)] if not len(line_split): continue if len(line_split) == 1: # No spaces - let's make sure it's hex and add it try: available.append({"start":int(line_split[0],16)}) except: continue elif line_split[0].lower() == "available": # If our first item is "available", let's convert the others into ints new_line = [] for x in line_split: new_line.extend(x.split("-")) if len(new_line) < 3: # Not enough info continue try: available.append({ "start":int(new_line[1],16), "end":int(new_line[2],16), "size": (int(new_line[2],16)-int(new_line[1],16))/4096 if len(new_line) < 4 else int(new_line[3],16) }) except: continue return available @commands.command(pass_context=True) async def slide(self, ctx, *, input_hex = None): """Calculates your slide value for Clover based on an input address (in hex).""" if input_hex == None and len(ctx.message.attachments) == 0: # No info passed - bail! 
return await ctx.send("Usage: `{}slide [hex address]`".format(ctx.prefix)) # Check for urls matches = [] if input_hex == None else list(re.finditer(self.regex, input_hex)) slide_url = ctx.message.attachments[0].url if input_hex == None else None if not len(matches) else matches[0].group(0) if slide_url: path = await self.download(slide_url) if not path: # It was just an attachment - bail return await ctx.send("Looks like I couldn't download that link...") # Got something - let's load it as text with open(path,"rb") as f: input_hex = f.read().decode("utf-8","ignore").replace("\x00","").replace("\r","") self.remove(path) # At this point - we might have a url, a table of data, or a single hex address # Let's split by newlines first, then by spaces available = self.get_available(input_hex.replace("`","").split("\n")) if not len(available): return await ctx.send("No available space was found in the passed values.") # Let's sort our available by their size - then walk the list until we find the # first valid slide available = sorted(available, key=lambda x:x.get("size",0),reverse=True) slides = [] for x in available: slide = self.get_slide(x["start"]) if slide < 256: # Got a good one - spit it out hex_str = "{:x}".format(x["start"]).upper() hex_str = "0"*(len(hex_str)%2)+hex_str slides.append(("0x"+hex_str,slide)) # return await ctx.send("Slide value for starting address of 0x{}:\n```\nslide={}\n```".format(hex_str.upper(),slide)) if not len(slides): # If we got here - we have no applicable slides return await ctx.send("No valid slide values were found for the passed info.") # Format the slides pad = max([len(x[0]) for x in slides]) await ctx.send("**Applicable Slide Values:**\n```\n{}\n```".format("\n".join(["{}: slide={}".format(x[0].rjust(pad),x[1]) for x in slides]))) @commands.command(pass_context=True) async def hexswap(self, ctx, *, input_hex = None): """Byte swaps the passed hex value.""" if input_hex == None: await ctx.send("Usage: `{}hexswap 
[input_hex]`".format(ctx.prefix)) return input_hex = self._check_hex(input_hex) if not len(input_hex): await ctx.send("Malformed hex - try again.") return # Normalize hex into pairs input_hex = list("0"*(len(input_hex)%2)+input_hex) hex_pairs = [input_hex[i:i + 2] for i in range(0, len(input_hex), 2)] hex_rev = hex_pairs[::-1] hex_str = "".join(["".join(x) for x in hex_rev]) await ctx.send(hex_str.upper()) @commands.command(pass_context=True) async def hexdec(self, ctx, *, input_hex = None): """Converts hex to decimal.""" if input_hex == None: await ctx.send("Usage: `{}hexdec [input_hex]`".format(ctx.prefix)) return input_hex = self._check_hex(input_hex) if not len(input_hex): await ctx.send("Malformed hex - try again.") return try: dec = int(input_hex, 16) except Exception: await ctx.send("I couldn't make that conversion!") return await ctx.send(dec) @commands.command(pass_context=True) async def dechex(self, ctx, *, input_dec = None): """Converts an int to hex.""" if input_dec == None: await ctx.send("Usage: `{}dechex [input_dec]`".format(ctx.prefix)) return try: input_dec = int(input_dec) except Exception: await ctx.send("Input must be an integer.") return min_length = 2 hex_str = "{:x}".format(input_dec).upper() hex_str = "0"*(len(hex_str)%min_length)+hex_str await ctx.send("0x"+hex_str) @commands.command(pass_context=True) async def strbin(self, ctx, *, input_string = None): """Converts the input string to its binary representation.""" if input_string == None: await ctx.send("Usage: `{}strbin [input_string]`".format(ctx.prefix)) return msg = ''.join('{:08b}'.format(ord(c)) for c in input_string) # Format into blocks: # - First split into chunks of 8 msg_list = re.findall('........?', msg) # Now we format! msg = "```\n" msg += " ".join(msg_list) msg += "```" if len(msg) > 1993: await ctx.send("Well... that was *a lot* of 1s and 0s. Maybe try a smaller string... 
Discord won't let me send all that.") return await ctx.send(msg) @commands.command(pass_context=True) async def binstr(self, ctx, *, input_binary = None): """Converts the input binary to its string representation.""" if input_binary == None: await ctx.send("Usage: `{}binstr [input_binary]`".format(ctx.prefix)) return # Clean the string new_bin = "" for char in input_binary: if char is "0" or char is "1": new_bin += char if not len(new_bin): await ctx.send("Usage: `{}binstr [input_binary]`".format(ctx.prefix)) return msg = ''.join(chr(int(new_bin[i:i+8], 2)) for i in range(0, len(new_bin), 8)) await ctx.send(self.suppressed(ctx.guild, msg)) @commands.command(pass_context=True) async def binint(self, ctx, *, input_binary = None): """Converts the input binary to its integer representation.""" if input_binary == None: await ctx.send("Usage: `{}binint [input_binary]`".format(ctx.prefix)) return try: msg = int(input_binary, 2) except Exception: msg = "I couldn't make that conversion!" await ctx.send(msg) @commands.command(pass_context=True) async def intbin(self, ctx, *, input_int = None): """Converts the input integer to its binary representation.""" if input_int == None: await ctx.send("Usage: `{}intbin [input_int]`".format(ctx.prefix)) return try: input_int = int(input_int) except Exception: await ctx.send("Input must be an integer.") return await ctx.send("{:08b}".format(input_int)) @commands.command(pass_context=True) async def encode(self, ctx, from_type = None , to_type = None, *, value = None): """Data converter from ascii <--> hex <--> base64.""" if value == None or from_type == None or to_type == None: msg = 'Usage: `{}encode [from_type] [to_type] [value]`\nTypes include ascii, hex, and base64.'.format(ctx.prefix) await ctx.send(msg) return types = [ "base64", "hex", "ascii" ] # Allow first letters as well from_check = [x for x in types
<reponame>RelevanceAI/RelevanceAI<filename>relevanceai/_api/endpoints/datasets/datasets.py<gh_stars>10-100 """All Dataset related functions """ from typing import List, Optional from relevanceai.client.helpers import Credentials from relevanceai.utils.base import _Base from relevanceai._api.endpoints.datasets.documents import DocumentsClient from relevanceai._api.endpoints.datasets.monitor import MonitorClient from relevanceai._api.endpoints.datasets.tasks import TasksClient from relevanceai._api.endpoints.datasets.cluster import ClusterClient class DatasetsClient(_Base): """All dataset-related functions""" def __init__(self, credentials: Credentials): self.tasks = TasksClient(credentials) self.documents = DocumentsClient(credentials) self.monitor = MonitorClient(credentials) self.cluster = ClusterClient(credentials) super().__init__(credentials) def schema(self, dataset_id: str): """ Returns the schema of a dataset. Refer to datasets.create for different field types available in a VecDB schema. Parameters ---------- dataset_id : string Unique name of dataset """ return self.make_http_request( endpoint=f"/datasets/{dataset_id}/schema", method="GET" ) def metadata(self, dataset_id: str): """ Retreives metadata about a dataset. Notably description, data source, etc Parameters ---------- dataset_id : string Unique name of dataset """ return self.make_http_request( endpoint=f"/datasets/{dataset_id}/metadata", method="GET" ) def post_metadata(self, dataset_id: str, metadata: dict): """ Edit and add metadata about a dataset. Notably description, data source, etc """ return self.make_http_request( endpoint=f"/datasets/{dataset_id}/metadata", method="POST", parameters={"dataset_id": dataset_id, "metadata": metadata}, ) def create(self, dataset_id: str, schema: Optional[dict] = None): """ A dataset can store documents to be searched, retrieved, filtered and aggregated (similar to Collections in MongoDB, Tables in SQL, Indexes in ElasticSearch). 
A powerful and core feature of VecDB is that you can store both your metadata and vectors in the same document. When specifying the schema of a dataset and inserting your own vector use the suffix (ends with) "_vector_" for the field name, and specify the length of the vector in dataset_schema. \n For example: >>> { >>> "product_image_vector_": 1024, >>> "product_text_description_vector_" : 128 >>> } These are the field types supported in our datasets: ["text", "numeric", "date", "dict", "chunks", "vector", "chunkvector"]. \n For example: >>> { >>> "product_text_description" : "text", >>> "price" : "numeric", >>> "created_date" : "date", >>> "product_texts_chunk_": "chunks", >>> "product_text_chunkvector_" : 1024 >>> } You don't have to specify the schema of every single field when creating a dataset, as VecDB will automatically detect the appropriate data type for each field (vectors will be automatically identified by its "_vector_" suffix). Infact you also don't always have to use this endpoint to create a dataset as /datasets/bulk_insert will infer and create the dataset and schema as you insert new documents. \n Note: - A dataset name/id can only contain undercase letters, dash, underscore and numbers. - "_id" is reserved as the key and id of a document. - Once a schema is set for a dataset it cannot be altered. If it has to be altered, utlise the copy dataset endpoint. For more information about vectors check out the 'Vectorizing' section, services.search.vector or out blog at https://relevance.ai/blog. For more information about chunks and chunk vectors check out datasets.search.chunk. 
Parameters ---------- dataset_id : string Unique name of dataset schema : dict Schema for specifying the field that are vectors and its length """ schema = {} if schema is None else schema return self.make_http_request( endpoint=f"/datasets/create", method="POST", parameters={"id": dataset_id, "schema": schema}, ) def list(self): """List all datasets in a project that you are authorized to read/write.""" return self.make_http_request(endpoint="/datasets/list", method="GET") def list_all( self, include_schema: bool = True, include_stats: bool = True, include_metadata: bool = True, include_schema_stats: bool = False, include_vector_health: bool = False, include_active_jobs: bool = False, dataset_ids: Optional[list] = None, sort_by_created_at_date: bool = False, asc: bool = False, page_size: int = 20, page: int = 1, ): """ Returns a page of datasets and in detail the dataset's associated information that you are authorized to read/write. The information includes: - Schema - Data schema of a dataset (same as dataset.schema). - Metadata - Metadata of a dataset (same as dataset.metadata). - Stats - Statistics of number of documents and size of a dataset (same as dataset.stats). - Vector_health - Number of zero vectors stored (same as dataset.health). - Schema_stats - Fields and number of documents missing/not missing for that field (same as dataset.stats). - Active_jobs - All active jobs/tasks on the dataset. Parameters ---------- include_schema : bool Whether to return schema include_stats : bool Whether to return stats include_metadata : bool Whether to return metadata include_vector_health : bool Whether to return vector_health include_schema_stats : bool Whether to return schema_stats include_active_jobs : bool Whether to return active_jobs dataset_ids : list List of dataset IDs sort_by_created_at_date : bool Sort by created at date. By default shows the newest datasets. Set asc=False to get oldest dataset. 
asc : bool Whether to sort results by ascending or descending order page_size : int Size of each page of results page : int Page of the results """ dataset_ids = [] if dataset_ids is None else dataset_ids return self.make_http_request( endpoint="/datasets/list", method="POST", parameters={ "include_schema": include_schema, "include_stats": include_stats, "include_metadata": include_metadata, "include_schema_stats": include_schema_stats, "include_vector_health": include_vector_health, "include_active_jobs": include_active_jobs, "dataset_ids": dataset_ids, "sort_by_created_at_date": sort_by_created_at_date, "asc": asc, "page_size": page_size, "page": page, }, ) def facets( self, dataset_id, fields: Optional[list] = None, date_interval: str = "monthly", page_size: int = 5, page: int = 1, asc: bool = False, ): """ Takes a high level aggregation of every field, return their unique values and frequencies. This is used to help create the filter bar for search. Parameters ---------- dataset_id : string Unique name of dataset fields : list Fields to include in the facets, if [] then all date_interval : str Interval for date facets page_size : int Size of facet page page : int Page of the results asc: bool Whether to sort results by ascending or descending order """ fields = [] if fields in (None, [None]) else fields return self.make_http_request( endpoint=f"/datasets/{dataset_id}/facets", method="POST", parameters={ "fields": fields, "date_interval": date_interval, "page_size": page_size, "page": page, "asc": asc, }, ) def check_missing_ids(self, dataset_id, ids): """ Look up in bulk if the ids exists in the dataset, returns all the missing one as a list. 
Parameters ---------- dataset_id : string Unique name of dataset ids : list IDs of documents """ # Check if dataset_id exists dataset_exists = dataset_id in self.list()["datasets"] if dataset_exists: return self.make_http_request( endpoint=f"/datasets/{dataset_id}/documents/get_missing", method="GET", parameters={"ids": ids}, ) else: print("Dataset does not exist") return def insert( self, dataset_id: str, document: dict, insert_date: bool = True, overwrite: bool = True, update_schema: bool = True, ): """ Insert a single documents - When inserting the document you can optionally specify your own id for a document by using the field name "_id", if not specified a random id is assigned. - When inserting or specifying vectors in a document use the suffix (ends with) "_vector_" for the field name. e.g. "product_description_vector_". - When inserting or specifying chunks in a document the suffix (ends with) "_chunk_" for the field name. e.g. "products_chunk_". - When inserting or specifying chunk vectors in a document's chunks use the suffix (ends with) "_chunkvector_" for the field name. e.g. "products_chunk_.product_description_chunkvector_". Documentation can be found here: https://ingest-api-dev-aueast.relevance.ai/latest/documentation#operation/InsertEncode \n Try to keep each batch of documents to insert under 200mb to avoid the insert timing out. \n Parameters ---------- dataset_id : string Unique name of dataset documents : list A list of documents. Document is a JSON-like data that we store our metadata and vectors with. For specifying id of the document use the field '_id', for specifying vector field use the suffix of '_vector_' insert_date : bool Whether to include insert date as a field 'insert_date_'. overwrite : bool Whether to overwrite document if it exists. update_schema : bool Whether the api should check the documents for vector datatype to update the schema. 
""" return self.make_http_request( endpoint=f"/datasets/{dataset_id}/documents/insert", method="POST", parameters={ "document": document, "insert_date": insert_date, "overwrite": overwrite, "update_schema": update_schema, }, ) def bulk_insert( self, dataset_id: str, documents: List, insert_date: bool = True, overwrite: bool = True, update_schema: bool = True, field_transformers: Optional[list] = None, return_documents: bool = False, ): """ Documentation can be found here: https://ingest-api-dev-aueast.relevance.ai/latest/documentation#operation/InsertEncode - When inserting the document you can optionally specify your own id for a document by using the field name "_id", if not specified a random id is assigned. - When inserting or specifying
string :param bondIdx: Bond Index Value according to ParentMol. If None, it will try to identified all breakable single bonds :type bondIdx: int or ndarray or tuple or list :param simplifyHydro: Whether to not include normal Hydro in the SMILES (Default to be True) :type simplifyHydro: bool :param reverse: Whether to swap the position of radicals (Default to be False) :type reverse: bool :param doubleTriple: Whether to allow double bonds - triple bonds into breakable mode (default to be False) :type doubleTriple: bool :return: List[Tuple[object]] or List[List[object]] ndarray """ if True: if isinstance(ParentMol, str): ParentMol: Union[Mol, RWMol] = MolFromSmiles(ParentMol) ParentMol: Union[Mol, RWMol] = AddHs(ParentMol) max_bonds: int = ParentMol.GetNumBonds() if bondIdx is not None: if isinstance(bondIdx, (int, np.integer)): bondIdx = [int(bondIdx)] elif isinstance(bondIdx, Tuple): bondIdx = list(bondIdx) elif isinstance(bondIdx, ndarray): bondIdx = bondIdx.ravel().tolist() for index in range(len(bondIdx)): if not inputFastCheck(bondIdx[index], 'int'): bondIdx[index] = int(bondIdx[index]) inputCheckRange(value=min(bondIdx), name='min(bondIdx)', maxValue=max_bonds, minValue=0) inputCheckRange(value=max(bondIdx), name='max(bondIdx)', maxValue=max_bonds, minValue=0) else: bondIdx = [i for i in range(0, max_bonds)] inputFullCheck(value=reverse, name='reverse', dtype='bool') inputFullCheck(value=doubleTriple, name='doubleTriple', dtype='bool') array: ndarray = np.zeros(shape=(len(bondIdx), 3), dtype=np.object_) NULL: List = [None] * array.shape[1] FalseLine: List[List] = [[], []] cache: str = dFramework["Bond Type Cache"][0] for index, bond_index in enumerate(bondIdx): if index != 0 and bondIdx[index] == bondIdx[index - 1]: array[index] = [array[index, 1], array[index, 0], array[index, 2]] if bond_index is None: array[index] = NULL FalseLine[0].append(index) FalseLine[1].append(bond_index) continue bond = ParentMol.GetBondWithIdx(bond_index) if bond.IsInRing() or (not 
doubleTriple and str(bond.GetBondType()) != cache): array[index] = NULL FalseLine[0].append(index) FalseLine[1].append(bond_index) continue TempMol: RWMol = RWMol(ParentMol) BeginAtom, EndAtom = bond.GetBeginAtomIdx(), bond.GetEndAtomIdx() TempMol.RemoveBond(BeginAtom, EndAtom) TempMol.GetAtomWithIdx(BeginAtom).SetNoImplicit(True) TempMol.GetAtomWithIdx(EndAtom).SetNoImplicit(True) SanitizeMol(TempMol) # Call SanitizeMol to update radicals FragA, FragB = sorted(MolToSmiles(TempMol).split(".")) atomType = sorted([BeginAtom.GetSymbol(), EndAtom.GetSymbol()]) if simplifyHydro: FragA, FragB = MolToSmiles(MolFromSmiles(FragA)), MolToSmiles(MolFromSmiles(FragB)) if reverse: FragA, FragB = FragB, FragA array[index] = [FragA, FragB, f"{atomType[0]}-{atomType[1]}"] return array, FalseLine def getStereoChemistryFast(mol: Mol, useLegacyImplementation: bool = True) -> int: if not useLegacyImplementation: AssignStereochemistry(mol, flagPossibleStereoCenters=True, force=True) FindPotentialStereoBonds(mol) INVALID: str = "?" centers = FindMolChiralCenters(mol, includeUnassigned=True, useLegacyImplementation=useLegacyImplementation) check: bool = False for center in centers: if center[1] == INVALID: if check: return 0 check = True INVALID: str = "ANY" bonds = mol.GetBonds() for bond in bonds: if str(bond.GetStereo())[6:] == INVALID: return 0 return 1 def getStereoChemistry(mol: Mol, useLegacyImplementation: bool = True) -> Tuple[dict, int]: """ Implementation of molecule stereo-chemistry: Check the given SMILES string to determine whether accurate enthalpies can be calculated with the given stereo-chemistry information. 
Inspired by <NAME> (2020) :param mol: The referenced molecule used for validation :type mol: Mol :param useLegacyImplementation: Legacy implementation of RDKit in FindMolChiralCenters :type useLegacyImplementation: bool :return: Union[Tuple, bool] """ if not useLegacyImplementation: AssignStereochemistry(mol, flagPossibleStereoCenters=True, force=True) FindPotentialStereoBonds(mol) centers = FindMolChiralCenters(mol, includeUnassigned=True, useLegacyImplementation=useLegacyImplementation) unassigned_atom: int = 0 for center in centers: if center[1] == "?": unassigned_atom += 1 bondList: List[str] = [] unassigned_bond: int = 0 bonds = mol.GetBonds() for bond in bonds: value = str(bond.GetStereo())[6:] if value != "NONE": bondList.append(value) if value == "ANY": unassigned_bond += 1 series = {"atom": len(centers) - unassigned_atom, "non_atom": unassigned_atom, "bond": len(bondList) - unassigned_bond, "non_bond": unassigned_bond} if unassigned_atom <= 1 and unassigned_bond == 0: return series, 1 return series, 0 def checkInvalidStereoChemistry(mol: Mol, previous_message: str = None, useLegacyImplementation: bool = True) -> str: series = getStereoChemistry(mol=RemoveHs(mol), useLegacyImplementation=useLegacyImplementation)[0] if series["non_atom"] != 0 or series["non_bond"] != 0: new_message: str = f" Molecule {MolToSmiles(RemoveHs(mol))} has undefined stereochemistry" if previous_message != new_message: previous_message = new_message warning(new_message) return previous_message def getRadicalsStereoChemistry(FragMolX: Mol, FragMolY: Mol, useLegacyImplementation: bool = True) -> int: """ Implementation of retrieving stereo-chemistry with radical :param FragMolX: The first (1st) fragment used for identification. Order can be swapped with FragMolY. :type FragMolX: Mol :param FragMolY: The second (2nd) fragment used for identification. Order can be swapped with FragMolX. 
:type FragMolY: Mol :param useLegacyImplementation: Legacy implementation of RDKit in FindMolChiralCenters :type useLegacyImplementation: bool :return: int """ if getStereoChemistryFast(mol=FragMolX, useLegacyImplementation=useLegacyImplementation) == 0: return 0 return getStereoChemistryFast(mol=FragMolY, useLegacyImplementation=useLegacyImplementation) def getFullCisTrans(mol: Mol) -> Tuple: # Get Cis-Trans Full Structure CisTransBond: List[str] = [] CisTransIdx: List[int] = [] INVALID: Tuple[str, str] = ("NONE", "ANY") bonds = mol.GetBonds() for bond in bonds: stereo = str(bond.GetStereo())[6:] if stereo not in INVALID: CisTransBond.append(stereo) CisTransIdx.append(bond.GetIdx()) return CisTransBond, CisTransIdx def detectCisTrans(ParentMol: Mol, bondIdx: int, CisTransBond: List[str], CisTransIdx: List[int], stack: List[int], ) -> Optional[List[int]]: # [0]: Generate Input/Output Array currentLength: int = len(stack) for _ in range(2 * len(dFramework["Cis-Trans Atoms"]) + 2): stack.append(0) # [1]: Check possibility of atoms if potentially possess cis-trans bond # Cis-Trans: Z == "Cis", "E" == "Trans" checkBond = ParentMol.GetBondWithIdx(bondIdx) BeginAtom, EndingAtom = checkBond.GetBeginAtom(), checkBond.GetEndAtom() checkAtom = [] if BeginAtom.GetSymbol() not in dFramework["Non-Connected Atoms"]: checkAtom.append(BeginAtom) if EndingAtom.GetSymbol() not in dFramework["Non-Connected Atoms"]: checkAtom.append(EndingAtom) # [2]: Get all data needed (Cis - Trans Identifier) if len(checkAtom) != 0: # Bond Database store the bond index which is part of cis-trans TargetAtom: Tuple[str, str] = dFramework["Cis-Trans Atoms"] separation: int = len(TargetAtom) + 1 Z: str = "Z" for _, atom in enumerate(checkAtom): bonds = atom.GetBonds() for neighbor_bond in bonds: # [2.1]: Get the neighbor index that is contained possibility of being a cis-trans bond but not # current prediction bond. 
neighbor_index: int = neighbor_bond.GetIdx() if neighbor_index == bondIdx: continue try: location: int = CisTransIdx.index(neighbor_index) except MAIN_ERROR: continue temp = currentLength if CisTransBond[location] == Z else separation + currentLength stack[temp] += 1 try: targetBond = ParentMol.GetBondWithIdx(neighbor_index) stack[TargetAtom.index(targetBond.GetOtherAtom(atom).GetSymbol()) + 1 + temp] += 1 except MAIN_ERROR: pass return stack def findNoImplicitAtom(mol) -> int: # Find the first atom with no implicit hydrogen atoms = mol.GetAtoms() for atom in atoms: if atom.GetNoImplicit(): return atom.GetIdx() return -1 @measureExecutionTime def getRingAttachedBondsDatabase(database: ndarray, MoleculeCol: int = 0, BondIdxCol: int = 3, aromaticOnly: bool = False) -> Tuple[List[int], ndarray, List[int], ndarray]: if True: if not inputFastCheck(database, 'ndarray'): database = np.asarray(database) inputCheckRange(value=MoleculeCol, name='MoleculeCol', maxValue=database.shape[1], minValue=0, fastCheck=True) inputCheckRange(value=BondIdxCol, name='BondIdxCol', maxValue=database.shape[1], minValue=0, fastCheck=True) inputFullCheck(value=aromaticOnly, name='aromaticOnly', dtype="bool") # [1]: Prepare data indexData: List = GetIndexOnNonSortedData(database=database, column=MoleculeCol, excel_fit=False, get_last=True) size: int = len(indexData) - 1 path: List[int] = [] moleculeRingTracking: List[int] = [] reversedPath: List[int] = [] moleculeNonRingTracking: List[int] = [] def add(indexRow: List[int], value: int, moleculeTracking: List[int], moleculeValue: int): indexRow.append(value) if len(moleculeTracking) == 0 or moleculeTracking[-1] != moleculeValue: moleculeTracking.append(moleculeValue) # [2]: Looping by request for molSet in range(0, size): begin, end = indexData[molSet][0], indexData[molSet + 1][0] molecule: Mol = AddHs(MolFromSmiles(str(database[begin, MoleculeCol]))) for row in range(begin, end): bond = molecule.GetBondWithIdx(int(database[row, BondIdxCol])) 
startAtom, endAtom = bond.GetBeginAtom(), bond.GetEndAtom() if startAtom.IsInRing() or endAtom.IsInRing(): condition: bool = True if aromaticOnly: if not (startAtom.GetIsAromatic() or endAtom.GetIsAromatic()): condition = False if condition: add(indexRow=path, value=row, moleculeTracking=moleculeRingTracking, moleculeValue=begin) else: add(indexRow=reversedPath, value=row, moleculeTracking=moleculeNonRingTracking, moleculeValue=begin) else: add(indexRow=reversedPath, value=row, moleculeTracking=moleculeNonRingTracking, moleculeValue=begin) if len(path) < 100: print("List of Rows for Ring-Attached:", path) if len(reversedPath) < 100: print("List of Rows for Non-Ring-Attached:", reversedPath) print(f"Result:" f"\n1) Ring-Attached Bonds: " f"{len(path)} BDEs ({round(100 * len(path) / database.shape[0], 6)} %; " f"{len(moleculeRingTracking)} molecules ({round(100 * len(moleculeRingTracking) / len(indexData), 6)}%)" f"\n2) Non-Ring-Attached Bonds: " f"{len(reversedPath)} BDEs ({round(100 * len(reversedPath) / database.shape[0], 6)}%); " f"{len(moleculeNonRingTracking)} molecules ({round(100 * len(moleculeNonRingTracking) / len(indexData), 6)}%)") return path, database[path, :], reversedPath, database[reversedPath, :] def getRingAttachedBondsByFile(InputFileName: str, RingFileName: Optional[str], NonRingFileName: Optional[str], MoleculeCol: int = 0, BondIdxCol: int = 3, aromaticOnly: bool = False) -> \ Tuple[List[int], pd.DataFrame, List[int], pd.DataFrame]: """ Retrieve a dataset contained bonds whose atoms are part of the rings :param InputFileName: A string directory of dataset used to retrieve bond :type InputFileName: str :param RingFileName: A string directory to generate a csv dataset of ring-attached bond :type RingFileName: str :param NonRingFileName: A string directory to generate a csv dataset of ring-attached bond :type NonRingFileName: str :param MoleculeCol: The molecule's column :type MoleculeCol: int :param BondIdxCol: The bond index's column :type 
BondIdxCol: int :param aromaticOnly: Whether to extract aromatic bonds only :type aromaticOnly: bool :return: List[int] """ if True: inputFullCheck(value=InputFileName, name='InputFileName', dtype="str") if RingFileName is not None: inputFullCheck(value=RingFileName, name='RingFileName', dtype="str") if RingFileName == InputFileName: RingFileName = f"{RemoveExtension(FixPath(RingFileName, 'csv'), '.csv')} - Ring.csv" if NonRingFileName is not None: inputFullCheck(value=NonRingFileName, name='NonRingFileName', dtype="str") if NonRingFileName == InputFileName: NonRingFileName = f"{RemoveExtension(FixPath(NonRingFileName, 'csv'), '.csv')} - Non-Ring.csv" if NonRingFileName is None
<reponame>adambernier/flaskr<filename>flaskr/blog.py import datetime as dt import itertools as it from functools import wraps from flask import ( Blueprint, current_app, flash, g, json, jsonify, redirect, render_template, request, url_for ) from flask_login import ( current_user, login_required, login_user, logout_user, ) from elasticsearch import helpers from psycopg2 import errors from slugify import slugify from werkzeug.exceptions import abort, BadRequestKeyError from flaskr.auth import login_required from flaskr.db import get_db def paginate(iterable, page_size): while True: i1, i2 = it.tee(iterable) iterable, page = (it.islice(i1, page_size, None), list(it.islice(i2, page_size))) if len(page) == 0: break yield page def admin_required(f): @wraps(f) def decorated_function(*args, **kwargs): try: if not g.user['role_id'] == 2: abort(403) except TypeError: abort(403) return f(*args, **kwargs) return decorated_function bp = Blueprint('blog', __name__, url_prefix='/blog') @bp.route('/') @bp.route('/',defaults={'page':1}) @bp.route('/page/<int:page>') def index(page=None): if not page: page = 1 PAGINATION_SIZE = 3 db = get_db() db.execute(""" SELECT count(p.id) row_count, min(p.id) min_id FROM post p; """) result = db.fetchone() count, min_id = result['row_count'], result['min_id'] offset = (page - 1) * PAGINATION_SIZE db.execute(""" SELECT p.id, title, title_slug, body, created, author_id, username, familyname, role_id, pt.tags, pt.tag_slugs FROM post p JOIN ( SELECT p2.id, ROW_NUMBER() OVER () rownum FROM post p2 ) p2 ON p2.id = p.id JOIN usr u ON p.author_id = u.id LEFT JOIN ( SELECT pt.post_id, string_agg(t.title, ' ') tags, string_agg(t.slug, ' ') tag_slugs FROM tag t JOIN post_tag pt ON pt.tag_id = t.id GROUP BY pt.post_id ) pt ON pt.post_id = p.id ORDER BY created DESC LIMIT %s OFFSET %s;""", (PAGINATION_SIZE,offset,) ) posts = db.fetchall() try: if posts[-1]['id'] == min_id: last_post = True else: last_post = False except IndexError: last_post = True if 
current_user.is_authenticated: g.user = current_user #db.execute('SELECT role_id FROM usr WHERE id = %s;', # (g.user['id'],)) #user = db.fetchone() return render_template('blog/index.html',posts=posts,page=page, PAGINATION_SIZE=PAGINATION_SIZE,last_post=last_post) @bp.route('/fts',defaults={'page':1}) @bp.route('/fts/<search_slug>/page/<int:page>') def fts(page=None,search_slug=None): if not page: page = 1 PAGINATION_SIZE = 3 search = request.args.get('autocomplete') offset = (page - 1) * PAGINATION_SIZE if not search: search = search_slug else: search = slugify(search) # begin fts query = { "query": { "multi_match": { "query": search, "fields": ["post_body", "post_title", "post_tags"] } } } results = current_app.es.search(index="blog-index", #doc_type="post", body=query) scan = helpers.scan(current_app.es,query=query,scroll='1m', index='blog-index') ids = tuple(sorted(scan_result['_id'] for scan_result in scan)) if len(ids) == 0: abort(404,f"No posts with {search} in either body or title.") # end fts placeholders = ",".join(["%s" for id in ids]) qry = f""" SELECT count(p.id) row_count, min(p.id) min_id FROM post p WHERE p.id in ({placeholders}); """ db = get_db() db.execute(qry,ids) # ids is a tuple result = db.fetchone() count, min_id = result['row_count'], result['min_id'] qry = f""" SELECT p.id, title, title_slug, body, created, author_id, username, role_id, pt.tags, pt.tag_slugs FROM post p JOIN usr u ON p.author_id = u.id LEFT JOIN ( SELECT pt.post_id, string_agg(t.title, ' ') tags, string_agg(t.slug, ' ') tag_slugs FROM tag t JOIN post_tag pt ON pt.tag_id = t.id GROUP BY pt.post_id ) pt ON pt.post_id = p.id WHERE p.id IN ({placeholders}) ORDER BY created DESC LIMIT %s OFFSET %s;""" db.execute(qry,ids+(PAGINATION_SIZE,offset,)) posts = db.fetchall() try: if posts[-1]['id'] == min_id: last_post = True else: last_post = False except IndexError: last_post = True search_slug = slugify(search) return render_template('blog/index.html',posts=posts,page=page, 
PAGINATION_SIZE=PAGINATION_SIZE,last_post=last_post, search_slug=search_slug) @bp.route('/create', methods=('GET', 'POST',)) @login_required def create(): if request.method == 'POST': title = request.form['title'] title_slug = slugify(title) body = request.form['body'] tags = request.form['tags'] tags = tags.split(' ') # expand to allow other delimiters? tag_slugs = [slugify(tag) for tag in tags] error = None if not title: error = 'Title is required.' if error is not None: flash(error) else: db = get_db() db.execute(''' INSERT INTO post (title, title_slug, body, author_id, thank_count) VALUES (%s,%s,%s,%s,%s) RETURNING id;''', (title,title_slug,body,g.user['id'],0) ) post_id = db.fetchone()['id'] tag_ids = [] for tag,tag_slug in zip(tags,tag_slugs): try: db.execute( 'INSERT INTO tag (title, slug)' ' VALUES (%s,%s) RETURNING id;', (tag, tag_slug,) ) tag_ids.append(db.fetchone()['id']) except errors.UniqueViolation: db.execute( 'SELECT id FROM tag WHERE slug = %s;', (tag_slug,) ) tag_ids.append(db.fetchone()['id']) db.executemany( 'INSERT INTO post_tag (post_id, tag_id)' ' VALUES (%s,%s);', list(zip([post_id]*len(tag_ids),tag_ids)) ) # begin elasticsearch suggest = [word for word in title_slug.split('-')] suggest += [tag_slug for tag_slug in tag_slugs] doc = { 'post_author': g.user['id'], 'post_body': body, 'post_title': [word for word in title_slug.split('-')], 'post_tags': [tag_slug for tag_slug in tag_slugs], 'post_timestamp': dt.datetime.now(), 'suggest': suggest, } result = current_app.es.index(index="blog-index", #doc_type='post', id=post_id, body=doc) current_app.es.indices.refresh(index='blog-index') # end elasticsearch return redirect(url_for('blog.index')) return render_template('blog/create.html') @bp.route('/create_comment', methods=('POST',)) def create_comment(): body = request.form['body'] post_id = request.form['post_id'] author_id = request.form['author_id'] db = get_db() db.execute( 'INSERT INTO post_comment (body, post_id, author_id)' ' VALUES 
(%s,%s,%s);', (body,post_id,author_id) ) # to get blog post to redirect to db.execute("SELECT title_slug FROM post WHERE id = %s;", (post_id,)) post = db.fetchone() redirect_url = request.path.replace('create_comment',post['title_slug']) return redirect(redirect_url) def get_post(title_slug, check_author=True): get_db().execute(""" SELECT p.id, title, title_slug, body, created, author_id, username, role_id, thank_count, pt.tags, pt.tag_slugs FROM post p JOIN usr u ON p.author_id = u.id LEFT JOIN ( SELECT pt.post_id, string_agg(t.title, ' ') tags, string_agg(t.slug, ' ') tag_slugs FROM tag t JOIN post_tag pt ON pt.tag_id = t.id GROUP BY pt.post_id ) pt ON pt.post_id = p.id WHERE p.title_slug = %s;""", (title_slug,) ) post = get_db().fetchone() if post is None: abort(404, "Post id {0} doesn't exist.".format(id)) #if check_author and post['author_id'] != g.user['id']: # abort(403) get_db().execute(""" SELECT c.id, body, created, username, c.author_id FROM post_comment c LEFT JOIN usr u ON u.id = c.author_id WHERE c.post_id = %s ORDER BY created;""", (post['id'],) ) comments = get_db().fetchall() return post, comments def get_related(id): """ fetch any related blog posts ordered by most tags in common descending """ get_db().execute(""" SELECT p.id, p.title, title_slug, count(rt.id) related_tag_count, string_agg(rt.title, ' ') related_tags, string_agg(rt.slug, ' ') related_tag_slugs FROM post p JOIN post_tag rpt ON rpt.post_id = p.id JOIN tag rt ON rt.id = rpt.tag_id WHERE p.id != %s AND rt.slug IN ( SELECT t.slug FROM post_tag pt JOIN tag t ON t.id = pt.tag_id WHERE pt.post_id = %s ) GROUP BY p.id, p.title, title_slug ORDER BY count(rt.id) DESC, string_agg(rt.title, ' ');""", (id,id,) ) related = get_db().fetchall() if related is None: abort(404, "No posts related to post id: {0}.".format(id)) return related @bp.route('/<title_slug>/thank', methods=('POST',)) def thank(title_slug): if request.method == "POST": db = get_db() db.execute('''UPDATE post SET thank_count = 
thank_count + 1 WHERE title_slug = %s;''',(title_slug,)) if db.rowcount == 1: db.execute(''' SELECT thank_count FROM post WHERE title_slug = %s;''',(title_slug,)) thank_count = db.fetchone()['thank_count'] #return json.dumps({'status': 'success', 'thank_count': thank_count}) return f'''<p id="click-response">Post thanked {thank_count} time(s).</p>''' else: return json.dumps({'status': 'record not updated'}) return redirect(url_for('blog.detail')) <EMAIL>('/<int:id>/detail',methods=('GET',)) @bp.route('/<title_slug>',methods=('GET',)) def detail(title_slug): post, comments = get_post(title_slug) related_posts = get_related(post['id']) try: thank_count = request.args['thank_count'] except BadRequestKeyError: thank_count = post['thank_count'] return render_template('blog/detail.html', post=post, comments=comments, related_posts=related_posts, thank_count=thank_count) <EMAIL>('/<int:id>/update', methods=('GET', 'POST')) @bp.route('/<title_slug>/update', methods=('GET', 'POST')) @login_required def update(title_slug): post, comments = get_post(title_slug) if request.method == 'POST': db = get_db() # for restriction get db.execute('SELECT author_id FROM post WHERE title_slug = %s;', (title_slug,)) post_user = db.fetchone() if g.user['id'] != post_user['author_id'] and g.user['role_id'] != 2: return redirect(url_for('blog.index')) old_title_slug = title_slug title = request.form['title'] title_slug = slugify(title) body = request.form['body'] tags = request.form['tags'] tags = tags.split(' ') # expand to allow other delimiters? tag_slugs = [slugify(tag) for tag in tags] error = None if not title: error = 'Title is required.' 
if error is not None: flash(error) else: db.execute( 'UPDATE post SET title = %s, title_slug = %s, body = %s' ' WHERE title_slug = %s RETURNING id;', (title, title_slug, body, old_title_slug) ) id = db.fetchone()['id'] # doesn't cost a lot to just delete any existing tags db.execute('DELETE FROM post_tag where post_id = %s;', (id,)) tag_ids = [] for tag,tag_slug in zip(tags,tag_slugs): try: db.execute( 'INSERT INTO tag (title, slug)' ' VALUES (%s,%s) RETURNING id;', (tag, tag_slug,) ) tag_ids.append(db.fetchone()['id']) except errors.UniqueViolation: db.execute( 'SELECT id FROM tag WHERE slug = %s;', (tag_slug,) ) tag_ids.append(db.fetchone()['id']) db.executemany( 'INSERT INTO post_tag (post_id, tag_id)' ' VALUES (%s,%s);', list(zip([id]*len(tag_ids),tag_ids)) ) # begin elasticsearch suggest = [word for word in title_slug.split('-')] suggest += [tag_slug for tag_slug in tag_slugs] doc
<gh_stars>0 import re import numpy as np import scipy.sparse as sp from scipy import linalg from sklearn.decomposition import NMF, non_negative_factorization from sklearn.decomposition import _nmf as nmf # For testing internals from scipy.sparse import csc_matrix import pytest from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import ignore_warnings from sklearn.utils.extmath import squared_norm from sklearn.base import clone from sklearn.exceptions import ConvergenceWarning @pytest.mark.parametrize("solver", ["cd", "mu"]) def test_convergence_warning(solver): convergence_warning = ( "Maximum number of iterations 1 reached. Increase it to improve convergence." ) A = np.ones((2, 2)) with pytest.warns(ConvergenceWarning, match=convergence_warning): NMF(solver=solver, max_iter=1).fit(A) def test_initialize_nn_output(): # Test that initialization does not return negative values rng = np.random.mtrand.RandomState(42) data = np.abs(rng.randn(10, 10)) for init in ("random", "nndsvd", "nndsvda", "nndsvdar"): W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0) assert not ((W < 0).any() or (H < 0).any()) def test_parameter_checking(): A = np.ones((2, 2)) name = "spam" # FIXME : should be removed in 1.1 init = "nndsvda" msg = "Invalid solver parameter: got 'spam' instead of one of" with pytest.raises(ValueError, match=msg): NMF(solver=name, init=init).fit(A) msg = "Invalid init parameter: got 'spam' instead of one of" with pytest.raises(ValueError, match=msg): NMF(init=name).fit(A) with ignore_warnings(category=FutureWarning): # TODO remove in 1.2 msg = "Invalid regularization parameter: got 'spam' instead of one of" with pytest.raises(ValueError, match=msg): NMF(regularization=name, init=init).fit(A) msg = "Invalid beta_loss parameter: got 'spam' instead of one" with 
pytest.raises(ValueError, match=msg): NMF(solver="mu", init=init, beta_loss=name).fit(A) msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0" with pytest.raises(ValueError, match=msg): NMF(solver="cd", init=init, beta_loss=1.0).fit(A) msg = "Negative values in data passed to" with pytest.raises(ValueError, match=msg): NMF(init=init).fit(-A) with pytest.raises(ValueError, match=msg): nmf._initialize_nmf(-A, 2, "nndsvd") clf = NMF(2, tol=0.1, init=init).fit(A) with pytest.raises(ValueError, match=msg): clf.transform(-A) for init in ["nndsvd", "nndsvda", "nndsvdar"]: msg = re.escape( "init = '{}' can only be used when " "n_components <= min(n_samples, n_features)".format(init) ) with pytest.raises(ValueError, match=msg): NMF(3, init=init).fit(A) with pytest.raises(ValueError, match=msg): nmf._initialize_nmf(A, 3, init) def test_initialize_close(): # Test NNDSVD error # Test that _initialize_nmf error is less than the standard deviation of # the entries in the matrix. rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(10, 10)) W, H = nmf._initialize_nmf(A, 10, init="nndsvd") error = linalg.norm(np.dot(W, H) - A) sdev = linalg.norm(A - A.mean()) assert error <= sdev def test_initialize_variants(): # Test NNDSVD variants correctness # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic # 'nndsvd' only where the basic version has zeros. 
rng = np.random.mtrand.RandomState(42) data = np.abs(rng.randn(10, 10)) W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd") Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda") War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0) for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)): assert_almost_equal(evl[ref != 0], ref[ref != 0]) # ignore UserWarning raised when both solver='mu' and init='nndsvd' @ignore_warnings(category=UserWarning) @pytest.mark.parametrize("solver", ("cd", "mu")) @pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random")) @pytest.mark.parametrize("alpha_W", (0.0, 1.0)) @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) def test_nmf_fit_nn_output(solver, init, alpha_W, alpha_H): # Test that the decomposition does not contain negative values A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)] model = NMF( n_components=2, solver=solver, init=init, alpha_W=alpha_W, alpha_H=alpha_H, random_state=0, ) transf = model.fit_transform(A) assert not ((model.components_ < 0).any() or (transf < 0).any()) @pytest.mark.parametrize("solver", ("cd", "mu")) def test_nmf_fit_close(solver): rng = np.random.mtrand.RandomState(42) # Test that the fit is not too far away pnmf = NMF( 5, solver=solver, init="nndsvdar", random_state=0, max_iter=600, ) X = np.abs(rng.randn(6, 5)) assert pnmf.fit(X).reconstruction_err_ < 0.1 @pytest.mark.parametrize("solver", ("cd", "mu")) def test_nmf_transform(solver): # Test that NMF.transform returns close values rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(6, 5)) m = NMF( solver=solver, n_components=3, init="random", random_state=0, tol=1e-5, ) ft = m.fit_transform(A) t = m.transform(A) assert_array_almost_equal(ft, t, decimal=2) def test_nmf_transform_custom_init(): # Smoke test that checks if NMF.transform works with custom initialization random_state = np.random.RandomState(0) A = np.abs(random_state.randn(6, 5)) n_components = 4 avg = np.sqrt(A.mean() 
/ n_components) H_init = np.abs(avg * random_state.randn(n_components, 5)) W_init = np.abs(avg * random_state.randn(6, n_components)) m = NMF(solver="cd", n_components=n_components, init="custom", random_state=0) m.fit_transform(A, W=W_init, H=H_init) m.transform(A) @pytest.mark.parametrize("solver", ("cd", "mu")) def test_nmf_inverse_transform(solver): # Test that NMF.inverse_transform returns close values random_state = np.random.RandomState(0) A = np.abs(random_state.randn(6, 4)) m = NMF( solver=solver, n_components=4, init="random", random_state=0, max_iter=1000, ) ft = m.fit_transform(A) A_new = m.inverse_transform(ft) assert_array_almost_equal(A, A_new, decimal=2) def test_n_components_greater_n_features(): # Smoke test for the case of more components than features. rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(30, 10)) # FIXME : should be removed in 1.1 init = "random" NMF(n_components=15, random_state=0, tol=1e-2, init=init).fit(A) @pytest.mark.parametrize("solver", ["cd", "mu"]) @pytest.mark.parametrize("alpha_W", (0.0, 1.0)) @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) def test_nmf_sparse_input(solver, alpha_W, alpha_H): # Test that sparse matrices are accepted as input from scipy.sparse import csc_matrix rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(10, 10)) A[:, 2 * np.arange(5)] = 0 A_sparse = csc_matrix(A) est1 = NMF( solver=solver, n_components=5, init="random", alpha_W=alpha_W, alpha_H=alpha_H, random_state=0, tol=1e-2, ) est2 = clone(est1) W1 = est1.fit_transform(A) W2 = est2.fit_transform(A_sparse) H1 = est1.components_ H2 = est2.components_ assert_array_almost_equal(W1, W2) assert_array_almost_equal(H1, H2) def test_nmf_sparse_transform(): # Test that transform works on sparse data. 
Issue #2124 rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(3, 2)) A[1, 1] = 0 A = csc_matrix(A) for solver in ("cd", "mu"): model = NMF( solver=solver, random_state=0, n_components=2, max_iter=400, init="nndsvd" ) A_fit_tr = model.fit_transform(A) A_tr = model.transform(A) assert_array_almost_equal(A_fit_tr, A_tr, decimal=1) @pytest.mark.parametrize("init", ["random", "nndsvd"]) @pytest.mark.parametrize("solver", ("cd", "mu")) @pytest.mark.parametrize("alpha_W", (0.0, 1.0)) @pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same")) def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H): # Test that the function is called in the same way, either directly # or through the NMF class rng = np.random.mtrand.RandomState(42) A = np.abs(rng.randn(10, 10)) A[:, 2 * np.arange(5)] = 0 W_nmf, H, _ = non_negative_factorization( A, init=init, solver=solver, alpha_W=alpha_W, alpha_H=alpha_H, random_state=1, tol=1e-2, ) W_nmf_2, _, _ = non_negative_factorization( A, H=H, update_H=False, init=init, solver=solver, alpha_W=alpha_W, alpha_H=alpha_H, random_state=1, tol=1e-2, ) model_class = NMF( init=init, solver=solver, alpha_W=alpha_W, alpha_H=alpha_H, random_state=1, tol=1e-2, ) W_cls = model_class.fit_transform(A) W_cls_2 = model_class.transform(A) assert_array_almost_equal(W_nmf, W_cls, decimal=10) assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10) def test_non_negative_factorization_checking(): A = np.ones((2, 2)) # Test parameters checking is public function nnmf = non_negative_factorization msg = re.escape( "Number of components must be a positive integer; got (n_components=1.5)" ) with pytest.raises(ValueError, match=msg): nnmf(A, A, A, 1.5, init="random") msg = re.escape( "Number of components must be a positive integer; got (n_components='2')" ) with pytest.raises(ValueError, match=msg): nnmf(A, A, A, "2", init="random") msg = re.escape("Negative values in data passed to NMF (input H)") with pytest.raises(ValueError, match=msg): 
nnmf(A, A, -A, 2, init="custom") msg = re.escape("Negative values in data passed to NMF (input W)") with pytest.raises(ValueError, match=msg): nnmf(A, -A, A, 2, init="custom") msg = re.escape("Array passed to NMF (input H) is full of zeros") with pytest.raises(ValueError, match=msg): nnmf(A, A, 0 * A, 2, init="custom") with ignore_warnings(category=FutureWarning): # TODO remove in 1.2 msg = "Invalid regularization parameter: got 'spam' instead of one of" with pytest.raises(ValueError, match=msg): nnmf(A, A, 0 * A, 2, init="custom", regularization="spam") def _beta_divergence_dense(X, W, H, beta): """Compute the beta-divergence of X and W.H for dense array only. Used as a reference for testing nmf._beta_divergence. """ WH = np.dot(W, H) if beta == 2: return squared_norm(X - WH) / 2 WH_Xnonzero = WH[X != 0] X_nonzero = X[X != 0] np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero) if beta == 1: res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero)) res += WH.sum() - X.sum() elif beta == 0: div = X_nonzero / WH_Xnonzero res = np.sum(div) - X.size - np.sum(np.log(div)) else: res = (X_nonzero ** beta).sum() res += (beta - 1) * (WH ** beta).sum() res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum() res /= beta * (beta - 1) return res def test_beta_divergence(): # Compare _beta_divergence with the reference _beta_divergence_dense n_samples = 20 n_features = 10 n_components = 5 beta_losses = [0.0, 0.5, 1.0, 1.5, 2.0] # initialization rng = np.random.mtrand.RandomState(42) X = rng.randn(n_samples, n_features) np.clip(X, 0, None, out=X) X_csr = sp.csr_matrix(X) W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42) for beta in beta_losses: ref = _beta_divergence_dense(X, W, H, beta) loss = nmf._beta_divergence(X, W, H, beta) loss_csr = nmf._beta_divergence(X_csr, W, H, beta) assert_almost_equal(ref, loss, decimal=7) assert_almost_equal(ref, loss_csr, decimal=7) def test_special_sparse_dot(): # Test the function that computes np.dot(W, H), only 
where X is non zero. n_samples = 10 n_features = 5 n_components = 3 rng = np.random.mtrand.RandomState(42) X = rng.randn(n_samples, n_features) np.clip(X, 0, None, out=X) X_csr = sp.csr_matrix(X) W = np.abs(rng.randn(n_samples, n_components)) H = np.abs(rng.randn(n_components, n_features)) WH_safe = nmf._special_sparse_dot(W, H, X_csr) WH = nmf._special_sparse_dot(W, H, X) # test that both results have same values, in X_csr nonzero
1 and lambda = 0, then using BAR to compute the free energy difference. As the test is designed so that both endpoints are the same, the free energy difference should be zero. Parameters ---------- topology_proposal : TopologyProposal The topology proposal to test. This must be a null transformation, where topology_proposal.old_system == topology_proposal.new_system ncmc_steps : int, optional, default=50 Number of NCMC switching steps, or 0 for instantaneous switching. NSIGMA_MAX : float, optional, default=6.0 """ # TODO note - this test is not called anywhere else in package # fix test or delete functions = { 'lambda_sterics' : 'lambda', 'lambda_electrostatics' : 'lambda', 'lambda_bonds' : 'lambda', 'lambda_angles' : 'lambda', 'lambda_torsions' : 'lambda' } # Initialize engine from perses.annihilation import NCMCGHMCAlchemicalIntegrator from perses.annihilation.relative import HybridTopologyFactory #The current and "proposed" positions are the same, since the molecule is not changed. factory = HybridTopologyFactory(topology_proposal, positions, positions) forward_integrator = NCMCGHMCAlchemicalIntegrator(temperature, factory.hybrid_system, functions, nsteps=ncmc_nsteps, direction='insert') reverse_integrator = NCMCGHMCAlchemicalIntegrator(temperature, factory.hybrid_system, functions, nsteps=ncmc_nsteps, direction='delete') platform = openmm.Platform.getPlatformByName("Reference") forward_context = openmm.Context(factory.hybrid_system, forward_integrator, platform) reverse_context = openmm.Context(factory.hybrid_system, reverse_integrator, platform) # Make sure that old system and new system are identical. 
if not (topology_proposal.old_system == topology_proposal.new_system): raise Exception("topology_proposal must be a null transformation for this test (old_system == new_system)") for (k,v) in topology_proposal.new_to_old_atom_map.items(): if k != v: raise Exception("topology_proposal must be a null transformation for this test (retailed atoms must map onto themselves)") nequil = 5 # number of equilibration iterations niterations = 50 # number of round-trip switching trials logP_work_n_f = np.zeros([niterations], np.float64) for iteration in range(nequil): positions = simulate_hybrid(factory.hybrid_system,functions, 0.0, factory.hybrid_positions) #do forward switching: for iteration in range(niterations): # Equilibrate positions = simulate_hybrid(factory.hybrid_system,functions, 0.0, factory.hybrid_positions) # Check that positions are not NaN if(np.any(np.isnan(positions / unit.angstroms))): raise Exception("Positions became NaN during equilibration") # Hybrid NCMC forward_integrator.reset() forward_context.setPositions(positions) forward_integrator.step(ncmc_nsteps) logP_work = forward_integrator.getTotalWork(forward_context) # Check that positions are not NaN if(np.any(np.isnan(positions / unit.angstroms))): raise Exception("Positions became NaN on Hybrid NCMC switch") # Store log probability associated with work logP_work_n_f[iteration] = logP_work logP_work_n_r = np.zeros([niterations], np.float64) for iteration in range(nequil): positions = simulate_hybrid(factory.hybrid_system,functions, 1.0, factory.hybrid_positions) #do forward switching: for iteration in range(niterations): # Equilibrate positions = simulate_hybrid(factory.hybrid_system,functions, 1.0, factory.hybrid_positions) # Check that positions are not NaN if(np.any(np.isnan(positions / unit.angstroms))): raise Exception("Positions became NaN during equilibration") # Hybrid NCMC reverse_integrator.reset() reverse_context.setPositions(positions) reverse_integrator.step(ncmc_nsteps) logP_work = 
# --- tail of the preceding BAR null-transformation test (its `def` lies above this chunk) ---
        # NOTE(review): the work is queried from reverse_integrator but with
        # forward_context — looks like a copy/paste slip; presumably this should
        # be reverse_context. Confirm before trusting work_r below.
reverse_integrator.getTotalWork(forward_context)
        # Check that positions are not NaN
        if(np.any(np.isnan(positions / unit.angstroms))):
            raise Exception("Positions became NaN on Hybrid NCMC switch")
        # Store log probability associated with work
        logP_work_n_r[iteration] = logP_work
    # Forward/reverse switching work samples feed BAR below.
    work_f = - logP_work_n_f
    work_r = - logP_work_n_r
    from pymbar import BAR
    [df, ddf] = BAR(work_f, work_r)
    print("df = %12.6f +- %12.5f kT" % (df, ddf))
    # Null transformation: the free energy difference should be statistically zero.
    if (abs(df) > NSIGMA_MAX * ddf):
        msg = 'Delta F (%d steps switching) = %f +- %f kT; should be within %f sigma of 0\n' % (ncmc_nsteps, df, ddf, NSIGMA_MAX)
        msg += 'logP_work_n:\n'
        msg += str(work_f) + '\n'
        msg += str(work_r) + '\n'
        raise Exception(msg)

def check_hybrid_null_elimination(topology_proposal, positions, new_positions, ncmc_nsteps=50, NSIGMA_MAX=6.0, geometry=False):
    """
    Test alchemical elimination engine on null transformations, where some atoms
    are deleted and then reinserted in a cycle.

    Parameters
    ----------
    topology_proposal : TopologyProposal
        The topology proposal to test. This must be a null transformation, where
        topology_proposal.old_system == topology_proposal.new_system
    positions : simtk.unit.Quantity
        Initial positions used to seed equilibration.
    new_positions :
        Present in the signature but unused in the visible body — TODO confirm.
    ncmc_steps : int, optional, default=50
        Number of NCMC switching steps, or 0 for instantaneous switching.
    NSIGMA_MAX : float, optional, default=6.0
        Number of standard errors away from analytical solution tolerated before
        Exception is thrown
    geometry : bool, optional, default=None
        If True, will also use geometry engine in the middle of the null
        transformation.  NOTE(review): unused in the visible body.
    """
    # TODO note - this test is not called anywhere else in package
    # fix test or delete
    # All alchemical parameters are slaved to a single master 'lambda'.
    functions = { 'lambda_sterics' : 'lambda', 'lambda_electrostatics' : 'lambda', 'lambda_bonds' : 'lambda', 'lambda_angles' : 'lambda', 'lambda_torsions' : 'lambda' }
    # Initialize engine
    # NOTE(review): `temperature` is presumably a module-level global — confirm.
    from perses.annihilation.ncmc_switching import NCMCHybridEngine
    ncmc_engine = NCMCHybridEngine(temperature=temperature, functions=functions, nsteps=ncmc_nsteps)
    # Make sure that old system and new system are identical.
    # if not (topology_proposal.old_system == topology_proposal.new_system):
    #     raise Exception("topology_proposal must be a null transformation for this test (old_system == new_system)")
    # for (k,v) in topology_proposal.new_to_old_atom_map.items():
    #     if k != v:
    #         raise Exception("topology_proposal must be a null transformation for this test (retailed atoms must map onto themselves)")
    nequil = 5 # number of equilibration iterations
    niterations = 50 # number of round-trip switching trials
    logP_work_n = np.zeros([niterations], np.float64)
    for iteration in range(nequil):
        [positions, velocities] = simulate(topology_proposal.old_system, positions)
    for iteration in range(niterations):
        # Equilibrate
        [positions, velocities] = simulate(topology_proposal.old_system, positions)
        # Check that positions are not NaN
        if(np.any(np.isnan(positions / unit.angstroms))):
            raise Exception("Positions became NaN during equilibration")
        # Hybrid NCMC from old to new
        [_, new_old_positions, logP_work, logP_energy] = ncmc_engine.integrate(topology_proposal, positions, positions)
        # Check that positions are not NaN
        if(np.any(np.isnan(positions / unit.angstroms))):
            raise Exception("Positions became NaN on Hybrid NCMC switch")
        # Store log probability associated with work
        logP_work_n[iteration] = logP_work
        #print("Iteration %5d : NCMC work %16.8f kT | NCMC energy %16.8f kT" % (iteration, logP_work, logP_energy))
    # Check free energy difference is within NSIGMA_MAX standard errors of zero.
    # NOTE(review): one-sided EXP is used here (not BAR) — acceptable for a
    # null check but higher-variance; confirm this is intentional.
    work_n = - logP_work_n
    from pymbar import EXP
    [df, ddf] = EXP(work_n)
    print("df = %12.6f +- %12.5f kT" % (df, ddf))
    if (abs(df) > NSIGMA_MAX * ddf):
        msg = 'Delta F (%d steps switching) = %f +- %f kT; should be within %f sigma of 0\n' % (ncmc_nsteps, df, ddf, NSIGMA_MAX)
        msg += 'logP_work_n:\n'
        msg += str(logP_work_n) + '\n'
        raise Exception(msg)

# TODO: Re-enable this test once PointMutationEngine can return size of chemical space
#@nottest #removing peptide mutations for the time-being
@skipIf(istravis, "Skip mutations")
def test_alchemical_elimination_mutation():
    """
    Test alchemical elimination for mutations.
    """
    ff_filename = "amber99sbildn.xml"
    proposal_metadata = {'ffxmls':[ff_filename]}
    # Create peptide.
    from openmmtools import testsystems
    testsystem = testsystems.AlanineDipeptideVacuum()
    [topology, system, positions] = [testsystem.topology, testsystem.system, testsystem.positions]
    # Create forcefield.
    ff = app.ForceField(ff_filename)
    chain_id = '1'
    allowed_mutations = [[('2','GLY')]]
    from perses.rjmc.topology_proposal import SystemGenerator
    system_generator = SystemGenerator([ff_filename])
    # Create a topology proposal for mutating ALA -> GLY
    from perses.rjmc.topology_proposal import PointMutationEngine
    proposal_engine = PointMutationEngine(topology, system_generator, chain_id, proposal_metadata=proposal_metadata, allowed_mutations=allowed_mutations)
    topology_proposal = proposal_engine.propose(system, topology)
    # Modify atom mapping to get a null transformation.
# (continuation of test_alchemical_elimination_mutation from the previous line)
    from perses.rjmc.topology_proposal import TopologyProposal
    # Identity map over the retained atoms makes the proposal a null transformation.
    new_to_old_atom_map = { atom1 : atom1 for atom1 in topology_proposal.new_to_old_atom_map }
    topology_proposal = TopologyProposal(
        new_topology=topology_proposal.old_topology, new_system=topology_proposal.old_system,
        old_topology=topology_proposal.old_topology, old_system=topology_proposal.old_system,
        old_chemical_state_key='AA', new_chemical_state_key='AG', logp_proposal=0.0,
        new_to_old_atom_map=new_to_old_atom_map, metadata=topology_proposal.metadata)
    # Nose-style generator test: yield one check per switching length.
    for ncmc_nsteps in [0, 1, 2, 50]:
        f = partial(check_alchemical_null_elimination, topology_proposal, positions, ncmc_nsteps=ncmc_nsteps)
        f.description = "Testing alchemical null transformation of ALA sidechain in alanine dipeptide with %d NCMC steps" % ncmc_nsteps
        yield f

@skipIf(istravis, "Skip neq switching")
def test_ncmc_alchemical_integrator_stability_molecules():
    """
    Test NCMCAlchemicalIntegrator
    """
    molecule_names = ['pentane', 'biphenyl', 'imatinib']
    #if os.environ.get("TRAVIS", None) == 'true':
    #    molecule_names = ['pentane']
    for molecule_name in molecule_names:
        from perses.utils.openeye import createSystemFromIUPAC
        [molecule, system, positions, topology] = createSystemFromIUPAC(molecule_name)
        # Eliminate half of the molecule
        # TODO: Use a more rigorous scheme to make sure we are really cutting the molecule in half and not just eliminating hydrogens or something.
        alchemical_atoms = [ index for index in range(int(system.getNumParticles()/2)) ]
        # Create an alchemically-modified system.
        from alchemy import AbsoluteAlchemicalFactory
        alchemical_factory = AbsoluteAlchemicalFactory(system, ligand_atoms=alchemical_atoms, annihilate_electrostatics=True, annihilate_sterics=True)
        # Return the alchemically-modified system in fully-interacting form.
        alchemical_system = alchemical_factory.createPerturbedSystem()
        # Create an NCMC switching integrator.
        from perses.annihilation.ncmc_switching import NCMCVVAlchemicalIntegrator
        temperature = 300.0 * unit.kelvin
        nsteps = 10 # number of steps to run integration for
        functions = { 'lambda_sterics' : 'lambda', 'lambda_electrostatics' : 'lambda^0.5', 'lambda_torsions' : 'lambda', 'lambda_angles' : 'lambda^2' }
        ncmc_integrator = NCMCVVAlchemicalIntegrator(temperature, alchemical_system, functions, direction='delete', nsteps=nsteps, timestep=1.0*unit.femtoseconds)
        # Create a Context
        context = openmm.Context(alchemical_system, ncmc_integrator)
        context.setPositions(positions)
        # Run the integrator
        ncmc_integrator.step(nsteps)
        # Check positions are finite
        positions = context.getState(getPositions=True).getPositions(asNumpy=True)
        # NOTE(review): the calls are inverted — np.isnan(np.any(...)) tests a
        # boolean and can never be NaN; presumably np.any(np.isnan(...)) was
        # intended (as used elsewhere in this file). Confirm and fix.
        if np.isnan(np.any(positions / positions.unit)):
            raise Exception('NCMCAlchemicalIntegrator gave NaN positions')
        if np.isnan(ncmc_integrator.getLogAcceptanceProbability(context)):
            raise Exception('NCMCAlchemicalIntegrator gave NaN logAcceptanceProbability')
        del context, ncmc_integrator

@skipIf(istravis, "Skip neq switching")
def test_ncmc_engine_molecule():
    """
    Check alchemical elimination for alanine dipeptide in vacuum with 0, 1, 2, and 50 switching steps.
    """
    molecule_names = ['pentane', 'biphenyl', 'imatinib']
    #if os.environ.get("TRAVIS", None) == 'true':
    #    molecule_names = ['pentane']
    for molecule_name in molecule_names:
        from perses.utils.openeye import createSystemFromIUPAC
        [molecule, system, positions, topology] = createSystemFromIUPAC(molecule_name)
        natoms = system.getNumParticles()
        # DEBUG
        print(molecule_name)
        from openeye import oechem
# of13/parser.py
"""
    Parser of the OpenFlow 1.3 message
"""
import netaddr
from tcpiplib.parser import *
from tcpiplib.prints import *
from struct import unpack
import of13.packet
import of13.dissector
import of13.prints


# ################## OFPT_HELLO ############################
def parse_hello(msg, packet):
    """Parse an OFPT_HELLO body: a run of hello elements, each carrying
    zero or more 32-bit version bitmaps.

    Fills msg.elements with ofp_hello_elem_header objects whose
    versionbitmap attribute is a list of decoded bitmaps (stored as
    bin() strings).  Returns 1.
    """
    start = 0
    elements = []
    # Get all Elements
    # Each Element has 0 - N bitmaps
    while len(packet[start:]) > 0:
        # Get element[]
        elem = unpack('!HH', packet[start:start+4])
        element = of13.packet.ofp_hello.ofp_hello_elem_header()
        element.type = elem[0]
        element.length = elem[1]
        # Guard: a malformed element length (< header size) would make the
        # outer loop spin forever; bail out instead.
        if element.length < 4:
            break
        bitmaps_list = []
        bitmaps = packet[start+4:start+element.length]
        start_bit = 0
        while len(bitmaps[start_bit:]) > 0:
            # BUG FIX: the original indexed `packet` with the element-local
            # offset, re-reading the start of the message instead of the
            # element payload; read from `bitmaps` instead.
            bp = unpack('!HH', bitmaps[start_bit:start_bit+4])
            bitmap = of13.packet.ofp_hello.ofp_hello_elem_versionbitmap()
            bitmap.type = bp[0]
            bitmap.length = bp[1]
            # BUG FIX: unpack exactly one uint32; the original sliced to the
            # end of the message, which makes unpack('!L', ...) raise
            # struct.error on any over-long buffer.
            bmp = unpack('!L', bitmaps[start_bit+4:start_bit+8])
            bitmap.bitmaps = bmp[0]
            # BUG FIX: the original advanced the cursor by the bitmap VALUE
            # (4 + bitmap.bitmaps); advance past the 4-byte header plus the
            # single 32-bit bitmap just read.
            start_bit = start_bit + 8
            bitmap.bitmaps = bin(bitmap.bitmaps)
            bitmaps_list.append(bitmap)
            del bitmap
        element.versionbitmap = bitmaps_list
        start += element.length
        elements.append(element)
        del element
    msg.elements = elements
    return 1


# ################## OFPT_ERROR ############################
def parse_error_msg(msg, packet):
    """Parse OFPT_ERROR: 16-bit type + 16-bit code, mapped to strings."""
    of_error = packet[0:4]
    ofe = unpack('!HH', of_error)
    ofe_type = ofe[0]
    ofe_code = ofe[1]
    msg.error_type, msg.code = of13.dissector.get_ofp_error(ofe_type, ofe_code)
    return 1


# ################## OFPT_ECHO_REQUEST ############################
def parse_echo_request(msg, packet):
    """Store the opaque echo payload verbatim on msg.data."""
    length = len(packet)
    strg = '!%ss' % length
    msg.data = unpack(strg, packet)
    return 0


# ################## OFPT_ECHO_REPLY ############################
def parse_echo_reply(msg, packet):
    """Store the opaque echo payload verbatim on msg.data."""
    length = len(packet)
    strg = '!%ss' % length
    msg.data = unpack(strg, packet)
    return 0


# ################## OFPT_EXPERIMENTER ############################
def parse_experimenter(msg, packet):
    # NOTE(review): stub — 'str' + bytes raises TypeError under Python 3 if
    # packet is a bytes object; confirm before this path is exercised.
    msg.experimenter = 'To finish this function' + packet
    return 0
################## OFPT_FEATURE_REPLY ############################ def _parse_bitmask(bitmask, array): size = len(array) for i in range(0, size): mask = 2**i aux = bitmask & mask if aux == 0: array.remove(mask) return array def _parse_capabilities(capabilities): caps = [1, 2, 4, 8, 16, 32, 64, 128, 256] return _parse_bitmask(capabilities, caps) def parse_switch_features(msg, packet): of_fres = packet[0:24] ofrs = unpack('!8sLBB2sLL', of_fres) caps = _parse_capabilities(ofrs[5]) msg.datapath_id = ofrs[0] msg.n_buffers = ofrs[1] msg.n_tbls = ofrs[2] msg.auxiliary_id = ofrs[3] msg.pad = ofrs[4] msg.caps = caps msg.reserved = ofrs[6] return 1 # ########## OFPT_GET_CONFIG_REPLY & OFPT_SET_CONFIG ############### def parse_switch_config(msg, packet): options = unpack('!HH', packet[:4]) msg.flag = of13.dissector.get_config_flags(options[0]) msg.miss_send_len = options[1] return 1 # ################## OFPT_PACKET_IN ############################ def parse_packet_in(msg, packet): ofpi = unpack('!LHBBQ', packet[:16]) msg.buffer_id = ofpi[0] msg.total_len = ofpi[1] reason = of13.dissector.get_packet_in_reason(ofpi[2]) msg.reason = reason msg.table_id = ofpi[3] msg.cookie = ofpi[4] next_pos = _parse_matches(msg, packet, 16) msg.pad = unpack('!2s', packet[next_pos:next_pos+2])[0] msg.data = parser.process_data(packet, next_pos+2, msg) return 0 # ################## OFPT_FLOW_REMOVED ############################ def parse_flow_removed(msg, packet): offr = unpack('!QHBBLLHHQQ', packet[:40]) cookie = offr[0] if offr[0] > 0 else 0 msg.cookie = '0x' + format(cookie, '02x') msg.priority = offr[1] msg.reason = of13.dissector.get_flow_removed_reason(offr[2]) msg.table_id = offr[3] msg.duration_sec = offr[4] msg.duration_nsec = offr[5] msg.idle_timeout = offr[6] msg.hard_timeout = offr[7] msg.packet_count = offr[8] msg.byte_count = offr[9] msg.match = _parse_matches(msg, packet, 40) return 0 # ################## OFPT_PORT_STATUS ############################ def _parse_phy_config(config): 
confs = [1, 4, 32, 64] return _parse_bitmask(config, confs) def _parse_phy_state(state): states = [1, 2, 4] return _parse_bitmask(state, states) def _parse_phy_curr(values): confs = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768] return _parse_bitmask(values, confs) def _parse_phy_ports(packet): phy = unpack('!L4s6s2s16sLLLLLLLL', packet) port_id = of13.dissector.get_phy_port_id(phy[0]) hw_addr = prints.eth_addr(phy[2]) config = _parse_phy_config(phy[5]) state = _parse_phy_state(phy[6]) curr = _parse_phy_curr(phy[7]) advertised = _parse_phy_curr(phy[8]) supported = _parse_phy_curr(phy[9]) peer = _parse_phy_curr(phy[10]) # curr_speed # max_speed port = of13.packet.ofp_port() port.port_id = port_id port.pad = phy[1] port.hw_addr = hw_addr port.pad2 = phy[3] port.name = phy[4] port.config = config port.state = state port.curr = curr port.advertised = advertised port.supported = supported port.peer = peer # port.curr_speed = None # port.max_speed = None return port def parse_port_status(msg, packet): ofps_raw = packet[0:8] ofps = unpack('!B7s', ofps_raw) reason = of13.dissector.get_port_status_reason(ofps[0]) msg.reason = reason msg.pad = ofps[1] msg.desc = _parse_phy_ports(packet[8:64]) return 0 # ################## OFPT_PACKET_OUT ############################ def parse_packet_out(msg, packet): ofpo = unpack('!LLH6s', packet[:16]) actions = _parse_actions(packet[16:20]) data = parser.process_data(packet, 20, msg) msg.buffer_id = ofpo[0] msg.in_port = ofpo[1] msg.actions_len = ofpo[2] msg.pad = ofpo[3] msg.actions = actions msg.data = data return 0 # ################## OFPT_FLOW_MOD ############################ # def parse_ipv6_extension_header(extensions): # still useful? 
# bits = [1, 2, 4, 8, 16, 32, 64, 128, 256] # return _parse_bitmask(extensions, bits) def _parse_action_output(packet, start, a_type, a_length, offset=12): # Output has 12 bytes raw2 = unpack('!LH6s', packet[start:start + offset]) action = of13.packet.ofp_action_set_output(a_type, a_length) action.port = raw2[0] action.max_len = raw2[1] action.pad = raw2[2] return action, offset def _parser_action_set_vlan_vid(packet, start, a_type, a_length, offset=4): # Set_vlan_vid has 4 bytes raw2 = unpack('!H2s', packet[start:start + offset]) action = of13.packet.ofp_action_set_vlan_vid(a_type, a_length) action.vlan_vid = raw2[0] action.pad = raw2[1] return action, offset def _parse_actions(packet): actions = [] start = 0 while len(packet[start:]) > 0: raw = unpack('!HH', packet[start:start + 4]) action_type = raw[0] action_length = raw[1] start += 4 action_types = {0: _parse_action_output, 1: _parser_action_set_vlan_vid} try: action, offset = action_types[action_type](packet, start, action_type, action_length) except KeyError: return 0 actions.append(action) start += offset return actions def _inst_goto_table(packet, start, instruction): raw = unpack('!B3s', packet[start:start+4]) instruction.table_id = raw[0] instruction.pad = raw[1] def _inst_write_metadata(packet, start, instruction): raw = unpack('!4s12s12s', packet[start:start + 28]) instruction.pad = raw[0] instruction.metadata = raw[1] instruction.metadata_mask = raw[2] def _inst_write_apply_clear_actions(packet, instruction): raw = unpack('!4s', packet[:4]) instruction.pad = raw[0] instruction.actions = _parse_actions(packet[4:]) def _inst_meter(packet, start, instruction): raw = unpack('!L', packet[start:start + 4]) instruction.meter_id = raw[0] def _inst_experimenter(packet, start, instruction): raw = unpack('!L', packet[start:start + 4]) instruction.experimenter_id = raw[0] def _parse_instructions(packet, start): instructions = [] while len(packet[start:]) > 0: instruction = unpack('!HH', packet[start:start+4]) 
i_type = instruction[0] i_len = instruction[1] # Call proper instruction if i_type == 1: instruction = of13.packet.ofp_instruction_go_to(i_type, i_len) _inst_goto_table(packet, start, instruction) elif i_type == 2: instruction = of13.packet.ofp_instruction_write_metadata(i_type, i_len) _inst_write_metadata(packet, start, instruction) elif i_type in [3, 4, 5]: instruction = of13.packet.ofp_instruction_wac_actions(i_type, i_len) _inst_write_apply_clear_actions(packet[start + 4:], instruction) elif i_type == 6: instruction = of13.packet.ofp_instruction_meter(i_type, i_len) _inst_meter(packet, start, instruction) else: instruction = of13.packet.ofp_instruction_experimenter(i_type, i_len) _inst_experimenter(packet, start, instruction) instructions.append(instruction) del instruction start = start + i_len return instructions def unpack_oxm_payload(oxm_tlv, packet_oxm_payload): payload = of13.packet.ofp_match_oxm_payload() len_packet_oxm_content = len(packet_oxm_payload) strg = '' if oxm_tlv.hasmask == 0: if len_packet_oxm_content == 1: strg = '!B' elif len_packet_oxm_content == 2: strg = '!H' elif len_packet_oxm_content == 3: strg = '!3s' elif len_packet_oxm_content == 4: strg = '!L' elif len_packet_oxm_content == 6: strg = '!6s' elif len_packet_oxm_content == 8: strg = '!Q' elif len_packet_oxm_content == 16: net, host = unpack('!QQ', packet_oxm_payload) ipv6 = ((net << 64) | host) payload.value = netaddr.IPAddress(ipv6) return payload payload.value = unpack(strg, packet_oxm_payload)[0] else: if len_packet_oxm_content == 2: strg = '!BB' elif len_packet_oxm_content == 4: strg = '!HH' elif len_packet_oxm_content == 6: strg = '!3s3s' elif len_packet_oxm_content == 8: strg = '!LL' elif len_packet_oxm_content == 12: strg = '!6s6s' elif len_packet_oxm_content == 16: strg = '!QQ' elif len_packet_oxm_content == 32: net, host, net1, host1 = unpack('!QQQQ', packet_oxm_payload) host = (net << 64) | host subnet = (net1 << 64) | host1 payload.value = netaddr.IPAddress(host) 
payload.mask = netaddr.IPAddress(subnet) return payload payload.value, payload.mask = unpack(strg, packet_oxm_payload) return payload def _parse_matches(match, packet, start): match.type, match.length = unpack('!HH', packet[start:start + 4]) length_oxm = match.length - 4 match.pad = (match.length + 7)/8*8 - match.length start += 4 oxms = packet[start:start+length_oxm] start_2 = 0 oxm_array = [] while len(oxms[start_2:]) > 0: oxm_raw = unpack('!L', oxms[start_2:start_2 + 4]) oxm_tlv = of13.packet.ofp_match_oxm_fields() oxm_tlv.oxm_class = (oxm_raw[0] >> 16) oxm_tlv.field = ((oxm_raw[0] >> 9) & 0x7f) oxm_tlv.hasmask = ((oxm_raw[0] >> 8) & 1) oxm_tlv.length = (oxm_raw[0] & 0xff) packet_oxm_payload = oxms[start_2+4:start_2 + 4 + oxm_tlv.length] oxm_tlv.payload = unpack_oxm_payload(oxm_tlv, packet_oxm_payload) oxm_array.append(oxm_tlv) start_2 = start_2 + 4 + oxm_tlv.length del oxm_tlv match.oxm_fields = oxm_array # Return offset for Instructions return start + length_oxm + match.pad def parse_flow_mod(msg, packet): ofmod = unpack('!QQBBHHHLLLH2s', packet[:40]) cookie = ofmod[0] if ofmod[0] > 0 else 0 cookie_mask = ofmod[1] if ofmod[1] > 0 else 0 msg.cookie = '0x' + format(cookie, '02x') msg.cookie_mask = '0x' + format(cookie_mask, '02x') msg.buffer_id = '0x' + format(ofmod[7], '02x') msg.out_port = 4294967040 if ofmod[8] > 4294967040 else ofmod[8] msg.table_id = ofmod[2] msg.command = ofmod[3] msg.idle_timeout = ofmod[4] msg.hard_timeout = ofmod[5] msg.priority = ofmod[6] msg.out_group = ofmod[9] msg.flags = ofmod[10] msg.pad = ofmod[11] instructions_start = _parse_matches(msg.match, packet, 40) msg.instructions = _parse_instructions(packet, instructions_start) return 1 # ################## OFPT_GROUP_MOD ############################ def _parse_buckets(packet): ofpb = unpack('!HHLL4s', packet[:16]) actions = _parse_actions(packet[16:20]) bucket = of13.packet.ofp_bucket() bucket.len = ofpb[0] bucket.weight = ofpb[1] bucket.watch_port = ofpb[2] bucket.watch_group = 
ofpb[3] bucket.pad = ofpb[3] bucket.actions = actions return bucket def parse_group_mod(msg, packet): ofgm = unpack('!HBBL', packet[:8]) command = of13.dissector.get_group_mod_command(ofgm[0]) group_type = of13.dissector.get_group_mod_type(ofgm[1]) msg.command = command msg.group_type = group_type msg.pad = ofgm[2] msg.group_id =
dest . afi = LISP_AFI_IPV4 if 98 - 98: Ii1I if 92 - 92: iII111i % i1IIi . OoOoOO00 * iIii1I11I1II1 if 17 - 17: OoooooooOO . OOooOOo if 32 - 32: OoOoOO00 . oO0o + O0 i111 = struct . pack ( "H" , 0 ) ooOO = struct . calcsize ( "HHIBB" ) oO0Ooo = struct . calcsize ( "H" ) packet = packet [ : ooOO ] + i111 + packet [ ooOO + oO0Ooo : ] if 49 - 49: II111iiii . OoooooooOO packet = packet [ OO00OO : : ] packet = self . source . unpack_address ( packet ) if ( packet == None ) : return ( None ) packet = self . dest . unpack_address ( packet ) if ( packet == None ) : return ( None ) if 30 - 30: OoO0O00 / i11iIiiIii - OoO0O00 / ooOoO0o + iIii1I11I1II1 + i1IIi if 99 - 99: OOooOOo * I1IiiI + oO0o % oO0o % OOooOOo * IiII if ( I1OO == 6 ) : OO00OO = struct . calcsize ( "IHBB" ) if ( len ( packet ) < OO00OO ) : return ( None ) if 98 - 98: OOooOOo I1I111 , I1i , i111 , oOO0oOo0OOoOO = struct . unpack ( "IHBB" , packet [ : OO00OO ] ) self . length = socket . ntohs ( I1i ) self . protocol = i111 self . ttl = oOO0oOo0OOoOO self . source . afi = self . dest . afi = LISP_AFI_IPV6 if 97 - 97: o0oOOo0O0Ooo packet = packet [ OO00OO : : ] packet = self . source . unpack_address ( packet ) if ( packet == None ) : return ( None ) packet = self . dest . unpack_address ( packet ) if ( packet == None ) : return ( None ) if 35 - 35: ooOoO0o + i11iIiiIii if 82 - 82: i11iIiiIii + I11i + iII111i % I1IiiI self . source . mask_len = self . source . host_mask_len ( ) self . dest . mask_len = self . dest . host_mask_len ( ) if 84 - 84: oO0o % OOooOOo OO00OO = struct . calcsize ( "HHHH" ) if ( len ( packet ) < OO00OO ) : return ( None ) if 25 - 25: i11iIiiIii * OoOoOO00 + i11iIiiIii . i1IIi IiIIi1I1I11Ii , oOo0OOOOOO , I1i , i1 = struct . unpack ( "HHHH" , packet [ : OO00OO ] ) self . udp_sport = socket . ntohs ( IiIIi1I1I11Ii ) self . udp_dport = socket . ntohs ( oOo0OOOOOO ) self . udp_length = socket . ntohs ( I1i ) self . udp_checksum = socket . 
ntohs ( i1 ) packet = packet [ OO00OO : : ] return ( packet ) if 83 - 83: I1IiiI if 90 - 90: II111iiii if 2 - 2: Ii1I - OoooooooOO - i11iIiiIii % Oo0Ooo / Ii1I if 77 - 77: o0oOOo0O0Ooo . o0oOOo0O0Ooo * I1Ii111 + OOooOOo - i11iIiiIii if 45 - 45: I1IiiI . I1IiiI - Oo0Ooo * OOooOOo if 71 - 71: i1IIi / I11i if 14 - 14: OoooooooOO if 99 - 99: o0oOOo0O0Ooo * o0oOOo0O0Ooo if 6 - 6: i11iIiiIii + oO0o % ooOoO0o + i11iIiiIii - OOooOOo if 12 - 12: iII111i . oO0o % IiII * OoooooooOO . IiII if 15 - 15: I1IiiI . I1IiiI / i11iIiiIii if 17 - 17: iIii1I11I1II1 / OoO0O00 - II111iiii if 46 - 46: iIii1I11I1II1 * oO0o / i11iIiiIii + II111iiii + I11i if 30 - 30: O0 * IiII - I1Ii111 % O0 * Ii1I if 29 - 29: I1ii11iIi11i % I1ii11iIi11i % Ii1I + ooOoO0o % iIii1I11I1II1 if 41 - 41: I1ii11iIi11i % I1Ii111 if 37 - 37: Oo0Ooo . I1IiiI % OoOoOO00 . OoO0O00 - Oo0Ooo / OoO0O00 if 34 - 34: i11iIiiIii + OoO0O00 + i11iIiiIii . IiII % O0 if 64 - 64: o0oOOo0O0Ooo . iIii1I11I1II1 if 86 - 86: ooOoO0o - I11i . iIii1I11I1II1 - iIii1I11I1II1 if 61 - 61: Ii1I % Oo0Ooo + OoOoOO00 if 60 - 60: oO0o . OoooooooOO if 40 - 40: I11i if 44 - 44: ooOoO0o if 35 - 35: II111iiii + iII111i / I1ii11iIi11i * I1IiiI . I11i if 97 - 97: I1IiiI / o0oOOo0O0Ooo if 13 - 13: I1ii11iIi11i if 72 - 72: Oo0Ooo + IiII / Ii1I * Oo0Ooo if 41 - 41: OOooOOo - OoOoOO00 . I1IiiI + i11iIiiIii + OoO0O00 * iII111i if 85 - 85: OoO0O00 + II111iiii if 87 - 87: OoO0O00 if 93 - 93: OoooooooOO if 80 - 80: o0oOOo0O0Ooo if 3 - 3: i11iIiiIii / OOooOOo + oO0o if 10 - 10: OoO0O00 . OoO0O00 + O0 if 13 - 13: i1IIi . I1IiiI if 45 - 45: ooOoO0o % I11i if 37 - 37: iII111i if 70 - 70: O0 + iIii1I11I1II1 % O0 * o0oOOo0O0Ooo - Oo0Ooo - ooOoO0o if 94 - 94: i1IIi + IiII / OoooooooOO - oO0o / OOooOOo / OoOoOO00 if 55 - 55: OOooOOo if 5 - 5: I11i / OoOoOO00 if 48 - 48: i1IIi - oO0o . OoooooooOO - OoO0O00 - i1IIi if 19 - 19: oO0o % Ii1I + I1ii11iIi11i . 
II111iiii * i11iIiiIii if 87 - 87: Ii1I / I1Ii111 % OoOoOO00 * I1ii11iIi11i - OoooooooOO / OoOoOO00 if 24 - 24: I11i . OOooOOo * i1IIi . I1ii11iIi11i / ooOoO0o / O0 if 62 - 62: o0oOOo0O0Ooo % II111iiii if 22 - 22: oO0o - o0oOOo0O0Ooo if 89 - 89: OOooOOo if 34 - 34: iII111i . OOooOOo if 13 - 13: OoO0O00 * OOooOOo + oO0o if 21 - 21: i11iIiiIii . Ii1I % i1IIi * Ii1I . oO0o + Ii1I if 92 - 92: i1IIi + OoO0O00 * I11i if 70 - 70: Oo0Ooo if 93 - 93: iII111i . I1ii11iIi11i . Oo0Ooo . oO0o . OoooooooOO if 51 - 51: O0 - iII111i if 65 - 65: O0 / II111iiii * IiII % Ii1I + o0oOOo0O0Ooo if 43 - 43: I1Ii111 + OoO0O00 * OoooooooOO if 85 - 85: iII111i + OOooOOo if 36 - 36: OoO0O00 % II111iiii * O0 + II111iiii - oO0o - i1IIi if 53 - 53: Ii1I - OOooOOo if 75 - 75: iII111i % O0 - I11i - I1ii11iIi11i + I1IiiI - I1IiiI if 87 - 87: i1IIi % Ii1I % i1IIi + iIii1I11I1II1 if 23 - 23: iIii1I11I1II1 * I11i . I1Ii111 - o0oOOo0O0Ooo if 66 - 66: I1IiiI * I1Ii111 / i11iIiiIii / OOooOOo if 19 - 19: ooOoO0o % iIii1I11I1II1 * OoooooooOO if 60 - 60: I1Ii111 * iII111i / OoooooooOO * Oo0Ooo if 47 - 47: iII111i + o0oOOo0O0Ooo % iIii1I11I1II1 * OoOoOO00 if 65 - 65: OOooOOo . II111iiii * i11iIiiIii + OOooOOo if 99 - 99: I1ii11iIi11i % Oo0Ooo if 31 - 31: o0oOOo0O0Ooo - II111iiii * OOooOOo . OOooOOo - oO0o if 57 - 57: OOooOOo / i11iIiiIii / I1Ii111 - Oo0Ooo . iIii1I11I1II1 if 84 - 84: IiII if 42 - 42: O0 . I1Ii111 / I11i if 69 - 69: OoOoOO00 / I1Ii111 * I1IiiI if 76 - 76: O0 + II111iiii * OoO0O00 if 1 - 1: o0oOOo0O0Ooo if 34 - 34: o0oOOo0O0Ooo + OOooOOo . OoO0O00 + I1IiiI + OoooooooOO if 90 - 90: Ii1I / OoOoOO00 - iIii1I11I1II1 / i1IIi * I1Ii111 - ooOoO0o if 2 - 2: iII111i * I11i * ooOoO0o + i11iIiiIii + oO0o if 81 - 81: o0oOOo0O0Ooo * OoO0O00 if 18 - 18: i11iIiiIii / o0oOOo0O0Ooo - oO0o . I11i * i1IIi if 67 - 67: Ii1I if 64 - 64: OoOoOO00 + iII111i * OoOoOO00 - I1IiiI * OoooooooOO if 27 - 27: II111iiii
<reponame>ace-ecosystem/ACE # vim: sw=4:ts=4:et # # instance types # INSTANCE_TYPE_PRODUCTION = 'PRODUCTION' INSTANCE_TYPE_QA = 'QA' INSTANCE_TYPE_DEV = 'DEV' INSTANCE_TYPE_UNITTEST = 'UNITTEST' # # required fields for every alert # F_UUID = 'uuid' F_ID = 'id' F_TOOL = 'tool' F_TOOL_INSTANCE = 'tool_instance' F_TYPE = 'type' F_DESCRIPTION = 'description' F_EVENT_TIME = 'event_time' F_DETAILS = 'details' F_DISPOSITION = 'disposition' #F_COMMENTS = 'comments' # # observable types # # # WARNING # XXX NOTE # when you add a new observable type you ALSO need to edit saq/observables/__init__.py # and add a matching entry to the _OBSERVABLE_TYPE_MAPPING dictionary F_ASSET = 'asset' F_CIDR = 'cidr' F_COMMAND_LINE = 'command_line' F_DLP_INCIDENT = 'dlp_incident' F_EMAIL_ADDRESS = 'email_address' F_EMAIL_CONVERSATION = 'email_conversation' F_EMAIL_DELIVERY = 'email_delivery' F_EMAIL_SUBJECT = 'email_subject' F_EXABEAM_SESSION = 'exabeam_session' F_EXTERNAL_UID = 'external_uid' F_FILE = 'file' F_FILE_LOCATION = 'file_location' F_FILE_NAME = 'file_name' F_FILE_PATH = 'file_path' F_FIREEYE_UUID = 'fireeye_uuid' F_FQDN = 'fqdn' F_HOSTNAME = 'hostname' F_HTTP_REQUEST = 'http_request' F_INDICATOR = 'indicator' F_IPV4 = 'ipv4' F_IPV4_CONVERSATION = 'ipv4_conversation' F_IPV4_FULL_CONVERSATION = 'ipv4_full_conversation' F_MAC_ADDRESS = 'mac_address' F_MD5 = 'md5' F_MESSAGE_ID = 'message_id' F_O365_FILE = 'o365_file' F_O365_FILE_CONVERSATION = 'o365_file_conversation' F_PCAP = 'pcap' F_PROCESS_GUID = 'process_guid' F_CBC_PROCESS_GUID = 'cbc_process_guid' F_SHA1 = 'sha1' F_SHA256 = 'sha256' F_SNORT_SIGNATURE = 'snort_sig' F_SUSPECT_FILE = 'suspect_file' # DEPRECATED F_TEST = 'test' F_URL = 'url' F_USER = 'user' F_YARA = 'yara' F_YARA_RULE = 'yara_rule' OBSERVABLE_DESCRIPTIONS = { F_ASSET: 'a F_IPV4 identified to be a managed asset', F_CIDR: 'IPv4 range in CIDR notation', F_COMMAND_LINE: 'command line options to a command that was executed', F_DLP_INCIDENT: 'id of a symantec dlp 
incident', F_EMAIL_ADDRESS: 'email address', F_EMAIL_CONVERSATION: 'a conversation between a source email address (MAIL FROM) and a destination email address (RCPT TO)', F_EMAIL_DELIVERY: 'a delivery of a an email to a target mailbox', F_EMAIL_SUBJECT: 'the subject of an email', F_EXABEAM_SESSION: 'id of an exabeam session', F_EXTERNAL_UID: 'unique identifier for something that is stored in an external tool. Format: tool_name:uid', F_FILE: 'path to an attached file', F_FILE_LOCATION: 'the location of file with format hostname@full_path', F_FILE_NAME: 'a file name (no directory path)', F_FILE_PATH: 'a file path', F_FIREEYE_UUID: 'UUID used to identify a FireEye alert', F_FQDN: 'fully qualified domain name', F_HOSTNAME: 'host or workstation name', F_HTTP_REQUEST: 'a single HTTP request', F_INDICATOR: 'indicator id', F_IPV4: 'IP address (version 4)', F_IPV4_CONVERSATION: 'two F_IPV4 that were communicating formatted as aaa.bbb.ccc.ddd_aaa.bbb.ccc.ddd', F_IPV4_FULL_CONVERSATION: 'two F_IPV4 that were communicating formatted as src_ipv4:src_port:dest_ipv4:dest_port', F_MAC_ADDRESS: 'network card mac address', F_MD5: 'MD5 hash', F_MESSAGE_ID: 'email Message-ID', F_O365_FILE: 'graph api path to a file in o365', F_O365_FILE_CONVERSATION: 'two users sharing o365 files', F_PCAP: 'path to a pcap formatted file *** DEPRECATED (use F_FILE instead)', F_PROCESS_GUID: 'Carbon Black Response global process identifier', F_CBC_PROCESS_GUID: 'Carbon Black Cloud process identifier', F_SHA1: 'SHA1 hash', F_SHA256: 'SHA256 hash', F_SNORT_SIGNATURE: 'snort signature ID', F_SUSPECT_FILE: 'path to an attached file that might be malicious *** DEPRECATED (use directives instead)', F_TEST: 'unit testing observable', F_URL: 'a URL', F_USER: 'an NT user ID identified to have used a given asset in the given period of time', F_YARA: 'yara scan result *** DEPRECATED (use F_YARA_RULE instead)', F_YARA_RULE: 'yara rule name', } VALID_OBSERVABLE_TYPES = sorted([ F_ASSET, F_CIDR, F_COMMAND_LINE, 
# (continuation of the VALID_OBSERVABLE_TYPES sorted([...]) opened on the previous line)
    F_DLP_INCIDENT,
    F_EMAIL_ADDRESS,
    F_EMAIL_CONVERSATION,
    F_EMAIL_DELIVERY,
    F_EMAIL_SUBJECT,
    F_EXABEAM_SESSION,
    F_EXTERNAL_UID,
    F_FILE,
    F_FILE_LOCATION,
    F_FILE_NAME,
    F_FILE_PATH,
    F_FIREEYE_UUID,
    F_FQDN,
    F_HOSTNAME,
    F_HTTP_REQUEST,
    F_INDICATOR,
    F_IPV4,
    F_IPV4_CONVERSATION,
    F_IPV4_FULL_CONVERSATION,
    F_MAC_ADDRESS,
    F_MD5,
    F_MESSAGE_ID,
    F_O365_FILE,
    F_O365_FILE_CONVERSATION,
    F_PCAP,
    F_PROCESS_GUID,
    F_CBC_PROCESS_GUID,
    F_SHA1,
    F_SHA256,
    F_SNORT_SIGNATURE,
    F_SUSPECT_FILE,
    F_TEST,
    F_URL,
    F_USER,
    F_YARA,
    F_YARA_RULE,
])

# observable types still defined for backwards compatibility but deprecated above
DEPRECATED_OBSERVABLES = sorted([
    F_CIDR,
    F_PCAP,
    F_HTTP_REQUEST,
    F_SUSPECT_FILE,
    F_YARA
])

# utility functions to work with F_IPV4_FULL_CONVERSATION types
def parse_ipv4_full_conversation(f_ipv4_fc):
    # splits 'src:src_port:dst:dst_port' back into its components
    # NOTE(review): maxsplit=4 permits 5 parts; 3 would be the natural bound
    # for a four-field value — confirm callers tolerate the extra split.
    return f_ipv4_fc.split(':', 4)

def create_ipv4_full_conversation(src, src_port, dst, dst_port):
    # builds 'src:src_port:dst:dst_port'; whitespace on the addresses is stripped
    return '{}:{}:{}:{}'.format(src.strip(), src_port, dst.strip(), dst_port)

# utility functions to work with F_IPV4_CONVERSATION types
def parse_ipv4_conversation(f_ipv4_c):
    # splits 'src_dst'
    # NOTE(review): maxsplit=2 permits 3 parts; 1 would match the format — confirm.
    return f_ipv4_c.split('_', 2)

def create_ipv4_conversation(src, dst):
    # builds 'src_dst' with surrounding whitespace stripped
    return '{}_{}'.format(src.strip(), dst.strip())

# utility functions to work with F_EMAIL_CONVERSATION types
def parse_email_conversation(f_ipv4_c):
    # splits 'mail_from|rcpt_to'; falls back to (original, '') when no '|' present
    result = f_ipv4_c.split('|', 2)
    # did parsing fail?
    if len(result) != 2:
        return f_ipv4_c, ''
    return result

def create_email_conversation(mail_from, rcpt_to):
    # builds 'mail_from|rcpt_to' with surrounding whitespace stripped
    return '{}|{}'.format(mail_from.strip(), rcpt_to.strip())

def parse_file_location(file_location):
    # 'hostname@full_path' -> [hostname, full_path]
    return file_location.split('@', 1)

def create_file_location(hostname, full_path):
    # builds 'hostname@full_path'; only the hostname is stripped
    return '{}@{}'.format(hostname.strip(), full_path)

def parse_email_delivery(email_delivery):
    # 'message_id|mailbox' -> [message_id, mailbox]
    return email_delivery.split('|', 1)

def create_email_delivery(message_id, mailbox):
    # builds 'message_id|mailbox' with surrounding whitespace stripped
    return '{}|{}'.format(message_id.strip(), mailbox.strip())

# the expected format of the event_time of an alert
event_time_format_tz = '%Y-%m-%d %H:%M:%S %z'
# the old time format before we started storing timezones
event_time_format = '%Y-%m-%d %H:%M:%S'
# the "ISO 8601" format that ACE uses to store datetime objects in JSON with a timezone
# NOTE this is the preferred format
event_time_format_json_tz = '%Y-%m-%dT%H:%M:%S.%f%z'
# the "ISO 8601" format that ACE uses to store datetime objects in JSON without a timezone
event_time_format_json = '%Y-%m-%dT%H:%M:%S.%f'

# alert dispositions
DISPOSITION_FALSE_POSITIVE = 'FALSE_POSITIVE'
DISPOSITION_IGNORE = 'IGNORE'
DISPOSITION_UNKNOWN = 'UNKNOWN'
DISPOSITION_REVIEWED = 'REVIEWED'
DISPOSITION_GRAYWARE = 'GRAYWARE'
DISPOSITION_POLICY_VIOLATION = 'POLICY_VIOLATION'
DISPOSITION_RECONNAISSANCE = 'RECONNAISSANCE'
DISPOSITION_WEAPONIZATION = 'WEAPONIZATION'
DISPOSITION_DELIVERY = 'DELIVERY'
DISPOSITION_EXPLOITATION = 'EXPLOITATION'
DISPOSITION_INSTALLATION = 'INSTALLATION'
DISPOSITION_COMMAND_AND_CONTROL = 'COMMAND_AND_CONTROL'
DISPOSITION_EXFIL = 'EXFIL'
DISPOSITION_DAMAGE = 'DAMAGE'
DISPOSITION_INSIDER_DATA_CONTROL = 'INSIDER_DATA_CONTROL'
DISPOSITION_INSIDER_DATA_EXFIL = 'INSIDER_DATA_EXFIL'
DISPOSITION_APPROVED_BUSINESS = 'APPROVED_BUSINESS'
DISPOSITION_APPROVED_PERSONAL = 'APPROVED_PERSONAL'

# disposition to label mapping
# each disposition has a specific CSS class assigned to it
DISPOSITION_CSS_MAPPING = {
    None: 'special', # when no disposition has been set yet
DISPOSITION_FALSE_POSITIVE: 'success', DISPOSITION_IGNORE: 'default', DISPOSITION_UNKNOWN: 'info', DISPOSITION_REVIEWED: 'info', DISPOSITION_GRAYWARE: 'info', DISPOSITION_POLICY_VIOLATION: 'warning', DISPOSITION_RECONNAISSANCE: 'warning', DISPOSITION_WEAPONIZATION: 'danger', DISPOSITION_DELIVERY: 'danger', DISPOSITION_EXPLOITATION: 'danger', DISPOSITION_INSTALLATION: 'danger', DISPOSITION_COMMAND_AND_CONTROL: 'danger', DISPOSITION_EXFIL: 'danger', DISPOSITION_DAMAGE: 'danger', DISPOSITION_INSIDER_DATA_CONTROL: 'warning', DISPOSITION_INSIDER_DATA_EXFIL: 'danger', DISPOSITION_APPROVED_BUSINESS: 'success', DISPOSITION_APPROVED_PERSONAL: 'success', } VALID_ALERT_DISPOSITIONS = [ DISPOSITION_FALSE_POSITIVE, DISPOSITION_IGNORE, DISPOSITION_UNKNOWN, DISPOSITION_REVIEWED, DISPOSITION_GRAYWARE, DISPOSITION_POLICY_VIOLATION, DISPOSITION_RECONNAISSANCE, DISPOSITION_WEAPONIZATION, DISPOSITION_DELIVERY, DISPOSITION_EXPLOITATION, DISPOSITION_INSTALLATION, DISPOSITION_COMMAND_AND_CONTROL, DISPOSITION_EXFIL, DISPOSITION_DAMAGE, DISPOSITION_INSIDER_DATA_CONTROL, DISPOSITION_INSIDER_DATA_EXFIL, DISPOSITION_APPROVED_BUSINESS, DISPOSITION_APPROVED_PERSONAL, ] IGNORE_ALERT_DISPOSITIONS = [ DISPOSITION_IGNORE, DISPOSITION_UNKNOWN, DISPOSITION_REVIEWED ] BENIGN_ALERT_DISPOSITIONS = [ DISPOSITION_FALSE_POSITIVE, DISPOSITION_APPROVED_BUSINESS, DISPOSITION_APPROVED_PERSONAL, ] MAL_ALERT_DISPOSITIONS = [ DISPOSITION_GRAYWARE, DISPOSITION_POLICY_VIOLATION, DISPOSITION_RECONNAISSANCE, DISPOSITION_WEAPONIZATION, DISPOSITION_DELIVERY, DISPOSITION_EXPLOITATION, DISPOSITION_INSTALLATION, DISPOSITION_COMMAND_AND_CONTROL, DISPOSITION_EXFIL, DISPOSITION_DAMAGE, DISPOSITION_INSIDER_DATA_CONTROL, DISPOSITION_INSIDER_DATA_EXFIL ] DISPOSITION_RANK = { None: -2, DISPOSITION_IGNORE: -1, DISPOSITION_FALSE_POSITIVE: 0, DISPOSITION_APPROVED_BUSINESS: 1, DISPOSITION_APPROVED_PERSONAL: 2, DISPOSITION_UNKNOWN: 3, DISPOSITION_REVIEWED: 4, DISPOSITION_GRAYWARE: 5, DISPOSITION_POLICY_VIOLATION: 6, 
DISPOSITION_RECONNAISSANCE: 7, DISPOSITION_WEAPONIZATION: 8, DISPOSITION_INSIDER_DATA_CONTROL: 9, DISPOSITION_DELIVERY: 10, DISPOSITION_EXPLOITATION: 11, DISPOSITION_INSTALLATION: 12, DISPOSITION_COMMAND_AND_CONTROL: 13, DISPOSITION_INSIDER_DATA_EXFIL: 14, DISPOSITION_EXFIL: 15, DISPOSITION_DAMAGE: 16, } # --- DIRECTIVES DIRECTIVE_ARCHIVE = 'archive' DIRECTIVE_COLLECT_FILE = 'collect_file' DIRECTIVE_CRAWL = 'crawl' DIRECTIVE_CRAWL_EXTRACTED_URLS = 'crawl_extracted_urls' DIRECTIVE_DELAY = 'delay' DIRECTIVE_EXCLUDE_ALL = 'exclude_all' DIRECTIVE_EXTRACT_EMAIL = 'extract_email' DIRECTIVE_EXTRACT_PCAP = 'extract_pcap' DIRECTIVE_EXTRACT_URLS = 'extract_urls' DIRECTIVE_FORCE_DOWNLOAD = 'force_download' DIRECTIVE_IGNORE_AUTOMATION_LIMITS = 'ignore_automation_limits' DIRECTIVE_NO_SCAN = 'no_scan' DIRECTIVE_ORIGINAL_EMAIL = 'original_email' DIRECTIVE_ORIGINAL_SMTP = 'original_smtp' DIRECTIVE_PREVIEW = 'preview' DIRECTIVE_REMEDIATE = 'remediate' DIRECTIVE_RENAME_ANALYSIS = 'rename_analysis' DIRECTIVE_RESOLVE_ASSET = 'resolve_asset' DIRECTIVE_SANDBOX = 'sandbox' DIRECTIVE_TRACKED = 'tracked' DIRECTIVE_WHITELISTED = 'whitelisted' DIRECTIVE_DESCRIPTIONS = { DIRECTIVE_ARCHIVE: 'archive the file', DIRECTIVE_COLLECT_FILE: 'collect the file from the remote endpoint', DIRECTIVE_CRAWL: 'crawl the URL', DIRECTIVE_CRAWL_EXTRACTED_URLS: 'crawl all extracted URLs', DIRECTIVE_DELAY: 'instructs various analysis modules to delay the analysis', DIRECTIVE_EXCLUDE_ALL: 'instructs ACE to NOT analyze this observable at all', DIRECTIVE_EXTRACT_EMAIL: 'extract email from exchange or o365', DIRECTIVE_EXTRACT_PCAP: 'extract PCAP for the given observable and given time', DIRECTIVE_EXTRACT_URLS: 'extract URLs from the given file', DIRECTIVE_FORCE_DOWNLOAD: 'download the content of the URL no matter what', DIRECTIVE_IGNORE_AUTOMATION_LIMITS: 'ignores any automation limits when analyzing this observable', DIRECTIVE_NO_SCAN: 'do not scan this file with yara', DIRECTIVE_ORIGINAL_EMAIL: 'treat this file as 
the original email file', DIRECTIVE_ORIGINAL_SMTP: 'treat this file as the original smtp stream', DIRECTIVE_PREVIEW: 'show this content inline if possible', DIRECTIVE_REMEDIATE: 'remediate the target', DIRECTIVE_RENAME_ANALYSIS: 'indicates that the description of the root analysis object should be updated with analysis results', DIRECTIVE_RESOLVE_ASSET: 'indicates that ACE should treat this IP address as an asset and try to figure out the details', DIRECTIVE_SANDBOX: 'run the observable through a sandbox', DIRECTIVE_TRACKED: 'indicates this observable should be tracked across different analysis requests', DIRECTIVE_WHITELISTED: 'indicates this observable was whitelisted, causing the entire analysis to also become whitelisted', } # NOTE this really isn't used any more VALID_DIRECTIVES = [ DIRECTIVE_ARCHIVE, DIRECTIVE_COLLECT_FILE, DIRECTIVE_CRAWL, DIRECTIVE_CRAWL_EXTRACTED_URLS, DIRECTIVE_DELAY, DIRECTIVE_EXCLUDE_ALL, DIRECTIVE_EXTRACT_EMAIL, DIRECTIVE_EXTRACT_PCAP, DIRECTIVE_EXTRACT_URLS, DIRECTIVE_FORCE_DOWNLOAD, DIRECTIVE_IGNORE_AUTOMATION_LIMITS, DIRECTIVE_NO_SCAN, DIRECTIVE_ORIGINAL_EMAIL, DIRECTIVE_ORIGINAL_SMTP, DIRECTIVE_PREVIEW, DIRECTIVE_REMEDIATE, DIRECTIVE_RENAME_ANALYSIS, DIRECTIVE_SANDBOX, DIRECTIVE_TRACKED, DIRECTIVE_WHITELISTED, ] def is_valid_directive(directive): return directive in VALID_DIRECTIVES # --- TAGS TAG_LEVEL_FALSE_POSITIVE = 'fp' TAG_LEVEL_INFO = 'info' TAG_LEVEL_WARNING = 'warning' TAG_LEVEL_ALERT = 'alert' TAG_LEVEL_CRITICAL = 'critical' TAG_LEVEL_HIDDEN = 'hidden' # --- EVENTS # fired when we add a tag to something EVENT_TAG_ADDED = 'tag_added' # called when an Observable is added to the Analysis EVENT_OBSERVABLE_ADDED = 'observable_added' # called when the details of an Analysis have been updated EVENT_DETAILS_UPDATED = 'details_updated' # fired when we add a directive to an Observable EVENT_DIRECTIVE_ADDED = 'directive_added' # fired when we add an Analysis to an Observable EVENT_ANALYSIS_ADDED = 'analysis_added' # fired when we 
add a DetectionPoint ot an Analysis or Observable EVENT_DETECTION_ADDED = 'detection_added' # fired when an analysis is marked as completed manually EVENT_ANALYSIS_MARKED_COMPLETED = 'analysis_marked_completed' # fired when a relationship is added to an observable EVENT_RELATIONSHIP_ADDED = 'relationship_added' # these next two events are intended to be used with the RootAnalysis object # fired when we add a tag to any taggable object EVENT_GLOBAL_TAG_ADDED = 'global_tag_added' # fired when we add an observable to any analysis object EVENT_GLOBAL_OBSERVABLE_ADDED = 'global_observable_added' # fired when we add an analysis to any observable object EVENT_GLOBAL_ANALYSIS_ADDED = 'global_analysis_added' # list of all valid events VALID_EVENTS = [ EVENT_ANALYSIS_MARKED_COMPLETED, EVENT_TAG_ADDED, EVENT_OBSERVABLE_ADDED, EVENT_ANALYSIS_ADDED, EVENT_DETECTION_ADDED, EVENT_DIRECTIVE_ADDED, EVENT_RELATIONSHIP_ADDED, EVENT_DETAILS_UPDATED, EVENT_GLOBAL_TAG_ADDED, EVENT_GLOBAL_OBSERVABLE_ADDED, EVENT_GLOBAL_ANALYSIS_ADDED ] # available actions for observables ACTION_CLEAR_CLOUDPHISH_ALERT = 'clear_cloudphish_alert' ACTION_COLLECT_FILE = 'collect_file' ACTION_DLP_INCIDENT_VIEW_DLP = 'dlp_incident_view_dlp' ACTION_EXABEAM_SESSION_VIEW_EXABEAM = 'exabeam_session_view_exabeam' ACTION_O365_FILE_DOWNLOAD = 'o365_file_download' ACTION_USER_VIEW_EXABEAM = 'user_view_exabeam' ACTION_FILE_DOWNLOAD = 'file_download' ACTION_FILE_DOWNLOAD_AS_ZIP = 'file_download_as_zip' ACTION_FILE_SEND_TO = 'file_send_to' ACTION_FILE_UPLOAD_VT = 'file_upload_vt' ACTION_FILE_UPLOAD_FALCON_SANDBOX = 'file_upload_falcon_sandbox' ACTION_FILE_UPLOAD_VX = 'file_upload_vx' ACTION_FILE_VIEW_AS_HEX =
# ---------------------------------------------------------------------------
# Auto-generated UFO model vertex definitions (V_259 - V_310).
# Each Vertex couples a particle list (P.*) with a color structure, a Lorentz
# structure (L.*) and a coupling constant (C.GC_*).  The particle content
# (sleptons sl1-sl6, sneutrinos sv1-sv3, neutralinos n1-n4, charginos x1/x2,
# extended Higgs sector h01/h02/A0/G0/H+/G+) suggests a SUSY-type model --
# generated code; do not edit by hand, regenerate from the model source.
# ---------------------------------------------------------------------------
# NOTE(review): this chunk begins mid-statement; the line below is the tail of
# a Vertex assignment whose opening is above this chunk.
couplings = {(0,0):C.GC_877})

V_259 = Vertex(name = 'V_259', particles = [ P.e__plus__, P.n1, P.sl1__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_100})

V_260 = Vertex(name = 'V_260', particles = [ P.e__plus__, P.n2, P.sl1__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_123})

V_261 = Vertex(name = 'V_261', particles = [ P.e__plus__, P.n3, P.sl1__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_146})

V_262 = Vertex(name = 'V_262', particles = [ P.e__plus__, P.n4, P.sl1__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_169})

V_263 = Vertex(name = 'V_263', particles = [ P.sl1__plus__, P.sl1__plus__, P.sl1__minus__, P.sl1__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_373})

V_264 = Vertex(name = 'V_264', particles = [ P.sl2__plus__, P.sl2__minus__, P.sv1__tilde__, P.sv1 ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_400})

V_265 = Vertex(name = 'V_265', particles = [ P.sl2__plus__, P.sl2__minus__, P.sv2__tilde__, P.sv2 ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_539})

V_266 = Vertex(name = 'V_266', particles = [ P.sl2__plus__, P.sl2__minus__, P.sv3__tilde__, P.sv3 ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_400})

V_267 = Vertex(name = 'V_267', particles = [ P.a, P.a, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.VVSS1 ], couplings = {(0,0):C.GC_387})

V_268 = Vertex(name = 'V_268', particles = [ P.h02, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_1862})

V_269 = Vertex(name = 'V_269', particles = [ P.h01, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_1861})

V_270 = Vertex(name = 'V_270', particles = [ P.h01, P.h01, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1007})

V_271 = Vertex(name = 'V_271', particles = [ P.h02, P.h02, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1006})

V_272 = Vertex(name = 'V_272', particles = [ P.G__plus__, P.sl2__minus__, P.sv2__tilde__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_2112})

V_273 = Vertex(name = 'V_273', particles = [ P.H__plus__, P.sl2__minus__, P.sv2__tilde__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_2111})

V_274 = Vertex(name = 'V_274', particles = [ P.G__plus__, P.h02, P.sl2__minus__, P.sv2__tilde__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1161})

V_275 = Vertex(name = 'V_275', particles = [ P.h01, P.H__plus__, P.sl2__minus__, P.sv2__tilde__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1160})

V_276 = Vertex(name = 'V_276', particles = [ P.A0, P.A0, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1130})

V_277 = Vertex(name = 'V_277', particles = [ P.G0, P.G0, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1129})

V_278 = Vertex(name = 'V_278', particles = [ P.G__minus__, P.G__plus__, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1090})

V_279 = Vertex(name = 'V_279', particles = [ P.H__minus__, P.H__plus__, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1089})

V_280 = Vertex(name = 'V_280', particles = [ P.G0, P.G__plus__, P.sl2__minus__, P.sv2__tilde__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1099})

V_281 = Vertex(name = 'V_281', particles = [ P.A0, P.H__plus__, P.sl2__minus__, P.sv2__tilde__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1100})

V_282 = Vertex(name = 'V_282', particles = [ P.a, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.VSS2 ], couplings = {(0,0):C.GC_386})

V_283 = Vertex(name = 'V_283', particles = [ P.vm__tilde__, P.x1__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_860})

V_284 = Vertex(name = 'V_284', particles = [ P.vm__tilde__, P.x2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_878})

V_285 = Vertex(name = 'V_285', particles = [ P.mu__plus__, P.n1, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_101})

V_286 = Vertex(name = 'V_286', particles = [ P.mu__plus__, P.n2, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_124})

V_287 = Vertex(name = 'V_287', particles = [ P.mu__plus__, P.n3, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_147})

V_288 = Vertex(name = 'V_288', particles = [ P.mu__plus__, P.n4, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.FFS3 ], couplings = {(0,0):C.GC_170})

V_289 = Vertex(name = 'V_289', particles = [ P.sl1__plus__, P.sl1__minus__, P.sl2__plus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_398})

V_290 = Vertex(name = 'V_290', particles = [ P.sl2__plus__, P.sl2__plus__, P.sl2__minus__, P.sl2__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_399})

V_291 = Vertex(name = 'V_291', particles = [ P.sl3__plus__, P.sl3__minus__, P.sv1__tilde__, P.sv1 ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_427})

V_292 = Vertex(name = 'V_292', particles = [ P.sl3__plus__, P.sl3__minus__, P.sv2__tilde__, P.sv2 ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_427})

V_293 = Vertex(name = 'V_293', particles = [ P.sl3__plus__, P.sl3__minus__, P.sv3__tilde__, P.sv3 ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_555})

V_294 = Vertex(name = 'V_294', particles = [ P.a, P.a, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.VVSS1 ], couplings = {(0,0):C.GC_413})

V_295 = Vertex(name = 'V_295', particles = [ P.h02, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_1864})

V_296 = Vertex(name = 'V_296', particles = [ P.h01, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_1897})

V_297 = Vertex(name = 'V_297', particles = [ P.h02, P.sl3__minus__, P.sl6__plus__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_1898})

V_298 = Vertex(name = 'V_298', particles = [ P.h01, P.sl3__minus__, P.sl6__plus__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_1884})

V_299 = Vertex(name = 'V_299', particles = [ P.h01, P.h01, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1951})

V_300 = Vertex(name = 'V_300', particles = [ P.h02, P.h02, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1008})

V_301 = Vertex(name = 'V_301', particles = [ P.G__plus__, P.sl3__minus__, P.sv3__tilde__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_2114})

V_302 = Vertex(name = 'V_302', particles = [ P.H__plus__, P.sl3__minus__, P.sv3__tilde__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_2163})

V_303 = Vertex(name = 'V_303', particles = [ P.G0, P.sl3__minus__, P.sl6__plus__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_2161})

V_304 = Vertex(name = 'V_304', particles = [ P.A0, P.sl3__minus__, P.sl6__plus__ ], color = [ '1' ], lorentz = [ L.SSS1 ], couplings = {(0,0):C.GC_2126})

V_305 = Vertex(name = 'V_305', particles = [ P.G__plus__, P.h02, P.sl3__minus__, P.sv3__tilde__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1167})

V_306 = Vertex(name = 'V_306', particles = [ P.h01, P.H__plus__, P.sl3__minus__, P.sv3__tilde__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_2234})

V_307 = Vertex(name = 'V_307', particles = [ P.A0, P.A0, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_2245})

V_308 = Vertex(name = 'V_308', particles = [ P.G0, P.G0, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1132})

V_309 = Vertex(name = 'V_309', particles = [ P.G__minus__, P.G__plus__, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1092})

V_310 = Vertex(name = 'V_310', particles = [ P.H__minus__, P.H__plus__, P.sl3__plus__, P.sl3__minus__ ], color = [ '1' ], lorentz = [ L.SSSS1 ], couplings = {(0,0):C.GC_1091})

# NOTE(review): the chunk is truncated here; the V_311 definition continues
# outside this view.
V_311
import time
import json
import tempfile
import re
import ffmpeg
import cv2
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
from argparse import ArgumentParser

# Raw sensor frame geometry and processing constants.
WIDTH = 1280
HEIGHT = 960
GAMMA = 0.5                      # display gamma applied by the enhancement LUT
SYNC_FRAMERATE = 36              # framerate all cameras are resampled to
AUTORANGELOWDROPOUT = 0.0035     # fraction of darkest pixels ignored by auto-range
AUTORANGEHIGHDROPOUT = 0.007     # fraction of brightest pixels ignored by auto-range


def merge_quadrants(frame):
    """Interleave the four quadrants of a raw frame into a single Bayer mosaic.

    The raw frame stores the four Bayer sub-planes as quadrants (TL/TR/BL/BR);
    pixel (r, c) of each quadrant is placed at the matching corner of the
    2x2 Bayer cell at (2r, 2c).
    """
    assert frame.shape == (HEIGHT, WIDTH), 'Invalid frame shape'
    top, bottom = np.split(frame, 2, axis=0)
    TL, TR = np.split(top, 2, axis=1)
    BL, BR = np.split(bottom, 2, axis=1)
    new_frame = np.zeros_like(frame)
    new_frame[0::2, 0::2] = TL
    new_frame[0::2, 1::2] = TR
    new_frame[1::2, 0::2] = BL
    new_frame[1::2, 1::2] = BR
    return new_frame


def convertImageForDisplay(frame, minRange, maxRange, gamma):
    """Map a raw-intensity frame to an 8-bit display image.

    Linearly rescales [minRange, maxRange] to [0, 1], applies gamma, then
    scales/clips to uint8.  (Not called in this part of the file -- kept for
    API compatibility.)
    """
    frame = frame.astype(np.float64)
    factor = 1.0 / (maxRange - minRange)
    new_frame = (frame - minRange) * factor
    new_frame = np.power(new_frame, gamma)
    return np.clip(new_frame * 255, 0, 255).astype('uint8')


def calculate_cdf(histogram):
    """Return the histogram's cumulative distribution, normalized to max 1.0."""
    cdf = histogram.cumsum()
    normalized_cdf = cdf / float(cdf.max())
    return normalized_cdf


def calculate_lookup(src_cdf, ref_cdf, bits=10):
    """Build a histogram-matching lookup table of 2**bits entries.

    For each source value, finds the first reference value whose CDF is >=
    the source CDF.  If no such value exists (only possible for unnormalized
    CDFs), the previous match is deliberately carried over.
    """
    lookup_table = np.zeros(2**bits)
    lookup_val = 0
    for src_pixel_val in range(len(src_cdf)):
        for ref_pixel_val in range(len(ref_cdf)):
            if ref_cdf[ref_pixel_val] >= src_cdf[src_pixel_val]:
                lookup_val = ref_pixel_val
                break
        lookup_table[src_pixel_val] = lookup_val
    return lookup_table


def findImageMinMax(frame, darkDropoutFactor=AUTORANGELOWDROPOUT, brightDropoutFactor=AUTORANGEHIGHDROPOUT):
    """Estimate a display intensity range for a frame.

    Takes the min/max of a central region (sky and hood margins excluded),
    then trims the darkest/brightest histogram tails by the given dropout
    fractions.  Returns (range_min, range_max).
    """
    hMargin = WIDTH // 16
    topMargin = HEIGHT // 10
    bottomMargin = HEIGHT // 4
    histsize = 0x8000
    region = frame[topMargin: HEIGHT - bottomMargin, hMargin: WIDTH - hMargin]
    region_min = region.min()
    region_max = region.max()
    # histogram of the WHOLE frame; the index->intensity factor below uses the
    # central-region min/max (as in the original implementation)
    hist, bins = np.histogram(frame, histsize)
    cdf_normalized = calculate_cdf(hist)
    max_idx = len(cdf_normalized[cdf_normalized <= (1 - brightDropoutFactor)])
    min_idx = len(cdf_normalized[cdf_normalized <= (darkDropoutFactor)])
    factor = (region_max - region_min) / histsize
    return region_min + min_idx * factor, region_min + max_idx * factor


def read_h265(in_file):
    """Decode an H.265 file object into a (frames, HEIGHT, WIDTH) uint16 array."""
    out, err = (ffmpeg
                .input(in_file.name)
                .output('pipe:', format='rawvideo', pix_fmt='yuv420p10le')
                .global_args('-loglevel', 'fatal')
                .run(capture_stdout=True))
    # The quarter frame sized U and V planes are empty
    video = np.frombuffer(out, np.uint16).reshape([-1, int(HEIGHT * 1.5), WIDTH])[:, :HEIGHT, :]
    return video


def first_pass(video):
    """Merge quadrants of every frame and estimate a per-clip intensity range.

    Returns (merged_vid, range_min, range_max) where the ranges are the
    medians of the per-frame auto-range estimates.
    """
    num_frames = video.shape[0]
    merged_vid = np.zeros((num_frames, HEIGHT, WIDTH), dtype=np.uint16)
    rolling_min = np.zeros(num_frames)
    rolling_max = np.zeros(num_frames)
    for frame_idx in tqdm(range(num_frames), desc='Processing frames'):
        merged_frame = merge_quadrants(video[frame_idx, :, :])
        range_min, range_max = findImageMinMax(merged_frame.astype(np.float64),
                                               darkDropoutFactor=0, brightDropoutFactor=0.01)
        merged_vid[frame_idx, :, :] = merged_frame
        rolling_min[frame_idx] = range_min
        rolling_max[frame_idx] = range_max
    range_min = np.median(rolling_min)
    range_max = np.median(rolling_max)
    return merged_vid, range_min, range_max


def demosaic_vid(merged_vid):
    """Demosaic every Bayer frame to RGB (uint16, shape (n, H, W, 3))."""
    num_frames = merged_vid.shape[0]
    demosaiced_vid = np.zeros((num_frames, HEIGHT, WIDTH, 3), dtype=np.uint16)
    for frame_idx in tqdm(range(num_frames), desc='Demosaicing frames'):
        merged_frame = merged_vid[frame_idx, :, :]
        demosaiced_frame = cv2.cvtColor(merged_frame, cv2.COLOR_BAYER_GB2RGB)
        demosaiced_vid[frame_idx, :, :, :] = demosaiced_frame
    return demosaiced_vid


def write_vid(processed_vid, out_file):
    """Encode an RGB uint8 video array to *out_file* with the camera name overlaid."""
    num_frames = processed_vid.shape[0]
    process = (ffmpeg
               .input('pipe:', format='rawvideo', pix_fmt='rgb24',
                      s='{}x{}'.format(WIDTH, HEIGHT), r=SYNC_FRAMERATE)
               .drawtext(text=Path(out_file).stem, fontcolor='white', fontsize=20,
                         box=1, boxcolor='black@0.5', boxborderw=5,
                         x='(w-text_w)/2', y='(text_h)/2')
               .output(str(out_file), pix_fmt='yuv420p')
               .global_args('-loglevel', 'fatal')
               .overwrite_output()
               .run_async(pipe_stdin=True))
    for frame in tqdm(range(num_frames), desc=f'Writing to: {out_file}'):
        out_frame = processed_vid[frame, :, :, :]
        process.stdin.write(out_frame.tobytes())
    process.stdin.close()


def auto_white_balance(merged_vid, bits=10):
    """Histogram-match the red and blue Bayer channels to green, in place.

    Bayer layout after merge_quadrants: green at (even row, even col) and
    (odd row, odd col), red at (even row, odd col), blue at (odd row, even col).
    NOTE(review): the row slices in the source were corrupted (mangled into
    IPv6-like tokens); they are reconstructed here as 0::2 / 1::2 from the
    matching write-back assignments and the COLOR_BAYER_GB2RGB demosaic --
    confirm against the original repository.
    """
    print('Auto white-balancing...')
    greenvid = (merged_vid[:, 0::2, ::2] + merged_vid[:, 1::2, 1::2]) / 2
    redvid = merged_vid[:, 0::2, 1::2]
    bluevid = merged_vid[:, 1::2, ::2]
    greenhist, _ = np.histogram(greenvid.flatten(), 2**bits, [0, 2**bits])
    redhist, _ = np.histogram(redvid.flatten(), 2**bits, [0, 2**bits])
    bluehist, _ = np.histogram(bluevid.flatten(), 2**bits, [0, 2**bits])
    greencdf = calculate_cdf(greenhist)
    redcdf = calculate_cdf(redhist)
    bluecdf = calculate_cdf(bluehist)
    red_lookup = calculate_lookup(redcdf, greencdf, bits=bits)
    blue_lookup = calculate_lookup(bluecdf, greencdf, bits=bits)
    red_fixed = red_lookup[redvid.astype(np.uint16)]
    blue_fixed = blue_lookup[bluevid.astype(np.uint16)]
    merged_vid[:, 0::2, 1::2] = red_fixed
    merged_vid[:, 1::2, ::2] = blue_fixed
    return merged_vid


def build_index_from_file(index_path, has_metalines=True):
    """Parse a binary camera index file into a frame-locator DataFrame.

    Each index record holds a timestamp plus the offset/length of one frame in
    the sibling data.img / data.h265 file.  When *has_metalines* is True, the
    first 4 rows of 16-bit register values are split off from each frame as a
    separate metalines locator.  Exits the process if no data file is found.
    """
    index_fmt = np.dtype([('seconds', '<u2'), ('pad1', 'V6'),
                          ('nanoseconds', '<u4'), ('pad2', 'V4'),
                          ('offset', '<u4'), ('pad3', 'V4'),
                          ('length', '<u4'), ('pad4', 'V4')])
    indices = pd.DataFrame(np.fromfile(index_path, dtype=index_fmt))
    data_path = Path(index_path).parent / 'data.img'
    if not data_path.exists():
        data_path = Path(index_path).parent / 'data.h265'
        if not data_path.exists():
            print('Did not find video data, exiting.')
            exit(1)
    indices['frame_path'] = data_path
    indices['timestamp'] = indices['seconds'] + indices['nanoseconds'] / 1e9
    metalines_size = WIDTH * 2 * 4  # 4 rows of 16bit register values
    if not has_metalines:
        metalines_size = 0
    indices['frame_offset'] = indices['offset'] + metalines_size
    indices['frame_length'] = indices['length'] - metalines_size
    indices['metalines_path'] = data_path
    indices['metalines_offset'] = indices['offset']
    indices['metalines_length'] = metalines_size
    return indices[['timestamp', 'frame_path', 'frame_offset', 'frame_length',
                    'metalines_path', 'metalines_offset', 'metalines_length']]


def gen_replay_rows(replay_dir):
    """Yield one frame-locator dict per .h265 file in a *_replay directory.

    Filenames are '<hex-timestamp>-<suffix>.h265'; the hex timestamp is in
    nanoseconds.  Each frame must have a matching .img.metalines file.
    """
    name_pat = re.compile(r'(\w+)-(\w+)\.*')
    replay_dir = Path(replay_dir)
    for frame in sorted(replay_dir.glob('*.h265')):
        metalines = frame.parent / (frame.name.split('.')[0] + '.img.metalines')
        assert metalines.exists(), 'Did not find matching metalines file'
        hit = re.search(name_pat, frame.name)
        timestamp = int(hit.group(1), 16) / 1e9
        yield {'timestamp': timestamp,
               'frame_path': frame,
               'frame_offset': 0,
               'frame_length': frame.stat().st_size,
               'metalines_path': metalines,
               'metalines_offset': 0,
               'metalines_length': metalines.stat().st_size}


def build_index_from_replay_dir(replay_dir):
    """Build a frame-locator DataFrame from a *_replay directory."""
    return pd.DataFrame(gen_replay_rows(replay_dir))


def sync_vid(video, timestamps, t_start, t_end, sync_framerate):
    """Resample *video* onto a regular time grid by nearest-timestamp lookup.

    Time slots whose nearest source frame is frame 0 are left black
    (zero-initialized), matching the original behaviour.
    """
    if len(timestamps) > video.shape[0]:
        timestamps = timestamps[:video.shape[0]]
    sync_time = np.arange(0, np.ceil(t_end - t_start) * sync_framerate) / sync_framerate + t_start
    synced_vid = np.zeros((len(sync_time), video.shape[1], video.shape[2], 3), dtype=np.uint8)
    for i, t in tqdm(enumerate(sync_time), desc='Synchronizing frames'):
        frame_idx = abs(timestamps - t).argmin()
        if frame_idx != 0:  # nearest is first frame, show black (Already initialized to zeros)
            synced_vid[i, :, :, :] = video[frame_idx, :, :, :]
    return synced_vid


def process_new_backup(in_file, out_file):
    """Convert a non-raw backup-camera clip: scale/pad to WIDTHxHEIGHT, then
    write it to *out_file* and load it back as an RGB uint8 array."""
    print('Converting backup with ffmpeg...')
    out, err = (ffmpeg
                .input(in_file, r=30)
                .filter('scale', size=f'{WIDTH}:{HEIGHT}', force_original_aspect_ratio=f'decrease')
                .filter('pad', width=WIDTH, height=HEIGHT, x='(ow-iw)/2', y='(oh-ih)/2')
                .output(str(out_file), pix_fmt='yuv420p')
                .overwrite_output()
                .global_args('-loglevel', 'fatal')
                .run())
    print(f'Loading {out_file}...')
    out, err = (ffmpeg
                .input(out_file)
                .output('pipe:', format='rawvideo', pix_fmt='rgb24')
                .run(capture_stdout=True))
    video = np.frombuffer(out, np.uint8).reshape([-1, HEIGHT, WIDTH, 3])
    return video


def calc_enhancement_LUT(range_min, range_max, gamma):
    """Build a uint16 -> uint8 lookup table: rescale to [range_min, range_max],
    apply *gamma*, clip to [0, 255]."""
    x = np.arange(0, 2 ** 16, dtype=np.uint16).astype(np.float64)
    factor = 1 / (range_max - range_min)
    y = np.clip((x - range_min) * factor, 0, 1)
    lut = np.clip(np.power(y, gamma) * 255, 0, 255).astype(np.uint8)
    return lut


def process_vid(in_file):
    """Full raw pipeline for one camera: decode, merge, white-balance, demosaic.

    Returns (demosaiced_vid, range_min, range_max).
    """
    video = read_h265(in_file)
    merged_vid, range_min, range_max = first_pass(video)
    merged_vid = auto_white_balance(merged_vid)
    demosaiced_vid = demosaic_vid(merged_vid)
    return demosaiced_vid, range_min, range_max


def convert_vids(snapshot_dir, out_dir):
    """Convert every known camera in a snapshot directory to a synced MP4.

    Step 1 builds a frame index per camera (layout varies across SW versions);
    step 2 concatenates each camera's frames into a temp file, processes it,
    synchronizes it to a common timeline and writes '<cam>.mp4' to *out_dir*.
    """
    assert (snapshot_dir / 'archive_info.json').exists(), 'Did not find archive_info.json!'
    archive_info = json.loads((snapshot_dir / 'archive_info.json').read_text())
    camera_indices = dict()
    camera_ranges = dict()
    sync_framerate = 36
    set_min_range, set_max_range = 0, 0
    shown_cams = ['main', 'rightpillar', 'leftpillar', 'rightrepeater', 'backup', 'leftrepeater']
    # the snapshot layout and structure varies over SW versions, so we check the ones we know of:
    # Step 1, build an index of the video data
    if 'tclip' in (child.name for child in snapshot_dir.iterdir()):
        assert (snapshot_dir / 'tclip/img').exists(), 'Did not find tclip/img directory'
        for cam_path in (snapshot_dir / 'tclip/img').iterdir():
            cam = cam_path.name.split('_')[0]
            if cam in shown_cams:
                has_metalines = True
                if cam == 'backup':
                    # The backup cam can be different than the other 7
                    has_metalines = int(archive_info['cameras']['backup']['width']) == WIDTH
                index = build_index_from_file(cam_path / 'index', has_metalines=has_metalines)
                camera_indices[cam] = index
    elif 'img' in (child.name for child in snapshot_dir.iterdir()):
        for cam_path in filter(lambda x: x.name.endswith('_replay'), (snapshot_dir / 'img').iterdir()):
            cam = cam_path.name.split('_')[0]
            if cam in shown_cams:
                index = build_index_from_replay_dir(cam_path)
                camera_indices[cam] = index
    t_start = min([df['timestamp'].min() for df in camera_indices.values()])  # Show all footage
    # t_start = max([df['timestamp'].min() for df in camera_timestamps.values()])  # Start when all cams have footage
    t_end = max([df['timestamp'].max() for df in camera_indices.values()])
    sync_time = np.arange(0, np.ceil(t_end - t_start) * sync_framerate) / sync_framerate + t_start
    (out_dir / 'time_info.txt').write_text(','.join(str(t) for t in sync_time))  # Needed for further analysis
    # Step 2, process each video
    for cam in shown_cams:
        index = camera_indices[cam]
        with tempfile.NamedTemporaryFile('wb') as temp_file:
            for _, (frame_path, frame_offset, frame_length) in \
                    index[['frame_path', 'frame_offset', 'frame_length']].iterrows():
                with open(frame_path, 'rb') as frame_file:
                    frame_file.seek(frame_offset, 0)
                    data = frame_file.read(frame_length)
                    temp_file.write(data)
            # BUGFIX: flush buffered writes before ffmpeg reads temp_file by
            # name; otherwise the tail of the stream may be missing.
            temp_file.flush()
            if not (out_dir / f'{cam}.mp4').exists():
                vid_framerate = round(1 / index['timestamp'].diff().median())
                if cam == 'backup' and ffmpeg.probe(temp_file.name)['streams'][0]['width'] != WIDTH:
                    assert vid_framerate == 30, 'Not encountered this framerate before'
                    processed = process_new_backup(temp_file.name, out_dir / f'{cam}.mp4')
                else:
                    assert vid_framerate == sync_framerate, 'Not encountered this framerate before'
                    print(f'Loading {cam}...')
                    demosaiced_vid, range_min, range_max = process_vid(temp_file)
                    camera_ranges[cam] = (range_min, range_max)
                    if cam == 'main':
                        # Match intensity range of all cams to main
                        set_min_range, set_max_range = range_min, range_max
                    range_min = set_min_range if set_min_range else range_min
                    range_max = set_max_range if set_max_range else range_max
                    print('Enhancing video for display...')
                    lut = calc_enhancement_LUT(range_min, range_max * 1.5, GAMMA)
                    processed = lut[demosaiced_vid]
                synced_vid = sync_vid(processed, index['timestamp'], t_start, t_end, sync_framerate)
                write_vid(synced_vid, out_dir / f'{cam}.mp4')
                print(f'Finished {cam}\n')


def create_overview(out_dir):
    """Stack the six per-camera MP4s into a 3x2 'overview.mp4' grid."""
    print('Creating overview vid...')
    frontvids = ['leftpillar', 'main', 'rightpillar']
    rearvids = ['leftrepeater', 'backup', 'rightrepeater']
    front = ffmpeg.filter([ffmpeg.input(Path(out_dir) / f'{f}.mp4') for f in frontvids], 'hstack', inputs=3)
    rear = ffmpeg.filter([ffmpeg.input(Path(out_dir) / f'{r}.mp4') for r in rearvids], 'hstack', inputs=3)
    out = (ffmpeg
           .filter([front, rear], 'vstack', inputs=2)
           .output(str(out_dir / 'overview.mp4'))
           .global_args('-loglevel', 'fatal')
           .overwrite_output()
           .run())
    print('Done')


def downscale_overview(out_dir):
    """Write a half-resolution 'overview_downscaled.mp4' with frequent keyframes."""
    in_file = out_dir / 'overview.mp4'
    probe = ffmpeg.probe(str(in_file))
    video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
    width = int(video_stream['width'])
    height = int(video_stream['height'])
    print('Creating downscaled overview...')
    # Downscale pixels and insert more keyframes for responsive seeking in visualisations
    # NOTE(review): the source was truncated right after .overwrite_output();
    # completed with .run() to match the other ffmpeg pipelines in this file.
    out, err = (ffmpeg
                .input(out_dir / 'overview.mp4')
                .filter('scale', size=f'{width//2}:{height//2}', force_original_aspect_ratio=f'decrease')
                .output(str(out_dir / 'overview_downscaled.mp4'), pix_fmt='yuv420p', x264opts='keyint=10')
                .overwrite_output()
                .run())
Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Cs L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)Cs L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)Cs L7: Cs-(Cds-Cds)(Cds-Cdd)(Cds-Cdd)Cs L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cs L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cs L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cs L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cs L8: Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs L7: Cs-(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)Cs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cs L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cs L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs L5: Cs-CtCdsCdsCs L6: Cs-(Cds-O2d)(Cds-O2d)CtCs L6: Cs-(Cds-O2d)(Cds-Cd)CtCs L7: Cs-(Cds-O2d)(Cds-Cds)CtCs L7: Cs-(Cds-O2d)(Cds-Cdd)CtCs L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)CtCs L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)CtCs L6: Cs-(Cds-Cd)(Cds-Cd)CtCs L7: Cs-(Cds-Cds)(Cds-Cds)CtCs L7: Cs-(Cds-Cdd)(Cds-Cds)CtCs L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)CtCs L8: Cs-(Cds-Cdd-S2d)(Cds-Cds)CtCs L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)CtCs L7: Cs-(Cds-Cdd)(Cds-Cdd)CtCs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CtCs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CtCs L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)CtCs L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)CtCs L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CtCs L5: Cs-CbCdsCdsCs L6: Cs-(Cds-O2d)(Cds-O2d)CbCs L6: Cs-(Cds-O2d)(Cds-Cd)CbCs L7: Cs-(Cds-O2d)(Cds-Cds)CbCs L7: Cs-(Cds-O2d)(Cds-Cdd)CbCs L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)CbCs L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)CbCs L6: Cs-(Cds-Cd)(Cds-Cd)CbCs L7: Cs-(Cds-Cds)(Cds-Cds)CbCs L7: Cs-(Cds-Cdd)(Cds-Cds)CbCs L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)CbCs L8: Cs-(Cds-Cdd-S2d)(Cds-Cds)CbCs L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)CbCs L7: Cs-(Cds-Cdd)(Cds-Cdd)CbCs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CbCs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CbCs L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)CbCs L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)CbCs L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CbCs L5: Cs-CtCtCdsCs L6: 
Cs-(Cds-O2d)CtCtCs L6: Cs-(Cds-Cd)CtCtCs L7: Cs-(Cds-Cds)CtCtCs L7: Cs-(Cds-Cdd)CtCtCs L8: Cs-(Cds-Cdd-O2d)CtCtCs L8: Cs-(Cds-Cdd-S2d)CtCtCs L8: Cs-(Cds-Cdd-Cd)CtCtCs L5: Cs-CbCtCdsCs L6: Cs-(Cds-O2d)CbCtCs L6: Cs-(Cds-Cd)CbCtCs L7: Cs-(Cds-Cds)CbCtCs L7: Cs-(Cds-Cdd)CbCtCs L8: Cs-(Cds-Cdd-O2d)CbCtCs L8: Cs-(Cds-Cdd-S2d)CbCtCs L8: Cs-(Cds-Cdd-Cd)CbCtCs L5: Cs-CbCbCdsCs L6: Cs-(Cds-O2d)CbCbCs L6: Cs-(Cds-Cd)CbCbCs L7: Cs-(Cds-Cds)CbCbCs L7: Cs-(Cds-Cdd)CbCbCs L8: Cs-(Cds-Cdd-O2d)CbCbCs L8: Cs-(Cds-Cdd-S2d)CbCbCs L8: Cs-(Cds-Cdd-Cd)CbCbCs L5: Cs-CtCtCtCs L5: Cs-CbCtCtCs L5: Cs-CbCbCtCs L5: Cs-CbCbCbCs L5: Cs-CdsCdsCdsCds L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)(Cds-O2d) L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)(Cds-Cd) L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)(Cds-Cds) L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)(Cds-Cdd) L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)(Cds-Cdd-O2d) L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)(Cds-Cdd-Cd) L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cd)(Cds-Cd) L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cds)(Cds-Cds) L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd)(Cds-Cds) L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cds) L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cds) L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd)(Cds-Cdd) L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d) L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd) L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L6: Cs-(Cds-O2d)(Cds-Cd)(Cds-Cd)(Cds-Cd) L7: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)(Cds-Cds) L7: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)(Cds-Cdd) L8: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d) L8: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd) L7: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cdd)(Cds-Cdd) L8: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d) L8: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd) L8: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L7: Cs-(Cds-O2d)(Cds-Cdd)(Cds-Cdd)(Cds-Cdd) L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d) L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd) L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L8: 
Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L6: Cs-(Cds-Cd)(Cds-Cd)(Cds-Cd)(Cds-Cd) L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cds) L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cdd) L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d) L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d) L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd) L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd)(Cds-Cdd) L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d) L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd) L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d) L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd) L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L7: Cs-(Cds-Cds)(Cds-Cdd)(Cds-Cdd)(Cds-Cdd) L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d) L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd) L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d) L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd) L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L8: Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L7: Cs-(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)(Cds-Cdd) L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d) L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd) L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d) L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd) L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L5: Cs-CtCdsCdsCds L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)Ct L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cd)Ct L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cds)Ct L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd)Ct L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-O2d)Ct L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-Cd)Ct L6: Cs-(Cds-O2d)(Cds-Cd)(Cds-Cd)Ct L7: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)Ct L7: Cs-(Cds-O2d)(Cds-Cdd)(Cds-Cds)Ct L8: 
Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cds)Ct L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cds)Ct L7: Cs-(Cds-O2d)(Cds-Cdd)(Cds-Cdd)Ct L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Ct L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Ct L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct L6: Cs-(Cds-Cd)(Cds-Cd)(Cds-Cd)Ct L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Ct L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd)Ct L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Ct L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)Ct L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)Ct L7: Cs-(Cds-Cds)(Cds-Cdd)(Cds-Cdd)Ct L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Ct L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Ct L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Ct L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Ct L8: Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct L7: Cs-(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)Ct L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Ct L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Ct L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Ct L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Ct L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct L5: Cs-CbCdsCdsCds L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)Cb L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cd)Cb L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cds)Cb L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd)Cb L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-O2d)Cb L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-Cd)Cb L6: Cs-(Cds-O2d)(Cds-Cd)(Cds-Cd)Cb L7: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)Cb L7: Cs-(Cds-O2d)(Cds-Cdd)(Cds-Cds)Cb L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cds)Cb L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cds)Cb L7: Cs-(Cds-O2d)(Cds-Cdd)(Cds-Cdd)Cb L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cb L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cb L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb L6: Cs-(Cds-Cd)(Cds-Cd)(Cds-Cd)Cb L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)Cb L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd)Cb L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)Cb L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d)Cb L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)Cb L7: Cs-(Cds-Cds)(Cds-Cdd)(Cds-Cdd)Cb L8: 
Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cb L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cb L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cb L8: Cs-(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cb L8: Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb L7: Cs-(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)Cb L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)Cb L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)Cb L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cb L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cb L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb L5: Cs-CtCtCdsCds L6: Cs-(Cds-O2d)(Cds-O2d)CtCt L6: Cs-(Cds-O2d)(Cds-Cd)CtCt L7: Cs-(Cds-O2d)(Cds-Cds)CtCt L7: Cs-(Cds-O2d)(Cds-Cdd)CtCt L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)CtCt L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)CtCt L6: Cs-(Cds-Cd)(Cds-Cd)CtCt L7: Cs-(Cds-Cds)(Cds-Cds)CtCt L7: Cs-(Cds-Cdd)(Cds-Cds)CtCt L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)CtCt L8: Cs-(Cds-Cdd-S2d)(Cds-Cds)CtCt L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)CtCt L7: Cs-(Cds-Cdd)(Cds-Cdd)CtCt L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CtCt L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CtCt L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)CtCt L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)CtCt L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CtCt L5: Cs-CbCtCdsCds L6: Cs-(Cds-O2d)(Cds-O2d)CbCt L6: Cs-(Cds-O2d)(Cds-Cd)CbCt L7: Cs-(Cds-O2d)(Cds-Cds)CbCt L7: Cs-(Cds-O2d)(Cds-Cdd)CbCt L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)CbCt L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)CbCt L6: Cs-(Cds-Cd)(Cds-Cd)CbCt L7: Cs-(Cds-Cds)(Cds-Cds)CbCt L7: Cs-(Cds-Cdd)(Cds-Cds)CbCt L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)CbCt L8: Cs-(Cds-Cdd-S2d)(Cds-Cds)CbCt L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)CbCt L7: Cs-(Cds-Cdd)(Cds-Cdd)CbCt L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CbCt L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CbCt L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)CbCt L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)CbCt L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CbCt L5: Cs-CbCbCdsCds L6: Cs-(Cds-O2d)(Cds-O2d)CbCb L6: Cs-(Cds-O2d)(Cds-Cd)CbCb L7: Cs-(Cds-O2d)(Cds-Cds)CbCb L7: Cs-(Cds-O2d)(Cds-Cdd)CbCb L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)CbCb L8: 
Cs-(Cds-O2d)(Cds-Cdd-Cd)CbCb L6: Cs-(Cds-Cd)(Cds-Cd)CbCb L7: Cs-(Cds-Cds)(Cds-Cds)CbCb L7: Cs-(Cds-Cdd)(Cds-Cds)CbCb L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)CbCb L8: Cs-(Cds-Cdd-S2d)(Cds-Cds)CbCb L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)CbCb L7: Cs-(Cds-Cdd)(Cds-Cdd)CbCb L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CbCb L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CbCb L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-S2d)CbCb L8: Cs-(Cds-Cdd-S2d)(Cds-Cdd-Cd)CbCb L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CbCb L5: Cs-CtCtCtCds L6: Cs-(Cds-O2d)CtCtCt L6: Cs-(Cds-Cd)CtCtCt L7: Cs-(Cds-Cds)CtCtCt L7: Cs-(Cds-Cdd)CtCtCt L8: Cs-(Cds-Cdd-O2d)CtCtCt L8: Cs-(Cds-Cdd-S2d)CtCtCt L8: Cs-(Cds-Cdd-Cd)CtCtCt L5: Cs-CbCtCtCds L6: Cs-(Cds-O2d)CbCtCt L6: Cs-(Cds-Cd)CbCtCt L7: Cs-(Cds-Cds)CbCtCt L7: Cs-(Cds-Cdd)CbCtCt L8: Cs-(Cds-Cdd-O2d)CbCtCt L8: Cs-(Cds-Cdd-S2d)CbCtCt L8: Cs-(Cds-Cdd-Cd)CbCtCt L5: Cs-CbCbCtCds L6: Cs-(Cds-O2d)CbCbCt L6: Cs-(Cds-Cd)CbCbCt L7: Cs-(Cds-Cds)CbCbCt L7: Cs-(Cds-Cdd)CbCbCt L8: Cs-(Cds-Cdd-O2d)CbCbCt L8: Cs-(Cds-Cdd-S2d)CbCbCt L8: Cs-(Cds-Cdd-Cd)CbCbCt L5: Cs-CbCbCbCds L6: Cs-(Cds-O2d)CbCbCb L6: Cs-(Cds-Cd)CbCbCb L7: Cs-(Cds-Cds)CbCbCb L7: Cs-(Cds-Cdd)CbCbCb L8: Cs-(Cds-Cdd-O2d)CbCbCb L8: Cs-(Cds-Cdd-S2d)CbCbCb L8: Cs-(Cds-Cdd-Cd)CbCbCb L5: Cs-CtCtCtCt L5: Cs-CbCtCtCt L5: Cs-CbCbCtCt L5: Cs-CbCbCbCt L5: Cs-CbCbCbCb L5: Cs-C=SCbCtCt L5: Cs-C=S(Cds-Cd)(Cds-Cd)(Cds-Cd) L6: Cs-C=S(Cds-Cds)(Cds-Cds)(Cds-Cdd) L7: Cs-C=S(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd) L7: Cs-C=S(Cds-Cds)(Cds-Cds)(Cds-Cdd-S2d) L6: Cs-C=S(Cds-Cdd)(Cds-Cdd)(Cds-Cdd) L7: Cs-C=S(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-S2d) L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-S2d)(Cds-Cdd-Cd) L6: Cs-C=S(Cds-Cds)(Cds-Cds)(Cds-Cds) L6: Cs-C=S(Cds-Cds)(Cds-Cdd)(Cds-Cdd) L7: Cs-C=S(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-S2d) L7: Cs-C=S(Cds-Cds)(Cds-Cdd-S2d)(Cds-Cdd-Cd) L7: Cs-C=S(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd) L5: Cs-C=S(Cds-Cd)CtCt L6: Cs-C=S(Cds-Cds)CtCt L6: Cs-C=S(Cds-Cdd)CtCt L7: Cs-C=S(Cds-Cdd-S2d)CtCt L7: 
Cs-C=S(Cds-Cdd-Cd)CtCt L5: Cs-C=S(Cds-Cd)CtCs L6: Cs-C=S(Cds-Cds)CtCs L6: Cs-C=S(Cds-Cdd)CtCs L7: Cs-C=S(Cds-Cdd-S2d)CtCs L7: Cs-C=S(Cds-Cdd-Cd)CtCs L5: Cs-C=SCbCbCt L5: Cs-C=SCbCsCs L5: Cs-C=SCbCbCs L5: Cs-C=SCtCtCt L5: Cs-C=S(Cds-Cd)(Cds-Cd)Cs L6: Cs-C=S(Cds-Cdd)(Cds-Cdd)Cs L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cs L7: Cs-C=S(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cs L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cs L6: Cs-C=S(Cds-Cds)(Cds-Cds)Cs L6: Cs-C=S(Cds-Cdd)(Cds-Cds)Cs L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cds)Cs L7: Cs-C=S(Cds-Cdd-Cd)(Cds-Cds)Cs L5: Cs-C=SC=SCtCt L5: Cs-C=SCsCsCs L5: Cs-C=SCtCtCs L5: Cs-C=SC=SC=SCt L5: Cs-C=SC=SC=SCs L5: Cs-C=SC=SC=SC=S L5: Cs-C=SCtCsCs L5: Cs-C=SC=SC=SCb L5: Cs-C=SC=SC=S(Cds-Cd) L6: Cs-C=SC=SC=S(Cds-Cdd) L7: Cs-C=SC=SC=S(Cds-Cdd-Cd) L7: Cs-C=SC=SC=S(Cds-Cdd-S2d) L6: Cs-C=SC=SC=S(Cds-Cds) L5: Cs-C=S(Cds-Cd)(Cds-Cd)Ct L6: Cs-C=S(Cds-Cdd)(Cds-Cdd)Ct L7: Cs-C=S(Cds-Cdd-Cd)(Cds-Cdd-Cd)Ct L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-S2d)Ct L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-Cd)Ct L6: Cs-C=S(Cds-Cds)(Cds-Cds)Ct L6: Cs-C=S(Cds-Cdd)(Cds-Cds)Ct L7: Cs-C=S(Cds-Cdd-Cd)(Cds-Cds)Ct L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cds)Ct L5: Cs-C=SC=SCtCs L5: Cs-C=SC=SCbCb L5: Cs-C=S(Cds-Cd)CsCs L6: Cs-C=S(Cds-Cds)CsCs L6: Cs-C=S(Cds-Cdd)CsCs L7: Cs-C=S(Cds-Cdd-Cd)CsCs L7: Cs-C=S(Cds-Cdd-S2d)CsCs L5: Cs-C=SC=SCbCt L5: Cs-C=S(Cds-Cd)CbCt L6: Cs-C=S(Cds-Cds)CbCt L6: Cs-C=S(Cds-Cdd)CbCt L7: Cs-C=S(Cds-Cdd-S2d)CbCt L7: Cs-C=S(Cds-Cdd-Cd)CbCt L5: Cs-C=SC=SCsCs L5: Cs-C=S(Cds-Cd)CbCb L6: Cs-C=S(Cds-Cds)CbCb L6: Cs-C=S(Cds-Cdd)CbCb L7: Cs-C=S(Cds-Cdd-S2d)CbCb L7: Cs-C=S(Cds-Cdd-Cd)CbCb L5: Cs-C=SC=S(Cds-Cd)Ct L6: Cs-C=SC=S(Cds-Cds)Ct L6: Cs-C=SC=S(Cds-Cdd)Ct L7: Cs-C=SC=S(Cds-Cdd-Cd)Ct L7: Cs-C=SC=S(Cds-Cdd-S2d)Ct L5: Cs-C=SC=S(Cds-Cd)Cs L6: Cs-C=SC=S(Cds-Cds)Cs L6: Cs-C=SC=S(Cds-Cdd)Cs L7: Cs-C=SC=S(Cds-Cdd-S2d)Cs L7: Cs-C=SC=S(Cds-Cdd-Cd)Cs L5: Cs-C=SC=S(Cds-Cd)(Cds-Cd) L6: Cs-C=SC=S(Cds-Cdd)(Cds-Cds) L7: Cs-C=SC=S(Cds-Cdd-S2d)(Cds-Cds) L7: Cs-C=SC=S(Cds-Cdd-Cd)(Cds-Cds) L6: Cs-C=SC=S(Cds-Cdd)(Cds-Cdd) L7: 
Cs-C=SC=S(Cds-Cdd-S2d)(Cds-Cdd-S2d) L7: Cs-C=SC=S(Cds-Cdd-S2d)(Cds-Cdd-Cd) L7: Cs-C=SC=S(Cds-Cdd-Cd)(Cds-Cdd-Cd) L6: Cs-C=SC=S(Cds-Cds)(Cds-Cds) L5: Cs-C=SC=S(Cds-Cd)Cb L6: Cs-C=SC=S(Cds-Cdd)Cb L7: Cs-C=SC=S(Cds-Cdd-S2d)Cb L7: Cs-C=SC=S(Cds-Cdd-Cd)Cb L6: Cs-C=SC=S(Cds-Cds)Cb L5: Cs-C=SCbCtCs L5: Cs-C=S(Cds-Cd)CbCs L6: Cs-C=S(Cds-Cds)CbCs L6: Cs-C=S(Cds-Cdd)CbCs L7: Cs-C=S(Cds-Cdd-S2d)CbCs L7: Cs-C=S(Cds-Cdd-Cd)CbCs L5: Cs-C=S(Cds-Cd)(Cds-Cd)Cb L6: Cs-C=S(Cds-Cdd)(Cds-Cdd)Cb L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-Cd)Cb L7: Cs-C=S(Cds-Cdd-Cd)(Cds-Cdd-Cd)Cb L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cdd-S2d)Cb L6: Cs-C=S(Cds-Cds)(Cds-Cds)Cb L6: Cs-C=S(Cds-Cdd)(Cds-Cds)Cb L7: Cs-C=S(Cds-Cdd-S2d)(Cds-Cds)Cb L7: Cs-C=S(Cds-Cdd-Cd)(Cds-Cds)Cb L5: Cs-C=SCbCbCb L5: Cs-C=SC=SCbCs L4: Cs-CCCOs L5: Cs-CsCsCsOs L5: Cs-CdsCsCsOs L6: Cs-(Cds-O2d)CsCsOs L6: Cs-(Cds-Cd)CsCsOs L7: Cs-(Cds-Cds)CsCsOs L7: Cs-(Cds-Cdd)CsCsOs L8: Cs-(Cds-Cdd-O2d)CsCsOs L8: Cs-(Cds-Cdd-Cd)CsCsOs L5: Cs-OsCtCsCs L5: Cs-CbCsCsOs L5: Cs-CdsCdsCsOs L6: Cs-(Cds-O2d)(Cds-O2d)CsOs L6: Cs-(Cds-O2d)(Cds-Cd)CsOs L7: Cs-(Cds-O2d)(Cds-Cds)CsOs L7: Cs-(Cds-O2d)(Cds-Cdd)CsOs L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)CsOs L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)CsOs L6: Cs-(Cds-Cd)(Cds-Cd)CsOs L7: Cs-(Cds-Cds)(Cds-Cds)CsOs L7: Cs-(Cds-Cdd)(Cds-Cds)CsOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)CsOs L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)CsOs L7: Cs-(Cds-Cdd)(Cds-Cdd)CsOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CsOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CsOs L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CsOs L5: Cs-CtCdsCsOs L6: Cs-(Cds-O2d)CtCsOs L6: Cs-(Cds-Cd)CtCsOs L7: Cs-(Cds-Cds)CtCsOs L7: Cs-(Cds-Cdd)CtCsOs L8: Cs-(Cds-Cdd-O2d)CtCsOs L8: Cs-(Cds-Cdd-Cd)CtCsOs L5: Cs-CbCdsCsOs L6: Cs-(Cds-O2d)CbCsOs L6: Cs-(Cds-Cd)CbCsOs L7: Cs-(Cds-Cds)CbCsOs L7: Cs-(Cds-Cdd)CbCsOs L8: Cs-(Cds-Cdd-O2d)CbCsOs L8: Cs-(Cds-Cdd-Cd)CbCsOs L5: Cs-CtCtCsOs L5: Cs-CbCtCsOs L5: Cs-CbCbCsOs L5: Cs-CdsCdsCdsOs L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-O2d)O2s L6: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cd)O2s L7: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cds)O2s L7: 
Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd)O2s L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-O2d)O2s L8: Cs-(Cds-O2d)(Cds-O2d)(Cds-Cdd-Cd)O2s L6: Cs-(Cds-O2d)(Cds-Cd)(Cds-Cd)O2s L7: Cs-(Cds-O2d)(Cds-Cds)(Cds-Cds)O2s L7: Cs-(Cds-O2d)(Cds-Cdd)(Cds-Cds)O2s L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cds)O2s L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cds)O2s L7: Cs-(Cds-O2d)(Cds-Cdd)(Cds-Cdd)O2s L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)O2s L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)O2s L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)O2s L6: Cs-(Cds-Cd)(Cds-Cd)(Cds-Cd)O2s L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cds)O2s L7: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd)O2s L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-O2d)O2s L8: Cs-(Cds-Cds)(Cds-Cds)(Cds-Cdd-Cd)O2s L7: Cs-(Cds-Cds)(Cds-Cdd)(Cds-Cdd)O2s L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-O2d)O2s L8: Cs-(Cds-Cds)(Cds-Cdd-O2d)(Cds-Cdd-Cd)O2s L8: Cs-(Cds-Cds)(Cds-Cdd-Cd)(Cds-Cdd-Cd)O2s L7: Cs-(Cds-Cdd)(Cds-Cdd)(Cds-Cdd)O2s L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-O2d)O2s L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)(Cds-Cdd-Cd)O2s L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)(Cds-Cdd-Cd)O2s L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)(Cds-Cdd-Cd)O2s L5: Cs-CtCdsCdsOs L6: Cs-(Cds-O2d)(Cds-O2d)CtOs L6: Cs-(Cds-O2d)(Cds-Cd)CtOs L7: Cs-(Cds-O2d)(Cds-Cds)CtOs L7: Cs-(Cds-O2d)(Cds-Cdd)CtOs L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)CtOs L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)CtOs L6: Cs-(Cds-Cd)(Cds-Cd)CtOs L7: Cs-(Cds-Cds)(Cds-Cds)CtOs L7: Cs-(Cds-Cdd)(Cds-Cds)CtOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)CtOs L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)CtOs L7: Cs-(Cds-Cdd)(Cds-Cdd)CtOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CtOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CtOs L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CtOs L5: Cs-CbCdsCdsOs L6: Cs-(Cds-O2d)(Cds-O2d)CbOs L6: Cs-(Cds-O2d)(Cds-Cd)CbOs L7: Cs-(Cds-O2d)(Cds-Cds)CbOs L7: Cs-(Cds-O2d)(Cds-Cdd)CbOs L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)CbOs L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)CbOs L6: Cs-(Cds-Cd)(Cds-Cd)CbOs L7: Cs-(Cds-Cds)(Cds-Cds)CbOs L7: Cs-(Cds-Cdd)(Cds-Cds)CbOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)CbOs L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)CbOs L7: Cs-(Cds-Cdd)(Cds-Cdd)CbOs L8: 
Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)CbOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)CbOs L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)CbOs L5: Cs-CtCtCdsOs L6: Cs-(Cds-O2d)CtCtOs L6: Cs-(Cds-Cd)CtCtOs L7: Cs-(Cds-Cds)CtCtOs L7: Cs-(Cds-Cdd)CtCtOs L8: Cs-(Cds-Cdd-O2d)CtCtOs L8: Cs-(Cds-Cdd-Cd)CtCtOs L5: Cs-CbCtCdsOs L6: Cs-(Cds-O2d)CbCtOs L6: Cs-(Cds-Cd)CbCtOs L7: Cs-(Cds-Cds)CbCtOs L7: Cs-(Cds-Cdd)CbCtOs L8: Cs-(Cds-Cdd-O2d)CbCtOs L8: Cs-(Cds-Cdd-Cd)CbCtOs L5: Cs-CbCbCdsOs L6: Cs-(Cds-O2d)CbCbOs L6: Cs-(Cds-Cd)CbCbOs L7: Cs-(Cds-Cds)CbCbOs L7: Cs-(Cds-Cdd)CbCbOs L8: Cs-(Cds-Cdd-O2d)CbCbOs L8: Cs-(Cds-Cdd-Cd)CbCbOs L5: Cs-CtCtCtOs L5: Cs-CbCtCtOs L5: Cs-CbCbCtOs L5: Cs-CbCbCbOs L4: Cs-CCOsOs L5: Cs-CsCsOsOs L5: Cs-CdsCsOsOs L6: Cs-(Cds-O2d)CsOsOs L6: Cs-(Cds-Cd)CsOsOs L7: Cs-(Cds-Cds)CsOsOs L7: Cs-(Cds-Cdd)CsOsOs L8: Cs-(Cds-Cdd-O2d)CsOsOs L8: Cs-(Cds-Cdd-Cd)CsOsOs L5: Cs-CdsCdsOsOs L6: Cs-(Cds-O2d)(Cds-O2d)OsOs L6: Cs-(Cds-O2d)(Cds-Cd)OsOs L7: Cs-(Cds-O2d)(Cds-Cds)OsOs L7: Cs-(Cds-O2d)(Cds-Cdd)OsOs L8: Cs-(Cds-O2d)(Cds-Cdd-O2d)OsOs L8: Cs-(Cds-O2d)(Cds-Cdd-Cd)OsOs L6: Cs-(Cds-Cd)(Cds-Cd)OsOs L7: Cs-(Cds-Cds)(Cds-Cds)OsOs L7: Cs-(Cds-Cdd)(Cds-Cds)OsOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cds)OsOs L8: Cs-(Cds-Cdd-Cd)(Cds-Cds)OsOs L7: Cs-(Cds-Cdd)(Cds-Cdd)OsOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-O2d)OsOs L8: Cs-(Cds-Cdd-O2d)(Cds-Cdd-Cd)OsOs L8: Cs-(Cds-Cdd-Cd)(Cds-Cdd-Cd)OsOs L5: Cs-CtCsOsOs L5: Cs-CtCdsOsOs L6: Cs-(Cds-O2d)CtOsOs L6: Cs-(Cds-Cd)CtOsOs L7: Cs-(Cds-Cds)CtOsOs L7: Cs-(Cds-Cdd)CtOsOs L8: Cs-(Cds-Cdd-O2d)CtOsOs L8: Cs-(Cds-Cdd-Cd)CtOsOs L5: Cs-CtCtOsOs L5: Cs-CbCsOsOs L5: Cs-CbCdsOsOs L6: Cs-(Cds-O2d)CbOsOs L6: Cs-(Cds-Cd)CbOsOs L7: Cs-(Cds-Cds)CbOsOs L7: Cs-(Cds-Cdd)CbOsOs L8: Cs-(Cds-Cdd-O2d)CbOsOs L8: Cs-(Cds-Cdd-Cd)CbOsOs L5: Cs-CbCtOsOs L5: Cs-CbCbOsOs L4: Cs-COsOsOs L5: Cs-CsOsOsOs L5: Cs-CdsOsOsOs L6: Cs-(Cds-O2d)OsOsOs L6: Cs-(Cds-Cd)OsOsOs L7: Cs-(Cds-Cds)OsOsOs L7: Cs-(Cds-Cdd)OsOsOs L8: Cs-(Cds-Cdd-O2d)OsOsOs L8: Cs-(Cds-Cdd-Cd)OsOsOs L5: Cs-CtOsOsOs L5: Cs-CbOsOsOs L4: Cs-OsOsOsOs L4: 
Cs-COsOsH L5: Cs-CsOsOsH L5: Cs-CdsOsOsH L6: Cs-(Cds-O2d)OsOsH L6: Cs-(Cds-Cd)OsOsH L7: Cs-(Cds-Cds)OsOsH L7: Cs-(Cds-Cdd)OsOsH L8: Cs-(Cds-Cdd-O2d)OsOsH L8: Cs-(Cds-Cdd-Cd)OsOsH L5: Cs-CtOsOsH L5: Cs-CbOsOsH L4: Cs-COsSsH L5: Cs-CsOsSsH L5: Cs-CdsOsSsH L5: Cs-CtOsSsH L5: Cs-CbOsSsH L4: Cs-CCOsSs L5: Cs-CsCsOsSs L4: Cs-COsOsSs L5: Cs-CsOsOsSs L4: Cs-CCOsH L5: Cs-CsCsOsH L5: Cs-CdsCsOsH L6: Cs-(Cds-O2d)CsOsH L6:
= True # # Define Chords for pausing TerminalVi # def do_say_more(self): # Vim ⌃G """Reply once with more verbose details""" count = self.get_vi_arg1_int() editor = self.editor if editor.finding_line: editor.finding_highlights = True self.say_more(count) # Vim ⌃G Quirk doesn't turn Search highlights back on, Vi Py ⌃G does # TODO: Think more deeply into Vi Py ⌃C Esc vs Vi Py ⌃G def say_more(self, count=0): """Reply once with some details often left unmentioned""" editor = self.editor showing_lag = editor.showing_lag held_vi_file = self.held_vi_file write_path = held_vi_file.write_path # Mention the full Path only if asked homepath = os_path_homepath(write_path) nickname = held_vi_file.pick_file_nickname() if held_vi_file else None enough_path = homepath if (count > 1) else nickname # ⌃G2⌃G Egg # Mention the Search in progress only if it's highlit if editor.finding_highlights: editor.reply_with_finding() # Mention injecting Lag str_lag = None if showing_lag is not None: str_lag = "{}s lag".format(showing_lag) # Collect the Mentions joins = list() joins.append(repr(enough_path)) if str_lag: joins.append(str_lag) if held_vi_file.touches: joins.append("{} bytes touched".format(held_vi_file.touches)) joins.append(sys_argv_0_title_py()) # such as "Vi Py", "Em Py", etc # Join the Mentions into one Status Row more_status = " ".join(joins) editor.editor_print(more_status) # such as "'bin/vi.py' less lag" # TODO: explore Vim Quirk of scrolling and pausing to make room for wide pathnames def do_vi_c0_control_etx(self): # Vim ⌃C # Vi Py Init """Cancel Prefix, or close Replace/ Insert, or suggest ⇧Z⇧Q to quit Vi Py""" self.vi_keyboard_quit("Cancelled") # Vim ⌃C Quirk rings a Bell for each extra ⌃C, Vi Py doesn't def do_vi_c0_control_esc(self): # Vim Esc """Cancel Prefix, or close Replace/ Insert, or suggest ⇧Z⇧Z to quit Vi Py""" self.vi_keyboard_quit("Escaped") # Vim Esc Quirk slowly rings a Bell for each extra Esc, Vi Py doesn't def vi_keyboard_quit(self, verbed): """Cancel or 
escape some one thing that is most going on""" count = self.get_vi_arg1_int(default=None) editor = self.editor skin = editor.skin keyboard = skin.keyboard if count is not None: # 123⌃C Egg, 123Esc Egg, etc self.vi_print("{} Repeat Count".format(verbed)) elif keyboard.intake_bypass: # ⌃O⌃C Egg, ⌃O⌃Esc Egg self.vi_print("{} ⌃O Bypass".format(verbed)) elif editor.intake_beyond == "inserting": # ⇧A ⇧I ⇧O A I O then Esc ⌃C self.take_vi_views() rep_count = editor.format_touch_count() self.vi_print("{} after {} inserted".format(verbed, rep_count)) skin.doing_traceback = skin.traceback # ⌃C of A⌃OZ⇧Q⌃C⇧Z⇧Q Egg # FIXME report chars inserted this time, not since last save elif editor.intake_beyond == "replacing": # ⇧R then Esc or ⌃C skin.doing_traceback = skin.traceback # ⌃C of R⌃OZ⇧Q⌃C⇧Z⇧Q Egg self.take_vi_views() rep_count = editor.format_touch_count() self.vi_print("{} after {} replaced".format(verbed, rep_count)) # FIXME report chars replaced this time, not since last save elif editor.finding_highlights: # *⌃C Egg, *Esc Egg, etc self.vi_print("{} Search".format(verbed)) editor.finding_highlights = None # Vim ⌃C, Esc Quirks leave highlights up elif verbed == "Escaped": self.suggest_quit_vi("Press ⇧Z⇧Z to save changes") # Esc Egg else: self.suggest_quit_vi("Press ⇧Z⇧Q to lose changes") # ⌃C Egg def suggest_quit_vi(self, how): """Print how to Quit Em Py or Vi Py, etc""" held_vi_file = self.held_vi_file nickname = held_vi_file.pick_file_nickname() if held_vi_file else None title_py = sys_argv_0_title_py() # "Vi Py", "Em Py", etc version = module_file_version_zero() self.vi_print( "{!r} {} and quit {} {}".format(nickname, how, title_py, version) ) # such as '/dev/stdout' Press ⇧Z⇧Q to lose changes and quit Vim Py 0.1.23 def do_continue_vi(self): # Vim ⇧Q V I Return # not Ex mode """Accept Q v i Return, without ringing the Terminal bell""" editor = self.editor self.check_vi_count() # raise NotImplementedError: Repeat Count # Offer to play a game self.vi_ask("Would you like to 
play a game? (y/n)") chords = editor.take_one_chord_cluster() editor.skin.nudge.suffix = chords # Begin game, or don't title_py = sys_argv_0_title_py() # "Vi Py", "Em Py", etc if chords in (b"y", b"Y"): self.vi_print("Ok, now try to quit {}".format(title_py)) # Qvi⌃My Egg else: self.vi_print("Ok") # Qvi⌃Mn Egg def vi_ask(self, *args): """Ask a question, but don't wait for its answer""" editor = self.editor message = " ".join(str(_) for _ in args) message += " " # place the cursor after a Space after the Message self.vi_print(message) # 'def vi_ask' calling vi_reply = self.format_vi_status(self.editor.skin.reply) ex = TerminalEx(editor, vi_reply=vi_reply) ex.flush_ex_status() def do_resume_vi(self): # Vim :vi\r # Vi Py :em\r """Set up XTerm Alt Screen & Keyboard, till Painter Exit""" editor = self.editor self.check_vi_count() # raise NotImplementedError: Repeat Count editor.do_resume_editor() # Vi Py :em without arg switches into running like Em Py # Vim :em Quirk defines :em only with args def do_vi_suspend_frame(self): # Vim ⌃Zfg """Don't save changes now, do stop Vi Py process, till like Bash 'fg'""" editor = self.editor self.check_vi_count() # raise NotImplementedError: Repeat Count reply = TerminalReplyOut(self.last_formatted_reply) reply.bell = False editor.do_suspend_frame() if self.seeking_column is not None: self.keep_up_vi_column_seek() editor.skin.reply = reply def do_cut_back_after_take_inserts(self): # Vim C """Call to cut from here to there, after next move, and then take inserts""" after_cut = self.after_cut editor = self.editor self.check_vi_count() # TODO: multiply Repeat Count into movement # Escape recursion of Vim C inside Vim C if self.editor.after_func and (after_cut == "changing"): self.after_cut = None self.after_pin = None assert self.after_did is None self.editor.after_func = None self.do_slip_first_chop_take_inserts() # Vim C C return # Call for 'def do_cut_back' and 'def take_inserts' after next move self.after_cut = "changing" 
self.after_pin = editor.spot_pin() self.after_did = None self.editor.after_func = self.do_cut_back self.take_one_bypass() self.vi_print("Move the cursor ahead past end, or back to start, of cut") # FIXME: Code up lots more of Vim C def do_cut_back_after(self): # Vim D """Call to cut from here to there, after next move""" after_cut = self.after_cut editor = self.editor self.check_vi_count() # TODO: multiply Repeat Count into movement # Escape recursion of Vim D inside Vim D if self.editor.after_func and (after_cut == "deleting"): self.after_cut = None self.after_pin = None self.after_did = self.do_chop_down # Vim D D self.editor.after_func = None self.after_did() return # Call for 'def do_cut_back' after next move self.after_cut = "deleting" self.after_pin = editor.spot_pin() self.after_did = None self.editor.after_func = self.do_cut_back self.take_one_bypass() self.vi_print("Move the cursor ahead past end, or back to start, of cut") # FIXME: For an empty File, Egg DG wrongly says "Cut back 1 lines" # FIXME: Code up lots more of Vim D # FIXME: Vim quirk deletes an extra Char at any of DFx D⇧Fx CFx C⇧Fx # FIXME: Vim quirk deletes an extra Line at D⇧G # FIXME: Vim quirk differs from Vi Py at ⇧MD⇧H def do_cut_back(self): """Cut from there to here""" after_cut = self.after_cut after_pin = self.after_pin # TODO: work on the pin/ after_pin/ head/ tail names editor = self.editor keyboard = editor.skin.keyboard pin = editor.spot_pin() # Stop calling for work after move self.after_cut = None self.after_pin = None self.after_did = None # Cut the Selection, after leaping to its Upper Left self.cut_across(here_pin=pin, there_pin=after_pin) # Also start taking Inserts, if changing and not just deleting self.vi_print() # Cancel the Status from Movement if after_cut != "changing": rep_count = editor.format_touch_count() self.vi_print("Cut back {}".format(rep_count)) else: if keyboard.intake_bypass: # TODO: make this less ugly keyboard.with_intake_bypass = "" keyboard.intake_beyond 
= keyboard.intake_bypass keyboard.intake_bypass = "" self.take_vi_inserts() def cut_across(self, here_pin, there_pin): """Cut the Selection, after leaping to its Upper Left""" editor = self.editor columns = editor.count_columns_in_row() # Sort the two Pins, so as to speak from Upper Left to Lower Right (here, there) = (here_pin, there_pin) if there_pin < here_pin: (here, there) = (there_pin, here_pin) # Leap to the upper left of the Selection editor.row = here.row editor.column = here.column # Option to just cut Chars within this Line if here.row == there.row: count = there.column - here.column touches = editor.delete_some_chars(count) self.held_vi_file.touches += touches else: # 1 ) Chop this Line touches = editor.delete_some_chars(count=columns) self.held_vi_file.touches += touches # 2 ) Delete the Lines in between count = there.row - here.row self.chop_down(count) # 3 ) Delete the Leftmost Chars of the last Line involved editor.row += 1 touches = editor.delete_some_chars(there.column) self.held_vi_file.touches += touches editor.row -= 1 def do_replay_cut(self): # Vim . """Replay input Keyboard Chords recorded when last cutting Chars""" self.check_vi_index(self.after_did) self.after_did() # FIXME: Code up lots more of Vim . def do_record_over_choice(self): # Vim Qx """Record input Keyboard Chords till next Q, into
<reponame>tobiasbp/workbook_exporter #!/usr/bin/env python3 import argparse from datetime import datetime, timedelta import logging import os import random import time from prometheus_client import start_http_server, Summary from prometheus_client.core import GaugeMetricFamily, HistogramMetricFamily, REGISTRY import workbook_api import yaml # The string to use when converting times in Workbook # Example: 2020-08-17T09:02:23.677Z TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ" # The field with the reported balance metric FINANCE_ACCOUNT_BALANCE_FIELD = 'AmountBeginning' # Create a metric to track time spent and requests made. #REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request') # Fields to use for summing hours/week EMPLOYEE_HOURS_CAPACITY_FIELDS = [ "HoursNormalMonday", "HoursNormalTuesday", "HoursNormalWednesday", "HoursNormalThursday", "HoursNormalFriday", "HoursNormalSaturday", "HoursNormalSunday" ] # Decorate function with metric. #@<EMAIL>() #def process_request(t): # """A dummy function that takes some time.""" # time.sleep(t) def parse_date(wb_time): ''' Convert a Workbook time string to a datetime object ''' return datetime.strptime(wb_time, TIME_FORMAT) def data_to_histogram(observations, buckets): '''Returns a list of buckets with values and the sum of the observations Keyword arguments: observations (List): A list of numbers buckets (List): A list of bucket values ''' # Convert buckets to a dict with bucket values as keys buckets = {v:0 for v in buckets} # Add key "infinite" if missing if not float("inf") in buckets.keys(): buckets[float("inf")] = 0 # Sort observations in to the bucket dict for o in observations: for key in sorted(buckets.keys()): if o <= key: buckets[key] += 1 # List holding lists of [bucket_name, value] buckets_list = [] # Add the bucket data to the buckets_list for key in sorted(buckets.keys()): # Calculate bucket name if key < float("inf"): bucket_name = str(key) else: bucket_name = "+Inf" # Append bucket data 
[name, value] buckets_list.append([bucket_name, buckets[key]]) return(buckets_list, sum(observations)) def build_histogram(observations, buckets, name, desc, label_names, label_values): # Histogram billable (Total) bucket_values, buckets_sum = data_to_histogram( observations, buckets ) # Job age histogram billable h = HistogramMetricFamily(name, desc, labels=label_names) # Add data h.add_metric(label_values, bucket_values, buckets_sum) # Return histogram with data return h class WorkbookCollector(object): def __init__(self, wb_url, wb_user, wb_pass): # Workbook API object self.wb = workbook_api.WorkbookAPI(wb_url, wb_user, wb_pass) def collect(self): logging.info("Getting data from Workbook.") scrape_start_time = datetime.now() # Metric for status on getting data from WB workbook_up = GaugeMetricFamily( 'workbook_up', 'Is data beeing pulled from Workbook') # Assume no problems with getting data from Workbook wb_error = False # How many requests were made to workbook? no_of_wb_requests = 0 # Get all the data from WB try: # A dictionary mapping id to ISO name self.currencies = {c['Id']:c['Iso4127'] for c in self.wb.get_currencies()} no_of_wb_requests += 1 # A dictionary mapping id to company name companies = {c['Id']:c for c in self.wb.get_companies(active=True)} no_of_wb_requests += 1 # Delete any companies not in list in config file if COMPANIES_TO_GET: companies_to_delete = [] # Register company IDs to delete for c_id in companies.keys(): if c_id not in COMPANIES_TO_GET: companies_to_delete.append(c_id) # Delete the company IDs from the companies dict for c_id in companies_to_delete: companies.pop(c_id, None) # Warn if companies in COMPANIES_TO_GET are not found in WB only_in_config = set(COMPANIES_TO_GET) - set(companies.keys()) if only_in_config: logging.warning(("Company IDs {} not in Workbook. 
Likely a wrong" + \ " ID in config 'companies'.").format(only_in_config)) # Add currency_id to companies for c_id, c_data in companies.items(): # Get full company info from WB c_info = self.wb.get_company(CompanyId=c_id) no_of_wb_requests += 1 # Add currency to company dict c_data['CurrencyId'] = c_info['CurrencyID'] # A dictionary mapping IDs to employees employees = {} # Get employees for all companies for c_id in companies.keys(): for e in self.wb.get_employees(Active=True, CompanyId=c_id): employees[e['Id']] = e no_of_wb_requests += 1 # Capacity profiles (Hours pr/week for employees) # Employee ID is key capacity_profiles = {} for e in employees.values(): # We should only see an employee ID once assert not e['Id'] in capacity_profiles.keys() # Get all profiles for employee try: profiles = self.wb.get_capacity_profiles(e['Id']) no_of_wb_requests += 1 except Exception as e: logging.error("Could not get capacity profiles for employee '{}' with error: {}" .format(e['Id'], e)) # Abort this iteration continue logging.debug("No of capacity profiles for user '{}': {}" .format(e['EmployeeName'], len(profiles))) # Pick 1st profile in list p = profiles[-1] # Is there a newer profile in in list? for x in profiles: # Abort if profile is in the future if parse_date(x['ValidFrom']) > datetime.now(): continue # Use this profile, if valid from is more recent than current if parse_date(x['ValidFrom']) > parse_date(p['ValidFrom']): p = x logging.debug("Using capacity profile valid from {} for user '{}'" .format(p['ValidFrom'], e['EmployeeName'])) # Add calculated sum of work hours pr. 
week to profile p['hours_week'] = 0 for key in p.keys(): if key in EMPLOYEE_HOURS_CAPACITY_FIELDS: p['hours_week'] += p[key] # Add the profile to the profiles dict capacity_profiles[e['Id']] = p # A dictionary mapping IDs to departments departments = {d['Id']:d for d in self.wb.get_departments()} no_of_wb_requests += 1 # A dictionary mapping Job IDs to jobs jobs = {} # Get jobs for all companies for c_id in companies.keys(): for j in self.wb.get_jobs(Status=ACTIVE_JOBS,CompanyId=c_id): jobs[j['Id']] = j no_of_wb_requests += 1 # A dictionary mapping IDs to creditors creditors = {c['Id']:c for c in self.wb.get_creditors()} no_of_wb_requests += 1 # Employee prices prices = self.wb.get_employee_prices_hour(ActiveEmployees=True) no_of_wb_requests += 1 # Build a dictionary of current prices with employee IDs as key prices_dict = {} for p in prices: # Attempt to get entry for employee e = prices_dict.get(p['EmployeeId'], None) # Add price to dict if employee is not represented if not e: prices_dict[p['EmployeeId']] = p # Update price? else: # Date objects from dates current_date = parse_date(e['ValidFrom']) new_date = parse_date(p['ValidFrom']) # Replace price entry, if price is newer # than existing, and not in the future if new_date > current_date and new_date <= datetime.now(): prices_dict[p['EmployeeId']] = p # Get a list of finance accounts accounts = self.wb.get_finance_accounts( TypeIds=FINANCE_ACCOUNT_TYPES, Companies=companies.keys()) no_of_wb_requests += 1 # Add balance to accounts for a in accounts: balance_list = self.wb.get_finance_account_balance( CompanyId=a['CompanyId'], AccountId=a['Id'], ) no_of_wb_requests += 1 # Makes sure we have data. Some typeIds do not. if len(balance_list) > 0: # We want the latest balance entry. 
# Assume latest entry has highest ID # A dict with Ids as key b = {b['Id']:b for b in balance_list} # Get highest Id max_id = max(b.keys()) # Add field Balance to account a['balance'] = b[max_id].get( FINANCE_ACCOUNT_BALANCE_FIELD, 0) except Exception as e: logging.error("Could not get data from Workbook: {}".format(e)) # Report no data from Workbook workbook_up.add_metric([], 0) yield workbook_up return else: logging.info("Done getting data from Workbook") # FINANCE ACCOUNTS for a in accounts: # Some account types (2?) has no balance # It's only used cosmetically in Workbook if a.get('balance'): # Get the currency to use currency_id = companies[a['CompanyId']]['CurrencyId'] g = GaugeMetricFamily( 'workbook_finance_account_balance', 'Balance of finance account', labels=[ 'company_id', 'currency', 'account_id', 'account_description', 'account_number' ] ) g.add_metric( [str(a['CompanyId']), str(self.currencies[currency_id]), str(a['Id']), str(a['AccountDescription']), str(a['AccountNumber'])], a['balance'] ) yield g # Buckets for histograms # Add Buckets to config days_employed_buckets = [3*30, 5*30, 2*12*30+9*30, 5*12*30+8*30, 8*12*30+7*30] profit_buckets = [0.2, 0.4, 0.6, 0.8] hours_sale_buckets = [500, 1000, 1500, 2000] hours_cost_buckets = [250, 500, 750, 1000] # FIXME: Credit/Debit buckets should probably be currency dependant credit_buckets = [-50000, -25000, -10000, 0, 10000, 25000, 50000] debit_buckets = [-50000, -25000, -10000, 0, 10000, 25000, 50000, 100000] # FIXME: Add list of company ids to look for # Days to look in to the past for timeentries time_entry_days = 7 # TIME ENTRIES # # Time entries don't have ClientIds # FIxme: (We could look them up in jobs?) 
# Time period to get time entries for (Time where work was done) start_date = (datetime.today() - timedelta(days=time_entry_days)).isoformat() end_date = datetime.today().isoformat() # Top key is company_id:department_id time_entries_data = {c_id:{} for c_id in companies.keys()} # Add departments for c_id, c_data in time_entries_data.items(): for d_id, d_data in departments.items(): if d_data['CompanyId'] == c_id: c_data[d_id] = { 'billable': 0, 'total': 0, 'revenue': 0, 'resource_ids': set(), 'job_ids': set(), 'customer_ids': set() } # TIME ENTRIES # try: time_entries = self.wb.get_time_entries( Start=start_date, End=end_date,HasTimeRegistration=True) no_of_wb_requests += 1 except Exception as e: print("Could not get WB time entries with error: {}".format(e)) wb_error = True else: # FIXME: Number of clients worked on for e in time_entries: # Sometimes a resource is no longer an employee if not employees.get(e['ResourceId']): #print("No
sweep, unless using external reference. elif not using_external_references: logger.debug("First sweep will be used as reference for reindexing") first = self._sweep_handler.get_epochs()[0] si = self._sweep_handler.get_sweep_information(first) reference_expt = si.get_experiments() reference_refl = si.get_reflections() # Now reindex to be consistent with first dataset - run reindex on each # dataset with reference (unless did brehm diederichs and didn't supply # a reference file) if reference_refl and reference_expt: exp = load.experiment_list(reference_expt) reference_cell = exp[0].crystal.get_unit_cell().parameters() # ---------- REINDEX TO CORRECT (REFERENCE) SETTING ---------- logger.info("Reindexing all datasets to common reference") if using_external_references: epochs = self._sweep_handler.get_epochs() else: epochs = self._sweep_handler.get_epochs()[1:] for epoch in epochs: # if we are working with unified UB matrix then this should not # be a problem here (note, *if*; *should*) # what about e.g. alternative P1 settings? # see JIRA MXSW-904 if PhilIndex.params.xia2.settings.unify_setting: continue reindexer = DialsReindex() reindexer.set_working_directory(self.get_working_directory()) auto_logfiler(reindexer) si = self._sweep_handler.get_sweep_information(epoch) reindexer.set_reference_filename(reference_expt) reindexer.set_reference_reflections(reference_refl) reindexer.set_indexed_filename(si.get_reflections()) reindexer.set_experiments_filename(si.get_experiments()) reindexer.run() # At this point, CCP4ScalerA would reset in integrator so that # the integrater calls reindex, no need to do that here as # have access to the files and will never need to reintegrate. si.set_reflections(reindexer.get_reindexed_reflections_filename()) si.set_experiments(reindexer.get_reindexed_experiments_filename()) # FIXME how to get some indication of the reindexing used? 
exp = load.experiment_list( reindexer.get_reindexed_experiments_filename() ) cell = exp[0].crystal.get_unit_cell().parameters() # Note - no lattice check as this will already be caught by reindex logger.debug("Cell: %.2f %.2f %.2f %.2f %.2f %.2f" % cell) logger.debug("Ref: %.2f %.2f %.2f %.2f %.2f %.2f" % reference_cell) for j in range(6): if ( math.fabs((cell[j] - reference_cell[j]) / reference_cell[j]) > 0.1 ): raise RuntimeError( "unit cell parameters differ in %s and %s" % (reference_expt, si.get_reflections()) ) # Now make sure all batches ok before finish preparing # This should be made safer, currently after dials.scale there is no # concept of 'batch', dials.export uses the calculate_batch_offsets # to assign batches, giving the same result as below. experiments_to_rebatch = [] for epoch in self._sweep_handler.get_epochs(): si = self._sweep_handler.get_sweep_information(epoch) experiment = si.get_experiments() experiments_to_rebatch.append(load.experiment_list(experiment)[0]) offsets = calculate_batch_offsets(experiments_to_rebatch) for i, epoch in enumerate(self._sweep_handler.get_epochs()): si = self._sweep_handler.get_sweep_information(epoch) r = si.get_batch_range() si.set_batch_offset(offsets[i]) si.set_batches([r[0] + offsets[i], r[1] + offsets[i]]) def _scale(self): """Perform all of the operations required to deliver the scaled data.""" self.sweep_infos = [ self._sweep_handler.get_sweep_information(e) for e in self._sweep_handler.get_epochs() ] ### Set the parameters and datafiles for dials.scale self._scaler = DialsScale() self._scaler = self._updated_dials_scaler() if self._scaled_experiments and self._scaled_reflections: # going to continue-where-left-off self._scaler.add_experiments_json(self._scaled_experiments) self._scaler.add_reflections_file(self._scaled_reflections) else: for si in self.sweep_infos: self._scaler.add_experiments_json(si.get_experiments()) self._scaler.add_reflections_file(si.get_reflections()) # ensure we start with a clean 
slate in case we pre-scaled the data # before running dials.symmetry self._scaler.set_overwrite_existing_models(True) self._scalr_scaled_reflection_files = {"mtz_unmerged": {}, "mtz": {}} ### Set the resolution limit if applicable user_resolution_limits = {} highest_resolution = 100.0 for si in self.sweep_infos: dname = si.get_project_info()[2] sname = si.get_sweep_name() intgr = si.get_integrater() if intgr.get_integrater_user_resolution(): # record user resolution here but don't use it until later - why? dmin = intgr.get_integrater_high_resolution() if (dname, sname) not in user_resolution_limits: user_resolution_limits[(dname, sname)] = dmin elif dmin < user_resolution_limits[(dname, sname)]: user_resolution_limits[(dname, sname)] = dmin if (dname, sname) in self._scalr_resolution_limits: d_min, _ = self._scalr_resolution_limits[(dname, sname)] if d_min < highest_resolution: highest_resolution = d_min if highest_resolution < 99.9: self._scaler.set_resolution(d_min=highest_resolution) ### Setup final job details and run scale self._scaler.set_working_directory(self.get_working_directory()) auto_logfiler(self._scaler) FileHandler.record_log_file( f"{self._scalr_pname} {self._scalr_xname} SCALE", self._scaler.get_log_file(), ) self._scaler.scale() FileHandler.record_html_file( f"{self._scalr_pname} {self._scalr_xname} SCALE", self._scaler.get_html(), ) self._scaled_experiments = self._scaler.get_scaled_experiments() self._scaled_reflections = self._scaler.get_scaled_reflections() # make it so that only scaled.expt and scaled.refl are # the files that dials.scale knows about, so that if scale is called again, # scaling resumes from where it left off. 
self._scaler.clear_datafiles() ### Calculate the resolution limit and set done False if applicable highest_suggested_resolution = self.assess_resolution_limits( hklin=None, user_resolution_limits=user_resolution_limits, reflections=self._scaled_reflections, experiments=self._scaled_experiments, ) if not self.get_scaler_done(): # reset for when resolution limit applied logger.debug("Returning as scaling not finished...") return ### Want to do space group check after scaling. So run dials.symmetry ### with absences only before exporting merged and unmerged files ### again in correct s.g. if ( not PhilIndex.params.xia2.settings.small_molecule and not self._scalr_input_spacegroup ): logger.notice(banner("Systematic absences check")) symmetry = DialsSymmetry() symmetry.set_experiments_filename(self._scaled_experiments) symmetry.set_reflections_filename(self._scaled_reflections) symmetry.set_working_directory(self.get_working_directory()) symmetry.set_mode_absences_only() auto_logfiler(symmetry) symmetry.decide_pointgroup() # bad name - actually running absences here self._scaled_experiments = symmetry.get_output_experiments_filename() sg = load.experiment_list(self._scaled_experiments)[ 0 ].crystal.get_space_group() logger.info("Most likely space group: %s", sg.info()) self._scalr_likely_spacegroups = [sg.type().lookup_symbol()] FileHandler.record_more_data_file( f"{self._scalr_pname} {self._scalr_xname} scaled", self._scaled_experiments, ) FileHandler.record_more_data_file( f"{self._scalr_pname} {self._scalr_xname} scaled", self._scaled_reflections, ) # Run twotheta refine self._update_scaled_unit_cell_from_scaled_data() ### Now export and merge so that mtz files in correct space group. ### For MAD case, need to generate individual merged and unmerged mtz ### files. First split experiments on wavelength, then run dials.export ### and dials.merge on each # Find number of dnames (i.e. 
number of wavelengths) dnames_set = OrderedSet() experiments = load.experiment_list(self._scaled_experiments) wavelengths = flex.double( match_wavelengths(experiments) ) # in experiments order for si in self.sweep_infos: dnames_set.add( si.get_project_info()[2] ) # sweep info in same order as experiments assert len(wavelengths) == len(dnames_set) scaled_unmerged_mtz_path = os.path.join( self.get_working_directory(), f"{self._scalr_pname}_{self._scalr_xname}_scaled_unmerged.mtz", ) if len(dnames_set) > 1: self._scalr_scaled_refl_files = {} logger.debug("Splitting experiments by wavelength") # first split by wavelength splitter = SplitExperiments() splitter.add_experiments(self._scaled_experiments) splitter.add_reflections(self._scaled_reflections) splitter.set_by_wavelength(True) splitter.set_working_directory(self.get_working_directory()) auto_logfiler(splitter) splitter.run() nn = len(dnames_set) fmt = "%%0%dd" % (math.log10(nn) + 1) wl_sort = flex.sort_permutation(wavelengths) sorted_dnames_by_wl = [dnames_set[i] for i in wl_sort] for i, dname in enumerate(sorted_dnames_by_wl): # need to sort by wavelength from low to high nums = fmt % i exporter = ExportMtz() exporter.set_working_directory(self.get_working_directory()) expt_name = os.path.join( self.get_working_directory(), "split_%s.expt" % nums ) refl_name = os.path.join( self.get_working_directory(), "split_%s.refl" % nums ) FileHandler.record_temporary_file(expt_name) FileHandler.record_temporary_file(refl_name) exporter.crystal_name = self._scalr_xname exporter.project_name = self._scalr_pname exporter.set_experiments_filename(expt_name) exporter.set_reflections_filename(refl_name) exporter.set_intensity_choice("scale") exporter.set_partiality_threshold( PhilIndex.params.dials.scale.partiality_threshold ) # 0.4 default auto_logfiler(exporter) mtz_filename = os.path.join( self.get_working_directory(), scaled_unmerged_mtz_path.rstrip(".mtz") + "_%s.mtz" % dname, ) exporter.set_mtz_filename(mtz_filename) 
self._scalr_scaled_reflection_files["mtz_unmerged"][ dname ] = mtz_filename logger.debug("Exporting %s", mtz_filename) exporter.run() FileHandler.record_data_file(mtz_filename) # Export an mmCIF file using dials.export. exporter = ExportMMCIF() exporter.set_working_directory(self.get_working_directory()) exporter.set_experiments_filename(expt_name) exporter.set_reflections_filename(refl_name) exporter.set_compression("bz2") exporter.set_pdb_version( PhilIndex.params.xia2.settings.output.mmcif.pdb_version ) exporter.set_partiality_threshold( PhilIndex.params.dials.scale.partiality_threshold ) # 0.4 default mmcif_path = mtz_filename.rstrip(".mtz") + ".mmcif" exporter.set_filename(mmcif_path) auto_logfiler(exporter) logger.debug("Exporting %s", mmcif_path) exporter.run() FileHandler.record_temporary_file(mmcif_path) # now convert to .sca format convert_unmerged_mtz_to_sca(mtz_filename) merger = DialsMerge() # merge but don't truncate merger.set_working_directory(self.get_working_directory()) merger.set_experiments_filename(expt_name) merger.set_reflections_filename(refl_name) merger.set_project_name(self._scalr_pname) merger.set_crystal_names(self._scalr_xname) merger.set_dataset_names(dname) merger.set_partiality_threshold( PhilIndex.params.dials.scale.partiality_threshold ) auto_logfiler(merger) mtz_filename = os.path.join( self.get_working_directory(), "%s_%s_scaled_%s.mtz" % (self._scalr_pname, self._scalr_xname, dname), ) self._scalr_scaled_refl_files[dname] = mtz_filename self._scalr_scaled_reflection_files["mtz"][dname] = mtz_filename merger.set_mtz_filename(mtz_filename) logger.debug("Merging %s", mtz_filename) merger.run() FileHandler.record_data_file(mtz_filename) # now convert to .sca format convert_merged_mtz_to_sca(mtz_filename) ### For non-MAD case, run dials.export and dials.merge on scaled data. 
else: exporter = ExportMtz() exporter.crystal_name = self._scalr_xname exporter.project_name = self._scalr_pname exporter.set_working_directory(self.get_working_directory()) exporter.set_experiments_filename(self._scaled_experiments) exporter.set_reflections_filename(self._scaled_reflections) exporter.set_intensity_choice("scale") exporter.set_partiality_threshold( PhilIndex.params.dials.scale.partiality_threshold ) # 0.4 default auto_logfiler(exporter) exporter.set_mtz_filename(scaled_unmerged_mtz_path) logger.debug("Exporting %s", scaled_unmerged_mtz_path) exporter.run() self._scalr_scaled_reflection_files["mtz_unmerged"] = { dnames_set[0]: scaled_unmerged_mtz_path } FileHandler.record_data_file(scaled_unmerged_mtz_path) # now convert to .sca format convert_unmerged_mtz_to_sca(scaled_unmerged_mtz_path) merger = DialsMerge() merger.set_working_directory(self.get_working_directory()) merger.set_experiments_filename(self._scaled_experiments) merger.set_reflections_filename(self._scaled_reflections) merger.set_project_name(self._scalr_pname) merger.set_crystal_names(self._scalr_xname) merger.set_dataset_names(dnames_set[0]) merger.set_partiality_threshold( PhilIndex.params.dials.scale.partiality_threshold ) auto_logfiler(merger) mtz_filename = os.path.join( self.get_working_directory(), f"{self._scalr_pname}_{self._scalr_xname}_scaled.mtz", ) self._scalr_scaled_refl_files[dnames_set[0]] = mtz_filename self._scalr_scaled_reflection_files["mtz"][dnames_set[0]] = mtz_filename merger.set_mtz_filename(mtz_filename) logger.debug("Merging %s", mtz_filename) merger.run() FileHandler.record_data_file(mtz_filename) # now export to sca format convert_merged_mtz_to_sca(mtz_filename) # Export an mmCIF file using dials.export. 
exporter = ExportMMCIF() exporter.set_working_directory(self.get_working_directory()) exporter.set_experiments_filename(self._scaled_experiments) exporter.set_reflections_filename(self._scaled_reflections) exporter.set_compression("bz2") exporter.set_pdb_version( PhilIndex.params.xia2.settings.output.mmcif.pdb_version ) exporter.set_partiality_threshold( PhilIndex.params.dials.scale.partiality_threshold ) # 0.4 default mmcif_path = scaled_unmerged_mtz_path.rstrip(".mtz") + ".mmcif" exporter.set_filename(mmcif_path) auto_logfiler(exporter) logger.debug("Exporting %s", mmcif_path) exporter.run() FileHandler.record_temporary_file(mmcif_path) # Also export just integrated data. for si in self.sweep_infos: exporter = ExportMtz() exporter.crystal_name = self._scalr_xname exporter.project_name = self._scalr_pname exporter.set_reflections_filename(si.get_reflections()) exporter.set_experiments_filename(si.get_experiments()) exporter.set_intensity_choice("profile+sum") pname, xname, dname = si.get_project_info() sweep = si.get_integrater().get_integrater_sweep_name() tag = f"{pname} {xname} {dname} {sweep} INTEGRATE" mtz_filename = os.path.join( self.get_working_directory(), "%s_integrated.mtz" % sweep ) exporter.set_mtz_filename(mtz_filename) exporter.run() FileHandler.record_more_data_file(tag, mtz_filename) if PhilIndex.params.xia2.settings.merging_statistics.source == "cctbx": for key in self._scalr_scaled_refl_files: stats = self._compute_scaler_statistics( self._scalr_scaled_reflection_files["mtz_unmerged"][key], selected_band=(highest_suggested_resolution, None), wave=key, ) self._scalr_statistics[ (self._scalr_pname, self._scalr_xname, key) ] = stats # add CIF data expts = load.experiment_list(self._scaled_experiments) overall_absmin = 1.0 for expt in expts: if (expt.scaling_model.id_ == "physical") and ( "absorption" in expt.scaling_model.components ): plots = plot_absorption_plots(expt.scaling_model) correction = 
np.array(plots["absorption_surface"]["data"][0]["z"]) # correction is a 2D numpy array absmin = np.min(correction) / np.max(correction) if absmin > 0: # hope should always happen! overall_absmin = min(absmin, overall_absmin) dials_version = dials.util.version.dials_version() block = CIF.get_block("xia2") mmblock = mmCIF.get_block("xia2") mmblock["_exptl.entry_id"] = "xia2" mmblock["_exptl.method"] = "X-RAY DIFFRACTION" block["_exptl_absorpt_correction_T_min"] = mmblock[ "_exptl.absorpt_correction_T_min" ] = overall_absmin # = scaled relative to 1 block["_exptl_absorpt_correction_T_max"] = mmblock[ "_exptl.absorpt_correction_T_max" ] = 1.0 # block["_exptl_absorpt_correction_type"] = mmblock[ "_exptl.absorpt_correction_type" ] = "empirical"
from urbansim.utils import misc import os import sys import orca import yaml import datasources import variables from utils import parcel_id_to_geom_id, geom_id_to_parcel_id, add_buildings from urbansim.utils import networks import pandana.network as pdna from urbansim_defaults import models from urbansim_defaults import utils from urbansim.developer import sqftproforma, developer from urbansim.developer.developer import Developer as dev import subsidies import summaries import numpy as np import pandas as pd @orca.step('rsh_simulate') def rsh_simulate(buildings, aggregations, settings): utils.hedonic_simulate("rsh.yaml", buildings, aggregations, "residential_price", cast=True) if "rsh_simulate" in settings: low = float(settings["rsh_simulate"]["low"]) high = float(settings["rsh_simulate"]["high"]) buildings.update_col("residential_price", buildings.residential_price.clip(low, high)) print "Clipped rsh_simulate produces\n", \ buildings.residential_price.describe() @orca.step('nrh_simulate') def nrh_simulate(buildings, aggregations): return utils.hedonic_simulate("nrh.yaml", buildings, aggregations, "non_residential_price", cast=True) @orca.step('hlcm_simulate') def hlcm_simulate(households, buildings, aggregations, settings, low_income): fname = misc.config("hlcm.yaml") print "\nAffordable housing HLCM:\n" cfg = yaml.load(open(fname)) cfg["choosers_predict_filters"] = "income <= %d" % low_income open(misc.config("hlcm_tmp.yaml"), "w").write(yaml.dump(cfg)) # low income into affordable units utils.lcm_simulate( "hlcm_tmp.yaml", households, buildings, aggregations, "building_id", "residential_units", "vacant_affordable_units", settings.get( "enable_supply_correction", None), cast=True) os.remove(misc.config("hlcm_tmp.yaml")) print "\nMarket rate housing HLCM:\n" # then everyone into market rate units utils.lcm_simulate( "hlcm.yaml", households, buildings, aggregations, "building_id", "residential_units", "vacant_market_rate_units", settings.get( 
"enable_supply_correction", None), cast=True) @orca.step('households_transition') def households_transition(households, household_controls, year, settings): s = orca.get_table('households').base_income_quartile.value_counts() print "Distribution by income before:\n", (s/s.sum()) ret = utils.full_transition(households, household_controls, year, settings['households_transition'], "building_id") s = orca.get_table('households').base_income_quartile.value_counts() print "Distribution by income after:\n", (s/s.sum()) return ret @orca.step('households_relocation') def households_relocation(households, settings, years_per_iter): rate = settings['rates']['households_relocation'] rate = min(rate * years_per_iter, 1.0) return utils.simple_relocation(households, rate, "building_id") @orca.table(cache=True) def employment_relocation_rates(): df = pd.read_csv(os.path.join("data", "employment_relocation_rates.csv")) df = df.set_index("zone_id").stack().reset_index() df.columns = ["zone_id", "empsix", "rate"] return df # this is a list of parcel_ids which are to be treated as static @orca.injectable() def static_parcels(settings, parcels): # list of geom_ids to not relocate static_parcels = settings["static_parcels"] # geom_ids -> parcel_ids return geom_id_to_parcel_id( pd.DataFrame(index=static_parcels), parcels).index.values @orca.step() def jobs_relocation(jobs, employment_relocation_rates, years_per_iter, settings, static_parcels, buildings): # get buildings that are on those parcels static_buildings = buildings.index[ buildings.parcel_id.isin(static_parcels)] df = pd.merge(jobs.to_frame(["zone_id", "empsix"]), employment_relocation_rates.local, on=["zone_id", "empsix"], how="left") df.index = jobs.index # get the move rate for each job rate = (df.rate * years_per_iter).clip(0, 1.0) # get random floats and move jobs if they're less than the rate move = np.random.random(len(rate)) < rate # also don't move jobs that are on static parcels move &= 
~jobs.building_id.isin(static_buildings) # get the index of the moving jobs index = jobs.index[move] # set jobs that are moving to a building_id of -1 (means unplaced) jobs.update_col_from_series("building_id", pd.Series(-1, index=index)) # this deviates from the step in urbansim_defaults only in how it deals with # demolished buildings - this version only demolishes when there is a row to # demolish in the csv file - this also allows building multiple buildings and # just adding capacity on an existing parcel, by adding one building at a time @orca.step("scheduled_development_events") def scheduled_development_events(buildings, development_projects, demolish_events, summary, year, parcels, settings, years_per_iter, parcels_geography, building_sqft_per_job, vmt_fee_categories): # first demolish demolish = demolish_events.to_frame().\ query("%d <= year_built < %d" % (year, year + years_per_iter)) print "Demolishing/building %d buildings" % len(demolish) l1 = len(buildings) buildings = utils._remove_developed_buildings( buildings.to_frame(buildings.local_columns), demolish, unplace_agents=["households", "jobs"]) orca.add_table("buildings", buildings) buildings = orca.get_table("buildings") print "Demolished %d buildings" % (l1 - len(buildings)) print " (this number is smaller when parcel has no existing buildings)" # then build dps = development_projects.to_frame().\ query("%d <= year_built < %d" % (year, year + years_per_iter)) if len(dps) == 0: return new_buildings = utils.scheduled_development_events( buildings, dps, remove_developed_buildings=False, unplace_agents=['households', 'jobs']) new_buildings["form"] = new_buildings.building_type_id.map( settings['building_type_map']).str.lower() new_buildings["job_spaces"] = new_buildings.building_sqft / \ new_buildings.building_type_id.fillna(-1).map(building_sqft_per_job) new_buildings["job_spaces"] = new_buildings.job_spaces.astype('int') new_buildings["geom_id"] = parcel_id_to_geom_id(new_buildings.parcel_id) 
new_buildings["SDEM"] = True new_buildings["subsidized"] = False new_buildings["zone_id"] = misc.reindex( parcels.zone_id, new_buildings.parcel_id) new_buildings["vmt_res_cat"] = misc.reindex( vmt_fee_categories.res_cat, new_buildings.zone_id) del new_buildings["zone_id"] new_buildings["pda"] = parcels_geography.pda_id.loc[ new_buildings.parcel_id].values summary.add_parcel_output(new_buildings) @orca.injectable("supply_and_demand_multiplier_func", autocall=False) def supply_and_demand_multiplier_func(demand, supply): s = demand / supply settings = orca.get_injectable('settings') print "Number of submarkets where demand exceeds supply:", len(s[s > 1.0]) # print "Raw relationship of supply and demand\n", s.describe() supply_correction = settings["enable_supply_correction"] clip_change_high = supply_correction["kwargs"]["clip_change_high"] t = s t -= 1.0 t = t / t.max() * (clip_change_high-1) t += 1.0 s.loc[s > 1.0] = t.loc[s > 1.0] # print "Shifters for current iteration\n", s.describe() return s, (s <= 1.0).all() # this if the function for mapping a specific building that we build to a # specific building type @orca.injectable("form_to_btype_func", autocall=False) def form_to_btype_func(building): settings = orca.get_injectable('settings') form = building.form dua = building.residential_units / (building.parcel_size / 43560.0) # precise mapping of form to building type for residential if form is None or form == "residential": if dua < 16: return 1 elif dua < 32: return 2 return 3 return settings["form_to_btype"][form][0] @orca.injectable("add_extra_columns_func", autocall=False) def add_extra_columns(df): for col in ["residential_price", "non_residential_price"]: df[col] = 0 if "deed_restricted_units" not in df.columns: df["deed_restricted_units"] = 0 else: print "Number of deed restricted units built = %d" %\ df.deed_restricted_units.sum() df["redfin_sale_year"] = 2012 if "residential_units" not in df: df["residential_units"] = 0 if "parcel_size" not in df: 
df["parcel_size"] = \ orca.get_table("parcels").parcel_size.loc[df.parcel_id] if "year" in orca.orca._INJECTABLES and "year_built" not in df: df["year_built"] = orca.get_injectable("year") if "form_to_btype_func" in orca.orca._INJECTABLES and \ "building_type_id" not in df: form_to_btype_func = orca.get_injectable("form_to_btype_func") df["building_type_id"] = df.apply(form_to_btype_func, axis=1) return df @orca.step('alt_feasibility') def alt_feasibility(parcels, settings, parcel_sales_price_sqft_func, parcel_is_allowed_func): kwargs = settings['feasibility'] config = sqftproforma.SqFtProFormaConfig() config.parking_rates["office"] = 1.5 config.parking_rates["retail"] = 1.5 utils.run_feasibility(parcels, parcel_sales_price_sqft_func, parcel_is_allowed_func, config=config, **kwargs) f = subsidies.policy_modifications_of_profit( orca.get_table('feasibility').to_frame(), parcels) orca.add_table("feasibility", f) @orca.step('residential_developer') def residential_developer(feasibility, households, buildings, parcels, year, settings, summary, form_to_btype_func, add_extra_columns_func, parcels_geography, limits_settings, final_year): kwargs = settings['residential_developer'] num_units = dev.compute_units_to_build( len(households), buildings["residential_units"].sum(), kwargs['target_vacancy']) targets = [] typ = "Residential" # now apply limits - limits are assumed to be yearly, apply to an # entire jurisdiction and be in terms of residential_units or job_spaces if typ in limits_settings: juris_name = parcels_geography.juris_name.\ reindex(parcels.index).fillna('Other') juris_list = limits_settings[typ].keys() for juris, limit in limits_settings[typ].items(): # the actual target is the limit times the number of years run # so far in the simulation (plus this year), minus the amount # built in previous years - in other words, you get rollover # and development is lumpy current_total = parcels.total_residential_units[ (juris_name == juris) & (parcels.newest_building >= 
2010)]\ .sum() target = (year - 2010 + 1) * limit - current_total # make sure we don't overshoot the total development of the limit # for the horizon year - for instance, in Half Moon Bay we have # a very low limit and a single development in a far out year can # easily build over the limit for the total simulation max_target = (final_year - 2010 + 1) * limit - current_total if target <= 0: continue targets.append((juris_name == juris, target, max_target, juris)) num_units -= target # other cities not in the targets get the remaining target targets.append((~juris_name.isin(juris_list), num_units, None, "none")) else: # otherwise use all parcels with total number of units targets.append((parcels.index == parcels.index, num_units, None, "none")) for parcel_mask, target, final_target, juris in targets: print "Running developer for %s with target of %d" % \ (str(juris), target) # this was a fairly heinous bug - have to get the building wrapper # again because the buildings df gets modified by the run_developer # method below buildings = orca.get_table('buildings') new_buildings = utils.run_developer( "residential", households, buildings, "residential_units", parcels.parcel_size[parcel_mask], parcels.ave_sqft_per_unit[parcel_mask], parcels.total_residential_units[parcel_mask], feasibility, year=year, form_to_btype_callback=form_to_btype_func, add_more_columns_callback=add_extra_columns_func, num_units_to_build=target, **kwargs) buildings = orca.get_table('buildings') if new_buildings is not None: new_buildings["subsidized"] = False if final_target is not None and new_buildings is not None: # make sure we don't overbuild the target for the whole simulation overshoot = new_buildings.net_units.sum() - max_target if overshoot > 0: index = new_buildings.tail(1).index[0] index = int(index) # make sure we don't get into a negative unit situation overshoot = min(overshoot, buildings.local.loc[index, "residential_units"]) buildings.local.loc[index, "residential_units"] -= 
overshoot summary.add_parcel_output(new_buildings) @orca.step() def retail_developer(jobs, buildings, parcels, nodes, feasibility, settings, summary, add_extra_columns_func, net): dev_settings = settings['non_residential_developer'] all_units = dev.compute_units_to_build( len(jobs), buildings.job_spaces.sum(), dev_settings['kwargs']['target_vacancy']) target = all_units * float(dev_settings['type_splits']["Retail"]) # target here is in sqft target *= settings["building_sqft_per_job"][10] feasibility = feasibility.to_frame().loc[:, "retail"] feasibility = feasibility.dropna(subset=["max_profit"]) feasibility["non_residential_sqft"] = \ feasibility.non_residential_sqft.astype("int") feasibility["retail_ratio"] = parcels.retail_ratio feasibility = feasibility.reset_index() # create features f1 = feasibility.retail_ratio / feasibility.retail_ratio.max() f2 = feasibility.max_profit / feasibility.max_profit.max() # combine features in probability function - it's like combining expense # of building the building with the market in the neighborhood p = f1 * 1.5 + f2 p = p.clip(lower=1.0/len(p)/10) print "Attempting to build {:,} retail sqft".format(target) # order by weighted random sample feasibility = feasibility.sample(frac=1.0, weights=p) bldgs = buildings.to_frame(buildings.local_columns + ["general_type"]) devs = [] for dev_id, d in feasibility.iterrows(): if target
called each frame, as # rerendering is done only when needed. def update_info_boards(self, players): for i in xrange(10): # for each player number update_needed = False if self.player_info_board_images[i] == None: self.player_info_board_images[i] = self.gui_images["info board"].copy() update_needed = True player = None for one_player in players: if one_player.get_number() == i: player = one_player break if player == None: continue if player.info_board_needs_update(): update_needed = True if not update_needed or player == None: continue # rerendering needed here debug_log("updating info board " + str(i)) board_image = self.player_info_board_images[i] board_image.blit(self.gui_images["info board"],(0,0)) board_image.blit(self.font_small.render(str(player.get_kills()),True,(0,0,0)),(45,0)) board_image.blit(self.font_small.render(str(player.get_wins()),True,(0,0,0)),(65,0)) board_image.blit(self.font_small.render(Game.COLOR_NAMES[i],True,Renderer.darken_color(Renderer.COLOR_RGB_VALUES[i],100)),(4,2)) if player.is_dead(): board_image.blit(self.gui_images["out"],(15,34)) continue # render items x = 5 dy = 12 self.__render_info_board_item_row(x,20,5,GameMap.ITEM_BOMB,player,board_image) self.__render_info_board_item_row(x,20 + dy,5,GameMap.ITEM_FLAME,player,board_image) self.__render_info_board_item_row(x,20 + 2 * dy,9,GameMap.ITEM_SPEEDUP,player,board_image) y = 20 + 3 * dy items_to_check = [ GameMap.ITEM_SHOE, GameMap.ITEM_BOXING_GLOVE, GameMap.ITEM_THROWING_GLOVE, GameMap.ITEM_SPRING, GameMap.ITEM_MULTIBOMB, GameMap.ITEM_DETONATOR, GameMap.ITEM_DISEASE] for item in items_to_check: if player.get_item_count(item) or item == GameMap.ITEM_DISEASE and player.get_disease() != Player.DISEASE_NONE: board_image.blit(self.icon_images[item],(x,y)) x += self.icon_images[item].get_size()[0] + 1 #---------------------------------------------------------------------------- def process_animation_events(self, animation_event_list): for animation_event in animation_event_list: 
self.animations[animation_event[0]].play(animation_event[1]) #---------------------------------------------------------------------------- ## Renders text with outline, line breaks, formatting, etc. def render_text(self, font, text_to_render, color, outline_color = (0,0,0), center = False): text_lines = text_to_render.split("\n") rendered_lines = [] width = height = 0 first_line = True for text_line in text_lines: line = text_line.lstrip().rstrip() if len(line) == 0: continue line_without_format = re.sub(r"\^.......","",line) # remove all the markup in format ^#dddddd new_rendered_line = pygame.Surface(font.size(line_without_format),flags=pygame.SRCALPHA) x = 0 first = True starts_with_format = line[0] == "^" for subline in line.split("^"): if len(subline) == 0: continue has_format = starts_with_format if first else True first = False text_color = color if has_format: text_color = pygame.Color(subline[:7]) subline = subline[7:] new_rendered_subline = font.render(subline,True,outline_color) # create text with outline new_rendered_subline.blit(new_rendered_subline,(0,2)) new_rendered_subline.blit(new_rendered_subline,(1,0)) new_rendered_subline.blit(new_rendered_subline,(-1,0)) new_rendered_subline.blit(font.render(subline,True,text_color),(0,1)) new_rendered_line.blit(new_rendered_subline,(x,0)) x += new_rendered_subline.get_size()[0] rendered_lines.append(new_rendered_line) if not first_line: height += Renderer.MENU_LINE_SPACING first_line = False height += rendered_lines[-1].get_size()[1] width = max(width,rendered_lines[-1].get_size()[0]) result = pygame.Surface((width,height),flags=pygame.SRCALPHA) y_step = font.get_height() + Renderer.MENU_LINE_SPACING for i in xrange(len(rendered_lines)): result.blit(rendered_lines[i],(0 if not center else (width - rendered_lines[i].get_size()[0]) / 2,i * y_step)) return result #---------------------------------------------------------------------------- ## Updates images in self.menu_item_images (only if needed). 
    def update_menu_item_images(self, menu):
        # cache is created lazily on the first call
        if self.menu_item_images == None:
            self.menu_item_images = {}     # format: (row, column) : (item text, image)

        items = menu.get_items()

        item_coordinates = []

        for j in xrange(len(items)):
            for i in xrange(len(items[j])):
                item_coordinates.append((j,i))

        if len(menu.get_text()) != 0:
            item_coordinates.append(0)     # this is the menu description text

        for menu_coordinates in item_coordinates:
            update_needed = False

            if not (menu_coordinates in self.menu_item_images):
                update_needed = True

            # coordinate 0 (not a tuple) denotes the menu description text
            if menu_coordinates == 0:
                item_text = menu.get_text()
                center_text = True
            else:
                item_text = items[menu_coordinates[0]][menu_coordinates[1]]
                center_text = False

            # rerender also when the cached text differs from the current one
            if not update_needed and item_text != self.menu_item_images[menu_coordinates][0]:
                update_needed = True

            if update_needed:
                debug_log("updating menu item " + str(menu_coordinates))
                new_image = self.render_text(self.font_normal,item_text,Renderer.MENU_FONT_COLOR,center = center_text)
                # text itself
                new_image.blit(new_image,(0,1))
                self.menu_item_images[menu_coordinates] = (item_text,new_image)

    #----------------------------------------------------------------------------

    ## Renders the given menu into a new surface: background, optional "party"
    #  cheat effects, description text, items (with scrollbar, selection
    #  highlight and mouse hover handling), confirm prompt and map preview.

    def render_menu(self, menu_to_render, game):
        result = pygame.Surface(self.screen_resolution)

        if self.menu_background_image == None:
            self.menu_background_image = pygame.image.load(os.path.join(Game.RESOURCE_PATH,"gui_menu_background.png"))

        background_position = (self.screen_center[0] - self.menu_background_image.get_size()[0] / 2,self.screen_center[1] - self.menu_background_image.get_size()[1] / 2)

        profiler.measure_start("menu rend. backg.")
        result.blit(self.menu_background_image,background_position)
        profiler.measure_stop("menu rend. backg.")

        profiler.measure_start("menu rend. party")

        if game.cheat_is_active(Game.CHEAT_PARTY):
            for circle_info in self.party_circles:       # draw circles
                circle_coords = (self.screen_center[0] + circle_info[0][0],self.screen_center[1] + circle_info[0][1])
                # pulsate the radius with a per-circle phase and speed
                radius_coefficient = (math.sin(pygame.time.get_ticks() * circle_info[4] / 100.0 + circle_info[3]) + 1) / 2.0
                circle_radius = int(circle_info[1] * radius_coefficient)
                pygame.draw.circle(result,circle_info[2],circle_coords,circle_radius)

            for player_info in self.party_players:       # draw players
                player_coords = (self.screen_center[0] + player_info[0][0],self.screen_center[1] + player_info[0][1])
                player_direction = (int((pygame.time.get_ticks() + player_info[2]) / 150)) % 4

                if not player_info[3]:
                    player_direction = 3 - player_direction

                direction_string = ("up","right","down","left")[player_direction]

                if int(pygame.time.get_ticks() / 500) % 2 == 0:
                    direction_string = "box " + direction_string

                result.blit(self.player_images[player_info[1]][direction_string],player_coords)

            for bomb_info in self.party_bombs:
                result.blit(self.bomb_images[0],(bomb_info[0],bomb_info[1]))
                bomb_info[0] += bomb_info[2]
                bomb_info[1] += bomb_info[3]

                if bomb_info[0] < 0:     # border collision, change direction
                    bomb_info[2] = 1
                elif bomb_info[0] > self.screen_resolution[0] - 50:
                    bomb_info[2] = -1

                if bomb_info[1] < 0:     # border collision, change direction
                    bomb_info[3] = 1
                elif bomb_info[1] > self.screen_resolution[1] - 50:
                    bomb_info[3] = -1

        profiler.measure_stop("menu rend. party")

        version_position = (3,1)

        result.blit(self.gui_images["version"],version_position)

        profiler.measure_start("menu rend. item update")

        self.update_menu_item_images(menu_to_render)

        # render menu description text

        y = self.screen_center[1] + Renderer.MENU_DESCRIPTION_Y_OFFSET

        if len(menu_to_render.get_text()) != 0:
            result.blit(self.menu_item_images[0][1],(self.screen_center[0] - self.menu_item_images[0][1].get_size()[0] / 2,y))     # menu description text image is at index 0
            y += self.menu_item_images[0][1].get_size()[1] + Renderer.MENU_LINE_SPACING * 2

        menu_items = menu_to_render.get_items()

        columns = len(menu_items)     # how many columns there are

        column_x_space = 150

        # compute the horizontal centre of each column
        if columns % 2 == 0:
            xs = [self.screen_center[0] + i * column_x_space - ((columns - 1) * column_x_space / 2) for i in xrange(columns)]     # even number of columns
        else:
            xs = [self.screen_center[0] + (i - columns / 2) * column_x_space for i in xrange(columns)]

        selected_coordinates = menu_to_render.get_selected_item()

        items_y = y

        profiler.measure_stop("menu rend. item update")

        # render scrollbar if needed

        rows = 0

        for column in menu_items:
            rows = max(rows,len(column))

        if rows > Menu.MENU_MAX_ITEMS_VISIBLE:
            x = xs[0] + Renderer.SCROLLBAR_RELATIVE_POSITION[0]

            result.blit(self.gui_images["arrow up"],(x,items_y))
            result.blit(self.gui_images["arrow down"],(x,items_y + Renderer.SCROLLBAR_HEIGHT))

            scrollbar_position = int(items_y + selected_coordinates[0] / float(rows) * Renderer.SCROLLBAR_HEIGHT)

            result.blit(self.gui_images["seeker"],(x,scrollbar_position))

        mouse_coordinates = pygame.mouse.get_pos()

        # render items

        profiler.measure_start("menu rend. items")

        for j in xrange(len(menu_items)):
            y = items_y

            for i in xrange(min(Menu.MENU_MAX_ITEMS_VISIBLE,len(menu_items[j]) - menu_to_render.get_scroll_position())):
                item_image = self.menu_item_images[(j,i + menu_to_render.get_scroll_position())][1]

                x = xs[j] - item_image.get_size()[0] / 2

                if (i + menu_to_render.get_scroll_position(),j) == selected_coordinates:     # item is selected
                    scale = (8 + math.sin(pygame.time.get_ticks() / 40.0)) / 7.0    # make the pulsating effect
                    item_image = pygame.transform.scale(item_image,(int(scale * item_image.get_size()[0]),int(scale * item_image.get_size()[1])))
                    x = xs[j] - item_image.get_size()[0] / 2
                    pygame.draw.rect(result,(255,0,0),pygame.Rect(x - 4,y - 2,item_image.get_size()[0] + 8,item_image.get_size()[1] + 4))

                result.blit(item_image,(x,y))

                # did mouse go over the item?

                if (not game.get_settings().control_by_mouse) and (self.previous_mouse_coordinates != mouse_coordinates) and (x <= mouse_coordinates[0] <= x + item_image.get_size()[0]) and (y <= mouse_coordinates[1] <= y + item_image.get_size()[1]):
                    item_coordinates = (i + menu_to_render.get_scroll_position(),j)
                    menu_to_render.mouse_went_over_item(item_coordinates)

                y += Renderer.FONT_NORMAL_SIZE + Renderer.MENU_LINE_SPACING

        profiler.measure_stop("menu rend. items")

        mouse_events = game.get_player_key_maps().get_mouse_button_events()

        for i in xrange(len(mouse_events)):
            if mouse_events[i]:
                menu_to_render.mouse_button_pressed(i)

        self.previous_mouse_coordinates = mouse_coordinates

        # render confirm dialog if prompting

        if menu_to_render.get_state() == Menu.MENU_STATE_CONFIRM_PROMPT:
            width = 120
            height = 80
            x = self.screen_center[0] - width / 2
            y = self.screen_center[1] - height / 2

            pygame.draw.rect(result,(0,0,0),pygame.Rect(x,y,width,height))
            pygame.draw.rect(result,(255,255,255),pygame.Rect(x,y,width,height),1)

            text_image = pygame.transform.rotate(self.gui_images["prompt"],math.sin(pygame.time.get_ticks() / 100) * 5)

            x = self.screen_center[0] - text_image.get_size()[0] / 2
            y = self.screen_center[1] - text_image.get_size()[1] / 2

            result.blit(text_image,(x,y))

        # map preview

        profiler.measure_start("menu rend. preview")

        if isinstance(menu_to_render,MapSelectMenu):     # also not too nice
            if menu_to_render.show_map_preview():
                self.update_map_preview_image(menu_to_render.get_selected_map_name())
                result.blit(self.preview_map_image,(self.screen_center[0] + 180,items_y))

        profiler.measure_stop("menu rend. preview")

        # draw cursor only if control by mouse is not allowed - wouldn't make sense

        if not game.get_settings().control_by_mouse:
            result.blit(self.gui_images["cursor"],pygame.mouse.get_pos())

        return result

    #----------------------------------------------------------------------------

    ## Rebuilds self.preview_map_image for the given map file (only when the
    #  selected map changed). Each tile becomes a small coloured rectangle;
    #  special objects override the plain tile colours.

    def update_map_preview_image(self, map_filename):
        if map_filename == "":
            self.preview_map_name = ""
            self.preview_map_image = None
            return

        if self.preview_map_name != map_filename:
            debug_log("updating map preview of " + map_filename)

            self.preview_map_name = map_filename

            tile_size = 7
            tile_half_size = tile_size / 2

            map_info_border_size = 5

            self.preview_map_image = pygame.Surface((tile_size * GameMap.MAP_WIDTH,tile_size * GameMap.MAP_HEIGHT + map_info_border_size + Renderer.MAP_TILE_HEIGHT))

            with open(os.path.join(Game.MAP_PATH,map_filename)) as map_file:
                map_data = map_file.read()

            # instantiate a throwaway map just to read its tiles
            temp_map = GameMap(map_data,PlaySetup(),0,0)

            for y in xrange(GameMap.MAP_HEIGHT):
                for x in xrange(GameMap.MAP_WIDTH):
                    tile = temp_map.get_tile_at((x,y))
                    tile_kind = tile.kind

                    pos_x = x * tile_size
                    pos_y = y * tile_size

                    tile_special_object = tile.special_object

                    if tile_special_object == None:
                        if tile_kind == MapTile.TILE_BLOCK:
                            tile_color = (120,120,120)
                        elif tile_kind == MapTile.TILE_WALL:
                            tile_color = (60,60,60)
                        else:    # floor
                            tile_color = (230,230,230)
                    else:
                        if tile_special_object == MapTile.SPECIAL_OBJECT_LAVA:
                            tile_color = (200,0,0)
                        elif tile_special_object == MapTile.SPECIAL_OBJECT_TELEPORT_A or tile_special_object == MapTile.SPECIAL_OBJECT_TELEPORT_B:
                            tile_color = (0,0,200)
                        elif tile_special_object == MapTile.SPECIAL_OBJECT_TRAMPOLINE:
                            tile_color = (0,200,0)
                        elif tile_kind == MapTile.TILE_FLOOR:    # arrow
                            tile_color = (200,200,0)
                        else:
                            tile_color = (230,230,230)

                    pygame.draw.rect(self.preview_map_image,tile_color,pygame.Rect(pos_x,pos_y,tile_size,tile_size))

            starting_positions = temp_map.get_starting_positions()

            for player_index in xrange(len(starting_positions)):
                draw_position =
(int(starting_positions[player_index][0]) *
numpy.ndarray" "for slices of a matrix with diagonal-origin or" "a tuple/list of a couple of numpy.ndarray-s" "for a slice of matrix with an arbitrary origin." ) # kernels must be a dict with kernel-names as keys # and kernel ndarrays as values. if not isinstance(kernels, dict): raise ValueError( "'kernels' must be a dictionary" "with name-keys and ndarrays-values." ) # balanced observed, from raw-observed # by element-wise multiply: O_bal = np.multiply(O_raw, np.outer(v_bal_i, v_bal_j)) # O_bal is separate from O_raw memory-wise. # fill lower triangle of O_bal and E_bal with NaNs # in order to prevent peak calling from the lower triangle # and also to provide fair locally adjusted expected # estimation for pixels very close to diagonal, whose # "donuts"(kernels) would be crossing the main diagonal. # The trickiest thing here would be dealing with the origin: io,jo. O_bal[np.tril_indices_from(O_bal, k=(io - jo) - 1)] = np.nan E_bal[np.tril_indices_from(E_bal, k=(io - jo) - 1)] = np.nan # raw E_bal: element-wise division of E_bal[i,j] and # v_bal[i]*v_bal[j]: E_raw = np.divide(E_bal, np.outer(v_bal_i, v_bal_j)) # let's calculate a matrix of common NaNs # shared between observed and expected: # check if it's redundant ? (is NaNs from O_bal sufficient? 
) N_bal = np.logical_or(np.isnan(O_bal), np.isnan(E_bal)) # fill in common nan-s with zeroes, preventing # NaNs during convolution part of '_convolve_and_count_nans': O_bal[N_bal] = 0.0 E_bal[N_bal] = 0.0 # think about usinf copyto and where functions later: # https://stackoverflow.com/questions/6431973/how-to-copy-data-from-a-numpy-array-to-another # # # we are going to accumulate all the results # into a DataFrame, keeping NaNs, and other # unfiltered results (even the lower triangle for now): i, j = np.indices(O_raw.shape) # pack it into DataFrame to accumulate results: peaks_df = pd.DataFrame({"bin1_id": i.flatten() + io, "bin2_id": j.flatten() + jo}) with np.errstate(divide="ignore", invalid="ignore"): for kernel_name, kernel in kernels.items(): ############################### # kernel-specific calculations: ############################### # kernel paramters such as width etc # are taken into account implicitly ... ######################################## ##################### # unroll _convolve_and_count_nans function back # for us to test the dynamic donut criteria ... ##################### # Ek_raw, NN = _convolve_and_count_nans(O_bal, # E_bal, # E_raw, # N_bal, # kernel) # Dense versions of a bunch of matrices needed for convolution and # calculation of number of NaNs in a vicinity of each pixel. And a kernel to # be provided of course. # a matrix filled with the kernel-weighted sums # based on a balanced observed matrix: KO = convolve(O_bal, kernel, mode="constant", cval=0.0, origin=0) # a matrix filled with the kernel-weighted sums # based on a balanced expected matrix: KE = convolve(E_bal, kernel, mode="constant", cval=0.0, origin=0) # get number of NaNs in a vicinity of every # pixel (kernel's nonzero footprint) # based on the NaN-matrix N_bal. 
# N_bal is shared NaNs between O_bal E_bal, NN = convolve( N_bal.astype(np.int64), # we have to use kernel's # nonzero footprint: (kernel != 0).astype(np.int64), mode="constant", # there are only NaNs # beyond the boundary: cval=1, origin=0, ) ###################################### # using cval=0 for actual data and # cval=1 for NaNs matrix reduces # "boundary issue" to the "number of # NaNs"-issue # #################################### # now finally, E_raw*(KO/KE), as the # locally-adjusted expected with raw counts as values: Ek_raw = np.multiply(E_raw, np.divide(KO, KE)) # this is the place where we would need to extract # some results of convolution and multuplt it by the # appropriate factor "cooler._load_attrs(‘bins/weight’)[‘scale’]" ... if balance_factor and (kernel_name == "lowleft"): peaks_df[f"factor_balance.{kernel_name}.KerObs"] = ( balance_factor * KO.flatten() ) # KO*balance_factor: to be compared with 16 ... if verbose: logging.info(f"Convolution with kernel {kernel_name} is complete.") # # accumulation into single DataFrame: # store locally adjusted expected for each kernel # and number of NaNs in the footprint of each kernel peaks_df[f"la_exp.{kernel_name}.value"] = Ek_raw.flatten() peaks_df[f"la_exp.{kernel_name}.nnans"] = NN.flatten() # do all the filter/logic/masking etc on the complete DataFrame ... ##################################### # downstream stuff is supposed to be # aggregated over all kernels ... ##################################### peaks_df["exp.raw"] = E_raw.flatten() # obs.raw -> count peaks_df["count"] = O_raw.flatten() # TO BE REFACTORED/deprecated ... # compatibility with legacy API is completely BROKEN # post-processing allows us to restore it, see tests, # but we pay with the processing speed for it. mask_ndx = pd.Series(0, index=peaks_df.index, dtype=np.bool) for kernel_name, kernel in kernels.items(): # accummulating with a vector full of 'False': mask_ndx_kernel = ~np.isfinite(peaks_df["la_exp." 
+ kernel_name + ".value"]) mask_ndx = np.logical_or(mask_ndx_kernel, mask_ndx) # returning only pixels from upper triangle of a matrix # is likely here to stay: upper_band = peaks_df["bin1_id"] < peaks_df["bin2_id"] # Consider filling lower triangle of the OBSERVED matrix tile # with NaNs, instead of this - we'd need this for a fair # consideration of the pixels that are super close to the # diagonal and in a case, when the corresponding donut would # cross a diagonal line. # selecting pixels in relation to diagonal - too far, too # close etc, is now shifted to the outside of this function # a way to simplify code. # return good semi-sparsified DF: return peaks_df[~mask_ndx & upper_band].reset_index(drop=True) ################################## # step-specific dot-calling functions ################################## def score_tile( tile_cij, clr, cis_exp, exp_v_name, clr_weight_name, kernels, nans_tolerated, band_to_cover, balance_factor, verbose, ): """ The main working function that given a tile of a heatmap, applies kernels to perform convolution to calculate locally-adjusted expected and then calculates a p-value for every meaningfull pixel against these l.a. expected values. Parameters ---------- tile_cij : tuple Tuple of 3: chromosome name, tile span row-wise, tile span column-wise: (chrom, tile_i, tile_j), where tile_i = (start_i, end_i), and tile_j = (start_j, end_j). clr : cooler Cooler object to use to extract Hi-C heatmap data. cis_exp : pandas.DataFrame DataFrame with cis-expected, indexed with 'name' and 'diag'. exp_v_name : str Name of a value column in expected DataFrame clr_weight_name : str Name of a value column with balancing weights in a cooler.bins() DataFrame. Typically 'weight'. kernels : dict A dictionary with keys being kernels names and values being ndarrays representing those kernels. nans_tolerated : int Number of NaNs tolerated in a footprint of every kernel. 
band_to_cover : int Results would be stored only for pixels connecting loci closer than 'band_to_cover'. balance_factor : float Balancing factor to turn sum of balanced matrix back approximately to the number of pairs (used for dynamic-donut criteria mostly). use None value to disable dynamic-donut criteria calculation. verbose : bool Enable verbose output. Returns ------- res_df : pandas.DataFrame results: annotated pixels with calculated locally adjusted expected for every kernels, observed, precalculated pvalues, number of NaNs in footprint of every kernels, all of that in a form of an annotated pixels DataFrame for eligible pixels of a given tile. """ # unpack tile's coordinates region_name, tilei, tilej = tile_cij origin = (tilei[0], tilej[0]) # we have to do it for every tile, because # region_name is not known apriori (maybe move outside) # use .loc[region, region] for symmetric cis regions to conform with expected v1.0 lazy_exp = LazyToeplitz(cis_exp.loc[region_name, region_name][exp_v_name].values) # RAW observed matrix slice: observed = clr.matrix(balance=False)[slice(*tilei), slice(*tilej)] # expected as a rectangular tile : expected = lazy_exp[slice(*tilei), slice(*tilej)] # slice of balance_weight for row-span and column-span : bal_weight_i = clr.bins()[slice(*tilei)][clr_weight_name].values bal_weight_j = clr.bins()[slice(*tilej)][clr_weight_name].values # do the convolutions result = get_adjusted_expected_tile_some_nans( origin=origin, observed=observed, expected=expected, bal_weights=(bal_weight_i, bal_weight_j), kernels=kernels, balance_factor=balance_factor, verbose=verbose, ) # Post-processing filters # (1) exclude pixels that connect loci further than 'band_to_cover' apart: is_inside_band = result["bin1_id"] > (result["bin2_id"] - band_to_cover) # (2) identify pixels that pass number of NaNs compliance test for ALL kernels: does_comply_nans = np.all( result[[f"la_exp.{k}.nnans" for k in kernels]] < nans_tolerated, axis=1 ) # so, selecting inside 
band and nNaNs compliant results: # ( drop dropping index maybe ??? ) ... res_df = result[is_inside_band & does_comply_nans].reset_index(drop=True) # ####################################################################### # # the following should be rewritten such that we return # # opnly bare minimum number of columns per chunk - i.e. annotating is too heavy # # to be performed here ... # # # # I'll do it
# Copyright 2020 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import six
import json
import requests
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
from quantrocket.cli.utils.stream import to_bytes
from quantrocket.cli.utils.files import write_response_to_filepath_or_buffer
from quantrocket.exceptions import ParameterError, NoMasterData

def list_ibkr_exchanges(regions=None, sec_types=None):
    """
    List exchanges by security type and country as found on the IBKR website.

    Parameters
    ----------
    regions : list of str, optional
        limit to these regions. Possible choices: north_america, europe, asia, global

    sec_types : list of str, optional
        limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND

    Returns
    -------
    dict
    """
    params = {}
    if sec_types:
        params["sec_types"] = sec_types
    if regions:
        params["regions"] = regions
    # scraping the IBKR website can be slow, hence the generous timeout
    response = houston.get("/master/exchanges/ibkr", params=params, timeout=180)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_list_ibkr_exchanges(*args, **kwargs):
    return json_to_cli(list_ibkr_exchanges, *args, **kwargs)

def collect_alpaca_listings():
    """
    Collect securities listings from Alpaca and store in securities master
    database.

    Returns
    -------
    dict
        status message
    """
    response = houston.post("/master/securities/alpaca")
    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_collect_alpaca_listings(*args, **kwargs):
    return json_to_cli(collect_alpaca_listings, *args, **kwargs)

def collect_edi_listings(exchanges=None):
    """
    Collect securities listings from EDI and store in securities master
    database.

    Parameters
    ----------
    exchanges : list or str, required
        collect listings for these exchanges (identified by MICs)

    Returns
    -------
    dict
        status message

    Examples
    --------
    Collect sample listings:

    >>> collect_edi_listings(exchanges="FREE")

    Collect listings for all permitted exchanges:

    >>> collect_edi_listings()

    Collect all Chinese stock listings:

    >>> collect_edi_listings(exchanges=["XSHG", "XSHE"])
    """
    params = {}
    if exchanges:
        params["exchanges"] = exchanges

    response = houston.post("/master/securities/edi", params=params)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_collect_edi_listings(*args, **kwargs):
    return json_to_cli(collect_edi_listings, *args, **kwargs)

def collect_figi_listings():
    """
    Collect securities listings from Bloomberg OpenFIGI and store
    in securities master database.

    OpenFIGI provides several useful security attributes including
    market sector, a detailed security type, and share class-level
    FIGI identifier.

    The collected data fields show up in the master file with the
    prefix "figi_*".

    This function does not directly query the OpenFIGI API but rather
    downloads a dump of all FIGIs which QuantRocket has previously
    mapped to securities from other vendors.

    Returns
    -------
    dict
        status message

    Examples
    --------
    Collect all available FIGI listings:

    >>> collect_figi_listings()
    """
    response = houston.post("/master/securities/figi")
    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_collect_figi_listings(*args, **kwargs):
    return json_to_cli(collect_figi_listings, *args, **kwargs)

def collect_ibkr_listings(exchanges=None, sec_types=None, currencies=None, symbols=None,
                          universes=None, sids=None):
    """
    Collect securities listings from Interactive Brokers and store in
    securities master database.

    Specify an exchange (optionally filtering by security type, currency,
    and/or symbol) to collect listings from the IBKR website and collect
    associated contract details from the IBKR API. Or, specify universes or
    sids to collect details from the IBKR API, bypassing the website.

    Parameters
    ----------
    exchanges : list or str
        one or more IBKR exchange codes to collect listings for (required
        unless providing universes or sids). For sample data use exchange
        code 'FREE'

    sec_types : list of str, optional
        limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND

    currencies : list of str, optional
        limit to these currencies

    symbols : list of str, optional
        limit to these symbols

    universes : list of str, optional
        limit to these universes

    sids : list of str, optional
        limit to these sids

    Returns
    -------
    dict
        status message

    Examples
    --------
    Collect free sample listings:

    >>> collect_ibkr_listings(exchanges="FREE")

    Collect all Toronto Stock Exchange stock listings:

    >>> collect_ibkr_listings(exchanges="TSE", sec_types="STK")

    Collect all NYSE ARCA ETF listings:

    >>> collect_ibkr_listings(exchanges="ARCA", sec_types="ETF")

    Collect specific symbols from Nasdaq:

    >>> collect_ibkr_listings(exchanges="NASDAQ", symbols=["AAPL", "GOOG", "NFLX"])

    Re-collect contract details for an existing universe called "japan-fin":

    >>> collect_ibkr_listings(universes="japan-fin")
    """
    params = {}
    if exchanges:
        params["exchanges"] = exchanges
    if sec_types:
        params["sec_types"] = sec_types
    if currencies:
        params["currencies"] = currencies
    if symbols:
        params["symbols"] = symbols
    if universes:
        params["universes"] = universes
    if sids:
        params["sids"] = sids
    response = houston.post("/master/securities/ibkr", params=params)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_collect_ibkr_listings(*args, **kwargs):
    return json_to_cli(collect_ibkr_listings, *args, **kwargs)

def collect_sharadar_listings(countries="US"):
    """
    Collect securities listings from Sharadar and store in securities master
    database.

    Parameters
    ----------
    countries : list of str, required
        countries to collect listings for. Possible choices: US, FREE

    Returns
    -------
    dict
        status message
    """
    params = {}
    if countries:
        params["countries"] = countries
    response = houston.post("/master/securities/sharadar", params=params)
    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_collect_sharadar_listings(*args, **kwargs):
    return json_to_cli(collect_sharadar_listings, *args, **kwargs)

def collect_usstock_listings():
    """
    Collect US stock listings from QuantRocket and store in securities
    master database.

    Returns
    -------
    dict
        status message
    """
    response = houston.post("/master/securities/usstock")
    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_collect_usstock_listings(*args, **kwargs):
    return json_to_cli(collect_usstock_listings, *args, **kwargs)

def collect_ibkr_option_chains(universes=None, sids=None, infilepath_or_buffer=None):
    """
    Collect IBKR option chains for underlying securities.

    Note: option chains often consist of hundreds, sometimes thousands of
    options per underlying security. Be aware that requesting option chains
    for large universes of underlying securities, such as all stocks on the
    NYSE, can take numerous hours to complete.

    Parameters
    ----------
    universes : list of str, optional
        collect options for these universes of underlying securities

    sids : list of str, optional
        collect options for these underlying sids

    infilepath_or_buffer : str or file-like object, optional
        collect options for the sids in this file (specify '-' to read file
        from stdin)

    Returns
    -------
    dict
        status message
    """
    params = {}
    if universes:
        params["universes"] = universes
    if sids:
        params["sids"] = sids

    # the sid list may arrive via stdin, a file-like object, or a filepath
    if infilepath_or_buffer == "-":
        response = houston.post("/master/options/ibkr", params=params, data=to_bytes(sys.stdin))

    elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
        if infilepath_or_buffer.seekable():
            infilepath_or_buffer.seek(0)
        response = houston.post("/master/options/ibkr", params=params, data=to_bytes(infilepath_or_buffer))

    elif infilepath_or_buffer:
        with open(infilepath_or_buffer, "rb") as f:
            response = houston.post("/master/options/ibkr", params=params, data=f)
    else:
        response = houston.post("/master/options/ibkr", params=params)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_collect_ibkr_option_chains(*args, **kwargs):
    return json_to_cli(collect_ibkr_option_chains, *args, **kwargs)

def diff_ibkr_securities(universes=None, sids=None, infilepath_or_buffer=None,
                         fields=None, delist_missing=False, delist_exchanges=None, wait=False):
    """
    Flag security details that have changed in IBKR's system since the time
    they were last collected into the securities master database.

    Diff can be run synchronously or asynchronously (asynchronous is the
    default and is recommended if diffing more than a handful of securities).

    Parameters
    ----------
    universes : list of str, optional
        limit to these universes

    sids : list of str, optional
        limit to these sids

    infilepath_or_buffer : str or file-like object, optional
        limit to the sids in this file (specify '-' to read file from stdin)

    fields : list of str, optional
        only diff these fields (field name should start with "ibkr")

    delist_missing : bool
        auto-delist securities that are no longer available from IBKR

    delist_exchanges : list of str, optional
        auto-delist securities that are associated with these exchanges

    wait : bool
        run the diff synchronously and return the diff (otherwise run
        asynchronously and log the results, if any, to flightlog)

    Returns
    -------
    dict
        dict of sids and fields that have changed (if wait), or status message
    """
    params = {}
    if universes:
        params["universes"] = universes
    if sids:
        params["sids"] = sids
    if fields:
        params["fields"] = fields
    if delist_missing:
        params["delist_missing"] = delist_missing
    if delist_exchanges:
        params["delist_exchanges"] = delist_exchanges
    if wait:
        params["wait"] = wait

    # if run synchronously use a high timeout
    timeout = 60*60*10 if wait else None
    if infilepath_or_buffer == "-":
        response = houston.get("/master/diff/ibkr", params=params, data=to_bytes(sys.stdin), timeout=timeout)

    elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
        if infilepath_or_buffer.seekable():
            infilepath_or_buffer.seek(0)
        response = houston.get("/master/diff/ibkr", params=params, data=to_bytes(infilepath_or_buffer), timeout=timeout)

    elif infilepath_or_buffer:
        with open(infilepath_or_buffer, "rb") as f:
            response = houston.get("/master/diff/ibkr", params=params, data=f, timeout=timeout)
    else:
        response = houston.get("/master/diff/ibkr", params=params, timeout=timeout)

    houston.raise_for_status_with_json(response)
    return response.json()

def _cli_diff_ibkr_securities(*args, **kwargs):
    return json_to_cli(diff_ibkr_securities, *args, **kwargs)

def download_master_file(filepath_or_buffer=None, output="csv",
exchanges=None, sec_types=None, currencies=None, universes=None, symbols=None, sids=None, exclude_universes=None, exclude_sids=None, exclude_delisted=False, exclude_expired=False, frontmonth=False, vendors=None, fields=None): """ Query security details from the securities master database and download to file. Parameters ---------- filepath_or_buffer : str or file-like object filepath to write the data to, or file-like object (defaults to stdout) output : str output format (csv or json, default is csv) exchanges : list of str, optional limit to these exchanges. You can specify exchanges using the MIC or the vendor's exchange code. sec_types : list of str, optional limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND, OPT, FOP,
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This file contains the pattern-verbalizer pairs (PVPs) for all tasks.
"""
import copy
import math
import random
import string
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Tuple, List, Union, Dict

import numpy as np

from tasks.data_utils import InputExample, num_special_tokens_to_add, build_input_from_ids, build_sample, \
    build_decoder_input, build_decoder_sample
from utils import print_rank_0

# A filled pattern is a pair (parts_a, parts_b); each part is either a plain
# string or a (string, shortenable) tuple marking text that may be truncated.
FilledPattern = Tuple[List[Union[str, Tuple[str, bool]]], List[Union[str, Tuple[str, bool]]]]


class PVP(ABC):
    """
    This class contains functions to apply patterns and verbalizers as required by PET. Each task requires its own
    custom implementation of a PVP.
    """

    def __init__(self, args, tokenizer, label_list, max_seq_length, pattern_id: int = 0, verbalizer_file: str = None,
                 seed: int = 42, is_multi_token=False, max_segment_length=0, fast_decode: bool = False, split='train',
                 num_prompt_tokens=0):
        """
        Create a new PVP.

        :param args: the args
        :param tokenizer: the tokenizer
        :param label_list: the list of labels
        :param max_seq_length: the maximum length of the sequence
        :param pattern_id: the pattern id to use
        :param seed: a seed to be used for generating random numbers if necessary
        :param is_multi_token: if the verbalizers contain multiple tokens
        :param fast_decode: whether to use the fast decode mode for multi-token tasks
        :param continuous_prompt: whether to use continuous prompt optimization
        """
        self.args = args
        self.tokenizer = tokenizer
        self.label_list = label_list
        self.max_seq_length = max_seq_length
        self.pattern_id = pattern_id
        self.num_prompt_tokens = num_prompt_tokens
        self.rng = random.Random(seed)
        self.num_truncated = 0
        self.fast_decode = fast_decode
        self.split = split
        self.max_dec_seq_length = 16
        self._is_multi_token = is_multi_token
        self.max_segment_length = max_segment_length
        self.task_mask = args.task_mask
        self.continuous_prompt = args.continuous_prompt
        self.prefix_prompt = args.prefix_prompt
        if self.continuous_prompt:
            print_rank_0(f"Prompt tokens in pvp {self.num_prompt_tokens} spell length {self.spell_length}")
        # NOTE(review): _load_verbalizer_from_file is presumably defined
        # elsewhere in this file — not visible here.
        if verbalizer_file:
            self.verbalize = PVP._load_verbalizer_from_file(verbalizer_file, self.pattern_id)

    @property
    def is_multi_token(self):
        return self._is_multi_token

    @property
    def spell_length(self) -> int:
        # default: no continuous-prompt tokens; subclasses may override
        return 0

    @property
    def mask(self) -> int:
        """Return the underlying LM's mask token"""
        # NOTE(review): returns the token *id* (same value as mask_id below),
        # not the token string; annotation adjusted accordingly.
        return self.tokenizer.get_command('MASK').Id

    @property
    def mask_id(self) -> int:
        """Return the underlying LM's mask id"""
        return self.tokenizer.get_command('MASK').Id

    @property
    def max_num_verbalizers(self) -> int:
        """Return the maximum number of verbalizers across all labels"""
        return max(len(self.verbalize(label)) for label in self.label_list)

    @staticmethod
    def shortenable(s):
        """Return an instance of this string that is marked as shortenable"""
        return s, True

    @staticmethod
    def remove_final_punc(s: Union[str, Tuple[str, bool]]):
        """Remove the final punctuation mark"""
        if isinstance(s, tuple):
            return PVP.remove_final_punc(s[0]), s[1]
        return s.rstrip(string.punctuation)

    @staticmethod
    def lowercase_first(s: Union[str, Tuple[str, bool]]):
        """Lowercase the first character"""
        if isinstance(s, tuple):
            return PVP.lowercase_first(s[0]), s[1]
        return s[0].lower() + s[1:]

    @staticmethod
    def uppercase_first(s: Union[str, Tuple[str, bool]]):
        """Uppercase the first character"""
        if isinstance(s, tuple):
            return PVP.uppercase_first(s[0]), s[1]
        return s[0].upper() + s[1:]

    @staticmethod
    def available_patterns():
        # pattern ids supported by this PVP; subclasses may override
        return [0]

    def replace_prompt_tokens(self, parts_a, parts_b):
        """Replace None placeholders in the parts with continuous-prompt token
        counts (distributed roughly evenly), or drop them when continuous
        prompts are disabled."""
        if not self.continuous_prompt:
            parts_a = [part for part in parts_a if part is not None]
            parts_b = [part for part in parts_b if part is not None]
            return parts_a, parts_b
        num_prompt_tokens = self.num_prompt_tokens
        num_pos = 0
        # count placeholder positions across both sequences
        for parts in (parts_a, parts_b):
            for part in parts:
                if part is None:
                    num_pos += 1
        avg_prompt_tokens = math.ceil(num_prompt_tokens / num_pos)
        new_parts_a, new_parts_b = [], []
        for part in parts_a:
            if part is None:
                # hand out avg_prompt_tokens per placeholder until exhausted
                if num_prompt_tokens > 0:
                    if num_prompt_tokens >= avg_prompt_tokens:
                        new_parts_a.append(avg_prompt_tokens)
                        num_prompt_tokens -= avg_prompt_tokens
                    else:
                        new_parts_a.append(num_prompt_tokens)
                        num_prompt_tokens = 0
            else:
                new_parts_a.append(part)
        for part in parts_b:
            if part is None:
                if num_prompt_tokens > 0:
                    if num_prompt_tokens >= avg_prompt_tokens:
                        new_parts_b.append(avg_prompt_tokens)
                        num_prompt_tokens -= avg_prompt_tokens
                    else:
                        new_parts_b.append(num_prompt_tokens)
                        num_prompt_tokens = 0
            else:
                new_parts_b.append(part)
        return new_parts_a, new_parts_b

    def encode(self, example: InputExample, priming: bool = False, labeled: bool = False):
        """
        Encode an input example using this pattern-verbalizer pair.
:param example: the input example to encode :param priming: whether to use this example for priming :param labeled: if ``priming=True``, whether the label should be appended to this example :return: A tuple, consisting of a list of input ids and a list of token type ids """ if not priming: assert not labeled, "'labeled' can only be set to true if 'priming' is also set to true" tokenizer = self.tokenizer raw_parts_a, raw_parts_b = self.get_parts(example) raw_parts_a = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_a] prompt_id = tokenizer.num_tokens def encode_input(raw_parts): parts = [] for x, s in raw_parts: if isinstance(x, str): x = tokenizer.EncodeAsIds(x) elif isinstance(x, int): x = [prompt_id] * x else: pass parts.append((x, s)) return parts parts_a = encode_input(raw_parts_a) if self.prefix_prompt > 0: parts_a = [([prompt_id] * self.prefix_prompt, False)] + parts_a parts_b = None if raw_parts_b: raw_parts_b = [x if isinstance(x, tuple) else (x, False) for x in raw_parts_b] parts_b = encode_input(raw_parts_b) if self.is_multi_token: answers = self.get_answers(example) if example.label is not None: label = self.label_list.index(example.label) else: label = 0 if not self.fast_decode: ids_list, positions_list, sep_list, mask_list, target_list, prompt_list = [], [], [], [], [], [] segment_id_list = [] if priming: answer = answers[label] answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False) self.num_truncated += self.truncate(parts_a, parts_b, answer_ids, max_length=self.max_seq_length) tokens_a = [token_id for part, _ in parts_a for token_id in part] tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None input_ids = tokens_a if tokens_b: input_ids += tokens_b if labeled: mask_idx = input_ids.index(self.mask_id) input_ids = input_ids[:mask_idx] + answer_ids + input_ids[mask_idx + 1:] return input_ids else: for idx, answer in enumerate(answers): this_parts_a, this_parts_b = 
copy.deepcopy(parts_a), copy.deepcopy(parts_b) answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False) answer_ids = answer_ids + [tokenizer.get_command('eop').Id] self.num_truncated += self.truncate(this_parts_a, this_parts_b, answer_ids, max_length=self.max_seq_length) tokens_a = [token_id for part, _ in this_parts_a for token_id in part] tokens_b = [token_id for part, _ in this_parts_b for token_id in part] if parts_b else None if self.max_segment_length > 0: num_segments = (len(answer_ids) - 1) // self.max_segment_length + 1 segments = [ answer_ids[index * self.max_segment_length: (index + 1) * self.max_segment_length] for index in range(num_segments)] segment_id_list += [idx] * len(segments) else: segments = [answer_ids] for segment in segments: data = build_input_from_ids(tokens_a, tokens_b, segment, self.max_seq_length, self.tokenizer, args=self.args, add_cls=True, add_sep=False, add_piece=True, mask_id=self.mask_id) ids, types, paddings, position_ids, sep, target_ids, loss_masks = data prompt_pos = [idx for idx, token in enumerate(ids) if token == prompt_id] ids = [idx if idx != prompt_id else 0 for idx in ids] prompt_list.append(prompt_pos) ids_list.append(ids) positions_list.append(position_ids) sep_list.append(sep) target_list.append(target_ids) mask_list.append(loss_masks) if self.mask in tokens_a: mask_pos = tokens_a.index(self.mask) tokens_a = tokens_a[:mask_pos] + segment + tokens_a[mask_pos:] else: mask_pos = tokens_b.index(self.mask) tokens_b = tokens_b[:mask_pos] + segment + tokens_b[mask_pos:] segment_id_list = segment_id_list if segment_id_list else None sample = build_sample(ids_list, positions=positions_list, masks=sep_list, label=label, logit_mask=mask_list, target=target_list, unique_id=example.guid, segment_ids=segment_id_list, prompt_ids=prompt_list) return sample else: this_parts_a, this_parts_b = copy.deepcopy(parts_a), copy.deepcopy(parts_b) self.num_truncated += self.truncate(this_parts_a, this_parts_b, None, 
max_length=self.max_seq_length) tokens_a = [token_id for part, _ in this_parts_a for token_id in part] tokens_b = [token_id for part, _ in this_parts_b for token_id in part] if parts_b else None data = build_input_from_ids(tokens_a, tokens_b, None, self.max_seq_length, self.tokenizer, args=self.args, add_cls=True, add_sep=False, add_piece=False) ids, types, paddings, position_ids, sep, target_ids, loss_masks = data sample = build_sample(ids, positions=position_ids, masks=sep, label=label, unique_id=example.guid) ids_list, positions_list, mask_list, target_list, logit_mask_list = [], [], [], [], [] for answer in answers: answer_ids = get_verbalization_ids(answer, tokenizer, force_single_token=False) answer_ids = answer_ids + [tokenizer.get_command('eop').Id] answer_ids = answer_ids[:self.max_dec_seq_length] data = build_decoder_input(ids, answer_ids, self.max_seq_length, self.max_dec_seq_length, tokenizer) dec_ids, _, _, dec_position_ids, _, dec_target_ids, dec_loss_masks = data ids_list.append(dec_ids) positions_list.append(dec_position_ids) mask_list.append(sep) target_list.append(dec_target_ids) logit_mask_list.append(dec_loss_masks) sample = build_decoder_sample(sample, ids_list, positions_list, mask_list, target_list, logit_mask_list) return sample else: self.num_truncated += self.truncate(parts_a, parts_b, [], max_length=self.max_seq_length) tokens_a = [token_id for part, _ in parts_a for token_id in part] tokens_b = [token_id for part, _ in parts_b for token_id in part] if parts_b else None if priming: input_ids = tokens_a if tokens_b: input_ids += tokens_b if labeled: mask_idx = input_ids.index(self.mask_id) verbalizer = self.verbalize(example.label) assert len(verbalizer) == 1, 'priming only supports one verbalization per label' verbalizer = verbalizer[0] verbalizer_id = get_verbalization_ids(verbalizer, self.tokenizer, force_single_token=True) input_ids[mask_idx] = verbalizer_id return input_ids data = build_input_from_ids(tokens_a, tokens_b, None, 
self.max_seq_length, self.tokenizer, args=self.args, add_cls=True, add_sep=False, add_piece=True) ids, types, paddings, position_ids, sep, target_ids, loss_masks = data prompt_pos = [idx for
<reponame>cameronliang/pycos ######################################################################################## # # align.py (c) <NAME> # University of Chicago # <EMAIL> # <EMAIL> # ######################################################################################## """ This module corrects wavelength-dependent offset between various xd1sum 1D QSO spectra to regisiter offsets (shifts) against a reference spectra (also a x1dsum 1D spectra). All spectra are read from a file list based on the output of x1dsum.py These are G130M, G160M graiting and the combined list. With the offsets, users can choose to a polynomial of degree n (n = 1-3) fit to correct the wavelength array. New spectrum will be written with a 'rect_' prefix to the original name. """ ######################################################################################## import sys import os import shutil import numpy as np import matplotlib.pyplot as pl import Utilities # Make sure not to use any latex related to speed up rendering pl.rc('font', family='Bitstream Vera Sans') pl.rc('text', usetex=False) #For interactive key_press_event pl.switch_backend('tkAgg') ######################################################################################## def readfile(path_to_filelist, filelist): """ Reading in list of file names and append the input path to each individual file name. 
Parameters --------------------------------------------------------------------------- path_to_filelist: str FUll path to the ascii file and the associated ascii spectra filelist: str Full path to the file which contains a list of names of ascii spectra produced by x1dsum.py; Each ascii contains at least (wave, flux, error) Returns --------------------------------------------------------------------------- files_with_path: array of str Full path to each indivial ascii spectra see also --------------------------------------------------------------------------- x1dsum.py """ files = np.loadtxt(filelist,dtype = 'str') files_with_path = [] for f in files: files_with_path.append(path_to_filelist + '/'+f) return files_with_path ######################################################################################## def zoom_region(central_wavelength,dwave,dflux_window_up,dflux_window_down): """ Define zoom in window in spectrum OUTPUT goes to arguments for pl.xlim, pl.ylim in plottting """ wave_window_left = central_wavelength - dwave wave_window_right = central_wavelength + dwave flux_window_up = -1e-14 + dflux_window_up flux_window_down = 2e-14 + dflux_window_down return [wave_window_left,wave_window_right],[flux_window_up, flux_window_down] ######################################################################################## def wave_res(spec1,line_region): """ Calculate the resolution in wavelength of a pixel element at a given wavelength. 
(take median of all the resolution elements) """ wave1,flux1,error1 = np.loadtxt(spec1,unpack=True,usecols = [0,1,2]) if not line_region: pass else: waves = wave1[(wave1 >= line_region - 0.1) & (wave1 < line_region + 0.1)] # Raw resolution of COS at a given wavelength delta_wave_per_pix = np.median(wave1[1:] - wave1[:-1]) return delta_wave_per_pix ######################################################################################## def plotting(spec1, spec2, line_region, dwave = 10, dflux_window_up = 0.0, dflux_window_down=0.0, replot = True): """ Plots a references spectrum 'spec1' and another to be aligned 'spec2'; Go through the alignment process to output new spec2 with rectified wavelength array based on a polynomial fit correction for the wavelength-dependent offsets. Parameters --------------------------------------------------------------------------- spec1: str Full path to filename of the reference spectrum spec2: str Full path to filename of the to-be-aligned spectrum line_region: float A wavelength region for the plotting window (to start with) dwave: float plotting window from the centroid of the absorption line By default it assumes +/- 10 Angstrom dflux_window_up: float plotting window in the flux direction; mainly used for initialzation in the argument dflux_window_down: float plotting window in the flux direction; mainly used for initialzation in the argument replot: bool By default it makes plotting window based on the intial window parameters. It can be reset to be 'True' after zooming of window of any kind. 
Returns --------------------------------------------------------------------------- None """ wave1,flux1,error1 = np.loadtxt(spec1,unpack=True,usecols = [0,1,2]) wave2,flux2,error2 = np.loadtxt(spec2,unpack=True,usecols = [0,1,2]) if not line_region: both_wave = np.array(wave1,wave2).flatten() line_region = np.min(both_wave) else: waves = wave1[(wave1 >= line_region - 0.1) & (wave1 < line_region + 0.1)] fig1 = pl.figure(figsize=(10, 20)) # segment b points to fit central_wave_segb = []; shift_segb = []; scale_segb = [] # segment b best fit break_wavelength = Utilities.break_wave_calc(wave2) best_fit_wave_segb = [w2 for w2 in wave2 if w2 <= break_wavelength] best_fit_shift_segb= np.zeros(len(best_fit_wave_segb)) # segment a points to fit central_wave_sega = []; shift_sega = []; scale_sega = [] # segment a best fit best_fit_wave_sega = [w2 for w2 in wave2 if w2 > break_wavelength] best_fit_shift_sega= np.zeros(len(best_fit_wave_sega)) ############################################################ # Top Panel: Offset as a function of wavelength ############################################################ ax1 = fig1.add_subplot(311) dfits_segb, = pl.plot(central_wave_segb,shift_segb, 'bo',label= 'segment b') best_fit_line_b, = pl.plot(best_fit_wave_segb, best_fit_shift_segb, c = 'b') dfits_sega, = pl.plot(central_wave_sega,shift_sega, 'go',label= 'segment a') best_fit_line_a, = pl.plot(best_fit_wave_sega, best_fit_shift_sega, c = 'g') ax1.set_xlabel(r'$\lambda$ ($\AA$)') ax1.set_ylabel(r'$\Delta \lambda$') pl.xlim([min(wave2),max(wave2)]) pl.legend(loc='best') ############################################################ # Middle Panel: Chi Squared as a function of shift in pixels ############################################################ ax2 = fig1.add_subplot(312) dpix = np.zeros(100); chi2 = np.zeros(100) my_chi_waveshift = [0,0] my_chi2 = [0,10] chi2_line, = pl.step(dpix,chi2) my_shift_vline, = pl.plot(my_chi_waveshift,my_chi2,linewidth = 2.0) pl.xlim([-60,60]) 
ax2.set_xlabel(r'$\Delta$pix') ax2.set_ylabel(r'$\chi^2$') ############################################################ # Bottom Panel: Spectrum plot ############################################################ ax3 = fig1.add_subplot(313) ax3.clear() pl.step(wave1,flux1,color = 'k', label = str(spec1[-16:-7])) # Reference spec line, = pl.step(wave2,flux2,color = 'r', label=str(spec2[-16:-7])) pl.legend(loc=1) ax3.set_xlabel(r'$\lambda$ ($\AA$)') ax3.set_ylabel('Flux') if replot: pass else: pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) delta_wave_per_pix = wave_res(spec2,line_region) # Initialize dummy variables dx = 0.0; scale_factor = 1.0 dwave = 10; new_line_region = 0.0 dflux_window_up = 0.0; dflux_window_down = 0.0 small_shift = 0.5; big_shift = 5.0 # Shift Spectrum left/right zoom = 0.1; big_zoom = 0.5 # Wavelength zoom in/out flux_zoom = 2e-15; big_flux_zoom = 5e-14 # Flux zoom in/out # Shift and Scaling records shift_record = []; scale_record = [] ######################################################################################## def shift_spec(event): """ Interactive click/plotting Event Write out rectified spectra from a polynomial fit correction of the wavelength-dependent offset from side effects of the function. 
Parameters --------------------------------------------------------------------------- event: obj Mouse clicks or button press when focus on the plotting window Returns --------------------------------------------------------------------------- None """ global dx; global scale_factor global dwave; global line_region global dflux_window_up; global dflux_window_down global dpix; global chi2 global fit_order_b; global fit_order_a best_fit_wave_segb = []; best_fit_shift_segb = [] best_fit_wave_sega = []; best_fit_shift_sega = [] ####################################################################### # # # WINDOW Control # # # ####################################################################### if event.key == '}': line_region += big_shift pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) elif event.key == '{': line_region -= big_shift pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) elif event.key == ']': line_region += small_shift pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) elif event.key == '[': line_region -= small_shift pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) elif event.key == '-': dwave += zoom pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) elif event.key == '=': dwave -= zoom pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) elif event.key == '_': dwave += big_zoom pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) elif event.key == '+': dwave -= big_zoom pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) elif event.key =='b': dflux_window_up += flux_zoom pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) elif event.key =='B': dflux_window_up -= flux_zoom pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) elif event.key =='t': dflux_window_down -= flux_zoom 
pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) elif event.key =='T': dflux_window_down += flux_zoom pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) elif event.key =='m': dflux_window_up += big_flux_zoom pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) elif event.key =='M': dflux_window_up -= big_flux_zoom pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) elif event.key =='u': dflux_window_down -= big_flux_zoom pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) elif event.key =='U': dflux_window_down += big_flux_zoom pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) elif event.key =='r': dwave = 4; dflux_window_up = 0.0; dflux_window_down = 0.0 pl.xlim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[0]) pl.ylim(zoom_region(line_region,dwave,dflux_window_up,dflux_window_down)[1]) ####################################################################### # # # Fitting Control # # # ####################################################################### elif event.key == 'enter': # Register a aligned point delta lambda based on the given offset. 
print '\n' print 'central Wavelength = %f' % line_region print 'Shift = %f' % np.sum(shift_record) print 'Scale = %f\n' % np.prod(scale_record) if line_region >= break_wavelength: central_wave_sega.append(line_region) shift_sega.append(np.sum(shift_record)) scale_sega.append(np.prod(scale_record)) else: central_wave_segb.append(line_region) shift_segb.append(np.sum(shift_record)) scale_segb.append(np.prod(scale_record)) elif event.key == 'x': # Compute chi^2 based on data currently displayed on plotting window wa = line_region - dwave wb = line_region + dwave print 'Range for chi^2 = (%f, %f)' % (wa,wb) temp_chi2_output = Utilities.chi_squared_calc(wave1,flux1,error1,wave2,flux2,error2,wa,wb) dpix = np.array(temp_chi2_output[0]) chi2 = np.array(temp_chi2_output[1]) elif event.key == '4' or event.key == '5' or event.key == '6' or event.key == '7': # Choose an polynomial fit of order n and write out rectified # spectrum based on the correction to wavelength array fit_order_b = int(event.key); fit_order_a = int(event.key) if len(central_wave_sega) == 0 or len(central_wave_sega) == 0: print '\n' print 'Both Segment a and b needs to be have at least 1 point to make fits.\n' pass else: fit_order_a,fit_order_b = Utilities.ChooseFitOrders(fit_order_a,fit_order_b, central_wave_sega,central_wave_segb) segment_a = central_wave_sega,shift_sega, fit_order_a, scale_sega segment_b = central_wave_segb,shift_segb, fit_order_b, scale_segb segment_a_fit, segment_b_fit = Utilities.Perform_Fits(spec2, segment_a,segment_b, break_wavelength) # For Displaying fits on plot. best_fit_wave_sega,best_fit_shift_sega = segment_a_fit best_fit_wave_segb,best_fit_shift_segb = segment_b_fit elif event.key == ')' or event.key == '!' 
or event.key == '@' or event.key == '#': if len(central_wave_segb) == 0: print 'Segment a needs to be have at least 1 point to make fits.\n' pass else: if event.key == ')': if len(central_wave_segb) >= 1: fit_order_b = 0 else: print 'Require at least 1 point to fit 0 order.' elif event.key == '!': if len(central_wave_segb) >= 2: fit_order_b = 1 else: print 'Require at least 2 point to fit 1 order.' elif event.key == '@': if len(central_wave_segb) >= 3: fit_order_b = 2 else: print 'Require at least 3 point to fit 2 order.' elif event.key == '#': if len(central_wave_segb) >= 4: fit_order_b = 3 else: print 'Require at least 4 point to fit 3 order.' try: fit_order_b except NameError: print 'Segment a order not assigned.' else: print 'Segment a Polynomial order = ', fit_order_b segment_b = central_wave_segb,shift_segb, fit_order_b, scale_segb best_fit_wave_segb,best_fit_shift_segb = Utilities.Get_Best_Fit_curve(spec2, segment_b,break_wavelength,'b') elif event.key == '0' or event.key == '1' or event.key == '2' or event.key == '3': if len(central_wave_sega) == 0: print 'Segment b needs to be have at least 1 point to make fits.\n' pass else: if event.key == '0': if len(central_wave_sega) >= 1: fit_order_a = 0 else: print 'Require at least 1 point to fit 0 order.' elif event.key == '1': if len(central_wave_sega) >= 2: fit_order_a = 1 else: print 'Require at least 2 point to fit 1 order.' elif event.key == '2': if len(central_wave_sega) >= 3: fit_order_a = 2 else: print 'Require at least 3 point to fit 2 order.' elif event.key == '3': if len(central_wave_sega) >= 4: fit_order_a = 3 else: print 'Require at least 4 point to fit 3 order.' try: fit_order_a except NameError: print 'Fitting b order not assigned.' 
else: print 'Segment b Polynomial order = ', fit_order_a segment_a = central_wave_sega,shift_sega, fit_order_a, scale_sega best_fit_wave_sega,best_fit_shift_sega = Utilities.Get_Best_Fit_curve(spec2, segment_a,break_wavelength,'a') elif event.key == 'w': if fit_order_a >= 0 and fit_order_b >= 0: print 'order in a: ', fit_order_a print 'order in b: ', fit_order_b segment_a = central_wave_sega,shift_sega, fit_order_a, scale_sega segment_b = central_wave_segb,shift_segb, fit_order_b, scale_segb segment_a_fit, segment_b_fit = Utilities.Perform_Fits(spec2, segment_a,segment_b, break_wavelength) # For Displaying fits on plot. best_fit_wave_sega,best_fit_shift_sega = segment_a_fit best_fit_wave_segb,best_fit_shift_segb = segment_b_fit else: print 'Fitting orders are not assigned yet. ' elif event.key == 'D': # Delete the last registered offset in segment b if not shift_segb: print 'Segment b is
<reponame>UdK-VPT/BIM2Modelica # -*- coding: utf-8 -*- # import ifcopenshell import ifcopenshell.geom import libdm.BuildingDataModel as bdm import libdm.DataModelGenerator as dmg import IfcLib.Ifc2x3Lib as ifcLib from IfcLib import DataClasses import math import re import os def cmp(a, b): ''' Compares a and b and returns -1 if a < b, 0 if a == b and 1 if a > b ''' return (a > b) - (a < b) def azimuthAngle(x,y,z): ''' returns the azimuth angle of a surface based on its normal vector ''' if x == 0.0 and y == 0.0: return 0.0 if y < 0 and x == 0: return math.acos(y*1.0/math.sqrt(x*x+y*y))/math.pi*180.0 else: return math.acos(y*1.0/math.sqrt(x*x+y*y))/math.pi*180.0*cmp(x,0) def tiltAngle(x,y,z): ''' returns the tilt angle of a surface based on its normal vector ''' return math.acos(z*1.0/math.sqrt(x*x+y*y+z*z))/math.pi*180.0 def mapIFCtoBuildingDataModel(file,filename): ''' Analyses the IFC file regarding the information, which defines the building zones and the building elements and maps it to the building data model ''' settings = ifcopenshell.geom.settings() settings.set(settings.USE_PYTHON_OPENCASCADE, True) # Elements need two representations (SweptSolid and Curve2D). If those are not present, # it is likely that the building element has childrens containing the necessary representations. # Elements/Walls with one representation will throw an error when calling create_shape(). # This problem appeared for some IfcWall with air gaps. # The IfcWall were composed by several "IfcBuildingElementPart" elements # which contain the necessary representations. # Here, the list all_walls obtained with file.by_type("IfcWall") is filtered. Removing the # elements with less than 2 Representations. 
all_walls = file.by_type("IfcWall") originalWalls = [] walls = [] walls_decomposed = [] for w in all_walls: shape_tup = ifcopenshell.geom.create_shape(settings, w) toposhape = shape_tup.geometry mesh = DataClasses.Mesh(toposhape) originalWalls.append(mesh) if len(w.Representation.Representations) >= 2: walls.append([w, toposhape]) else: walls_decomposed.append(w) # Add element parts of IfcWall with one representation to the "walls" list # walls includes ifcWall (incl. IfcWallStandardCase, ...) # and ifcBuildingElementPart elements. BuildingElementPart = file.by_type("IfcBuildingElementPart") for be in BuildingElementPart: walls.append([be, ifcopenshell.geom.create_shape(settings, be).geometry]) all_doors = file.by_type("IfcDoor") originalDoors = [] for d in all_doors: shape_tup = ifcopenshell.geom.create_shape(settings, d) toposhape = shape_tup.geometry mesh = DataClasses.Mesh(toposhape) originalDoors.append(mesh) # Slabs and Colums are not filtered, but stored using the same data structure # that was used for the walls: [Ifc element, shape] all_slabs = file.by_type("IfcSlab") originalSlabs = [] slabs = [] for s in all_slabs: shape_tup = ifcopenshell.geom.create_shape(settings, s) toposhape = shape_tup.geometry mesh = DataClasses.Mesh(toposhape) originalSlabs.append(mesh) slabs.append([s, ifcopenshell.geom.create_shape(settings, s).geometry]) all_columns = file.by_type("IfcColumn") originalColumns = [] columns = [] for c in all_columns: shape_tup = ifcopenshell.geom.create_shape(settings, c) toposhape = shape_tup.geometry mesh = DataClasses.Mesh(toposhape) originalColumns.append(mesh) columns.append([c, ifcopenshell.geom.create_shape(settings, c).geometry]) all_windows = file.by_type("IfcWindow") originalWindows = [] for w in all_windows: shape_tup = ifcopenshell.geom.create_shape(settings, w) toposhape = shape_tup.geometry mesh = DataClasses.Mesh(toposhape) originalWindows.append(mesh) # Dictionary with key name of Ifc element (IfcWall, ...) 
containg a list of the element-id which # for some reason (i.e. wrong definition, too complex, ...) should be disregarded. black_list = {} # Main programm: # 1.Check Ifc file # Search for spaces overlapping with building elements and correct them # overlappedId,overlappedShape = ifcLib.getOverlappedelements(ifc_file) # get black list of spaces by adding spaces (Id) of redundand spaces black_list["IfcSpace"] = ifcLib.getOverlappedSpaces(file) # Search for wrong defined IfcSpaces that do not fill a whole space/room # (gap between building elements and IfcSpace) and correct them # 2.Obtain information # Definition of the building constructions MaterialLayerset = ifcLib.LayerSet_toLayers(file) # Definition of the constructions and combination with the building elements BuildingElementToMaterialLayerSet, BuildingElementToMaterial = ifcLib.BuildingElement_toMaterialLayer(file) # Building element dictionaries WindowToStyle, DoorToStyle = ifcLib.WindowAndDoor_toStyle(file) WallInfo, problematicWalls = ifcLib.getWallInfo(walls) SlabsInfo = ifcLib.getSlabInfo(slabs) ColumnsInfo = ifcLib.getColumninfo(columns) # Definition of the building site Site = file.by_type("IfcSite") # Instantiation of SpaceContainer using Spaces Volumes SpacesW, Spaces = ifcLib.initSpaceContainer(file, black_list["IfcSpace"]) # Find contacts between space boundaries (from space volume) and IfcWalls SpacesW, black_list["IfcWall"], black_list["IfcSlab"] = ifcLib.RelatedElementsWalls(SpacesW, file, WallInfo, SlabsInfo) # Modify SpaceContainer accordingly with Walls information. SpacesW = ifcLib.SecondLvLBoundariesWalls(SpacesW, WallInfo, SlabsInfo) # Remove any boundary attached to a IfcWall of the previously initialized SpaceContainer list -Spaces- Spaces = ifcLib.SpaceBoundariesWithoutWalls(Spaces, SpacesW) # Find relation/contact between IFC classes (openings,slabs,.. even spaces) # to boundaries of each SpaceContainer within Spaces. 
Spaces, OpeningsDict, giv = ifcLib.RelatedElements(Spaces, file, WallInfo, ColumnsInfo) # Dictionary Spaces.RelatedOpening is checked. Removing/filtering boundary definitions in excess. Spaces = ifcLib.OverlappedOpenings(Spaces) # Spaces = ifcLib.addVirtualBoundaries(Spaces) # Update SpaceContainer list Spaces. Editing boundaries based on connected/related IFC classes Spaces = ifcLib.SecondLvLBoundaries(Spaces, SpacesW, WallInfo, OpeningsDict, SlabsInfo, ColumnsInfo) # Als normals of the boundaries show to the ambient of a space Spaces = ifcLib.CorrectNormalVector(Spaces) # Analysis of the adjacent space (other space, ambient, other building element) and modification # of the correspodent boundaries Spaces = ifcLib.UpdateSecondLvLBoundaries(Spaces, WallInfo, ColumnsInfo, black_list["IfcWall"]) Spaces = ifcLib.CorrectNormalVector(Spaces) # Spaces = ifcLib.ExploreSurroundings(Spaces) # Correct some of the 3rd level boundaries which OtherSideSpace is not properly defined ("unknown") # The definition of 3rd level boundaries is not working properly (failing for complex cases). Some code has # been commented out in - UpdateSecondLvLBoundaries -. 
Until improvements, to call CorrectThirdLevelBoundaries # is no further necessary # Spaces = ifcLib.CorrectThirdLevelBoundaries(Spaces, ifc_file, WallInfo, ColumnsInfo) # Estimation of all height and with for all boundaries Spaces = ifcLib.BoundariesHeightWidth(Spaces, WindowToStyle, DoorToStyle, file) # Get points of the profile of each boundary face (ignore points of gaps) Spaces = ifcLib.Profiles(Spaces) # Definition of all positions (spaces, elements) Spaces = ifcLib.DefinePosition(Spaces) # Add the id's of included space boundaries to their related space boundaries Spaces = ifcLib.StoreEnclosedBoundaries(Spaces, WallInfo, OpeningsDict) ''' Instantiation and parameterisation of the building data model ''' modelName = os.path.basename(filename).split('.')[0] # Remove invalid characters modelName = re.sub('[^0-9a-zA-Z_]', '', modelName) # Remove leading characters until a letter or underscore occurs modelName = re.sub('^[^a-zA-Z_]+', '', modelName) buildingData = bdm.Building(name=modelName, pos=(0.0,0.0,0.0)) ico = 1 treatedCon = {} ## Original walls for owa in originalWalls: buildingData.addOriginalWall(owa) ## Original doors for odo in originalDoors: buildingData.addOriginalDoor(odo) ## Original slabs for osl in originalSlabs: buildingData.addOriginalSlab(osl) ## Original slabs for owi in originalWindows: buildingData.addOriginalWindow(owi) ## Construction types for con in MaterialLayerset.items(): thickness = [] material = [] treatedCon[con[0]] = "Construction"+str(ico) for layer in con[1]: if layer.Thickness > 1.0: # length unit in the IFC file in mm thickness.append(layer.Thickness/1000.0) else: # length unit in the IFC file in m thickness.append(layer.Thickness) material.append("BuildingSystems.HAM.Data.MaterialProperties.Thermal.Masea.Concrete") buildingData.addConstruction(bdm.Construction(name="Construction"+str(ico), numberOfLayers=len(con[1]), thickness=thickness, material=material)) ico = ico + 1 treatedBuildingEle = {} treatedZones = {} ## Thermal 
zones izo = 1 for space in Spaces: treatedZones[space.Space.GlobalId] = "zone_" + str(izo) izo = izo + 1 iwa = 1 isl = 1 ido = 1 iwi = 1 for space in Spaces: iel=0 iwaz = 0 islz = 0 idoz = 0 iwiz = 0 heightMin = 0.0 heightMax = 0.0 ## Construction elements for bound in space.Boundaries: if bound.OtherSideSpace in treatedZones.keys() or bound.OtherSideSpace == "EXTERNAL": side1 = treatedZones[space.Space.GlobalId] if bound.OtherSideSpace == "EXTERNAL": side2 = "AMB" else: side2 = treatedZones[bound.OtherSideSpace] ## Walls if bound.RelatedBuildingElement in WallInfo.keys() and bound.thickness[0] > 0.0: iel = iel + 1 iwaz = iwaz + 1 if bound.OtherSideBoundary not in treatedBuildingEle.keys(): treatedBuildingEle[bound.Id] = "wall_"+str(iwa) includedWindows = [] includedDoors = [] for b in space.Boundaries: if b.Id in bound.IncludedBoundariesIds: if b.RelatedBuildingElement in WindowToStyle.keys(): includedWindows.append((b.Width,b.Height)) if b.RelatedBuildingElement in DoorToStyle.keys(): includedDoors.append((b.Width,b.Height)) buildingData.addOpaqueElement(bdm.BuildingElementOpaque(id=bound.Id, name="wall_"+str(iwa), pos=(bound.Position.X(),bound.Position.Y(),bound.Position.Z()), angleDegAzi=azimuthAngle(bound.Normal.X(),bound.Normal.Y(),bound.Normal.Z()), angleDegTil=tiltAngle(bound.Normal.X(),bound.Normal.Y(),bound.Normal.Z()), adjZoneSide1=side1, adjZoneSide2=side2, width=bound.Width, height=bound.Height, areaNet=bound.Area, thickness=bound.thickness[0], constructionData=treatedCon[BuildingElementToMaterialLayerSet[bound.RelatedBuildingElement]], mesh=DataClasses.Mesh(bound.Face), includedWindows=includedWindows, includedDoors=includedDoors)) iwa = iwa + 1 ## Slabs if bound.RelatedBuildingElement in SlabsInfo.keys() and tiltAngle(bound.Normal.X(),bound.Normal.Y(),bound.Normal.Z()) in [0.0,180.0]: iel = iel + 1 islz = islz + 1 if bound.OtherSideBoundary not in treatedBuildingEle.keys(): treatedBuildingEle[bound.Id] = "slab_"+str(isl) if bound.Position.Z() > 
heightMax: heightMax = bound.Position.Z() if bound.Position.Z() < heightMin: heightMin = bound.Position.Z() buildingData.addOpaqueElement(bdm.BuildingElementOpaque(id=bound.Id, name="slab_"+str(isl), pos=(bound.Position.X(),bound.Position.Y(),bound.Position.Z()), angleDegAzi=azimuthAngle(bound.Normal.X(),bound.Normal.Y(),bound.Normal.Z()), angleDegTil=tiltAngle(bound.Normal.X(),bound.Normal.Y(),bound.Normal.Z()), adjZoneSide1=side1, adjZoneSide2=side2, width=bound.Width, height=bound.Height, areaNet=bound.Area, thickness=bound.thickness[0], constructionData=treatedCon[BuildingElementToMaterialLayerSet[bound.RelatedBuildingElement]], mesh=DataClasses.Mesh(bound.Face), includedWindows=[], includedDoors=[])) isl = isl + 1 ## Doors if bound.RelatedBuildingElement in DoorToStyle.keys(): iel = iel + 1 idoz = idoz + 1 if bound.OtherSideBoundary not in treatedBuildingEle.keys(): mesh=DataClasses.Mesh(bound.Face) treatedBuildingEle[bound.Id] = "door_"+str(ido) buildingData.addDoorElement(bdm.BuildingElementDoor(id=bound.Id, name="door_"+str(ido), pos=(bound.Position.X(),bound.Position.Y(),bound.Position.Z()), angleDegAzi=azimuthAngle(bound.Normal.X(),bound.Normal.Y(),bound.Normal.Z()), angleDegTil=tiltAngle(bound.Normal.X(),bound.Normal.Y(),bound.Normal.Z()), adjZoneSide1=side1, adjZoneSide2=side2, width=bound.Width, height=bound.Height, areaNet=bound.Area, thickness=bound.thickness[0], constructionData="Construction1", mesh=DataClasses.Mesh(bound.Face))) ido = ido + 1 ## Transparent elements if bound.RelatedBuildingElement in WindowToStyle.keys(): iel = iel + 1 iwiz = iwiz + 1 if bound.OtherSideBoundary not in treatedBuildingEle.keys(): mesh=DataClasses.Mesh(bound.Face) treatedBuildingEle[bound.Id] =
#
# Blueprint for the GUI
#
from flask import Flask, Blueprint, url_for, jsonify, make_response, app, \
    render_template, request, session, redirect, flash, g
from flask_login import login_required, login_user, logout_user, current_user
from flask_mail import Message
import re
import logging
from datetime import datetime
from file_read_backwards import FileReadBackwards
from sqlalchemy.orm import aliased
from sqlalchemy import or_
from project.models import *
from project import app, db, is_admin, mail, get_user
from project.gui.forms import *
from project.gui.logic import *

gui_blueprint = Blueprint('gui_blueprint', __name__, url_prefix="/fpa")


def get_version():
    """Store the application version (from config) in the user's session."""
    session["version"] = app.config["VERSION"]


def is_loggedin():
    """Return True if a token is set as a session variable, else False."""
    try:
        if session['token']:
            return True
        else:
            return False
    except KeyError:
        # Was a bare ``except:``; only a missing session key is expected here.
        return False


#################################################################
# ROOT                                                          #
########
@gui_blueprint.route("/")
def _index():
    """Dashboard for authenticated users; otherwise redirect to login."""
    if current_user.is_authenticated:
        admin = Booking.query.all()
        bookings = Booking.query.filter(Booking.owner_id.ilike(get_user())).all()
        return render_template("dashboard.html", data1=admin, data2=bookings)
    else:
        return redirect(url_for('gui_blueprint._login'))


#################################################################
# LOGIN                                                         #
#########
@gui_blueprint.route("/login", methods=["POST", "GET"])
def _login():
    """Render the login form (GET) or authenticate the user (POST)."""
    form = UserForm()
    motd = Parameter.query.filter_by(id=124).first()
    if request.method == 'POST':
        if request.form['login_id'] and request.form['password']:
            user = User.query.filter_by(login_id=request.form['login_id']).first()
            if not user:
                app.logger.warning("Log in error for " + request.form['login_id'] + ", no account found")
                flash('No account found for ' + request.form['login_id'], 'warning')
                return render_template("login.html", form=form, motd=motd)
            else:
                role = Role.query.filter_by(id=user.role).first()
                # The role's app-sections string must contain "LOGIN" for
                # the account to be allowed to sign in at all.
                _role = re.search("LOGIN", role.role_app_sections)
                if user.is_correct_password(request.form['password']) and _role:
                    # check hash OK and user can actually log in
                    session['login_id'] = request.form['login_id']
                    # update the LAST_LOGIN field
                    user.last_login = datetime.now()
                    db.session.commit()
                    # Log in user
                    login_user(user)
                    # log this
                    app.logger.info("Successfully logged in " + request.form['login_id'])
                    if current_user.is_authenticated:
                        flash('You were successfully logged in', 'success')
                    # NOTE(review): redirecting to a client-supplied URL is an
                    # open-redirect risk — the "next" value should be validated
                    # as a same-site path before being used.
                    if request.form["next"]:
                        return redirect(request.form["next"])
                    return redirect(url_for('gui_blueprint._index'))
                flash('Error with login! id or password wrong or account currently locked', 'warning')
                return render_template("login.html", form=form, motd=motd)
        else:
            flash('Login or password missing!', 'warning')
            return render_template("login.html", form=form, motd=motd)
    else:
        return render_template("login.html", form=form, motd=motd)


#################################################################
# LOGOUT                                                        #
##########
@gui_blueprint.route("/logout", methods=["GET"])
def _logout():
    """Log out the current user (if any) and return to the index."""
    if request.method == 'GET':
        if current_user.is_authenticated:
            session['login_id'] = None
            logout_user()
        return redirect(url_for('gui_blueprint._index'))


#################################################################
# REGISTER                                                      #
############
@gui_blueprint.route("/register", methods=["GET", "POST"])
def _register():
    """Self-registration form; creates a default-role (6) user account."""
    form = UserForm()
    form.savebtn.label.text = 'Register'
    form.role.data = 6  # Add as a new user
    if request.method == 'GET':
        return render_template("register.html", form=form)
    if form.validate_on_submit():
        user = User.query.filter(User.login_id.ilike(form.login_id.data)).first()
        if user:
            # NOTE(review): debug print; consider app.logger.debug instead.
            print(model_as_dict(user))
            flash('Error, account already exists for login "{}"'.format(user.login_id), 'warning')
            return render_template("register.html", form=form)
        user = User(form.login_id.data, form.forename.data, form.surname.data,
                    form.comment.data, form.password.data, form.email.data,
                    form.role.data, form.vendor.data)
        db.session.add(user)
        if not form.password.data:
            # Don't let an empty password overwrite the stored hash below.
            del form.password
        form.populate_obj(user)
        user.last_modified = datetime.now()
        # NOTE(review): an anonymous registrant may have no 'login_id' in the
        # session; this would raise KeyError — verify against the login flow.
        user.modified_by = session["login_id"]
        user.last_login = None
        if not form.created_date.data:
            user.created_date = datetime.now()
        db.session.commit()
        flash('User details registered successfully, please wait for a member of the team to contact you with your login details.', 'success')
        return redirect(url_for('gui_blueprint._index'))
    else:
        flash_errors(form)
        return render_template("register.html", form=form)


#################################################################
# COMPLEXES                                                     #
#############
@gui_blueprint.route("/admin/complexes")
@login_required
def _complexes():
    """Admin list of complexes, joined to their parameter lookups."""
    if is_admin():
        a = aliased(Parameter)
        b = aliased(Parameter)
        c = aliased(Parameter)
        d = aliased(Parameter)
        complexes = db.session.query(Complex, a, b, c, d).\
            filter(Complex.complex_manager == a.id).\
            filter(Complex.complex_type == b.id).\
            filter(Complex.complex_country == c.id).\
            filter(Complex.complex_active == d.id).all()
        return render_template("complexes.html", data=complexes)
    else:
        return render_template("403.html", error="You are not an administrator")


@gui_blueprint.route("/admin/editcomplex/<id>", methods=["GET", "POST"])
@login_required
def _editcomplex(id):
    """Admin create/edit form for a single complex."""
    if is_admin():
        complex = Complex.query.filter_by(id=id).first()
        form = ComplexForm(obj=complex)
        if request.method == "GET":
            return render_template("editcomplex.html", data=complex, form=form)
        if form.validate_on_submit():
            if not complex:
                complex = Complex()
                db.session.add(complex)
            form.populate_obj(complex)
            complex.complex_updated = datetime.now()
            db.session.commit()
            flash('Complex saved successfully', 'success')
            return redirect(url_for('gui_blueprint._complexes'))
        else:
            flash_errors(form)
            return render_template("editcomplex.html", data=complex, form=form)
    else:
        return render_template("403.html", error="You are not an administrator")


#################################################################
# LOGS                                                          #
########
@gui_blueprint.route("/admin/logs", methods=["GET","POST"]) @login_required def _logs (): if is_admin(): form = LogForm() # Prevent the rec # counter from breaking the app - default to 10 try: records = int(request.args.get('log_records')) except: records = 5 # Prevent the log name from breaking the app - default to LOG_FILE variable try: rec_t = request.args.get('log_options') log_t = app.config[rec_t] except: log_t = app.config["LOG_FILE"] form.log_records.data = str(records) form.log_options.data = rec_t data = FileReadBackwards(log_t, encoding="utf-8") return render_template("logs.html", data=data, form=form, counter=records) else: return render_template("403.html", error = "You are not an administrator") ################################################################# # USERS # ######### @gui_blueprint.route("/admin/users", methods=["GET"]) @login_required def _users (): if is_admin(): users = db.session.query(User,Role,Parameter).join(Role).outerjoin(Parameter).order_by(User.id.asc()) return render_template("users.html", data = users) else: return render_template("403.html", error = "You are not an administrator") @gui_blueprint.route("/admin/edituser/<id>", methods=["GET","POST"]) @login_required def _edituser (id): if is_admin(): user = User.query.filter_by(id=id).first() form = UserForm(obj=user) if request.method == "GET": return render_template("edituser.html", data=user, form=form) if form.validate_on_submit(): if form.savebtn.data: if not user: user = User(form.login_id.data, form.forename.data, form.surname.data, form.comment.data, form.password.data, form.email.data, form.role.data, form.vendor.data) user.last_login = None db.session.add(user) form.populate_obj(user) user.last_modified = datetime.now() user.modified_by = session["login_id"] if not form.created_date.data: user.created_date = datetime.now() db.session.commit() flash ('User saved successfully', 'success') return redirect(url_for('gui_blueprint._users')) if form.deletebtn.data: user = 
User.query.filter_by(id=id).first() db.session.delete(user) flash ('User removed successfully', 'success') db.session.commit() return redirect(url_for('gui_blueprint._users')) else: flash_errors (form) return render_template("edituser.html", data=user, form=form) else: return render_template("403.html", error = "You are not an administrator") ################################################################# # ROLES # ######### @gui_blueprint.route("/admin/roles", methods=["GET"]) @login_required def _roles (): if is_admin(): roles = Role.query.order_by(Role.id.asc()) return render_template("roles.html", data = roles) else: return render_template("403.html", error = "You are not an administrator") @gui_blueprint.route("/admin/editrole/<id>", methods=["GET","POST"]) @login_required def _editrole (id): if is_admin(): role = Role.query.filter_by(id=id).first() form = RoleForm(obj=role) if request.method == "GET": return render_template("editrole.html", form=form, data=role) if form.validate_on_submit(): if form.savebtn.data: if not role: role = Role(form.role_name.data, form.role_admin.data, form.role_app_sections.data, form.enabled.data) db.session.add(role) form.populate_obj(role) role.created_date = datetime.now() db.session.commit() flash ('Role saved successfully', 'success') return redirect(url_for('gui_blueprint._roles')) if form.deletebtn.data: role = Role.query.filter_by(id=id).first() counter = User.query.filter_by(role=role.id).count() if counter == 0: # there are no user accounts using this role db.session.delete(role) flash ('Role removed successfully', 'success') db.session.commit() else: flash ('Cannot delete role as it is in use', 'warning') else: flash_errors (form) return render_template("editrole.html", form=form, data=role) return redirect(url_for('gui_blueprint._roles')) ################################################################# # DATES # ######### @gui_blueprint.route("/admin/dates", methods=["GET"]) @login_required def _dates (): if 
is_admin(): a = aliased(Parameter) b = aliased(Parameter) c = aliased(Parameter) d = aliased(DateOfInterest) dates = db.session.query(d.id, d.doi_name, d.doi_regions, d.doi_start_dt, d.doi_end_dt, a.param_value, \ b.param_name, c.param_name).join(a, d.doi_priority==a.id).\ join(b, d.doi_hap==b.id).join(c, d.doi_locked==c.id).order_by(d.doi_start_dt.asc()).all() return render_template("dates.html", data=dates) else: return render_template("403.html", error = "You are not an administrator") @gui_blueprint.route("/admin/editdate/<id>", methods=["GET","POST"]) @login_required def _editdate (id): if is_admin(): doi = DateOfInterest.query.filter_by(id=id).first() form = DOIForm(obj=doi) if request.method == "GET": if doi: form.doi_start_dt.data = datetime.strftime(datetime.strptime(doi.doi_start_dt, '%Y-%m-%d %H:%M:%S'), '%d/%m/%Y %H:%M') form.doi_end_dt.data = datetime.strftime(datetime.strptime(doi.doi_end_dt, '%Y-%m-%d %H:%M:%S'), '%d/%m/%Y %H:%M') return render_template("editdate.html", form=form, data=doi) if form.deletebtn.data: doi = DateOfInterest.query.filter_by(id=id).first() db.session.delete(doi) flash ('Date removed successfully', 'success') db.session.commit() return redirect(url_for('gui_blueprint._dates')) if form.validate_on_submit(): start_dt = datetime.strptime(form.doi_start_dt.data, '%d/%m/%Y %H:%M') end_dt = datetime.strptime(form.doi_end_dt.data, '%d/%m/%Y %H:%M') if form.savebtn.data: if not doi: doi = DateOfInterest(form.doi_name.data, form.doi_priority.data, form.doi_comment.data, \ start_dt, end_dt, form.doi_regions, form.doi_locked, form.doi_hap) db.session.add(doi) form.populate_obj(doi) doi.doi_start_dt = start_dt doi.doi_end_dt = end_dt db.session.commit() flash ('Date saved successfully', 'success') return redirect(url_for('gui_blueprint._dates')) else: flash_errors(form) return render_template("editdate.html", form=form, data=doi) return redirect(url_for('gui_blueprint._dates')) else: return render_template("403.html", error = "You are 
not an administrator") ################################################################# # SEARCH # ########## @gui_blueprint.route("/search", methods=["GET","POST"]) def _search (): a = aliased(DateOfInterest) b = aliased(Booking) form = MainSearchForm() if request.method == 'POST': # get query query = request.form.get('search_input', None) if query: #################################################################### # DATES conditions = [] for qry in query.split(" "): conditions.append((a.doi_comment+' '+a.doi_name).ilike(('%{}%').format(qry))) results1 = db.session.query(a.id, a.doi_name.label("name"), a.doi_comment.label("description"), \ a.doi_start_dt.label("start"), a.doi_end_dt.label("end")).\ filter(or_(*conditions)).all() #################################################################### # BOOKINGS conditions = [] for qry in query.split(" "): conditions.append((b.description+' '+b.title).ilike(('%{}%').format(qry))) results2 = db.session.query(b.id, b.title.label("name"), b.description, \ b.start_dt.label("start"), b.end_dt.label("end")).\ filter(or_(*conditions)).all() results = [] results.extend(results1) results.extend(results2) return render_template("search.html", query=query, results=results, form=form) else: return render_template("search.html", query=None, results=None, form=form) else: return render_template("search.html", query=None, results=None, form=form) ################################################################# # HELP # ######## @gui_blueprint.route("/usage") def _help (): if request.method == 'GET': help = db.session.query(Parameter.param_name, Parameter.param_value).filter(Parameter.param_group==113).order_by(Parameter.id.asc()).all() return render_template("help.html", data = help) ################################################################# # BOOKINGS # ############ @gui_blueprint.route("/bookings", methods=["GET"]) def _bookings (): # Show the booking calendar view if request.method == "GET": form = 
ComplexNameSelectForm() defdate = request.args.get("defdate", datetime.now()) return render_template("calendar.html", form=form, defdate=defdate) # This is waaaayy too long!! @gui_blueprint.route("/editbooking/<id>", methods=["GET","POST"]) def _editbooking (id): #for key, value in request.form.items(): # print(key, value) booking = Booking.query.filter_by(id=id).first() form = BookingForm(obj=booking) if request.method == "GET": complex = Complex.query.filter(Complex.id==int(form.complex.data)).first() # Get complex object from query form.tmp_date.data = datetime.strptime(form.start_dt.data.split()[0], '%Y-%m-%d').strftime('%d-%m-%Y') form.tmp_start_t.data = form.start_dt.data.split()[1] form.tmp_end_t.data = form.end_dt.data.split()[1] form.complex_text.data = complex.complex_name flash_errors(form) return render_template("editbooking.html", form=form) if request.method == "POST": if request.form.get("deletebtn", False): db.session.delete(booking) db.session.commit() flash("Booking deleted successfully", 'success') return redirect(url_for('gui_blueprint._index')) # check from initial select form submitted as "Next" if request.form.get("nextbtn", False): cplx = int(request.form.get("complex_select", 1)) # Get Complex ID complex = Complex.query.filter(Complex.id==int(cplx)).first() # Get complex object from query s_time = complex.complex_push_start e_time = complex.complex_push_end form.start_dt.default = request.form.get("start") + " " + s_time form.end_dt.default = request.form.get("start") + " " + e_time form.tmp_date.default = request.form.get("start").replace("-","/") form.tmp_start_t.default = s_time form.tmp_end_t.default = e_time form.owner_id.data = get_user() if is_day_allowed(request.form.get("start"), complex.complex_push_days): form.approval_required.default = "0" # may change once the booking has been checked else: form.approval_required.default = "1" form.complex.default = request.form.get("complex_select", 1) form.complex_text.default = 
complex.complex_name form.process() return render_template("editbooking.html", form=form) # This is the actual booking page and we're trying to save the record # VALID if form.validate_on_submit(): # We have made it to the process/checking section. Data is
return opts class ZabbixAPIEvent(ZabbixAPISubClass): @checkauth @dojson('event.get') def get(self,**opts): """ * Get events data * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $options * @param array $options['itemids'] * @param array $options['hostids'] * @param array $options['groupids'] * @param array $options['eventids'] * @param array $options['applicationids'] * @param array $options['status'] * @param array $options['templated_items'] * @param array $options['editable'] * @param array $options['extendoutput'] * @param array $options['count'] * @param array $options['pattern'] * @param array $options['limit'] * @param array $options['order'] * @return array|int item data as array or false if error """ return opts @checkauth @dojson('event.add') def add(self,**opts): """ * Add events ( without alerts ) * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $events multidimensional array with events data * @param array $events[0,...]['source'] * @param array $events[0,...]['object'] * @param array $events[0,...]['objectid'] * @param array $events[0,...]['clock'] OPTIONAL * @param array $events[0,...]['value'] OPTIONAL * @param array $events[0,...]['acknowledged'] OPTIONAL * @return boolean """ return opts @checkauth @dojson('event.delete') def delete(self,**opts): """ * Delete events by eventids * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $eventids * @param array $eventids['eventids'] * @return boolean """ return opts @checkauth @dojson('event.deleteByTriggerIDs') def deleteByTriggerIDs(self,**opts): """ * Delete events by triggerids * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $triggerids * @return boolean """ return opts @checkauth @dojson('event.acknowledge') def acknowledge(self,**opts): """ events eventids triggers triggerids message """ return opts class ZabbixAPIGraph(ZabbixAPISubClass): @checkauth 
@dojson('graph.get') def get(self,**opts): """ * Get graph data * * <code> * $options = array( * array 'graphids' => array(graphid1, graphid2, ...), * array 'itemids' => array(itemid1, itemid2, ...), * array 'hostids' => array(hostid1, hostid2, ...), * int 'type' => 'graph type, chart/pie' * boolean 'templated_graphs' => 'only templated graphs', * int 'count' => 'count', * string 'pattern' => 'search hosts by pattern in graph names', * integer 'limit' => 'limit selection', * string 'order' => 'deprecated parameter (for now)' * ); * </code> * * @static * @param array $options * @return array|boolean host data as array or false if error """ return opts @checkauth @dojson('graph.getObjects') def getObjects(self,**opts): """ * Get graphid by graph name * * <code> * $graph_data = array( * *string 'graph' => 'graph name' * ); * </code> * * @static * @param array $graph_data * @return string|boolean graphid """ return opts @checkauth @dojson('graph.add') def add(self,**opts): """ * Add graph * * <code> * $graphs = array( * *string 'name' => null, * int 'width' => 900, * int 'height' => 200, * int 'ymin_type' => 0, * int 'ymax_type' => 0, * int 'yaxismin' => 0, * int 'yaxismax' => 100, * int 'ymin_itemid' => 0, * int 'ymax_itemid' => 0, * int 'show_work_period' => 1, * int 'show_triggers' => 1, * int 'graphtype' => 0, * int 'show_legend' => 0, * int 'show_3d' => 0, * int 'percent_left' => 0, * int 'percent_right' => 0 * ); * </code> * * @static * @param array $graphs multidimensional array with graphs data * @return boolean """ return opts @checkauth @dojson('graph.update') def update(self,**opts): """ * Update graphs * * @static * @param array $graphs multidimensional array with graphs data * @return boolean """ return opts @checkauth @dojson('graph.delete') def delete(self,**opts): """ * Delete graphs * * @static * @param _array $graphs * @param array $graphs['graphids'] * @return boolean """ return opts @checkauth @dojson('graph.addItems') def addItems(self,**opts): """ 
* Add items to graph * * <code> * $items = array( * *string 'graphid' => null, * array 'items' => ( * 'item1' => array( * *int 'itemid' => null, * int 'color' => '000000', * int 'drawtype' => 0, * int 'sortorder' => 0, * int 'yaxisside' => 1, * int 'calc_fnc' => 2, * int 'type' => 0, * int 'periods_cnt' => 5, * ), ... ) * ); * </code> * * @static * @param array $items multidimensional array with items data * @return boolean """ return opts @checkauth @dojson('graph.deleteItems') def deleteItems(self,**opts): """ /** * Delete graph items * * @static * @param array $items * @return boolean */ """ return opts class ZabbixAPIGraphItem(ZabbixAPISubClass): @checkauth @dojson('graphitem.get') def get(self,**opts): """ * Get GraphItems data * * @static * @param array $options * @return array|boolean """ return opts @checkauth @dojson('graphitem.getObjects') def getObjects(self,**opts): """ * Get graph items by graph id and graph item id * * @static * @param _array $gitem_data * @param array $gitem_data['itemid'] * @param array $gitem_data['graphid'] * @return string|boolean graphid """ return opts @checkauth @dojson('maintenance.get') def get(self,**opts): """ * Get maintenances data * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param array $options * @param array $options['itemids'] * @param array $options['hostids'] * @param array $options['groupids'] * @param array $options['triggerids'] * @param array $options['maintenanceids'] * @param boolean $options['status'] * @param boolean $options['templated_items'] * @param boolean $options['editable'] * @param boolean $options['count'] * @param string $options['pattern'] * @param int $options['limit'] * @param string $options['order'] * @return array|int item data as array or false if error """ return opts @checkauth @dojson('maintenance.getObjects') def getObjects(self,**opts): """ * Get Maintenance ID by host.name and item.key * * {@source} * @access public * @static * @since 1.8 * @version 1 * * 
@param array $maintenance * @param array $maintenance['name'] * @param array $maintenance['hostid'] * @return int|boolean """ return opts @checkauth @dojson('maintenance.add') def add(self,**opts): """ * Add maintenances * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $maintenances * @param array $maintenance['name'] * @param array $maintenance['hostid'] * @return boolean """ return opts @checkauth @dojson('maintenance.update') def update(self,**opts): """ * Update maintenances * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $maintenances * @param array $maintenance['name'] * @param array $maintenance['hostid'] * @return boolean """ return opts @checkauth @dojson('maintenance.delete') def delete(self,**opts): """ * Delete maintenances * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $maintenanceids * @param _array $maintenanceids['maintenanceids'] * @return boolean """ return opts class ZabbixAPIMap(ZabbixAPISubClass): @checkauth @dojson('map.get') def get(self,**opts): """ * Get Map data * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $options * @param array $options['nodeids'] Node IDs * @param array $options['groupids'] HostGroup IDs * @param array $options['hostids'] Host IDs * @param boolean $options['monitored_hosts'] only monitored Hosts * @param boolean $options['templated_hosts'] include templates in result * @param boolean $options['with_items'] only with items * @param boolean $options['with_monitored_items'] only with monitored items * @param boolean $options['with_historical_items'] only with historical items * @param boolean $options['with_triggers'] only with triggers * @param boolean $options['with_monitored_triggers'] only with monitored triggers * @param boolean $options['with_httptests'] only with http tests * @param boolean $options['with_monitored_httptests'] only with monitored http tests * @param 
boolean $options['with_graphs'] only with graphs * @param boolean $options['editable'] only with read-write permission. Ignored for SuperAdmins * @param int $options['extendoutput'] return all fields for Hosts * @param int $options['count'] count Hosts, returned column name is rowscount * @param string $options['pattern'] search hosts by pattern in host names * @param int $options['limit'] limit selection * @param string $options['sortorder'] * @param string $options['sortfield'] * @return array|boolean Host data as array or false if error """ return opts @checkauth @dojson('map.add') def add(self,**opts): """ * Add Map * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $maps * @param string $maps['name'] * @param array $maps['width'] * @param int $maps['height'] * @param string $maps['backgroundid'] * @param string $maps['highlight'] * @param array $maps['label_type'] * @param int $maps['label_location'] * @return boolean | array """ return opts @checkauth @dojson('update.') def update(self,**opts): """ * Update Map * * {@source} * @access public * @static * @since 1.8 * @version 1 * * @param _array $maps multidimensional array with Hosts data * @param string
<reponame>s-kganz/yodapy<filename>yodapy/datasources/ooi/m2m_client.py # -*- coding: utf-8 -*- """ m2m_client.py Client module for the M2M Interface, originally developed by <NAME>. https://github.com/kerfoot/uframe-m2m """ from __future__ import (division, absolute_import, print_function, unicode_literals) import os import logging import requests import re from dateutil import parser from dateutil.relativedelta import relativedelta as tdelta import datetime import pytz requests.packages.urllib3.disable_warnings() from yodapy.utils.files import CREDENTIALS_FILE HTTP_STATUS_OK = 200 HTTP_STATUS_NOT_FOUND = 404 DEPLOYMENT_STATUS_TYPES = ['all', 'active', 'inactive'] _valid_relativedeltatypes = ('years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds') class M2MClient: def __init__(self, timeout=120, api_username=None, api_token=None, **kwargs): """Lightweight OOI UFrame client for making GET requests to the UFrame API via the machine to machine (m2m) API or directly to UFrame. Parameters: base_url: UFrame API base url which must begin with https:// kwargs: m2m: If true <Default>, specifies that all requests should be created and sent throught the m2m API timeout: request timeout, in seconds api_username: API username from the UI user settings api_token: API password from the UI user settings """ self._base_url = 'https://ooinet.oceanobservatories.org' self._m2m_base_url = None self._timeout = timeout self._api_username = api_username self._api_token = api_token self._session = requests.Session() self._pool_connections = kwargs.get('pool_connections', 100) self._pool_maxsize = kwargs.get('pool_maxsize', 100) a = requests.adapters.HTTPAdapter(pool_connections=self._pool_connections, # noqa pool_maxsize=self._pool_maxsize) self._session.mount('https://', a) self._is_m2m = True self._instruments = [] self._subsites = [] self._instrument_streams = [] self._streams = [] self._toc = None self._logger = logging.getLogger(__name__) # properties for last m2m request 
self._request_url = None self._response = None self._status_code = None self._reason = None self._response_headers = None # Set the base url self._logger.info( 'Creating M2mClient instance ({:s})'.format(self._base_url)) self.base_url = self._base_url @property def base_url(self): return self._base_url @property def api_username(self): return self._api_username @property def api_token(self): return self._api_token @base_url.setter def base_url(self, url): self._logger.debug('Setting UFrame credentials.') self._use_existing_credentials() self._logger.debug('Setting UFrame base url: {:s}'.format(url)) if not url: self._logger.warning('No UFrame base_url specified') return if not url.startswith('http'): self._logger.warning('base_url must start with http') return self._base_url = url.strip('/') self._m2m_base_url = '{:s}/api/m2m'.format(self._base_url) self._logger.debug('UFrame base_url: {:s}'.format(self.base_url)) self._logger.debug( 'UFrame m2m base_url: {:s}'.format(self.m2m_base_url)) # Try to get the sensor invetory subsite list to see if we're able to connect self.fetch_subsites() if self._status_code != HTTP_STATUS_OK: self._logger.critical('Unable to connect to UFrame instance') self._base_url = None # self._valid_uframe = False return # Create the instrument list self._create_instrument_list() @property def is_m2m(self): return self._is_m2m @is_m2m.setter def is_m2m(self, status): """Configures the instance to send requests either via the m2m <Default> API or directly to the UFrame API""" if type(status) != bool: self._logger.error('status must be True or False') return self._is_m2m = status @property def m2m_base_url(self): return self._m2m_base_url @property def timeout(self): return self._timeout @timeout.setter def timeout(self, seconds): if type(seconds) != int: self._logger.warning('timeout must be an integer') return self._timeout = seconds @property def last_request_url(self): return self._request_url @property def last_response(self): return 
self._response @property def last_status_code(self): return self._status_code @property def last_reason(self): return self._reason @property def instruments(self): return self._instruments @property def streams(self): return self._streams @property def toc(self): return self._toc def fetch_table_of_contents(self): toc = self.build_and_send_request(12576, 'sensor/inv/toc') if self.last_status_code != HTTP_STATUS_OK: self._logger.error('Failed to create instruments list') return # Save the table of contents if we fetch it self._toc = toc return True def fetch_subsites(self): """Fetch all registered subsites from the /sensor/inv API endpoint""" self._logger.debug('Fetching sensor subsites') port = 12576 end_point = '/sensor/inv' request_url = self.build_request(port, end_point) # Send the request self.send_request(request_url) if self._status_code == HTTP_STATUS_OK: return self._response else: return None def fetch_deployment_subsites(self): """Fetch all registered subsites from the /events/deployment/inv API endpoint""" self._logger.debug('Fetching deployment subsites') port = 12587 end_point = '/events/deployment/inv' request_url = self.build_request(port, end_point) # Send the request self.send_request(request_url) if self._status_code == HTTP_STATUS_OK: return self._response else: return None def fetch_instrument_streams(self, ref_des): """Fetch all streams produced by the fully-qualified reference designator""" self._logger.debug('Fetching {:s} streams'.format(ref_des)) r_tokens = ref_des.split('-') if len(r_tokens) != 4: self._logger.error( 'Incomplete reference designator specified {:s}'.format( ref_des)) return None port = 12576 end_point = '/sensor/inv/{:s}/{:s}/{:s}-{:s}/metadata/times'.format( r_tokens[0], r_tokens[1], r_tokens[2], r_tokens[3]) request_url = self.build_request(port, end_point) # Send the request self.send_request(request_url) if self._status_code == HTTP_STATUS_OK: return self._response else: return [] def fetch_instrument_parameters(self, 
ref_des): """Fetch all parameters in the streams produced by the fully-qualified reference designator""" self._logger.debug( '{:s} - Fetching instrument parameters'.format(ref_des)) r_tokens = ref_des.split('-') port = 12576 end_point = '/sensor/inv/{:s}/{:s}/{:s}-{:s}/metadata/parameters'.format( r_tokens[0], r_tokens[1], r_tokens[2], r_tokens[3]) request_url = self.build_request(port, end_point) # Send the request self.send_request(request_url) if self._status_code == HTTP_STATUS_OK: return self._response else: return None def fetch_instrument_metadata(self, ref_des): """Fetch all streams and all parameters produced by the fully-qualified reference designator""" self._logger.debug( '{:s} - Fetching instrument metadata'.format(ref_des)) r_tokens = ref_des.split('-') if len(r_tokens) != 4: self._logger.error( 'Incomplete reference designator specified {:s}'.format( ref_des)) return None port = 12576 end_point = '/sensor/inv/{:s}/{:s}/{:s}-{:s}/metadata'.format( r_tokens[0], r_tokens[1], r_tokens[2], r_tokens[3]) request_url = self.build_request(port, end_point) # Send the request self.send_request(request_url) if self._status_code == HTTP_STATUS_OK: return self._response else: return None def fetch_instrument_deployments(self, ref_des): """Fetch all deployment events for the fully or partially qualified reference designator""" self._logger.debug('Fetching {:s} deployments'.format(ref_des)) port = 12587 end_point = '/events/deployment/query?refdes={:s}'.format(ref_des) request_url = self.build_request(port, end_point) # Send the request self.send_request(request_url) if self._status_code == HTTP_STATUS_OK: return self._response else: return None def fetch_stream_metadata(self, stream_name): """ Fetch stream information given its name. 
Args: stream_name: Returns: """ self._logger.debug(f'{stream_name} - Fetching stream metadata') port = 12575 end_point = f'/stream/byname/{stream_name}' request_url = self.build_request(port, end_point) # Send the request self.send_request(request_url) if self._status_code == HTTP_STATUS_OK: return self._response else: return None def filter_deployments_by_status(self, deployments, status='all'): if status not in DEPLOYMENT_STATUS_TYPES: self._logger.error( 'Invalid deployment status type specified {:s}'.format(status)) return filtered_deployments = [] if status == 'all': return deployments now = datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) if status == 'active': for d in deployments: if not d['eventStopTime']: filtered_deployments.append(d) continue dt1 = datetime.datetime.utcfromtimestamp( d['eventStopTime'] / 1000).replace(tzinfo=pytz.UTC) if dt1 >= now: filtered_deployments.append(d) else: for d in deployments: if not d['eventStopTime']: continue dt1 = datetime.datetime.utcfromtimestamp( d['eventStopTime'] / 1000).replace(tzinfo=pytz.UTC) if dt1 < now: filtered_deployments.append(d) return filtered_deployments def search_instruments(self, ref_des): """Search all instruments for the fully-qualified reference designators matching the fully or partially-qualified ref_des string""" return [i for i in self._instruments if i.find(ref_des) > -1] def stream_to_instruments(self, stream): """Return the list of instruments that produce the specified full or partial stream name""" return [i for i in self._instrument_streams if i['stream'].find(stream) > -1] def _use_existing_credentials(self): if os.path.exists(CREDENTIALS_FILE): import json with open(CREDENTIALS_FILE) as f: creds = json.load(f)['ooi'] self._api_username = creds['username'] self._api_token = creds['api_key'] else: self._logger.error('Please authenticate by using yodapy.utils.set_ooi_credentials_file!') # noqa def build_and_send_request(self, port, end_point): """Build and send the request url for 
the specified port and end_point""" request_url = self.build_request(port, end_point) # Send the request self.send_request(request_url) if self._status_code == HTTP_STATUS_OK: return self._response else: return None def build_request(self, port, end_point): """Build the request url for the specified port and end_point""" if self._is_m2m: url = '{:s}/{:0.0f}/{:s}'.format(self._m2m_base_url, port, end_point.strip('/')) else: url = '{:s}:{:0.0f}/{:s}'.format(self._base_url, port, end_point.strip('/')) return url def send_request(self, url): """Send the request url through either the m2m API or directly to UFrame. The method used is determined by the is_m2m property. If set to True, the request is sent through the m2m API. If set to False, the request is sent directly to UFrame""" # if not self._valid_uframe: # self._logger.critical('Unable to connect to UFrame instance') # return self._request_url = url self._response = None self._status_code = None self._reason = None self._response_headers = None if self.is_m2m and not url.startswith(self.m2m_base_url): self._logger.error( 'URL does not point to the m2m base url ({:s})'.format( self.m2m_base_url)) return elif not url.startswith(self.base_url): self._logger.error( 'URL does not point to the base url ({:s})'.format( self.base_url)) return try: self._logger.debug('Sending GET request: {:s}'.format(url)) if self._api_username and self._api_token: r = self._session.get(url, auth=(self._api_username, self._api_token), timeout=self._timeout, verify=False) else: r = self._session.get(url, timeout=self._timeout, verify=False) except ( requests.exceptions.ReadTimeout, requests.exceptions.MissingSchema, requests.exceptions.ConnectionError) as e: self._logger.error('{:} - {:s}'.format(e, url)) return self._status_code = r.status_code self._reason = r.reason if self._status_code == HTTP_STATUS_NOT_FOUND: self._logger.warning('{:s}: {:s}'.format(r.reason, url)) elif self._status_code != HTTP_STATUS_OK: self._logger.error( 
'Request failed {:s} ({:s})'.format(url, r.reason)) self._response_headers = r.headers try: self._response = r.json() # Return the json response if there was one return self._response except ValueError as e: self._logger.warning('{:} ({:s})'.format(e, url)) self._response = r.text return None def _create_instrument_list(self): self._instruments = [] self._streams = [] self._instrument_streams = [] self._logger.debug('Fetching UFrame table of contents') if not self._toc: self.fetch_table_of_contents() self._logger.debug('Creating instruments list') # Create an array of dicts with the instrument name and the stream it produces self._instrument_streams = [ {'instrument': i['reference_designator'], 'stream': s['stream']} for i in self._toc['instruments'] for s in i['streams']] # Create the unique list of streams streams = list(set([i['stream'] for i in self._instrument_streams])) streams.sort() self._streams
<reponame>S3v3ru5/Monkey """Evaluator of Monkey Language""" from typing import List import operator as py_operator from monkey.ast import ast from monkey.evaluator import mobjects from monkey.evaluator.environment import Environment from monkey.evaluator.builtins import builtins TRUE = mobjects.Boolean(True) FALSE = mobjects.Boolean(False) NULL = mobjects.Null() def construct_boolean(value: bool) -> mobjects.Boolean: return TRUE if value else FALSE def m_type(value_obj: mobjects.Object) -> str: """return type of the value_obj.""" return value_obj.type() def m_is_type(value_obj: mobjects.Object, obj_type: str) -> bool: """check if value_obj is of given type""" return m_type(value_obj) == obj_type def m_error(msg) -> mobjects.Error: """create an error in Monkey""" return mobjects.Error(msg) def m_is_error(obj: mobjects.Object) -> bool: return m_is_type(obj, mobjects.ERROR_OBJ) def m_is_true(value_obj: mobjects.Object) -> bool: """determine whether an value/object evaluates to true. Check whether the given value is considered true or not. Integer values are considered true for all values other than zero. Null values are considered false. Args: value_obj: object to check. Returns: True if value_obj is considered "true" in Monkey else False. """ if m_is_type(value_obj, mobjects.BOOLEAN_OBJ): return value_obj.value elif m_is_type(value_obj, mobjects.NULL_OBJ): return False elif m_is_type(value_obj, mobjects.INTEGER_OBJ): return value_obj.value != 0 return True def m_eval_identifier(node: ast.Identifier, env: Environment) -> mobjects.Object: """evaluate a identifier.""" value = env.get(node.name) if value is not None: return value value = builtins.get(node.name) if value is not None: return value return m_error(f"name '{node.name}' is not defined") def m_eval_not_operator(right: mobjects.Object) -> mobjects.Boolean: """evaluate boolean not operator Args: right: value to apply on. 
Returns: boolean value representing the result examples: "!true" -> Boolean(False) "!false" -> Boolean(True) """ if m_is_true(right): return FALSE return TRUE def m_eval_prefix_sub_operator(right: mobjects.Object) -> mobjects.Integer: """evaluate prefix sub(minus) operator. Args: right: right side of prefix expression Returns: returns NULL if right is not of type Integer else returns new Integer object with value of -1*right.value. """ if not m_is_type(right, mobjects.INTEGER_OBJ): return m_error(f"unsupported operand type for -: '{right.type()}'") return mobjects.Integer(-right.value) def m_eval_prefix_expression(operator: str, right: mobjects.Object) -> mobjects.Object: """evaluate prefix(unary) expression. Args: operator: "-" or "!" right: expression to apply operator to. Returns: resultant value. """ if operator == "!": return m_eval_not_operator(right) elif operator == "-": return m_eval_prefix_sub_operator(right) return m_error(f"unknown prefix operator: {operator}") def m_eval_infix_integer_expression( left: mobjects.Object, operator: str, right: mobjects.Object) -> mobjects.Object: """Evaluate infix(binary) integer expresssion.""" func = { "+": py_operator.add, "-": py_operator.sub, "*": py_operator.mul, "/": py_operator.floordiv, }.get(operator) if func is not None: result = func(left.value, right.value) return mobjects.Integer(value = result) func = { "<": py_operator.lt, ">": py_operator.gt, "==": py_operator.eq, "!=": py_operator.ne, }.get(operator) if func is not None: return construct_boolean(func(left.value, right.value)) return m_error(f"unknown infix operator {operator}: {left} {operator} {right}") def m_eval_infix_string_expression( left: mobjects.String, operator: str, right: mobjects.String) -> mobjects.Object: """Evaluate string operators Args: left: lhs of the expression operator: "+", "==" or "!=" right: rhs of the expression Returns: returns error if given operator is not one of the above mentioned otherwise returns the evaluated result. 
""" if operator == "+": return mobjects.String(left.value + right.value) elif operator == "==": return construct_boolean(left.value == right.value) elif operator == "!=": return construct_boolean(left.value != right.value) return m_error(f"unsupported operand type for {operator}: 'STRING' and 'STRING'") def m_eval_infix_expression( left: mobjects.Object, operator: str, right: mobjects.Object) -> mobjects.Object: """evaluate infix(binary) expression. Args: left: left side(lhs) of the expression. operator: one of "+", "-", "*", "/", "==", "<", ">", "!=" right: right side(rhs) of the expression. Returns: return NULL if given operator is not binary operator. """ if m_is_type(left, mobjects.INTEGER_OBJ) and m_is_type(right, mobjects.INTEGER_OBJ): return m_eval_infix_integer_expression(left, operator, right) elif m_is_type(left, mobjects.STRING_OBJ) and m_is_type(right, mobjects.STRING_OBJ): return m_eval_infix_string_expression(left, operator, right) elif operator == "==": return construct_boolean(left == right) elif operator == "!=": return construct_boolean(left != right) return m_error(f"unsupported operand type for {operator}: '{left.type()}' and '{right.type()}'") def m_eval_if_expression(node: ast.IfExpression, env: Environment) -> mobjects.Object: """evaluate if expression. evaluate "if" block if condition is "true" and "else" block otherwise. if condition is "false" and no else is present then "NULL" is returned.result of the last statement is returned whichever block is evaluated. Args: node: root node of "if" expression in ast. Returns: result of last statement of whichever block is executed. """ condition = m_eval(node.condition, env) if m_is_error(condition): return condition if m_is_true(condition): return m_eval(node.consequence, env) if node.alternative is not None: return m_eval(node.alternative, env) return NULL def m_eval_while_expression(node: ast.WhileExpression, env: Environment) -> mobjects.Object: """evaluate while expression. 
evaluate while body repeatedly until the condition is "false" or a return statement is evaluated. """ result = NULL while True: condition = m_eval(node.condition, env) if not m_is_true(condition): return result result = m_eval_block_statement(node.body, env) if (m_is_type(result, mobjects.RETURN_VALUE_OBJ) or m_is_type(result, mobjects.ERROR_OBJ)): return result def m_eval_array_index(left: mobjects.Array, index: mobjects.Integer) -> mobjects.Object: index = index.value if index < 0: return m_error("negative indexes are not supported") if not index < len(left.elements): return m_error(f"array index({index}) out of range") return left.elements[index] def m_eval_index_expression(left: mobjects.Object, index: mobjects.Object) -> mobjects.Object: """evaluate index expression. Args: left: Object to access from. index: index of the element to acess. Returns: value at index if index is in range of array len. returns error object if index is out of bounds or index is negative. """ if m_is_type(left, mobjects.ARRAY_OBJ) and m_is_type(index, mobjects.INTEGER_OBJ): return m_eval_array_index(left, index) return m_error(f"{left.type()} is not subscriptable") def m_eval_call_expression( function: mobjects.Object, args: List[mobjects.Object]) -> mobjects.Object: """Evaluate a call expression(function call). Args: function: function to call. args: arguments to pass to given function. Returns: returns the result of function call. 
""" if m_is_type(function, mobjects.BUILTIN_OBJ): return function.function(*args) if not m_is_type(function, mobjects.FUNCTION_OBJ): return m_error(f"{function.type()} is not callable") if len(function.parameters) != len(args): msg = f"function expected {len(function.parameters)} arguments but" msg += f" {len(args)} were given" return m_error(msg) extended_env = Environment(outer=function.env) for ind, parameter in enumerate(function.parameters): extended_env.set(parameter.name, args[ind]) result = m_eval(function.body, extended_env) if m_is_type(result, mobjects.RETURN_VALUE_OBJ): return result.value return result def m_eval_expressions( expressions: List[ast.Expression], env: Environment) -> List[mobjects.Object]: """evaluate list of expressions. Args: expressions: list of expressions to evaluate. env: current environment to evaluate expressions in. Returns: result: list of result objects corresponding to each expression in given list. error: None if all expressions are evaluated without any error else corresponding error. """ result = [] for expr in expressions: value = m_eval(expr, env) if m_is_error(value): return result, value result.append(value) return result, None def m_eval_statements(stmts: List[ast.Statement], env: Environment) -> mobjects.Object: """evaluate list of statements. linear evaluation of the statements stops at the first "return" statement if there is one. value of "return" statement is returned when a "return" statement is evaluated. Args: stmts: List of statements. Returns: result of evaluating last statement. """ result = NULL for stmt in stmts: result = m_eval(stmt, env) if m_is_type(result, mobjects.RETURN_VALUE_OBJ): return result.value return result def m_eval_block_statement(block: ast.BlockStatement, env: Environment) -> mobjects.Object: """evaluate block of statements. Args: block: object of type ast.BlockStatement representing block of statements in syntax tree. Returns: result of last statement evaluated. 
""" for stmt in block.statements: result = m_eval(stmt, env) if result is not None: if (m_is_type(result, mobjects.RETURN_VALUE_OBJ) or m_is_type(result, mobjects.ERROR_OBJ)): return result return result def m_eval_program(program: ast.Program, env: Environment) -> mobjects.Object: """evaluate Program node.""" result = NULL for stmt in program.statements: result = m_eval(stmt, env) if m_is_type(result, mobjects.RETURN_VALUE_OBJ): return result.value elif m_is_type(result, mobjects.ERROR_OBJ): return result return result def m_eval(node: ast.Node, env: Environment) -> mobjects.Object: """Monkey evaluator function. Args: node: present node in syntax tree to evaluate. Returns: An instance of mobjects.Object is returned representing the result of evaluating given node. """ if isinstance(node, ast.Program): return m_eval_program(node, env) elif isinstance(node, ast.LetStatement): value = m_eval(node.expression, env) if m_is_error(value): return value name = node.identifier.name env.set(name, value) return NULL elif isinstance(node, ast.ReturnStatement): value = m_eval(node.expression, env) if m_is_error(value): return value return mobjects.ReturnValue(value = value) elif isinstance(node, ast.ExpressionStatement): return m_eval(node.expression, env) elif isinstance(node, ast.BlockStatement): return m_eval_block_statement(node, env) elif isinstance(node, ast.PrefixExpression): right = m_eval(node.right, env) if m_is_error(right): return right return m_eval_prefix_expression(node.operator, right) elif isinstance(node, ast.InfixExpression): left = m_eval(node.left, env) if m_is_error(left): return left right = m_eval(node.right, env) if m_is_error(right): return right return m_eval_infix_expression(left, node.operator, right) elif isinstance(node, ast.IfExpression): return m_eval_if_expression(node, env) elif isinstance(node, ast.IndexExpression): left = m_eval(node.left, env) if m_is_error(left): return left index = m_eval(node.index, env) if m_is_error(index): return 
index return m_eval_index_expression(left, index) elif isinstance(node, ast.CallExpression): function = m_eval(node.function, env) if m_is_error(function): return function args, error = m_eval_expressions(node.arguments, env) if error is not
<gh_stars>10-100 # Copyright (c) 2017-2019 Carnegie Mellon University. All rights reserved. # Use of this source code is governed by BSD 3-clause license. import argparse import base64 import datetime from datetime import timedelta from datetime import datetime import json import queue import os import socket import struct import sys import threading import time import traceback import sys import cv2 import numpy import requests import deepgaze import get_time as gt import math import logging from logging.handlers import WatchedFileHandler import traceback import gaze_3d from centroidtracker import * from headpose import * from render import * from process import * default_schema = "edusense-video" default_keyword = "edusense-keyword" RTSP=False skipframe = 0 class SocketReaderThread(threading.Thread): def __init__(self, queue, server_address, keep_frame_number, logger_pass, profile=False): threading.Thread.__init__(self) self.queue = queue self.server_address = server_address self.keep_frame_number = keep_frame_number self.frame_number = 0 self.profile = profile self.logger_base = logger_pass.getChild('reader_thread') self.logger = logging.LoggerAdapter(self.logger_base, {}) def start(self): logger = self.logger sock = None if isinstance(self.server_address, tuple): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) else: # Make sure the socket does not already exist try: os.unlink(self.server_address) except: if os.path.exists(self.server_address): logger.info("Socket already exists") raise # create a unix domain socket sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) # bind the socket to the port sock.bind(self.server_address) logger.info("Bound socket to port") # listen for incoming connections sock.listen(1) # wait for incoming connection self.conn, self.client_address = sock.accept() # mark this is running self.is_running = True # start super(SocketReaderThread, self).start() def stop(self): self.is_running = False def run(self): logger = 
self.logger try: # receive data in small chunks while self.is_running: socket_read_start_time = time.time() # Read size of the payload size_data = self.conn.recv(4) msg_len = struct.unpack('=I', size_data)[0] # termination signal if msg_len == 0: break chunks = [] bytes_recvd = 0 # Read payload while bytes_recvd < msg_len: chunk = self.conn.recv(min(msg_len - bytes_recvd, 1048576)) if chunk == b'': logger.info("socket connection broken") raise RuntimeError("socket connection broken") chunks.append(chunk) bytes_recvd = bytes_recvd + len(chunk) msg = b''.join(chunks) self.frame_number += 1 numbered_msg = (None, msg) if self.keep_frame_number \ else (self.frame_number, msg) if self.profile: logger.info('socket_read,%f' % (time.time() - socket_read_start_time)) if self.queue is not None: self.queue.put(numbered_msg, True, None) except Exception as e: is_running = False logger.info("Exception thrown") traceback.print_exc(file=sys.stdout) finally: self.conn.close() class ConsumerThread(threading.Thread): def __init__(self, input_queue, process_real_time, process_gaze, gaze_3d, channel, area_of_interest,fps,start_date,start_time, logger_pass, backend_params=None, file_params=None, profile=False,skipframe=0): threading.Thread.__init__(self) self.input_queue = input_queue self.process_real_time = process_real_time self.channel = channel self.counter= 0; self.skipframe = skipframe self.fps=fps; self.currentframe = 1; self.start_date=start_date; self.start_time=start_time; if area_of_interest is not None: self.area_of_interest = np.array(area_of_interest).reshape((-1, 1, 2)) else: self.area_of_interest = None # Initialize file posting self.file_params = file_params if file_params is not None and file_params['video'] is not None: self.video_out = cv2.VideoWriter( os.path.join(self.file_params['base_dir'], file_params['video']), cv2.VideoWriter_fourcc(*'XVID'), 5.0, (1920, 1080)) else: self.video_out = None # Initialize backend posting self.backend_params = backend_params # 
Initialize centroid tracker self.centroid_tracker = CentroidTracker() self.centroid_time_live = 10 self.centroid_initialize_time = 1 self.state = { 'prev_objects': None, 'prev_pose': None, 'current_pose': None, 'prev_time': None } # configure machine learning self.process_gaze = process_gaze self.gaze_3d = gaze_3d self.profile = profile self.logger_base = logger_pass.getChild('consumer_thread') self.logger = logging.LoggerAdapter(self.logger_base, {}) def start(self): self.input_queue = self.input_queue self.is_running = True super(ConsumerThread, self).start() def run(self): logger = self.logger while self.is_running: if self.process_real_time: numbered_datum = None cnt = 0 try: while True: numbered_datum_temp = self.input_queue.get_nowait() numbered_datum = numbered_datum_temp cnt += 1 except Exception: if numbered_datum is None: continue # process data logger.info("Starting to process frame") raw_image, frame_data = self.process_frame(numbered_datum) if not RTSP: time=int(frame_data['frameNumber']/self.fps) frame_data['timestamp']=self.start_date+'T'+str(self.start_time + timedelta(seconds=time))+'Z' logger.info('...........................') logger.info(frame_data['timestamp']) logger.info(frame_data['frameNumber']) logger.info('...........................') # post data self.post_frame(raw_image, frame_data) for i in range(cnt): self.input_queue.task_done() else: while self.currentframe < self.skipframe: self.currentframe = self.currentframe+1 self.input_queue.get() self.input_queue.task_done() self.currentframe = 1 raw_image, frame_data = self.process_frame(self.input_queue.get()) time = float(frame_data['frameNumber'] / self.fps) frame_data['timestamp'] = self.start_date + 'T' + str( self.start_time + timedelta(seconds=time)) + 'Z' logger.info('...........................') logger.info(frame_data['timestamp']) logger.info(frame_data['frameNumber']) logger.info('...........................') # post data self.post_frame(raw_image, frame_data) 
self.input_queue.task_done() def stop(self): self.input_queue.join() self.is_running = False if self.video_out is not None: self.video_out.release() def process_frame(self, numbered_datum): logger = self.logger start_time = time.time() frame_data = None raw_image = None try: json_time = 0 featurization_time = 0 thumbnail_time = 0 interframe_time = 0 frame_number, datum = numbered_datum frame_data = json.loads(datum.decode('utf-8')) if frame_number is not None: frame_data['frameNumber'] = frame_number image_rows = 0 image_cols = 0 has_raw_image = "rawImage" in frame_data.keys() has_thumbnail = "thumbnail" in frame_data.keys() raw_image = None if has_raw_image: image_rows = frame_data["rawImage"]["rows"] image_cols = frame_data["rawImage"]["columns"] image_bytes = base64.standard_b64decode( frame_data["rawImage"]["binary"]) image_array = numpy.frombuffer(image_bytes, dtype=numpy.uint8) raw_image = numpy.reshape( image_array, (image_rows, image_cols, 3)) del frame_data["rawImage"] if not has_thumbnail: logger.info("Start Thumbnail") thumb_start_time = time.time() resized_image = cv2.resize(raw_image, (240, 135)) r, buf = cv2.imencode('.jpg', resized_image, [ int(cv2.IMWRITE_JPEG_QUALITY), 50]) frame_data['thumbnail'] = { 'binary': base64.standard_b64encode(buf).decode('ascii'), 'originalCols': image_cols, 'originalRows': image_rows } thumbnail_time = time.time() - thumb_start_time logger.info("Finish Thumbnail %f", thumbnail_time) elif has_thumbnail: image_rows = frame_data["thumbnail"]["originalRows"] image_cols = frame_data["thumbnail"]["originalColumns"] # Featurization logger.info("Start Featurization") featurization_start_time = time.time() # extract key points bodies = frame_data['people'] rects = [] person_poses = [] if self.area_of_interest is not None: bodies = list(map(lambda b: get_pts_of_interest_from_person(b, self.area_of_interest), bodies)) bodies = list(filter(lambda b: check_body_pts(b['body']), bodies)) for body in bodies: body_keypoints = 
body["body"] face_keypoints = body["face"] if "face" in body.keys() else None # prune body keypoints body_keypoints = prune_body_pts(body_keypoints) body["body"] = body_keypoints pose = get_pose_pts(body_keypoints) body['inference'] = { 'posture': {}, 'face': {}, 'head': {} } # prepare inter-frame tracking box = get_pose_box(pose) rects.append(box.astype("int")) person_poses.append(pose) # Interframe logger.info("Start Interframe") interframe_start_time = time.time() tracking_id = None objects, poses = self.centroid_tracker.update(rects, person_poses) for body in bodies: body_keypoints = body["body"] pose = get_pose_pts(body_keypoints) for (objectID, person_pose) in poses.items(): if pose[1][0] == person_pose[1][0] and pose[1][1] == person_pose[1][1]: body['inference']['trackingId'] = objectID + 1 if self.video_out is not None or (self.file_params is not None and self.file_params['image']): text = "ID {}".format(objectID) cv2.putText(raw_image, text, (person_pose[1][0] - 10, person_pose[1][1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2) cv2.circle( raw_image, (person_pose[1][0], person_pose[1][1]), 4, (0, 255, 0), -1) break interframe_time = time.time() - interframe_start_time logger.info("Finish Interframe %f", interframe_time) if self.channel == 'instructor': y_min = None instructor_body_track = None for (objectID, person_pose) in poses.items(): if y_min is None and person_pose[1][1] > 0 and person_pose[1][0] > 0: y_min = person_pose[1][1] instructor_body_track = person_pose else: if person_pose[1][1] < y_min and person_pose[1][1] > 0 and person_pose[1][0] > 0: y_min = person_pose[1][1] instructor_body_track = person_pose new_bodies = [] if instructor_body_track is not None: for body in bodies: body_keypoints = body["body"] pose = get_pose_pts(body_keypoints) if pose[1][0] == instructor_body_track[1][0] and pose[1][1] == instructor_body_track[1][1]: new_bodies.append(body) break bodies = new_bodies for body in bodies: body_keypoints = body["body"] 
face_keypoints = body["face"] if "face" in body.keys( ) else None pose = get_pose_pts(body_keypoints) # face orientation faceOrientation = None faceOrientation = get_facing_direction(pose) # Sit stand sit_stand, color_stand, pts = predict_sit_stand(body_keypoints) if self.video_out is not None or (self.file_params is not None and self.file_params['image']): cv2.putText(raw_image, sit_stand, (int(pts[1][0])-10, int( pts[1][1])+30), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color_stand, 2, cv2.LINE_AA) # Armpose armpose, color_pose, pts = predict_armpose(body_keypoints) if self.video_out is not None or (self.file_params is not None and self.file_params['image']): cv2.putText(raw_image, armpose, (int( pts[1][0])-10, int(pts[1][1])+10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color_pose, 2, cv2.LINE_AA) raw_image = render_pose_draw( [pose], raw_image, color_pose, color_stand) # Mouth mouth = None smile = None if face_keypoints is not None: mouth, _, smile, _ = predict_mouth(face_keypoints) tvec = None yaw = None pitch = None roll = None gaze_vector = None face = get_face(pose) if (self.gaze_3d): if face is not None: bboxes = [ face[0][0], face[0][1], face[1][0], face[1][1] ] bboxes = np.array(bboxes) bboxes = bboxes.reshape(-1, 4) # print(face) # logger.info('.......') tvec, rvec, point_2d,face = gaze_3d.get_3d_pose(raw_image, bboxes,face) ##TODO-: change the face variablr # logger.info(point_2d) tvec = tvec.tolist() rvec = rvec.tolist() # print(face) if point_2d[0][0] is not None: gaze_vector = point_2d pitch, roll, yaw = rvec if pitch is not None: # convert to degree pitch = (pitch * 180) / math.pi roll = (roll * 180) / math.pi yaw = (yaw * 180) / math.pi elif (self.process_gaze): tvec = get_3d_head_position(pose, raw_image.shape) # face box if face is not None: face_crop = raw_image[face[0][1]:face[1][1], face[0][0]:face[1][0]] if face_crop.shape[0] >= 64 and face_crop.shape[1] >= 64: yaw, pitch, roll, gaze_start, gaze_stop = get_head_pose_vector( face_crop, face) yaw = 
yaw.flatten().tolist()[0] pitch = pitch.flatten().tolist()[0] roll = roll.flatten().tolist()[0] gaze_vector = [gaze_start, gaze_stop] cv2.line(raw_image, gaze_start, gaze_stop, (255, 255, 255), 2) if armpose is not None: body['inference']['posture']['armPose'] = armpose if sit_stand is not None: body['inference']['posture']['sitStand'] = sit_stand if face is not None: body['inference']['face']['boundingBox'] = face if mouth is not None: body['inference']['face']['mouth']
{'март': 1}, {'конкурент': 1}, {'наглядно': 1}, {'сервис': 1}, {'имущество': 1}, {'подробно': 1}, {'вебинар': 1}, {'неликвид': 3}, {'непрофильный': 1}, {'оптовый': 1}, {'скапливаться': 1}, {'скупка': 1}, {'фирмам': 1}], [{'возврат': 1}, {'плохо': 1}, {'стоять': 1}, {'время': 1}, {'связать': 1}, {'сделка': 1}, {'день': 1}, {'золото': 1}, {'вариант': 1}, {'вход': 2}, {'реальный': 1}, {'смотреть': 2}, {'источник': 1}, {'рабочий': 1}, {'аналитик': 1}, {'выход': 1}, {'доллар': 1}, {'коррекция': 1}, {'рубль': 1}, {'заходить': 1}, {'иена': 1}, {'фунт': 1}, {'канал': 1}, {'довольно': 1}, {'отличный': 1}, {'волна': 1}, {'обратный': 1}, {'показательно': 1}, {'поезд': 1}, {'будни': 1}, {'выходной': 1}, {'добития': 1}, {'дождаться': 1}, {'доллариена': 1}, {'долларрубль': 1}, {'ежедневно': 1}, {'завершенныи': 1}, {'закрыться': 1}, {'замечу': 1}, {'золотодоллар': 1}, {'импульс': 1}, {'канале': 1}, {'корреляция': 1}, {'низ': 1}, {'отрабатываться': 1}, {'перетрубации': 1}, {'телеграмма': 1}, {'финансовыирынок': 1}, {'фунтдоллар': 1}, {'чёткий': 1}, {'экспирациеи': 1}], [{'год': 2}, {'итог': 1}, {'компания': 1}, {'международнои': 1}, {'международный': 1}, {'оказаться': 2}, {'падать': 1}, {'расти': 1}, {'темп': 1}, {'транспорт': 1}, {'цена': 4}, {'бизнес': 1}, {'гражданин': 2}, {'кредитор': 1}, {'мочь': 1}, {'порядок': 1}, {'прийтись': 2}, {'увеличивать': 1}, {'удаться': 1}, {'уровень': 3}, {'экономика': 3}, {'главный': 1}, {'граница': 1}, {'группа': 1}, {'дороже': 2}, {'думать': 1}, {'интересный': 1}, {'конкуренция': 1}, {'магазин': 1}, {'новый': 1}, {'однако': 2}, {'описать': 1}, {'первый': 1}, {'период': 1}, {'помощь': 2}, {'последний': 1}, {'посмотреть': 1}, {'поэтому': 2}, {'резко': 1}, {'решить': 1}, {'самую': 1}, {'сильно': 1}, {'ситуация': 1}, {'способность': 1}, {'становиться': 1}, {'стать': 2}, {'страна': 10}, {'товар': 1}, {'высоко': 2}, {'добавить': 1}, {'другие': 1}, {'касаться': 1}, {'максимум': 1}, {'низкий': 2}, {'остальным': 1}, {'открытие': 1}, {'падение': 1}, 
{'показатель': 1}, {'проблема': 3}, {'упасть': 1}, {'виноватый': 1}, {'доход': 1}, {'забыть': 1}, {'понравиться': 1}, {'отвечать': 1}, {'рост': 4}, {'также': 1}, {'власть': 1}, {'государство': 1}, {'значить': 1}, {'налог': 1}, {'нужный': 2}, {'подняться': 1}, {'превратить': 1}, {'простой': 2}, {'смотреть': 1}, {'собственно': 1}, {'согласный': 1}, {'частный': 1}, {'необходимость': 1}, {'деиствия': 1}, {'мнение': 1}, {'небольшои': 1}, {'ожидаться': 1}, {'регулятор': 1}, {'фон': 1}, {'часть': 1}, {'зарплата': 4}, {'кризис': 1}, {'политика': 1}, {'правительство': 1}, {'придумать': 1}, {'рабочий': 2}, {'экономическии': 1}, {'экономический': 1}, {'годовой': 1}, {'лежать': 1}, {'мера': 1}, {'обмен': 1}, {'общественный': 1}, {'оказываться': 1}, {'предложение': 1}, {'сектор': 1}, {'сокращение': 1}, {'статистика': 1}, {'шаг': 1}, {'сильный': 2}, {'аналогичный': 1}, {'бюджетный': 1}, {'данным': 1}, {'включая': 2}, {'восстановление': 1}, {'помочь': 1}, {'инфляция': 5}, {'бороться': 2}, {'политик': 3}, {'включить': 1}, {'печатныи': 1}, {'повлиять': 2}, {'равный': 1}, {'станок': 1}, {'сфера': 1}, {'значительно': 1}, {'введение': 1}, {'заметно': 1}, {'сильнее': 1}, {'слабый': 1}, {'недвижимость': 1}, {'рост_цен': 4}, {'течение': 1}, {'экономист': 2}, {'жить': 1}, {'якобы': 1}, {'фискальный': 1}, {'большеи': 1}, {'немецкий': 3}, {'страдать': 1}, {'заметить': 1}, {'сократиться': 2}, {'бизнесмен': 1}, {'безработица': 1}, {'греческий': 2}, {'долговой': 1}, {'конкурентный': 1}, {'монетарный': 1}, {'монополист': 1}, {'предоставление': 1}, {'уверять': 1}, {'денежнои': 1}, {'первои': 1}, {'серьезно': 1}, {'существование': 1}, {'трата': 1}, {'официальный': 1}, {'согласиться': 1}, {'лёгкий': 1}, {'сегодняшнии': 1}, {'десятилетие': 1}, {'почувствовать': 1}, {'австрия': 1}, {'актуальнои': 1}, {'афинам': 2}, {'взамен': 1}, {'возобновиться': 1}, {'грек': 1}, {'греция': 1}, {'дефляциеи': 1}, {'дефляционный': 1}, {'доминирование': 1}, {'еврозона': 2}, {'еврозоны': 1}, {'европеиским': 2}, 
{'жадный': 1}, {'ирландия': 1}, {'испанскии': 1}, {'короткии': 1}, {'легче': 1}, {'нагрузка': 1}, {'негативно': 1}, {'обходиться': 1}, {'остроты': 1}, {'периферииных': 1}, {'периферия': 2}, {'подорвать': 1}, {'подрывать': 1}, {'покупательный': 1}, {'проблемный': 2}, {'работник': 1}, {'раздутый': 1}, {'сбить': 1}, {'скептически': 1}, {'соревнование': 1}, {'супермаркет': 1}, {'таргетируемая': 1}, {'тормозя': 1}, {'ультиматум': 1}, {'эллада': 1}, {'южный': 1}], [{'большой': 1}, {'гигант': 1}, {'год': 3}, {'давно': 1}, {'итог': 1}, {'лидер': 1}, {'промышленность': 1}, {'расти': 2}, {'рынок': 11}, {'сегодня': 4}, {'снижение': 2}, {'снизиться': 2}, {'спасибо': 1}, {'темп': 1}, {'хороший': 2}, {'цена': 3}, {'банк': 1}, {'мочь': 1}, {'обратиться': 1}, {'пока': 2}, {'прийтись': 1}, {'расход': 1}, {'сразу': 1}, {'ставка': 1}, {'стоять': 1}, {'удаться': 1}, {'уровень': 3}, {'брать': 1}, {'видеть': 1}, {'время': 1}, {'говорить': 2}, {'давать': 2}, {'изменение': 2}, {'менее': 1}, {'настроение': 1}, {'небольшой': 2}, {'однако': 1}, {'очень': 1}, {'покупка': 3}, {'последний': 3}, {'развитый': 1}, {'рассуждение': 1}, {'ситуация': 2}, {'состояние': 2}, {'стать': 1}, {'страна': 1}, {'товар': 1}, {'условие': 1}, {'учитывать': 1}, {'фактор': 1}, {'цель': 1}, {'число': 2}, {'являться': 2}, {'акция': 3}, {'день': 7}, {'ждать': 1}, {'инвест': 1}, {'инвестор': 1}, {'неделя': 3}, {'ниже': 2}, {'низкий': 1}, {'никак': 2}, {'открытие': 3}, {'падение': 2}, {'показывать': 1}, {'россиискии': 1}, {'сессия': 2}, {'составить': 1}, {'торг': 3}, {'тренд': 5}, {'упасть': 1}, {'утро': 1}, {'золото': 1}, {'успешный': 1}, {'входить': 2}, {'значительный': 1}, {'минус': 1}, {'реальный': 1}, {'рост': 12}, {'также': 1}, {'вновь': 1}, {'государство': 1}, {'инвестиция': 1}, {'находиться': 1}, {'начать': 2}, {'отчёт': 1}, {'спекулянт': 1}, {'значение': 1}, {'поддержка': 2}, {'скорее': 1}, {'услуга': 1}, {'анализ': 3}, {'вверх': 4}, {'выше': 1}, {'днеи': 2}, {'зона': 2}, {'индикатор': 2}, {'линия': 1}, 
{'небольшои': 1}, {'ожидать': 1}, {'ожидаться': 1}, {'опубликовать': 1}, {'портфель': 1}, {'пробой': 1}, {'протяжение': 1}, {'рекомендация': 2}, {'средний': 1}, {'торговаться': 2}, {'фон': 1}, {'заседание': 1}, {'октябрь': 3}, {'политика': 1}, {'причина': 2}, {'продолжаться': 1}, {'близкий': 1}, {'выход': 2}, {'данные': 1}, {'доллар': 1}, {'информация': 1}, {'напомнить': 1}, {'положение': 2}, {'потребителеи': 1}, {'предыдущий': 2}, {'развиваться': 1}, {'сектор': 2}, {'сигнал': 1}, {'статистика': 3}, {'торговый': 1}, {'формирование': 1}, {'вчера': 1}, {'вырасти': 10}, {'индекс': 10}, {'картина': 1}, {'коррекция': 3}, {'локальный': 1}, {'начало': 1}, {'нефть': 6}, {'пункт': 1}, {'рамка': 1}, {'сообщение': 1}, {'сопротивление': 6}, {'руб': 4}, {'процентный': 1}, {'данным': 1}, {'эксперт': 2}, {'верить': 1}, {'деталь': 1}, {'закрыть': 2}, {'отыгрывать': 1}, {'стоп': 1}, {'увидеть': 2}, {'четверг': 3}, {'мотив': 1}, {'закончиться': 1}, {'надежда': 1}, {'собраться': 1}, {'январь': 1}, {'китаи': 2}, {'площадка': 1}, {'привести': 1}, {'заказ': 1}, {'общем': 1}, {'подбирать': 1}, {'пользование': 1}, {'праздник': 2}, {'тема': 1}, {'вспомнить': 1}, {'рекордно': 1}, {'среднем': 1}, {'управление': 1}, {'американский': 4}, {'достигнуть': 1}, {'запас': 8}, {'низкии': 1}, {'очередной': 1}, {'спасение': 2}, {'потребительский': 1}, {'баррель': 1}, {'встать': 2}, {'заявка': 1}, {'идеальнои': 1}, {'марка': 1}, {'среда': 4}, {'деловой': 1}, {'обойтись': 1}, {'четвёртый': 1}, {'бензин': 2}, {'стопа': 1}, {'заметить': 1}, {'доверие': 3}, {'долгосрочный': 1}, {'доходность': 1}, {'рекордный': 2}, {'увеличиться': 1}, {'команда': 1}, {'безработица': 3}, {'видимо': 1}, {'публикация': 1}, {'утверждать': 1}, {'порадовать': 1}, {'уверенность': 1}, {'столь': 1}, {'закрытие': 5}, {'выкуп': 1}, {'кредитнои': 1}, {'подряд': 2}, {'закрыться': 2}, {'автогигант': 1}, {'автопроме': 1}, {'адр': 1}, {'аналитика': 1}, {'баррелеи': 3}, {'безработице': 1}, {'благодарение': 3}, {'вероятность': 1}, 
{'вероятный': 1}, {'внушительныи': 1}, {'вчерашнии': 1}, {'вчерашний': 2}, {'выросщая': 1}, {'выстрелить': 1}, {'выходнои': 1}, {'вялыи': 1}, {'гепом': 1}, {'гэпу': 1}, {'давление': 1}, {'денежно': 1}, {'дизтоплива': 1}, {'дистиллят': 2}, {'длительный': 1}, {'драиверов': 1}, {'забвение': 1}, {'завершиться': 1}, {'заветный': 1}, {'заявить': 1}, {'зеленои': 1}, {'злне': 1}, {'комментировать': 1}, {'конг': 1}, {'корпоративный': 1}, {'корректироваться': 1}, {'коррекциеи': 1}, {'котировка': 2}, {'краснои': 1}, {'критически': 2}, {'круг': 1}, {'крыло': 1}, {'мазут': 1}, {'миллиона_баррелеи': 4}, {'накануне': 2}, {'неитральнои': 1}, {'неопределённый': 2}, {'низходящего': 1}, {'нисходящий': 1}, {'ноябрь': 3}, {'обезает': 1}, {'образоваться': 1}, {'оказать': 1}, {'отметиться': 1}, {'отыграть': 2}, {'падении': 1}, {'перевернуться': 2}, {'плюсе': 1}, {'подкачали': 1}, {'подорожать': 1}, {'позитив': 1}, {'позитивнои': 1}, {'полюс': 1}, {'полюсзолото': 1}, {'последовать': 1}, {'пособие': 2}, {'посрамить': 1}, {'потихоньку': 1}, {'потрясать': 1}, {'походить': 1}, {'преддверие': 3}, {'предстоять': 1}, {'преждевременно': 1}, {'прежнему': 1}, {'преодолеть': 1}, {'прибавить': 2}, {'примета': 1}, {'прореагировать': 1}, {'протокол': 1}, {'проход': 1}, {'процентныи': 1}, {'пускало': 1}, {'пятница': 2}, {'развороте': 1}, {'разрешиться': 1}, {'расплата': 1}, {'симметричныи': 1}, {'сладкий': 1}, {'слому': 1}, {'случаях': 1}, {'снизу': 2}, {'статистик': 1}, {'сырье': 1}, {'темои': 1}, {'тестировать': 1}, {'треугольник': 5}, {'трёхдневный': 1}, {'узнавать': 1}, {'уралкалии': 1}, {'фантазия': 1}, {'финансист': 1}, {'фьючерсы': 1}], [{'год': 1}, {'раз': 1}, {'банк': 1}, {'мочь': 1}, {'сразу': 1}, {'говорить': 1}, {'дороже': 1}, {'отношение': 1}, {'рублеи': 1}, {'стоить': 1}, {'смочь': 1}, {'курс': 1}, {'доллар': 2}, {'вернуться': 1}, {'вырасти': 1}, {'евро': 2}, {'рубль': 1}, {'разный': 1}, {'цифра': 1}, {'паника': 1}, {'достигнуть': 1}, {'электронный': 1}, {'рассчитать': 1}, {'прошлое': 1}, 
{'убрать': 1}, {'карточка': 1}, {'легче': 1}, {'барабан': 1}, {'гауна': 1}, {'дили': 1}, {'кучи': 1}, {'назначить': 1}, {'ненужный': 1}, {'обменник': 1}, {'платиновый': 1}, {'поменять': 1}, {'помнится': 1}, {'рубли': 1}, {'рублёвый': 1}, {'скупаю': 1}, {'табло': 1}], [{'год': 3}, {'история': 1}, {'компания': 3}, {'кстати': 1}, {'любопытный': 1}, {'назад': 1}, {'почему': 1}, {'производство': 1}, {'рынок': 1}, {'плохо': 1}, {'стоять': 1}, {'читать': 1}, {'видеть': 1}, {'говорить': 1}, {'группа': 1}, {'купить': 2}, {'настоящий': 1}, {'нематериальный': 1}, {'потерять': 1}, {'рублеи': 2}, {'слово': 2}, {'сначала': 1}, {'страна': 1}, {'хотеть': 1}, {'ценность': 1}, {'чувство': 1}, {'явный': 1}, {'акция': 1}, {'аудитория': 1}, {'готовый': 1},
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from sklearn import linear_model
from scipy import stats
from collections import deque

from camera_cal_test import camera_cal
import gradients_colors_thresholding_test as grad_color_thres
from perspective_transform_test import perspective_transform


# Define a class to receive the characteristics of each line detection
class Line:
    """Track and smooth one detected lane line across the last N frames.

    Parameters
    ----------
    wraped_shape : tuple
        Shape of the warped (bird's-eye) input image.
    N : int
        Number of recent frames kept for smoothing.
    ym_per_pix : float
        Meters represented by one pixel along the y direction.
    xm_per_pix : float
        Meters represented by one pixel along the x direction.
    """

    def __init__(self, wraped_shape, N, ym_per_pix, xm_per_pix):
        # Was the line detected in the last iteration?
        self.detected = False
        # Bottom/top x values of the last N fits of the line.
        self.recent_x_bottom = deque(maxlen=N)
        self.recent_x_top = deque(maxlen=N)
        self.best_x_bottom = None
        self.best_x_top = None
        # Average x values of the fitted line over the last N iterations.
        self.bestx = None
        # Polynomial coefficients averaged over the last N iterations.
        self.best_fit = None
        self.recent_fit = deque(maxlen=N)
        # Radius of curvature of the line (meters, see calc_radius_of_curvature).
        self.radius_of_curvature = None
        # Distance of vehicle center from the line.
        self.line_base_pos = None
        # Difference in fit coefficients between last and new fits.
        self.diffs = np.array([0, 0, 0], dtype='float')
        # x values for detected line pixels (evaluated from best_fit).
        self.allx = None
        # y values for detected line pixels: one per image row.
        self.ally = np.linspace(0, wraped_shape[0] - 1, wraped_shape[0])
        # Curvature is evaluated at the bottom row of the image.
        self.curv_y_eval = wraped_shape[0]
        self.ym_per_pix = ym_per_pix
        self.xm_per_pix = xm_per_pix

    def update_recent_fit(self, fit):
        """Append the newest polynomial coefficients (column vector)."""
        self.recent_fit.append(fit)

    def update_recent_x_bottom(self, x):
        """Append the newest bottom x coordinate."""
        self.recent_x_bottom.append(x)

    def update_recent_x_top(self, x):
        """Append the newest top x coordinate."""
        self.recent_x_top.append(x)

    def calc_best_x_bottom(self):
        """Robust average (30% trimmed mean) of the recent bottom x coords."""
        x_points = np.asarray(list(self.recent_x_bottom))
        self.best_x_bottom = stats.trim_mean(x_points, proportiontocut=0.3)

    def calc_best_x_top(self):
        """Robust average (30% trimmed mean) of the recent top x coords."""
        x_points = np.asarray(list(self.recent_x_top))
        self.best_x_top = stats.trim_mean(x_points, proportiontocut=0.3)

    def calc_best_fit(self):
        """Robust average (30% trimmed mean) of the recent fit coefficients."""
        fits = np.concatenate(tuple(self.recent_fit), axis=1)
        self.best_fit = stats.trim_mean(fits, proportiontocut=0.3, axis=1)

    def calc_radius_of_curvature(self):
        """Curvature radius in meters, evaluated at the image bottom.

        The pixel-space fit x = a*y**2 + b*y + c is rescaled to meters
        before applying the standard curvature formula.
        """
        real_a = self.best_fit[0] * self.xm_per_pix / (self.ym_per_pix ** 2)
        real_b = self.best_fit[1] * self.xm_per_pix / self.ym_per_pix
        numerator = (1 + (2 * real_a * (self.curv_y_eval * self.ym_per_pix)
                          + real_b) ** 2) ** 1.5
        denominator = 2 * abs(real_a)
        self.radius_of_curvature = numerator / denominator

    def calc_line_base_pos(self):
        """Line x position (pixels) at the bottom of the image."""
        self.line_base_pos = np.poly1d(self.best_fit)(self.curv_y_eval)

    def calc_allx(self):
        """Evaluate the averaged polynomial at every y row of the image."""
        self.allx = (self.best_fit[0] * self.ally ** 2
                     + self.best_fit[1] * self.ally + self.best_fit[2])


def _commit_fit(line, fit, height):
    """Record a successful polynomial fit on `line` and refresh its stats.

    Shared by fit_polynomial and search_around_poly (previously duplicated).
    """
    line.detected = True
    line.update_recent_fit(fit[:, np.newaxis])
    line.update_recent_x_bottom(np.poly1d(fit)(height))
    line.update_recent_x_top(np.poly1d(fit)(0))
    line.calc_best_fit()
    line.calc_best_x_bottom()
    line.calc_best_x_top()
    line.calc_allx()
    line.calc_line_base_pos()
    line.calc_radius_of_curvature()


def find_lane_pixels_scratch(binary_warped, part='left'):
    """Sliding-window lane-pixel search starting from scratch.

    Input:
        binary_warped: binary warped (bird's-eye) image
        part: 'left' or 'right' lane half
    Returns:
        (x, y) arrays of lane pixel coordinates, or None when nothing found.
    """
    # Histogram of the bottom half locates the lane base.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    # BUG FIX: np.int was removed in NumPy >= 1.24; use the builtin int.
    midpoint = int(histogram.shape[0] // 2)
    if part == 'left':
        base = np.argmax(histogram[:midpoint])
    else:
        base = np.argmax(histogram[midpoint:]) + midpoint

    nwindows = 9       # number of sliding windows
    margin = 100       # window half-width
    minpix = 50        # pixels needed to re-center the next window
    window_height = int(binary_warped.shape[0] // nwindows)

    # Positions of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # BUG FIX: nonzero() always returns a 2-tuple, so len(nonzero) == 0 was
    # never true; test the number of active pixels instead.
    if nonzerox.size == 0:
        print('no active pixels found in the warped binary img')
        return None

    x_current = base
    lane_inds = []
    for window in range(nwindows):
        # Window boundaries in y and x.
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_x_low = int(x_current - margin)
        win_x_high = int(x_current + margin)
        # Nonzero pixels falling inside this window.
        good_inds = ((nonzerox >= win_x_low) & (nonzerox < win_x_high) &
                     (nonzeroy >= win_y_low) & (nonzeroy < win_y_high)).nonzero()[0]
        lane_inds.append(good_inds)
        # Re-center the next window on the mean of the found pixels.
        if len(good_inds) > minpix:
            x_current = np.mean(nonzerox[good_inds])

    # nwindows entries always exist, so concatenation cannot fail.
    lane_inds = np.concatenate(lane_inds)
    if lane_inds.size == 0:
        print('no active points found in the interest areas')
        return None
    return (nonzerox[lane_inds], nonzeroy[lane_inds])


def fit_polynomial(binary_warped, line, part='left'):
    """Fit a second-order polynomial to lane pixels found from scratch.

    Input:
        binary_warped: warped binary image
        line: Line object for this lane, updated in place
        part: 'left' or 'right'
    Sets line.detected = False when no pixels are found.
    """
    points = find_lane_pixels_scratch(binary_warped, part=part)
    if points is None:
        line.detected = False
        return
    x, y = points
    fit = np.polyfit(y, x, deg=2)
    _commit_fit(line, fit, binary_warped.shape[0])


def search_around_poly(binary_warped, margin, line, part='left'):
    """Search for lane pixels in a band around the previous polynomial fit.

    Input:
        binary_warped: warped binary image
        margin: half-width of the search band around the polynomial
        line: Line object for this lane, updated in place
        part: 'left' or 'right'
    Sets line.detected = False when the search fails.
    """
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # BUG FIX: len(nonzero) is always 2 (tuple of index arrays); check the
    # actual pixel count.
    if nonzerox.size == 0:
        print('no active pixels found in the warped binary img')
        line.detected = False
        return

    # Band of +/- margin around the previous best-fit polynomial.
    poly_x = np.poly1d(line.best_fit)(nonzeroy)
    lane_inds = (nonzerox >= poly_x - margin) & (nonzerox < poly_x + margin)
    x = nonzerox[lane_inds]
    y = nonzeroy[lane_inds]
    # BUG FIX: lane_inds is a boolean mask, so len(lane_inds) equals the
    # total pixel count, not the number of matches — test the selection.
    if x.size == 0:
        print('previous polynomial fit cannot help find the current active points')
        print('create the polinomial from scratch again')
        line.detected = False
        return

    fit = np.polyfit(y, x, deg=2)
    _commit_fit(line, fit, binary_warped.shape[0])


def drawing_lane_line_area(warped_size, left_lane, right_lane, image, Minv):
    """Paint the detected lane area onto a blank warped image.

    NOTE(review): this function appears truncated in the source — the
    inverse perspective warp / overlay step after fillPoly is missing;
    confirm against the original file.
    """
    # Create an image to draw the lines on.
    warp_zero = np.zeros(warped_size).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly().
    pts_left = np.array([np.transpose(np.vstack([left_lane.allx, left_lane.ally]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_lane.allx,
                                                            right_lane.ally])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image.
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    # Warp the blank back to original image
from random import shuffle
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
from PIL import Image
from utils.utils import bbox_iou, merge_bboxes


def clip_by_tensor(t, t_min, t_max):
    """Element-wise clamp of `t` into [t_min, t_max] (inputs cast to float)."""
    t = t.float()
    result = (t >= t_min).float() * t + (t < t_min).float() * t_min
    result = (result <= t_max).float() * result + (result > t_max).float() * t_max
    return result


def MSELoss(pred, target):
    """Element-wise squared error (no reduction)."""
    return (pred - target) ** 2


def BCELoss(pred, target):
    """Element-wise binary cross-entropy, clipped for log stability."""
    epsilon = 1e-7
    pred = clip_by_tensor(pred, epsilon, 1.0 - epsilon)
    output = -target * torch.log(pred) - (1.0 - target) * torch.log(1.0 - pred)
    return output


class YOLOLoss(nn.Module):
    """YOLOv3 training loss for one detection scale (13/26/52 grid).

    anchors: list of (w, h) anchor sizes in input-image pixels (all 9).
    num_classes: number of object classes.
    img_size: (width, height) of the network input.
    """

    def __init__(self, anchors, num_classes, img_size):
        super(YOLOLoss, self).__init__()
        self.anchors = anchors
        self.num_anchors = len(anchors)
        self.num_classes = num_classes
        self.bbox_attrs = 5 + num_classes
        self.img_size = img_size
        self.ignore_threshold = 0.5
        # Per-component loss weights.
        self.lambda_xy = 1.0
        self.lambda_wh = 1.0
        self.lambda_conf = 1.0
        self.lambda_cls = 1.0

    def forward(self, input, targets=None):
        """Compute the YOLO loss for one scale.

        input: raw predictions, shape (bs, 3*(5+num_classes), h, w).
        targets: list (len bs) of (num_obj, 5) arrays with normalized
            x, y, w, h, class — assumed from get_target's indexing;
            confirm against the dataloader.
        Returns (total, x, y, w, h, conf, cls) losses; all but the first
        are Python floats.
        """
        bs = input.size(0)        # batch size
        in_h = input.size(2)      # feature-map height
        in_w = input.size(3)      # feature-map width
        # Stride: input pixels covered by one feature-map cell
        # (e.g. a 13x13 map on a 416 input gives stride 32).
        stride_h = self.img_size[1] / in_h
        stride_w = self.img_size[0] / in_w
        # Rescale the anchors into feature-map units.
        scaled_anchors = [(a_w / stride_w, a_h / stride_h)
                          for a_w, a_h in self.anchors]
        # (bs, 3*(5+C), h, w) -> (bs, 3, h, w, 5+C)
        prediction = input.view(bs, self.num_anchors // 3, self.bbox_attrs,
                                in_h, in_w).permute(0, 1, 3, 4, 2).contiguous()
        # Decode the raw predictions.
        x = torch.sigmoid(prediction[..., 0])          # center x offset
        y = torch.sigmoid(prediction[..., 1])          # center y offset
        w = prediction[..., 2]                         # width (log space)
        h = prediction[..., 3]                         # height (log space)
        conf = torch.sigmoid(prediction[..., 4])       # objectness
        pred_cls = torch.sigmoid(prediction[..., 5:])  # class probabilities

        # Build training targets and object/no-object masks.
        mask, noobj_mask, tx, ty, tw, th, tconf, tcls, box_loss_scale_x, box_loss_scale_y = \
            self.get_target(targets, scaled_anchors, in_w, in_h, self.ignore_threshold)
        noobj_mask = self.get_ignore(prediction, targets, scaled_anchors,
                                     in_w, in_h, noobj_mask)

        # BUG FIX: the original called .cuda() unconditionally, which crashes
        # on CPU-only runs; follow the input's device instead (consistent with
        # the is_cuda handling already used in get_ignore).
        device = input.device
        box_loss_scale_x = box_loss_scale_x.to(device)
        box_loss_scale_y = box_loss_scale_y.to(device)
        # Weight small boxes more heavily in the coordinate losses.
        box_loss_scale = 2 - box_loss_scale_x * box_loss_scale_y
        mask, noobj_mask = mask.to(device), noobj_mask.to(device)
        tx, ty, tw, th = tx.to(device), ty.to(device), tw.to(device), th.to(device)
        tconf, tcls = tconf.to(device), tcls.to(device)

        # Per-component losses.
        loss_x = torch.sum(BCELoss(x, tx) / bs * box_loss_scale * mask)
        loss_y = torch.sum(BCELoss(y, ty) / bs * box_loss_scale * mask)
        loss_w = torch.sum(MSELoss(w, tw) / bs * 0.5 * box_loss_scale * mask)
        loss_h = torch.sum(MSELoss(h, th) / bs * 0.5 * box_loss_scale * mask)
        loss_conf = torch.sum(BCELoss(conf, mask) * mask / bs) + \
            torch.sum(BCELoss(conf, mask) * noobj_mask / bs)
        loss_cls = torch.sum(BCELoss(pred_cls[mask == 1], tcls[mask == 1]) / bs)
        loss = loss_x * self.lambda_xy + loss_y * self.lambda_xy + \
            loss_w * self.lambda_wh + loss_h * self.lambda_wh + \
            loss_conf * self.lambda_conf + loss_cls * self.lambda_cls
        return loss, loss_x.item(), loss_y.item(), loss_w.item(), \
            loss_h.item(), loss_conf.item(), loss_cls.item()

    def get_target(self, target, anchors, in_w, in_h, ignore_threshold):
        """Build per-cell regression/classification targets for one scale.

        target: list (len bs) of (num_obj, 5) normalized box arrays.
        anchors: all 9 anchors already scaled to feature-map units.
        Returns masks, tx/ty/tw/th, tconf, tcls and the w/h loss scales.
        """
        bs = len(target)
        # The 3 anchors belonging to this scale, selected by grid width.
        anchor_index = [[0, 1, 2], [3, 4, 5], [6, 7, 8]][[13, 26, 52].index(in_w)]
        subtract_index = [0, 3, 6][[13, 26, 52].index(in_w)]
        n_per_scale = self.num_anchors // 3
        # Target tensors: one cell per (anchor, row, column).
        mask = torch.zeros(bs, n_per_scale, in_h, in_w, requires_grad=False)
        noobj_mask = torch.ones(bs, n_per_scale, in_h, in_w, requires_grad=False)
        tx = torch.zeros(bs, n_per_scale, in_h, in_w, requires_grad=False)
        ty = torch.zeros(bs, n_per_scale, in_h, in_w, requires_grad=False)
        tw = torch.zeros(bs, n_per_scale, in_h, in_w, requires_grad=False)
        th = torch.zeros(bs, n_per_scale, in_h, in_w, requires_grad=False)
        tconf = torch.zeros(bs, n_per_scale, in_h, in_w, requires_grad=False)
        tcls = torch.zeros(bs, n_per_scale, in_h, in_w, self.num_classes,
                           requires_grad=False)
        box_loss_scale_x = torch.zeros(bs, n_per_scale, in_h, in_w,
                                       requires_grad=False)
        box_loss_scale_y = torch.zeros(bs, n_per_scale, in_h, in_w,
                                       requires_grad=False)
        for b in range(bs):
            for t in range(target[b].shape[0]):
                # Ground-truth box in feature-map coordinates.
                gx = target[b][t, 0] * in_w
                gy = target[b][t, 1] * in_h
                gw = target[b][t, 2] * in_w
                gh = target[b][t, 3] * in_h
                # Grid cell that owns this object.
                gi = int(gx)
                gj = int(gy)
                # Compare shapes only (both centered at origin) to pick
                # the best-matching anchor among all 9.
                gt_box = torch.FloatTensor(np.array([0, 0, gw, gh])).unsqueeze(0)
                anchor_shapes = torch.FloatTensor(np.concatenate(
                    (np.zeros((self.num_anchors, 2)), np.array(anchors)), 1))
                anch_ious = bbox_iou(gt_box, anchor_shapes)
                best_n = np.argmax(anch_ious)
                if best_n not in anchor_index:
                    # Best anchor belongs to another scale; handled there.
                    continue
                if (gj < in_h) and (gi < in_w):
                    best_n = best_n - subtract_index
                    # This anchor/cell is responsible for an object.
                    noobj_mask[b, best_n, gj, gi] = 0
                    mask[b, best_n, gj, gi] = 1
                    # Center offsets within the owning cell.
                    tx[b, best_n, gj, gi] = gx - gi
                    ty[b, best_n, gj, gi] = gy - gj
                    # Log-space width/height targets relative to the anchor.
                    tw[b, best_n, gj, gi] = math.log(
                        gw / anchors[best_n + subtract_index][0])
                    th[b, best_n, gj, gi] = math.log(
                        gh / anchors[best_n + subtract_index][1])
                    # Normalized box size, used to weight the xywh loss.
                    box_loss_scale_x[b, best_n, gj, gi] = target[b][t, 2]
                    box_loss_scale_y[b, best_n, gj, gi] = target[b][t, 3]
                    # Objectness and one-hot class targets.
                    tconf[b, best_n, gj, gi] = 1
                    tcls[b, best_n, gj, gi, int(target[b][t, 4])] = 1
                else:
                    print('Step {0} out of bound'.format(b))
                    print('gj: {0}, height: {1} | gi: {2}, width: {3}'.format(
                        gj, in_h, gi, in_w))
                    continue
        return mask, noobj_mask, tx, ty, tw, th, tconf, tcls, \
            box_loss_scale_x, box_loss_scale_y

    def get_ignore(self, prediction, target, scaled_anchors, in_w, in_h, noobj_mask):
        """Clear noobj_mask where predictions overlap a ground-truth box.

        Predicted boxes whose IoU with any ground-truth box exceeds
        ignore_threshold are neither positive nor negative samples, so
        they are excluded from the no-object confidence loss.
        """
        bs = len(target)
        anchor_index = [[0, 1, 2], [3, 4, 5], [6, 7, 8]][[13, 26, 52].index(in_w)]
        scaled_anchors = np.array(scaled_anchors)[anchor_index]
        # Decoded center offsets and raw width/height.
        x = torch.sigmoid(prediction[..., 0])
        y = torch.sigmoid(prediction[..., 1])
        w = prediction[..., 2]
        h = prediction[..., 3]
        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
        LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
        # Grid of cell top-left corners, broadcast to the prediction shape.
        grid_x = torch.linspace(0, in_w - 1, in_w).repeat(in_w, 1).repeat(
            int(bs * self.num_anchors / 3), 1, 1).view(x.shape).type(FloatTensor)
        grid_y = torch.linspace(0, in_h - 1, in_h).repeat(in_h, 1).t().repeat(
            int(bs * self.num_anchors / 3), 1, 1).view(y.shape).type(FloatTensor)
        # Anchor width/height broadcast to the prediction shape.
        anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))
        anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))
        anchor_w = anchor_w.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(w.shape)
        anchor_h = anchor_h.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(h.shape)
        # Decoded predicted boxes (cx, cy, w, h) in feature-map units.
        pred_boxes = FloatTensor(prediction[..., :4].shape)
        pred_boxes[..., 0] = x.data + grid_x
        pred_boxes[..., 1] = y.data + grid_y
        pred_boxes[..., 2] = torch.exp(w.data) * anchor_w
        pred_boxes[..., 3] = torch.exp(h.data) * anchor_h
        for i in range(bs):
            pred_boxes_for_ignore = pred_boxes[i].view(-1, 4)
            for t in range(target[i].shape[0]):
                gx = target[i][t, 0] * in_w
                gy = target[i][t, 1] * in_h
                gw = target[i][t, 2] * in_w
                gh = target[i][t, 3] * in_h
                gt_box = torch.FloatTensor(
                    np.array([gx, gy, gw, gh])).unsqueeze(0).type(FloatTensor)
                anch_ious = bbox_iou(gt_box, pred_boxes_for_ignore, x1y1x2y2=False)
                anch_ious = anch_ious.view(pred_boxes[i].size()[:3])
                # Confident overlaps are ignored (not penalized as background).
                noobj_mask[i][anch_ious > self.ignore_threshold] = 0
        return noobj_mask


def rand(a=0, b=1):
    """Uniform random float in [a, b)."""
    return np.random.rand() * (b - a) + a

# NOTE(review): a large block of commented-out (and truncated) data-
# augmentation `Generator` code was removed here as dead code.
render the dendrogram. Its has the following keys: ``'color_list'`` A list of color names. The k'th element represents the color of the k'th link. ``'icoord'`` and ``'dcoord'`` Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]`` where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]`` where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is ``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``. ``'ivl'`` A list of labels corresponding to the leaf nodes. ``'leaves'`` For each i, ``H[i] == j``, cluster node ``j`` appears in position ``i`` in the left-to-right traversal of the leaves, where :math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the ``i``-th leaf node corresponds to an original observation. Otherwise, it corresponds to a non-singleton cluster. ``'leaves_color_list'`` A list of color names. The k'th element represents the color of the k'th leaf. See Also -------- linkage, set_link_color_palette Notes ----- It is expected that the distances in ``Z[:,2]`` be monotonic, otherwise crossings appear in the dendrogram. Examples -------- >>> from scipy.cluster import hierarchy >>> import matplotlib.pyplot as plt A very basic example: >>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., ... 400., 754., 564., 138., 219., 869., 669.]) >>> Z = hierarchy.linkage(ytdist, 'single') >>> plt.figure() >>> dn = hierarchy.dendrogram(Z) Now, plot in given axes, improve the color scheme and use both vertical and horizontal orientations: >>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k']) >>> fig, axes = plt.subplots(1, 2, figsize=(8, 3)) >>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y', ... orientation='top') >>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], ... above_threshold_color='#bcbddc', ... 
orientation='right') >>> hierarchy.set_link_color_palette(None) # reset to default after use >>> plt.show() """ # This feature was thought about but never implemented (still useful?): # # ... = dendrogram(..., leaves_order=None) # # Plots the leaves in the order specified by a vector of # original observation indices. If the vector contains duplicates # or results in a crossing, an exception will be thrown. Passing # None orders leaf nodes based on the order they appear in the # pre-order traversal. Z = np.asarray(Z, order='c') if orientation not in ["top", "left", "bottom", "right"]: raise ValueError("orientation must be one of 'top', 'left', " "'bottom', or 'right'") if labels is not None and Z.shape[0] + 1 != len(labels): raise ValueError("Dimensions of Z and labels must be consistent.") is_valid_linkage(Z, throw=True, name='Z') Zs = Z.shape n = Zs[0] + 1 if type(p) in (int, float): p = int(p) else: raise TypeError('The second argument must be a number') if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None): # 'mlab' and 'mtica' are kept working for backwards compat. 
raise ValueError('Invalid truncation mode.') if truncate_mode == 'lastp' or truncate_mode == 'mlab': if p > n or p == 0: p = n if truncate_mode == 'mtica': # 'mtica' is an alias truncate_mode = 'level' if truncate_mode == 'level': if p <= 0: p = np.inf if get_leaves: lvs = [] else: lvs = None icoord_list = [] dcoord_list = [] color_list = [] current_color = [0] currently_below_threshold = [False] ivl = [] # list of leaves if color_threshold is None or (isinstance(color_threshold, str) and color_threshold == 'default'): color_threshold = max(Z[:, 2]) * 0.7 R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, 'leaves': lvs, 'color_list': color_list} # Empty list will be filled in _dendrogram_calculate_info contraction_marks = [] if show_contracted else None _dendrogram_calculate_info( Z=Z, p=p, truncate_mode=truncate_mode, color_threshold=color_threshold, get_leaves=get_leaves, orientation=orientation, labels=labels, count_sort=count_sort, distance_sort=distance_sort, show_leaf_counts=show_leaf_counts, i=2*n - 2, iv=0.0, ivl=ivl, n=n, icoord_list=icoord_list, dcoord_list=dcoord_list, lvs=lvs, current_color=current_color, color_list=color_list, currently_below_threshold=currently_below_threshold, leaf_label_func=leaf_label_func, contraction_marks=contraction_marks, link_color_func=link_color_func, above_threshold_color=above_threshold_color) if not no_plot: mh = max(Z[:, 2]) _plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation, no_labels, color_list, leaf_font_size=leaf_font_size, leaf_rotation=leaf_rotation, contraction_marks=contraction_marks, ax=ax, above_threshold_color=above_threshold_color) R["leaves_color_list"] = _get_leaves_color_list(R) return R def _get_leaves_color_list(R): leaves_color_list = [None] * len(R['leaves']) for link_x, link_y, link_color in zip(R['icoord'], R['dcoord'], R['color_list']): for (xi, yi) in zip(link_x, link_y): if yi == 0.0: # if yi is 0.0, the point is a leaf # xi of leaves are 5, 15, 25, 35, ... 
(see `iv_ticks`) # index of leaves are 0, 1, 2, 3, ... as below leaf_index = (int(xi) - 5) // 10 # each leaf has a same color of its link. leaves_color_list[leaf_index] = link_color return leaves_color_list def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels): # If the leaf id structure is not None and is a list then the caller # to dendrogram has indicated that cluster id's corresponding to the # leaf nodes should be recorded. if lvs is not None: lvs.append(int(i)) # If leaf node labels are to be displayed... if ivl is not None: # If a leaf_label_func has been provided, the label comes from the # string returned from the leaf_label_func, which is a function # passed to dendrogram. if leaf_label_func: ivl.append(leaf_label_func(int(i))) else: # Otherwise, if the dendrogram caller has passed a labels list # for the leaf nodes, use it. if labels is not None: ivl.append(labels[int(i - n)]) else: # Otherwise, use the id as the label for the leaf.x ivl.append(str(int(i))) def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func, i, labels, show_leaf_counts): # If the leaf id structure is not None and is a list then the caller # to dendrogram has indicated that cluster id's corresponding to the # leaf nodes should be recorded. 
if lvs is not None: lvs.append(int(i)) if ivl is not None: if leaf_label_func: ivl.append(leaf_label_func(int(i))) else: if show_leaf_counts: ivl.append("(" + str(int(Z[i - n, 3])) + ")") else: ivl.append("") def _append_contraction_marks(Z, iv, i, n, contraction_marks): _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks) _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks) def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks): if i >= n: contraction_marks.append((iv, Z[i - n, 2])) _append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks) _append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks) def _dendrogram_calculate_info(Z, p, truncate_mode, color_threshold=np.inf, get_leaves=True, orientation='top', labels=None, count_sort=False, distance_sort=False, show_leaf_counts=False, i=-1, iv=0.0, ivl=[], n=0, icoord_list=[], dcoord_list=[], lvs=None, mhr=False, current_color=[], color_list=[], currently_below_threshold=[], leaf_label_func=None, level=0, contraction_marks=None, link_color_func=None, above_threshold_color='C0'): """ Calculate the endpoints of the links as well as the labels for the the dendrogram rooted at the node with index i. iv is the independent variable value to plot the left-most leaf node below the root node i (if orientation='top', this would be the left-most x value where the plotting of this root node i and its descendents should begin). ivl is a list to store the labels of the leaf nodes. The leaf_label_func is called whenever ivl != None, labels == None, and leaf_label_func != None. When ivl != None and labels != None, the labels list is used only for labeling the leaf nodes. When ivl == None, no labels are generated for leaf nodes. When get_leaves==True, a list of leaves is built as they are visited in the dendrogram. 
Returns a tuple with l being the independent variable coordinate that corresponds to the midpoint of cluster to the left of cluster i if i is non-singleton, otherwise the independent coordinate of the leaf node if i is a leaf node. Returns ------- A tuple (left, w, h, md), where: * left is the independent variable coordinate of the center of the the U of the subtree * w is the amount of space used for the subtree (in independent variable units) * h is the height of the subtree in dependent variable units * md is the ``max(Z[*,2]``) for all nodes ``*`` below and including the target node. """ if n == 0: raise ValueError("Invalid singleton cluster count n.") if i == -1: raise ValueError("Invalid root cluster index i.") if truncate_mode == 'lastp': # If the node is a leaf node but corresponds to a non-singleton # cluster, its label is either the empty string or the number of # original observations belonging to cluster i. if 2*n - p >
<filename>optimade/filtertransformers/elasticsearch.py from typing import Dict, Union, Type, Optional from lark import v_args from elasticsearch_dsl import Q, Text, Keyword, Integer, Field from optimade.filtertransformers import BaseTransformer, Quantity from optimade.server.mappers import BaseResourceMapper __all__ = ("ElasticTransformer",) class ElasticsearchQuantity(Quantity): """Elasticsearch-specific extension of the underlying [`Quantity`][optimade.filtertransformers.base_transformer.Quantity] class. Attributes: name: The name of the quantity as used in the filter expressions. backend_field: The name of the field for this quantity in Elasticsearch, will be ``name`` by default. elastic_mapping_type: A decendent of an `elasticsearch_dsl.Field` that denotes which mapping type was used in the Elasticsearch index. length_quantity: Elasticsearch does not support length of arrays, but we can map fields with array to other fields with ints about the array length. The LENGTH operator will only be supported for quantities with this attribute. has_only_quantity: Elasticsearch does not support exclusive search on arrays, like a list of chemical elements. But, we can order all elements by atomic number and use a keyword field with all elements to perform this search. This only works for elements (i.e. labels in ``CHEMICAL_SYMBOLS``) and quantities with this attribute. nested_quantity: To support optimade's 'zipped tuple' feature (e.g. 'elements:elements_ratios HAS "H":>0.33), we use elasticsearch nested objects and nested queries. This quantity will provide the field for the nested object that contains the quantity (and others). The zipped tuples will only work for quantities that share the same nested object quantity. 
""" name: str backend_field: Optional[str] length_quantity: Optional["ElasticsearchQuantity"] elastic_mapping_type: Optional[Field] has_only_quantity: Optional["ElasticsearchQuantity"] nested_quantity: Optional["ElasticsearchQuantity"] def __init__( self, name: str, backend_field: str = None, length_quantity: "ElasticsearchQuantity" = None, elastic_mapping_type: Field = None, has_only_quantity: "ElasticsearchQuantity" = None, nested_quantity: "ElasticsearchQuantity" = None, ): """Initialise the quantity from its name, aliases and mapping type. Parameters: name: The name of the quantity as used in the filter expressions. backend_field: The name of the field for this quantity in Elasticsearch, will be ``name`` by default. elastic_mapping_type: A decendent of an `elasticsearch_dsl.Field` that denotes which mapping type was used in the Elasticsearch index. length_quantity: Elasticsearch does not support length of arrays, but we can map fields with array to other fields with ints about the array length. The LENGTH operator will only be supported for quantities with this attribute. has_only_quantity: Elasticsearch does not support exclusive search on arrays, like a list of chemical elements. But, we can order all elements by atomic number and use a keyword field with all elements to perform this search. This only works for elements (i.e. labels in ``CHEMICAL_SYMBOLS``) and quantities with this attribute. nested_quantity: To support optimade's 'zipped tuple' feature (e.g. 'elements:elements_ratios HAS "H":>0.33), we use elasticsearch nested objects and nested queries. This quantity will provide the field for the nested object that contains the quantity (and others). The zipped tuples will only work for quantities that share the same nested object quantity. 
""" super().__init__(name, backend_field, length_quantity) self.elastic_mapping_type = ( Keyword if elastic_mapping_type is None else elastic_mapping_type ) self.has_only_quantity = has_only_quantity self.nested_quantity = nested_quantity class ElasticTransformer(BaseTransformer): """Transformer that transforms ``v0.10.1``/`v1.0` grammar parse trees into Elasticsearch queries. Uses elasticsearch_dsl and will produce an `elasticsearch_dsl.Q` instance. """ operator_map = { "<": "lt", "<=": "lte", ">": "gt", ">=": "gte", } _quantity_type: Type[ElasticsearchQuantity] = ElasticsearchQuantity def __init__( self, mapper: BaseResourceMapper = None, quantities: Dict[str, Quantity] = None ): if quantities is not None: self.quantities = quantities super().__init__(mapper=mapper) def _field(self, quantity: Union[str, Quantity], nested: Quantity = None) -> str: """Used to unwrap from `property` to the string backend field name. If passed a `Quantity` (or a derived `ElasticsearchQuantity`), this method returns the backend field name, modulo some handling of nested fields. If passed a string quantity name: - Check that the name does not match a relationship type, raising a `NotImplementedError` if it does. - If the string is prefixed by an underscore, assume this is a provider-specific field from another provider and simply return it. The original `property` rule would have already filtered out provider fields for this backend appropriately as `Quantity` objects. Returns: The field name to use for database queries. 
""" if isinstance(quantity, str): if quantity in self.mapper.RELATIONSHIP_ENTRY_TYPES: raise NotImplementedError( f"Unable to filter on relationships with type {quantity!r}" ) # In this case, the property rule has already filtered out fields # that do not match this provider, so this indicates an "other provider" # field that should be passed over if quantity.startswith("_"): return quantity if nested is not None: return "%s.%s" % (nested.backend_field, quantity.name) return quantity.backend_field def _query_op( self, quantity: Union[ElasticsearchQuantity, str], op: str, value: Union[str, float, int], nested: ElasticsearchQuantity = None, ) -> Q: """Return a range, match, or term query for the given quantity, comparison operator, and value. Returns: An elasticsearch_dsl query. Raises: BadRequest: If the query is not well-defined or is not supported. """ field = self._field(quantity, nested=nested) if op in self.operator_map: return Q("range", **{field: {self.operator_map[op]: value}}) # If quantity is an "other provider" field then use Keyword as the default # mapping type. These queries should not match on anything as the field # is not present in the index. elastic_mapping_type = Keyword if isinstance(quantity, ElasticsearchQuantity): elastic_mapping_type = quantity.elastic_mapping_type if elastic_mapping_type == Text: query_type = "match" elif elastic_mapping_type in [Keyword, Integer]: query_type = "term" else: raise NotImplementedError("Quantity has unsupported ES field type") if op in ["=", ""]: return Q(query_type, **{field: value}) if op == "!=": # != queries must also include an existence check # Note that for MongoDB, `$exists` will include null-valued fields, # where as in ES `exists` excludes them. 
# pylint: disable=invalid-unary-operand-type return ~Q(query_type, **{field: value}) & Q("exists", field=field) def _has_query_op(self, quantities, op, predicate_zip_list): """Returns a bool query that combines the operator calls `_query_op` for each predicate and zipped quantity predicate combination. """ if op == "HAS": kind = "must" # in case of HAS we do a must over the "list" of the one given element elif op == "HAS ALL": kind = "must" elif op == "HAS ANY": kind = "should" elif op == "HAS ONLY": # HAS ONLY comes with heavy limitations, because there is no such thing # in elastic search. Only supported for elements, where we can construct # an anonymous "formula" based on elements sorted by order number and # can do a = comparision to check if all elements are contained # @ml-evs: Disabling this HAS ONLY workaround as tests are not passing raise NotImplementedError( "HAS ONLY queries are not currently supported by the Elasticsearch backend." ) # from optimade.models import CHEMICAL_SYMBOLS, ATOMIC_NUMBERS # if len(quantities) > 1: # raise NotImplementedError("HAS ONLY is not supported with zip") # quantity = quantities[0] # if quantity.has_only_quantity is None: # raise NotImplementedError( # "HAS ONLY is not supported by %s" % quantity.name # ) # def values(): # for predicates in predicate_zip_list: # if len(predicates) != 1: # raise NotImplementedError("Tuples not supported in HAS ONLY") # op, value = predicates[0] # if op != "=": # raise NotImplementedError( # "Predicated not supported in HAS ONLY" # ) # if not isinstance(value, str): # raise NotImplementedError("Only strings supported in HAS ONLY") # yield value # try: # order_numbers = list([ATOMIC_NUMBERS[element] for element in values()]) # order_numbers.sort() # value = "".join( # [CHEMICAL_SYMBOLS[number - 1] for number in order_numbers] # ) # except KeyError: # raise NotImplementedError( # "HAS ONLY is only supported for chemical symbols" # ) # return Q("term", **{quantity.has_only_quantity.name: 
value}) else: raise NotImplementedError(f"Unrecognised operation {op}.") queries = [ self._has_query(quantities, predicates) for predicates in predicate_zip_list ] return Q("bool", **{kind: queries}) def _has_query(self, quantities, predicates): """ Returns a bool query that combines the operator queries ():func:`_query_op`) for quantity pericate combination. """ if len(quantities) != len(predicates): raise ValueError( "Tuple length does not match: %s <o> %s " % (":".join(quantities), ":".join(predicates)) ) if len(quantities) == 1: o, value = predicates[0] return self._query_op(quantities[0], o, value) nested_quantity = quantities[0].nested_quantity same_nested_quantity = any( q.nested_quantity != nested_quantity for q in quantities ) if nested_quantity is None or same_nested_quantity: raise NotImplementedError( "Expression with tuples are
#!/usr/bin/env python # # Author: <NAME> [tg (at) isi (dot) edu] # Created: 2019-10-25 import collections as coll import copy import time from typing import List, Dict, Tuple, Union, Iterator, Set from nlcodec import log, DEF_MIN_CO_EV from nlcodec.codec import Type, Level, Reseved from nlcodec.dstruct import LnNode, MaxHeap from nlcodec.utils import max_RSS from tqdm import tqdm Codes = Dict[int, Tuple[int, ...]] Seq = List[int] Bigram = Tuple[int, int] class BPELearn: """ The core BPE learning algorithm fast implementation using linked lists Note: this implementation takes relatively more RAM; and that is okay for my usecase # TODO: write this in c++ or rust and bind it here """ space_tok = Reseved.SPACE_TOK[0] unk_tok = Reseved.UNK_TOK[0] def __init__(self, seqs: Iterator[Union[Seq, Tuple[Seq, int]]], vocab: List[Type]): # Check one to one map: type.name <-> idx assert len(set(v.idx for v in vocab)) == len(set(v.name for v in vocab)) for i, v in enumerate(vocab): assert i == v.idx self.vocab = vocab self.uni: Dict[int, int] = coll.defaultdict(int) # term freq ; unigrams self.bi: Dict[Bigram, int] = coll.defaultdict(int) # bigram frequencies # Bigram to sequence references self.bi_ixs: Dict[Bigram, Set[LnNode]] = coll.defaultdict(set) self.create_index(seqs) self.validate_index() def create_index(self, seqs): log.info("Going to build corpus stats index; This might take lot of time and memory") n_seqs, n_ignored, n_replaced, bar_msg = 0, 0, 0, '' with tqdm(enumerate(seqs), unit='seqs', dynamic_ncols=True, mininterval=1) as data_bar: for idx, seq in data_bar: freq = 1 # default = 1 freq if isinstance(seq, tuple): # if freq is available seq, freq = seq n_seqs += 1 if idx == 0: # basic validation assert isinstance(seq, list) # first sequence, tokenized assert isinstance(seq[0], int) # sequence's item, should be an int or codepoint if not seq: log.warning(f"Skipping empty sequence at idx {idx + 1}") continue nodes = LnNode.from_seq(seq, freq=freq) assert len(seq) == 
len(nodes) for i in range(len(seq) - 1): # the last position left out bigm = (seq[i], seq[i + 1]) self.bi[bigm] += freq assert nodes[i] not in self.bi_ixs[bigm] self.bi_ixs[bigm].add(nodes[i]) # bigm found at node i self.uni[seq[i]] += freq self.uni[seq[-1]] += freq # the last unigram count; not covered in the above loop bar_msg = f'MaxRSS={max_RSS()[1]}' data_bar.set_postfix_str(bar_msg, refresh=False) log.info(f"Created index; {bar_msg}") @property def vocab_size(self) -> int: return len(self.vocab) def validate_index(self): """ Call this any time to check if the index of uni bi bi_ixs are valid. Raises exception on invalid index :return: """ # This is code doesnt work with fast but new dirty heap updates max_code = max(self.uni) max_idx = max(t.idx for t in self.vocab) if not (max_code < self.vocab_size and max_code <= max_idx): raise ValueError( f'Vocab size is {self.vocab_size}, but max_code is {max_code}; max_idx={max_idx}') if not len(self.bi) == len(self.bi_ixs): raise ValueError(f"|bi|={len(self.bi)} and |bi_idxs|={len(self.bi_ixs)} are not same") for bigm, freq in self.bi.items(): if not freq >= 0: raise ValueError(f"{bigm} has freq {freq}; expected positive val") if not bigm in self.bi_ixs: raise ValueError(f"{bigm} exists in bi but not in bi_ixs") idx_freq = sum(n.freq for n in self.bi_ixs[bigm]) if not freq == idx_freq: raise ValueError( f"{bigm} has freq={freq} bi but has {idx_freq} bi_ixs refs") # less than unigram freqs if not freq <= self.uni[bigm[0]]: raise ValueError(f"{bigm} has freq={freq} bi but {bigm[0]} has {self.uni[bigm[0]]}") if not freq <= self.uni[bigm[1]]: raise ValueError(f"{bigm} has freq={freq} bi but {bigm[1]} has {self.uni[bigm[1]]}") for uni, freq in self.uni.items(): if not freq >= 0: raise ValueError(f"{uni} has freq={freq}; expected positive value") log.info(f"Index is valid") def learn_codes(self, n_merges: int, min_co_evidence, code_level: int, log_every=2) -> List[Type]: """ :param n_merges: how many more merges :param 
min_co_evidence: min evidence (co-occurrence frequency); causes early stop upon failure :param code_level: what level to use for new code types created during merge for instance level=1 for word bpe; level=2 for seq bpe :param log_every: delay, in seconds between logs :return: """ uni, bi_ixs = self.uni, self.bi_ixs heap = MaxHeap(self.bi) heap_dirty = coll.defaultdict(int) # subtractions aren't updated in max-heap, they are here vocab = self.vocab last_log_t = time.time() log.info(f"logs every {log_every} seconds") for i in range(n_merges): # Using MaxHeap for faster lookup of max. But heap gets a bit dirty, so a bit of cleanup max_pair, pair_freq = heap.pop() while max_pair in heap_dirty: # clean all max [airs until a clean value freq_update = heap_dirty.pop(max_pair) assert freq_update < 0 # only decrements are valid. increments make this wrong corr_freq = pair_freq + freq_update # correct value assert corr_freq >= 0, f'{max_pair}:{pair_freq}, Δ={freq_update} = {corr_freq}' if corr_freq > 0: # exclude zero count heap.push(max_pair, corr_freq) max_pair, pair_freq = heap.pop() # here the actual loop begins if pair_freq < min_co_evidence: log.warning(f"Early stop; max evidence found is {pair_freq} " f"but min required is {min_co_evidence}") break new_type_idx = len(vocab) a, b = max_pair if time.time() - last_log_t >= log_every: log.info(f"{(100 * i / n_merges):.2f}% :: {new_type_idx} || {a:4}:{uni[a]:5}" f" || {b:4}:{uni[b]:5} || {pair_freq:,} || {vocab[a].name} {vocab[b].name}") last_log_t = time.time() # code -> bigram (flatten out bigram; resolve interim codes new_type = Type(vocab[a].name + vocab[b].name, idx=new_type_idx, freq=pair_freq, level=code_level, kids=(vocab[a], vocab[b])) vocab.append(new_type) # updates: update bigram and unigram counts uni[new_type_idx] = pair_freq # this bigram is now a new unigram # unigram counts drop ; since some of their bigrams are removed uni[a] -= pair_freq uni[b] -= pair_freq heap_deltas = coll.defaultdict(int) 
update_nodes = bi_ixs.pop(max_pair) # also removed from bi_ixs for node in update_nodes: # -- x a b y -- x_node, b_node = node.left, node.right if node.is_unlinked or (a == b and new_type.idx in (node.val, b_node.val)): # this happens in the cases like "x a a a a y" uni[a] += node.freq uni[b] += node.freq uni[new_type.idx] -= node.freq continue y_node = b_node.right dirty = node.val != a or b_node.val != b # check that the linked list is proper if dirty: log.warning(f'Expected {a, b} but found {node.val, b_node.val}' f'\n {node, b_node}' f'\n--{vocab[a].signature()} ==' f' {vocab[node.val].signature() if node.val != a else "OK"}' f'\n--{vocab[b].signature()} ==' f' {vocab[b_node.val].signature() if b_node.val != b else "OK"}') log.warning(f"a={a}, b={b} || a_node={node}, b_node={b_node}") assert not dirty assert node.freq == b_node.freq # update : x a b y => x R y b_node.delete(unlink=True) # delete() takes care of linking a → y and a ← y new_node = node # reuse a node as new_node/R new_node.val = new_type_idx # reuse a as new_node/R # Note: the above edits to a and b nodes do-not/should-not change __hash__ if x_node: # remove (x_node_val, a) from bi and bi_ixs heap_deltas[(x_node.val, a)] -= x_node.freq if bi_ixs.get((x_node.val, a)): # not sure why 'if' needed here; bi_ixs[(x_node.val, a)].remove(x_node) # add (x_node_val, R) to bi and bi_ixs heap_deltas[(x_node.val, new_type_idx)] += x_node.freq bi_ixs[(x_node.val, new_type_idx)].add(x_node) if y_node: # remove (b, y_node.val) from bi and bi_ixs heap_deltas[(b, y_node.val)] -= b_node.freq if bi_ixs.get((b, y_node.val)): # not sure why 'if' needed here; bi_ixs[(b, y_node.val)].remove(b_node) # add (R, y_node.val) to bi and bi_ixs heap_deltas[(new_type_idx, y_node.val)] += b_node.freq bi_ixs[(new_type_idx, y_node.val)].add(new_node) # however; the counts shouldn't go negative assert uni[a] >= 0 assert uni[b] >= 0 for pair, delta in heap_deltas.items(): if delta > 0: # these are new insertions, and they can go 
directly to heap assert new_type_idx in pair heap.push(pair, delta) elif delta < 0: # one of those subtractions, which cant be directly updated assert new_type_idx not in pair heap_dirty[pair] += delta return vocab @classmethod def prepare_word(cls, word): # mark ending of sequences # # TODO: check: looks like sentence piece adds at the beginning # subword-nmt (senrich et al 2016) did </w> at the end; # 0.2 of subword-nmt puts last char and </w> together return word + cls.space_tok @classmethod def _make_idxs(cls, voc_idx: Dict[str, int], term_freqs: Dict[str, int]) \ -> Iterator[Tuple[Seq, int]]: """Convert character sequences to char indexed seqs""" unk_idx = voc_idx[cls.unk_tok] for word, freq in term_freqs.items(): if word in voc_idx: res = [voc_idx[word]] else: res = [voc_idx.get(ch, unk_idx) for ch in word] yield res, freq @classmethod def _learn_codes(cls, term_freqs: Dict[str, int], vocab: List[Type], vocab_size: int, init_list: List[str] = None, min_co_evidence: int = DEF_MIN_CO_EV) -> List[Type]: """ :param term_freqs: words types and frequencies :param vocab: initial vocab; usually reserved and alphabet :param vocab_size: desired vocabulary size :param init_list:
#!/usr/bin/python3.9 import asyncio import logging import re import time from asyncio import Queue, Task, create_task, wait_for from copy import deepcopy from dataclasses import asdict, dataclass, field from typing import Any, Optional, Union from aiohttp import web import mc_util from forest import utils from forest.core import JSON, Message, PayBot, Response, app from forest.utils import get_secret new_line: str = "\n" @dataclass class PaymentReceipt: """ Result of an individual payment Attributes: sender (str): signal account that sent payment recipient (str): signal account that received payment signal_timestamp (Optional[float]): time of payment notification received from Signal amount (Optional[float]): amount in pmob sent, Mobilecoin blockchain must be queiried for this amount note (Optional[str]): note sent with payment confirmation_timestamp Optional[float]: time of receipt confirmation on the mobilecoin blockchain timeout (bool): flag determining if payment timeout timeout_timestamp (Optional[float]): timeout timestamp """ sender: str recipient: str signal_timestamp: Optional[float] = None amount: Optional[float] = None note: Optional[str] = None confirmation_timestamp: Optional[float] = None timeout: bool = False timeout_timestamp: Optional[float] = None def __eq__(self, other: Any) -> bool: # Measure equality based on amount, sender, recipient and note if isinstance(other, PaymentReceipt): return ( (self.amount == other.amount) and (self.sender == other.sender) and (self.recipient == other.recipient) and (self.note == other.note) ) return False def __repr__(self) -> str: msg = ( f"{new_line}" f"Amount: {self.amount}{new_line}" f"Note: {self.note}{new_line}" ) if self.timeout: return msg + "Timeout before txo confirmation" if isinstance(self.confirmation_timestamp, float): assert self.signal_timestamp msg = ( msg + f"Txo Confirmation Delta: {round(self.confirmation_timestamp - self.signal_timestamp,2)}{new_line}" ) return msg @dataclass class 
TestMessage: """ Represents a message object to or from a bot that roughly models the data fields signal messaging clients expect Attributes: recipient (str): recipient of the message message (Optional[str]): text content of the message group (Optional[str]): target group of the TestMessage endsession (bool): send command to reset session/keystate attachments (Optional[list[str]]): attachment list sender (Optional[str]): sender of the message payment (Optional[tuple[str, Optional[int]]]): payment recipient and amount of Mobilecoin to send to recipient """ recipient: str message: Optional[str] = None group: Optional[str] = None endsession: bool = False attachments: Optional[Union[list[dict[str, str]], list[str]]] = None sender: Optional[str] = None payment: Optional[tuple[str, Optional[int]]] = None @dataclass class TestStep: """ Configuration for an individual test message Attributes: uid (str): unique identifier for step description (str): step description message (str): message to send to bot being tested expected_response (Optional[TestMessage]): expected response from the bot being tested expected_receipt (Optional[PaymentReceipt]): expected payment in response to message from bot being tested delay (float): number of second to wait before executing TestStep """ uid: str description: str message: TestMessage expected_response: Optional[TestMessage] = None expected_receipt: Optional[PaymentReceipt] = None delay: float = 3.0 @dataclass class Test: """ Configuration for a multi-message test Attributes: name (str): unique name for the test description (str): description of the test recipient (str): signal formatted number steps (list[TestStep]): List of test step configurations order (str): Order in which to execute test steps timeout (float): Maximum time test is allowed to run step_timeout (float): Maximum time to wait for replies to sent messages payment_timeout (float): Maximum time to wait for Mobilecoin receipts validate_payments (bool): Require payments be 
confirmed on the MobileCoin blockchain for test to pass payment_validation_strategy (str): Strategy to validate payments. Since payments may not be confirmed in order, payments may be validated by "amount" which will strictly match payment amounts, by "notification_order" which will confirm payments based on order of signal notifications, or "confirmation_order" which will validate tests based on order of their confirmation on the Mobilecoin blockchain """ name: str description: str recipient: str steps: list[TestStep] order: str = "sequential" validate_responses: bool = True timeout: float = 360.0 step_timeout: float = 20.0 payment_timeout: float = 90.0 validate_payments: bool = True payment_validation_strategy: str = "amount" def __post_init__(self) -> None: if self.order not in ("sequential", "paralllel"): raise ValueError("Order must be either sequential or parallel") if self.payment_validation_strategy not in ( "amount", "notification_order", "confirmation_order", ): raise ValueError( "Payments must be validated by amount, notification_order, or confirmation_order" ) self.validate_self() def has_payments(self) -> bool: """ Determine if payments are within test definition Returns: bool: boolean representing existence of payments within test steps """ payment_info = self.validate_payment_tests() if payment_info.get("has_payments"): return True return False def validate_payment_tests(self) -> dict[str, bool]: """ Validates all necessary conditions for payment steps to be valid Returns: dict[str, bool]: dictionary of test attributes related to payment Raises: ValueError: if test payments configured incorrectly """ has_payments = False for step in self.steps: payment = step.message.payment if payment: has_payments = True if not ( isinstance(payment, tuple) and isinstance(payment[0], str) and isinstance(payment[1], int) ): raise ValueError( ( "Payment must be a tuple(recipient(str),amount(int))," " please check your test step initialization for errors" ) ) return 
{"has_payments": has_payments} def validate_self(self) -> None: """ Ensure test configuration is valid Raises: ValueError: if test configuration is invalid """ payment_info = self.validate_payment_tests() logging.info(f"test is valid, test config: {payment_info}") @dataclass class StepResult: """ Data structure representing a text or attachment reply to an individual message Attributes: uid (str): unique id of test step message_sent (Optional[TestMessage]): Message sent to bot being tested expected_reponse (Optional[TestMessage]): Expected message in response to message sent to bot actual_response (Optional[TestMessage]): Actual message received from bot result (Optional[str]): pass/fail status of step, should take on "pass" result if the expected response matches the actual response and "fail" otherwise python_timestamp (Optional[float]): time of message sent to auxin from Tiamat auxin_timestamp (Optional[float]): time auxin sends message to signal server send_delay (Optional[float]): Delay between python request to & auxin send confirmation response_timestamp (Optional[float]): time of message response roundtrip_delta (Optional[float]): total roundtrip time of message """ uid: Optional[str] = None message_sent: Optional[TestMessage] = None expected_response: Optional[TestMessage] = None actual_response: Optional[TestMessage] = None result: Optional[str] = None python_timestamp: Optional[float] = None auxin_timestamp: Optional[float] = None auxin_roundtrip_latency: Optional[float] = None send_delay: Optional[float] = None response_timestamp: Optional[float] = None roundtrip_delta: Optional[float] = None def __repr__(self) -> str: expected = self.expected_response.message if self.expected_response else "None" got = self.actual_response.message if self.actual_response else "None" return f"<expected: '{expected}'; got '{got}'>" @dataclass class TestResult: """ Container holding data of the result of a multi-step test. 
Attributes: test (Test): test definition that was used name (Optional[str]): name of test defintion test_account (str): signal account used to run the test step_results (list[StepResult]): list of StepResult objects containing data on results of individual steps payment_receipts (list[PaymentReceipt]): list of payment receipts received during test expected_receipts (list[PaymentReceipt]): list of receipts expected to be received during test result (str): pass/fail result on test, pass if all payments and messages match expected results, fail otherwise start_time (float): start time of test, -1 indicates value not recorded end_time (float): time of test completion or error, -1 indicates value not recorded runtime (float): elapsed time between start_time and endtime """ test: Test = field(repr=False) name: Optional[str] = None test_account: str = "tester" step_results: list[StepResult] = field(default_factory=list, repr=False) payment_receipts: list[PaymentReceipt] = field(default_factory=list) expected_receipts: list[tuple[TestStep, PaymentReceipt]] = field( default_factory=list ) result: str = "pre_initialization" start_time: float = -1.0 end_time: float = -1.0 runtime: float = -1.0 def __repr__(self) -> str: msg = ( f"Test: {self.test.name}{new_line}" f"Result: {self.result}{new_line}" f"Payments:{new_line}" f"expected receipts:{new_line}" f"{[receipt[1] for receipt in self.expected_receipts]}{new_line}" f"actual receipts:{new_line}" f"{self.payment_receipts}{new_line}" f"Runtime: {round(self.runtime, 2)} seconds" ) return msg def __post_init__(self) -> None: self.name = self.test.name for step in self.test.steps: if isinstance(step.expected_receipt, PaymentReceipt): self.expected_receipts.append((step, step.expected_receipt)) if self.test_account != "tester" and isinstance(self.test_account, str): self.set_recipient(self.test_account) def set_recipient(self, number: str) -> None: """ Sets recipient for TestResult object + any expected receipts Args: number (str): 
Number of the bot performing the test """ logging.info( f"Setting payment recipient as test orchestration account: {number}" ) self.test_account = number if self.expected_receipts: for pair in self.expected_receipts: receipt = pair[1] receipt.recipient = number def all_receipts_confirmed(self) -> bool: """ Determine if all receipts were confirmed on Mobilecoin blockchain Returns: bool: boolean determining if all payments were confirmed """ receipts = self.payment_receipts result = [bool(receipt.confirmation_timestamp) for receipt in receipts] if result: return all(result) return False def receipts_match(self, strategy: str) -> bool: """ Determine if payment receipts match expected receipts along 3 possible strategies ("amount", "notification_order", "confirmation_order") Args: strategy (str): strategy for payment confirmation. "amount"
# <gh_stars>1-10  (scraper metadata; commented out so the module imports cleanly)
from argparse import ArgumentParser

import airsimdroneracinglab as airsim
# import keyboard
import numpy as np
import gym
# import cv2
import pygame as pg
from pathlib import Path
from pyinstrument import Profiler
from platform import system
from collections import deque
import pickle
import os
import copy
import time

np.set_printoptions(precision=2, suppress=True, sign=' ', floatmode='fixed')

SYS_STR = system()
print(f"INFO: detected system: {SYS_STR}")

# pygame constants:
PG_DISPLAY_WIDTH = 800
PG_DISPLAY_HEIGHT = 600
PG_GAME_FOLDER_PATH = Path("drone_racing_dataset_collector")
PG_BACKGROUND_COLOR = (0, 0, 0)
PG_IMG_BACKGROUND_COLOR = (255, 255, 255)
PG_TEXT_COLOR = (255, 255, 255)
PG_GAME_CAPTION = 'Drone Racing Dataset Collector'
PG_TEXT_ANTIALIAS = True

# joystick constants:
DEFAULT_JS1_PITCH_AX = 0
DEFAULT_JS1_ROLL_AX = 1
DEFAULT_JS1_YAW_AX = 2
DEFAULT_JS1_Z_AX = 3
DEFAULT_JS2_PITCH_AX = 0
DEFAULT_JS2_ROLL_AX = 1
DEFAULT_JS2_YAW_AX = 2
DEFAULT_JS2_Z_AX = 3
DEFAULT_JS1_PITCH_GAIN = 0.5
DEFAULT_JS1_ROLL_GAIN = 0.5
DEFAULT_JS1_YAW_GAIN = -2.0
DEFAULT_JS1_Z_GAIN = -0.5
DEFAULT_JS2_PITCH_GAIN = 0.5
DEFAULT_JS2_ROLL_GAIN = 0.5
DEFAULT_JS2_YAW_GAIN = -2.0
DEFAULT_JS2_Z_GAIN = -0.5
JS_CONFIG_TIME = 5.0
JOYSTICKS_SETTINGS_FILE = PG_GAME_FOLDER_PATH / "js.obj"

# airsim constants:
DRONE1_NAME = 'drone_1'  # name of the first drone in the .json file
DRONE2_NAME = 'drone_2'  # name of the second drone in the .json file
DEFAULT_DRONE1_OFFSET = airsim.Vector3r(0.0, 0.0, 0.0)  # start position of the first drone in the .json file
DEFAULT_DRONE2_OFFSET = airsim.Vector3r(0.0, 0.0, 0.0)  # start position of the second drone in the .json file
DISPLAY_MODE = "NO_DISPLAY"  # "WINDOWED_DISPLAY" launches the airsim display alongside the game

# environment constants:
print(SYS_STR)
# CONTINUE_FOR_TIME seems to break rendering on Windows
TIME_STEP_METHOD = 'CONTINUE_FOR_TIME' if SYS_STR == 'Linux' else 'JOIN'
IMG_OBS_NAME = 'front_camera_dc' DEFAULT_CONTROL_API = 'moveByRollPitchYawrateZAsync' DEFAULT_OBS_COORD = "all" # 'dc', 'ned', 'global' or 'all' DEFAULT_ACT_COORD = "ned" # 'dc', 'ned' or 'global' (!: check API support in the environment before changing this) DEFAULT_RF_CONFIG = { # parameters of the reward function 'constant_penalty': -1.0, # constant penalty per time-step 'collision_radius': 0.5, # at this distance from the opponen, the lagging drone dies (don't change for now, this is enforced by the airsim .pak file) 'velocity_gain': 10.0, # not real velocity: difference of distance to next objective between 2 get_reward() 'gate_crossed_reward': 100.0, 'gate_missed_penalty': -100.0, 'collision_penatly': -10, # collision with environment 'death_penalty': -500, # collision with opponent 'death_constant_penalty': 0.0, # after collision with opponent until the end of track (should be at least lower than constant summed penalty when lagging behind and not moving to avoid reward hacking) 'end_of_track_bonus': 100.0, # only when the last gate is crossed 'lag_penalty': -0.5, # constant additional penalty if not leading the way 'kill_reward': 50.0, 'gate_facing_reward_gain': 1.0 } DEFAULT_LEVEL = "Building99_Hard" DEFAULT_TIER = 0 DEFAULT_CLOCK_SPEED = 1.0 DEFAULT_IMG_WIDTH = 320 DEFAULT_IMG_HEIGHT = 240 DEFAULT_TIME_STEP_DURATION = 0.1 DEFAULT_EPISODE_DURATION = 100.0 DEFAULT_TIME_STOP = True DEFAULT_REAL_TIME = False DEFAULT_ACT_THREADING = False Z_TARGETS_START = {"Building99_Hard": 1.0, "Soccer_Field_Easy": 0.0, "Soccer_Field_Medium": 0.0, "ZhangJiaJie_Medium": 3.0} # keyboard constants: FORWARD1 = pg.K_y BACKWARD1 = pg.K_h RIGHT1 = pg.K_u LEFT1 = pg.K_t RIGHTYAW1 = pg.K_j LEFTYAW1 = pg.K_g UP1 = pg.K_o DOWN1 = pg.K_l FORWARD2 = pg.K_KP8 BACKWARD2 = pg.K_KP5 RIGHT2 = pg.K_KP9 LEFT2 = pg.K_KP7 RIGHTYAW2 = pg.K_KP6 LEFTYAW2 = pg.K_KP4 UP2 = pg.K_PAGEUP DOWN2 = pg.K_PAGEDOWN EXIT = pg.K_ESCAPE # exit game RESET = pg.K_q # discards and resets episode SAVE_EPISODES = 
pg.K_s # toggles dataset reccording CONFIG_JS = pg.K_c # calls joystick configuration # dataset constants: DATASET_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'drone_racing_dataset_collector', 'dataset') DEFAULT_PLAYER_1 = 'player_1' DEFAULT_PLAYER_2 = 'player_2' # print options: CHAR_PER_OBS = 40 PRINT_TRANSITIONS = False # True for debugging (prints all the observations in terminal) # others: DFAULT_PROFILER = False # True to profile the code with PyInstrument by default # these function are only for printing transitions in terminal: def quaternion_to_euler(q): x_val = q[0] y_val = q[1] z_val = q[2] w_val = q[3] # roll (x-axis rotation) sinr_cosp = 2 * (w_val * x_val + y_val * z_val) cosr_cosp = 1 - 2 * (x_val * x_val + y_val * y_val) roll = np.arctan2(sinr_cosp, cosr_cosp) # pitch (y-axis rotation) sinp = 2 * (w_val * y_val - z_val * x_val) if np.abs(sinp) >= 1: pitch = np.copysign(np.pi / 2, sinp) # use 90 degrees if out of range else: pitch = np.arcsin(sinp) # yaw (z-axis rotation) siny_cosp = 2 * (w_val * z_val + x_val * y_val) cosy_cosp = 1 - 2 * (y_val * y_val + z_val * z_val) yaw = np.arctan2(siny_cosp, cosy_cosp) return np.array([roll, pitch, yaw]) def print_obs_n(obs_n, rew_n, done_n, convert_quat=True): """ !: this will only work with less than 9 agents if convert_quat==True, this shows the corresponding euler angles of the quaternions """ d = {} nb_agents = len(obs_n) for agent in range(nb_agents): for key, item in obs_n[agent].items(): words = key.split('_') letters = [word[0] for word in words] + [f'{agent}'] new_key = "".join(letters) if not convert_quat: d[new_key] = item else: d[new_key] = quaternion_to_euler(item) if "orientation" in key else item for agent in range(nb_agents): d[f"_r{agent}"] = np.array([rew_n[agent]]) d[f"_d{agent}"] = np.array([done_n[agent]]) lks = list(d.keys()) lks.sort() print("---") for k in lks: if 'fc' in k: # front_camera str = f"{k}:{d[k].shape}" else: str = f"{k}:{d[k]}" idx = int(k[-1]) end = 
"\n" if idx == nb_agents - 1 else " " * (CHAR_PER_OBS - len(str)) print(str, end=end) # These function handle joystick configuration: class JoysticksSettings(object): def __init__(self): self.JS1_PITCH_GAIN = DEFAULT_JS1_PITCH_GAIN self.JS1_ROLL_GAIN = DEFAULT_JS1_ROLL_GAIN self.JS1_YAW_GAIN = DEFAULT_JS1_YAW_GAIN self.JS1_Z_GAIN = DEFAULT_JS1_Z_GAIN self.JS1_PITCH_AX = DEFAULT_JS1_PITCH_AX self.JS1_ROLL_AX = DEFAULT_JS1_ROLL_AX self.JS1_YAW_AX = DEFAULT_JS1_YAW_AX self.JS1_Z_AX = DEFAULT_JS1_Z_AX self.JS2_PITCH_GAIN = DEFAULT_JS2_PITCH_GAIN self.JS2_ROLL_GAIN = DEFAULT_JS2_ROLL_GAIN self.JS2_YAW_GAIN = DEFAULT_JS2_YAW_GAIN self.JS2_Z_GAIN = DEFAULT_JS2_Z_GAIN self.JS2_PITCH_AX = DEFAULT_JS2_PITCH_AX self.JS2_ROLL_AX = DEFAULT_JS2_ROLL_AX self.JS2_YAW_AX = DEFAULT_JS2_YAW_AX self.JS2_Z_AX = DEFAULT_JS2_Z_AX def dump_joysticks_settings(jset): with open(JOYSTICKS_SETTINGS_FILE, 'wb') as f: pickle.dump(jset, f) def load_joysticks_settings(): if JOYSTICKS_SETTINGS_FILE.exists(): print(f"Loading joystick settings from {JOYSTICKS_SETTINGS_FILE}") with open(JOYSTICKS_SETTINGS_FILE, 'rb') as f: jset = pickle.load(f) else: print(f"No setting file found. 
Creating a new one at {JOYSTICKS_SETTINGS_FILE}") jset = JoysticksSettings() dump_joysticks_settings(jset) return jset def configure_joysticks_settings(jset, js, eps=0.05): for i, j in enumerate(js): na = j.get_numaxes() axvals_neutral = np.array([0.0, ] * na) axvals = np.array([0.0, ] * na) axvals_dif = np.array([0.0, ] * na) print(f"--- Now configuring joystick {i} ---") print(f"Number of detected axis: {na}") time.sleep(JS_CONFIG_TIME) print(f"JOYSTICK {i}: Leave all the axis untouched:") time.sleep(JS_CONFIG_TIME) for _ in range(10): time.sleep(JS_CONFIG_TIME / 10) for event in pg.event.get(): if event.type == pg.QUIT: break for ax in range(na): axvals_neutral[ax] = j.get_axis(ax) print(f"JOYSTICK {i}: detected values: {axvals_neutral}") print(f"JOYSTICK {i}: neutral values: {axvals_neutral}") time.sleep(JS_CONFIG_TIME) print(f"JOYSTICK {i}: HOLD FULL PITCH (forward):") time.sleep(JS_CONFIG_TIME) for _ in range(10): time.sleep(JS_CONFIG_TIME / 10) for event in pg.event.get(): if event.type == pg.QUIT: break for ax in range(na): axvals[ax] = j.get_axis(ax) print(f"JOYSTICK {i}: detected values: {axvals}") axvals_dif = axvals - axvals_neutral argmax_ax = np.argmax(np.abs(axvals_dif)) delta = axvals_dif[argmax_ax] print(f"JOYSTICK {i}: selected PITCH axis:{argmax_ax} with a delta of {delta}") if np.abs(delta) <= eps: print("ERROR: delta too small.") time.sleep(JS_CONFIG_TIME) break if i == 0: jset.JS1_PITCH_AX = argmax_ax jset.JS1_PITCH_GAIN = np.sign(delta) * DEFAULT_JS1_PITCH_GAIN else: jset.JS2_PITCH_AX = argmax_ax jset.JS2_PITCH_GAIN = np.sign(delta) * DEFAULT_JS2_PITCH_GAIN time.sleep(JS_CONFIG_TIME) print(f"JOYSTICK {i}: HOLD FULL THROTTLE (up) for {JS_CONFIG_TIME} seconds:") time.sleep(JS_CONFIG_TIME) for _ in range(10): time.sleep(JS_CONFIG_TIME / 10) for event in pg.event.get(): if event.type == pg.QUIT: break for ax in range(na): axvals[ax] = j.get_axis(ax) print(f"JOYSTICK {i}: detected values: {axvals}") axvals_dif = axvals - axvals_neutral argmax_ax = 
np.argmax(np.abs(axvals_dif)) delta = axvals_dif[argmax_ax] print(f"JOYSTICK {i}: selected THROTTLE axis:{argmax_ax} with a delta of {delta}") if np.abs(delta) <= eps: print("ERROR: delta too small.") time.sleep(JS_CONFIG_TIME) break if i == 0: jset.JS1_Z_AX = argmax_ax jset.JS1_Z_GAIN = np.sign(delta) * DEFAULT_JS1_Z_GAIN else: jset.JS2_Z_AX = argmax_ax jset.JS2_Z_GAIN = np.sign(delta) * DEFAULT_JS2_Z_GAIN time.sleep(JS_CONFIG_TIME) print(f"JOYSTICK {i}: HOLD FULL ROLL (right) for {JS_CONFIG_TIME} seconds:") time.sleep(JS_CONFIG_TIME) for _ in range(10): time.sleep(JS_CONFIG_TIME / 10) for event in pg.event.get(): if event.type == pg.QUIT: break for ax in range(na): axvals[ax] = j.get_axis(ax) print(f"JOYSTICK {i}: detected values: {axvals}") axvals_dif = axvals - axvals_neutral argmax_ax = np.argmax(np.abs(axvals_dif)) delta = axvals_dif[argmax_ax] print(f"JOYSTICK {i}: selected ROLL axis:{argmax_ax} with a delta of {delta}") if np.abs(delta) <= eps: print("ERROR: delta too small.") time.sleep(JS_CONFIG_TIME) break if i == 0: jset.JS1_ROLL_AX = argmax_ax jset.JS1_ROLL_GAIN = np.sign(delta) * DEFAULT_JS1_ROLL_GAIN else: jset.JS2_ROLL_AX = argmax_ax jset.JS2_ROLL_GAIN = np.sign(delta) * DEFAULT_JS2_ROLL_GAIN time.sleep(JS_CONFIG_TIME) print(f"JOYSTICK {i}: HOLD FULL YAW (look right) for {JS_CONFIG_TIME} seconds:") time.sleep(JS_CONFIG_TIME) for _ in range(10): time.sleep(JS_CONFIG_TIME / 10) for event in pg.event.get(): if event.type == pg.QUIT: break for ax in range(na): axvals[ax] = j.get_axis(ax) print(f"JOYSTICK {i}: detected values: {axvals}") axvals_dif = axvals - axvals_neutral argmax_ax = np.argmax(np.abs(axvals_dif)) delta = axvals_dif[argmax_ax] print(f"JOYSTICK {i}: selected YAW axis:{argmax_ax} with a delta of {delta}") if np.abs(delta) <= eps: print("ERROR: delta too small.") time.sleep(JS_CONFIG_TIME) break if i == 0: jset.JS1_YAW_AX = argmax_ax jset.JS1_YAW_GAIN = np.sign(delta) * DEFAULT_JS1_YAW_GAIN else: jset.JS2_YAW_AX
<reponame>flowerah/PythoMS """ IGNORE: CHANGELOG: - ---2.7 building to add: try to extract timepoints and tic from chromatogramList (x values are sorted, so this probably won't work) IGNORE """ import sys import os import zlib import gzip import base64 import struct import subprocess import xml.dom.minidom import scipy as sci from random import random from .progress import Progress from .spectrum import Spectrum from .psims import CVParameterSet, stringtodigit from .tome import resolution, locate_in_list, trimspectrum # decoding formats for decoding mzML binary data array strings decode_formats = { 'MS:1000519': ['<', 'i'], # signed 32-bit little-endian integer # 'MS:1000520':['',''], # [OBSOLETE] Signed 16-bit float 'MS:1000521': ['<', 'f'], # 32-bit precision little-endian floating point conforming to IEEE-754 'MS:1000522': ['<', 'l'], # Signed 64-bit little-endian integer 'MS:1000523': ['<', 'd'], # 64-bit precision little-endian floating point conforming to IEEE-754. } class BoundsError(Warning): """A warning class to handle bounds errors when integrating (used only by PyRSIR)""" def __init__(self): self.warned = {} def printwarns(self): """prints the number of warnings if merited""" if len(self.warned) > 0: sys.stdout.write('The following peaks exceeded the bounds of the spectrum n number of times:\n') for name in self.warned: sys.stdout.write('"%s": %d\n' % (name, self.warned[name])) def warn(self, name, intstart, intend, mzstart, mzend): """warns the user if there was a mismatch""" if name not in self.warned: sys.stdout.write( '\nThe peak "%s" (%s-%s) is outside of the bounds of the spectrum being summed m/z %.1f-%.1f\n' % ( name, str(intstart), str(intend), mzstart, mzend)) self.warned[name] = 1 else: self.warned[name] += 1 def branch_attributes(branch: xml.dom.minidom.Element): """ Pulls all the attributes of an xml.dom.minidom xml branch. These are generally things like index, id, etc. :param xml.dom.minidom branch: An xml.dom.minidom object. 
:return: A dictionary of attributes with each key being the attribute name and its value being the value of that attribute. :rtype: dict **Notes** The script will attempt to convert any values to float or integer in order to reduce TypeErrors when trying to use the extracted values. """ return {key: stringtodigit(val) for key, val in branch.attributes.items()} def branch_cvparams(branch): """ Interprets an xml branch as CVParams :param branch: :return: controlled value parameter set with values :rtype: CVParameterSet """ out = {} for cvParam in branch.getElementsByTagName('cvParam'): acc = cvParam.getAttribute('accession') # accession key out[acc] = {} for attribute, value in cvParam.attributes.items(): # pull all the attributes if attribute != 'accession': # attempt to convert to integer or float, keep as string otherwise out[acc][attribute] = stringtodigit(value) return CVParameterSet(**out) def file_present(filepath): """checks for the presence of the specified file or directory in the current working directory""" tf = os.path.isfile(filepath) # look for file first if tf is False: # if file cannot be found, look for directory tf = os.path.isdir(filepath) return tf def decodeformat(p: CVParameterSet, speclen: int): """ Determines the decode format from the accession parameter :param p: extracted CVParamterSet of the data array :param speclen: length of the spectrum (retrievable from the XML file) :return: decode format :rtype: str """ for key in set(decode_formats) & p.keys(): # find the set combination of the possibilities return f'{decode_formats[key][0]}{speclen}{decode_formats[key][1]}' # create the decode format def gettext(nodelist): """gets text from a simple XML object""" rc = [] for node in nodelist: if node.nodeType == node.TEXT_NODE: rc.append(node.data) return ''.join(rc) def extract_spectrum(spectrum: xml.dom.minidom.Element, units: bool = False): """ Extracts and converts binary data to two lists. :param spectrum: A spectrum branch element. 
This element is expected to have two child nodes containing binaryDataArrays. :param units: whether to extract the units from the spectrum :return: """ """pulls and converts binary data to a list""" # spectrum length (defined in the spectrum attricubes) speclen = int(spectrum.getAttribute('defaultArrayLength')) out = [] if units is True: units = [] for binary in spectrum.getElementsByTagName('binaryDataArray'): p = branch_cvparams(binary) # grab cvparameters # determine whether the binary string is zlib compressed compressed = True if 'MS:1000574' in p else False # determine unpack format unpack_format = decodeformat(p, speclen) # pull the binary string string = gettext(binary.getElementsByTagName('binary')[0].childNodes) # decode the string decoded = base64.standard_b64decode(string) # if the string is compressed, decompress if compressed is True: decoded = zlib.decompress(decoded) # unpack the string out.append(list(struct.unpack(unpack_format, decoded))) if units is not False: for cv in p: if cv.unit is not None: units.append(cv.unit) break if units is not False: # extends the units onto out out.extend(units) return out def pw_convert(filename, bit=64, compression=True, gzip=True, verbose=True): """ Runs msconvert.exe from ProteoWizard to convert Waters .RAW format to .mzXML which can then be parsed by python. module requirements: os, subprocess, sys ProteoWizard must be installed for this script to function. go to http://proteowizard.sourceforge.net/downloads.shtml to download This script assumes that the ProteoWizard is installed under either c:\program files\proteowizard or c:\program files (x86)\proteowizard If you use this python script to convert to mzML, you should cite the paper of the folks who wrote the program Chambers, M.C. Nature Biotechnology 2012, 30, 918-920 doi 10.1038/nbt.2377 """ def find_all(fname, path): """ Finds all files of a given name within a specified directory. 
Adapted from http://stackoverflow.com/questions/1724693/find-a-file-in-python Module dependancies: os """ locations = [] for root, dirs, files in os.walk(path): if fname in files: locations.append(os.path.join(root, fname)) return locations if sys.platform != 'win32': raise OSError( 'The function that converts to mzML is limited to Windows operating systems.\n' 'You can manually convert to *.mzML using the proteowizard standalone package ' 'and supply that mzML file to this script') locs = [] for val in ['c:\\program files\\proteowizard', 'c:\\program files (x86)\\proteowizard']: # searches for msconvert.exe in expected folders locs.extend(find_all('msconvert.exe', val)) if len(locs) == 0: # if script cannot find msconvert.exe raise IOError( 'The python script could not find msconvert.exe\n' 'Please ensure that ProteoWizard is installed in either:\n' 'c:\\program files\\proteowizard\nor\nc:\\program files (x86)\\proteowizard') outname = filename[:-4] + '.mzML' callstring = locs[-1] + ' "' + filename + '" --mzML' if bit in [32, 64]: callstring += ' --' + str(bit) else: raise ValueError( 'ProteoWizard conversion was called with an invalid floating point precision "%s".' 
% str(bit)) if compression is True: # call for compression callstring += ' --zlib' exten = '*.mzML' if gzip is True: # call to gzip entire mzml callstring += ' --gzip' outname += '.gz' exten += '.gz' print('callstring', callstring) if verbose is True: callstring += ' --verbose' sys.stdout.write('Generating %s file from %s' % (exten, filename)) sys.stdout.flush() subprocess.call(callstring) sys.stdout.write(' DONE\n') sys.stdout.flush() else: subprocess.call(callstring) return outname def fix_extension(fn): """tries to fix invalid file extensions""" oopsx = {'.mzm': 'l', '.mz': 'ml', '.m': 'zml', '.': 'mzml'} # incomplete mzml extensions oopsr = {'.ra': 'w', '.r': 'aw', '.': 'raw'} # incomplete raw extionsions oopsg = {'.mzml.g': 'z', '.mzml.': 'gz', '.mzml': '.gz', '.mzm': 'l.gz', '.mz': 'ml.gz', '.m': 'zml.gz', '.': 'mzml.gz'} # incomplete gz extensions # looks for missing extensions first if file_present(fn + '.mzml.gz') is True: return fn + '.mzml.gz' if file_present(fn + '.mzml') is True: return fn + '.mzml' for key in oopsg: # tries to complete mzml.gz shortenings if fn.lower().endswith(key) is True: if file_present(fn + oopsg[key]) is True: return fn + oopsg[key] for key in oopsx: # tries to complete mzml shortenings if fn.lower().endswith(key) is True: if file_present(fn + oopsx[key]) is True: return fn + oopsx[key] for key in oopsr: # tries to complete raw shortenings if fn.lower().endswith(key) is True: if file_present(fn + oopsr[key]) is True: return fn + oopsr[key] if file_present(fn + '.raw') is True: # finally looks for raw file return fn + '.raw' raise FileNotFoundError(f'The file {fn} could not be located in the current working directory') def fps(branch): """ extracts function #, process #, and scan # from the idstring of a spectrum branch returns function, process, scan as integers """ idstring = branch.getAttribute('id').split() # pull id string from scan attribute return [int(x.split('=')[1]) for x in idstring] # return each value after 
converting to integer def scan_properties(hand): """determines the scan properties of the provided spectrum""" mstypes = { # ms accession keys and their respective names (for spectrum identification) 'MS:1000928': 'calibration spectrum', 'MS:1000294': 'mass spectrum', 'MS:1000322': 'charge inversion mass spectrum', 'MS:1000325': 'constant neutral gain spectrum', 'MS:1000326': 'constant neutral loss spectrum', 'MS:1000328': 'e/2 mass spectrum', 'MS:1000341': 'precursor ion spectrum', 'MS:1000343': 'product ion spectrum', 'MS:1000579': 'MS1 spectrum', 'MS:1000580': 'MSn spectrum', 'MS:1000581': 'CRM spectrum', 'MS:1000582': 'SIM spectrum', 'MS:1000583':
# execute essa linha de código para importar as funções do 'plot_helper' import numpy from numpy.linalg import inv, eig from math import ceil from matplotlib import pyplot, ticker, get_backend, rc from mpl_toolkits.mplot3d import Axes3D from itertools import cycle # interactive backends _int_backends = ['GTK3Agg', 'GTK3Cairo', 'MacOSX', 'nbAgg', 'Qt4Agg', 'Qt4Cairo', 'Qt5Agg', 'Qt5Cairo', 'TkAgg', 'TkCairo', 'WebAgg', 'WX', 'WXAgg', 'WXCairo'] _backend = get_backend() # get current backend name # shrink figsize and fontsize when using %matplotlib notebook if _backend in _int_backends: fontsize = 4 fig_scale = 0.75 else: fontsize = 5 fig_scale = 1 grey = '#808080' gold = '#cab18c' # x-axis grid lightblue = '#0096d6' # y-axis grid green = '#008367' # x-axis basis vector red = '#E31937' # y-axis basis vector darkblue = '#004065' pink, yellow, orange, purple, brown = '#ef7b9d', '#fbd349', '#ffa500', '#a35cff', '#731d1d' quiver_params = {'angles': 'xy', 'scale_units': 'xy', 'scale': 1, 'width': 0.012} grid_params = {'linewidth': 0.5, 'alpha': 0.8} def set_rc(func): def wrapper(*args, **kwargs): rc('font', family='serif', size=fontsize) rc('figure', dpi=200) rc('axes', axisbelow=True, titlesize=5) rc('lines', linewidth=1) func(*args, **kwargs) return wrapper @set_rc def plot_vector(vectors, tails=None): ''' Draw 2d vectors based on the values of the vectors and the position of their tails. Parameters ---------- vectors : list. List of 2-element array-like structures, each represents a 2d vector. tails : list, optional. List of 2-element array-like structures, each represents the coordinates of the tail of the corresponding vector in vectors. If None (default), all tails are set at the origin (0,0). If len(tails) is 1, all tails are set at the same position. Otherwise, vectors and tails must have the same length. 
Examples -------- >>> v = [(1, 3), (3, 3), (4, 6)] >>> plot_vector(v) # draw 3 vectors with their tails at origin >>> t = [numpy.array((2, 2))] >>> plot_vector(v, t) # draw 3 vectors with their tails at (2,2) >>> t = [[3, 2], [-1, -2], [3, 5]] >>> plot_vector(v, t) # draw 3 vectors with 3 different tails ''' vectors = numpy.array(vectors) assert vectors.shape[1] == 2, "Each vector should have 2 elements." if tails is not None: tails = numpy.array(tails) assert tails.shape[1] == 2, "Each tail should have 2 elements." else: tails = numpy.zeros_like(vectors) # tile vectors or tails array if needed nvectors = vectors.shape[0] ntails = tails.shape[0] if nvectors == 1 and ntails > 1: vectors = numpy.tile(vectors, (ntails, 1)) elif ntails == 1 and nvectors > 1: tails = numpy.tile(tails, (nvectors, 1)) else: assert tails.shape == vectors.shape, "vectors and tail must have a same shape" # calculate xlimit & ylimit heads = tails + vectors limit = numpy.max(numpy.abs(numpy.hstack((tails, heads)))) limit = numpy.ceil(limit * 1.2) # add some margins figsize = numpy.array([2,2]) * fig_scale figure, axis = pyplot.subplots(figsize=figsize) axis.quiver(tails[:,0], tails[:,1], vectors[:,0], vectors[:,1], color=darkblue, angles='xy', scale_units='xy', scale=1) axis.set_xlim([-limit, limit]) axis.set_ylim([-limit, limit]) axis.set_aspect('equal') # if xticks and yticks of grid do not match, choose the finer one xticks = axis.get_xticks() yticks = axis.get_yticks() dx = xticks[1] - xticks[0] dy = yticks[1] - yticks[0] base = max(int(min(dx, dy)), 1) # grid interval is always an integer loc = ticker.MultipleLocator(base=base) axis.xaxis.set_major_locator(loc) axis.yaxis.set_major_locator(loc) axis.grid(True, **grid_params) # show x-y axis in the center, hide frames axis.spines['left'].set_position('center') axis.spines['bottom'].set_position('center') axis.spines['right'].set_color('none') axis.spines['top'].set_color('none') @set_rc def plot_transformation_helper(axis, matrix, 
*vectors, unit_vector=True, unit_circle=False, title=None): """ A helper function to plot the linear transformation defined by a 2x2 matrix. Parameters ---------- axis : class matplotlib.axes.Axes. The axes to plot on. matrix : class numpy.ndarray. The 2x2 matrix to visualize. *vectors : class numpy.ndarray. The vector(s) to plot along with the linear transformation. Each array denotes a vector's coordinates before the transformation and must have a shape of (2,). Accept any number of vectors. unit_vector : bool, optional. Whether to plot unit vectors of the standard basis, default to True. unit_circle: bool, optional. Whether to plot unit circle, default to False. title: str, optional. Title of the plot. """ assert matrix.shape == (2,2), "the input matrix must have a shape of (2,2)" grid_range = 20 x = numpy.arange(-grid_range, grid_range+1) X_, Y_ = numpy.meshgrid(x,x) I = matrix[:,0] J = matrix[:,1] X = I[0]*X_ + J[0]*Y_ Y = I[1]*X_ + J[1]*Y_ origin = numpy.zeros(1) # draw grid lines for i in range(x.size): axis.plot(X[i,:], Y[i,:], c=gold, **grid_params) axis.plot(X[:,i], Y[:,i], c=lightblue, **grid_params) # draw (transformed) unit vectors if unit_vector: axis.quiver(origin, origin, [I[0]], [I[1]], color=green, **quiver_params) axis.quiver(origin, origin, [J[0]], [J[1]], color=red, **quiver_params) # draw optional vectors color_cycle = cycle([pink, darkblue, orange, purple, brown]) if vectors: for vector in vectors: color = next(color_cycle) vector_ = matrix @ vector.reshape(-1,1) axis.quiver(origin, origin, [vector_[0]], [vector_[1]], color=color, **quiver_params) # draw optional unit circle if unit_circle: alpha = numpy.linspace(0, 2*numpy.pi, 41) circle = numpy.vstack((numpy.cos(alpha), numpy.sin(alpha))) circle_trans = matrix @ circle axis.plot(circle_trans[0], circle_trans[1], color=red, lw=0.8) # hide frames, set xlimit & ylimit, set title limit = 4 axis.spines['left'].set_position('center') axis.spines['bottom'].set_position('center') 
axis.spines['left'].set_linewidth(0.3) axis.spines['bottom'].set_linewidth(0.3) axis.spines['right'].set_color('none') axis.spines['top'].set_color('none') axis.set_xlim([-limit, limit]) axis.set_ylim([-limit, limit]) if title is not None: axis.set_title(title) @set_rc def plot_linear_transformation(matrix, *vectors, unit_vector=True, unit_circle=False): """ Plot the linear transformation defined by a 2x2 matrix using the helper function plot_transformation_helper(). It will create 2 subplots to visualize some vectors before and after the transformation. Parameters ---------- matrix : class numpy.ndarray. The 2x2 matrix to visualize. *vectors : class numpy.ndarray. The vector(s) to plot along with the linear transformation. Each array denotes a vector's coordinates before the transformation and must have a shape of (2,). Accept any number of vectors. unit_vector : bool, optional. Whether to plot unit vectors of the standard basis, default to True. unit_circle: bool, optional. Whether to plot unit circle, default to False. """ figsize = numpy.array([4,2]) * fig_scale figure, (axis1, axis2) = pyplot.subplots(1, 2, figsize=figsize) plot_transformation_helper(axis1, numpy.identity(2), *vectors, unit_vector=unit_vector, unit_circle=unit_circle, title='Antes da Transformação') plot_transformation_helper(axis2, matrix, *vectors, unit_vector=unit_vector, unit_circle=unit_circle, title='Depois da Transformação') @set_rc def plot_linear_transformations(*matrices, unit_vector=True, unit_circle=False): """ Plot the linear transformation defined by a sequence of n 2x2 matrices using the helper function plot_transformation_helper(). It will create n+1 subplots to visualize some vectors before and after each transformation. Parameters ---------- *matrices : class numpy.ndarray. The 2x2 matrices to visualize. Accept any number of matrices. unit_vector : bool, optional. Whether to plot unit vectors of the standard basis, default to True. unit_circle: bool, optional. 
Whether to plot unit circle, default to False. """ nplots = len(matrices) + 1 nx = 2 ny = ceil(nplots/nx) figsize = numpy.array([2*nx, 2*ny]) * fig_scale figure, axes = pyplot.subplots(nx, ny, figsize=figsize) for i in range(nplots): # fig_idx if i == 0: matrix_trans = numpy.identity(2) title = 'Antes da Transformação' else: matrix_trans = matrices[i-1] @ matrix_trans if i == 1: title = 'Depois de {} Trasnformação'.format(i) else: title = 'Depois de {} Transformações'.format(i) plot_transformation_helper(axes[i//nx, i%nx], matrix_trans, unit_vector=unit_vector, unit_circle=unit_circle, title=title) # hide axes of the extra subplot (only when nplots is an odd number) if nx*ny > nplots: axes[-1,-1].axis('off') @set_rc def plot_3d_transformation_helper(axis, matrix, grid=True, unit_sphere=False, title=None): """ A helper function to plot the linear transformation defined by a 3x3 matrix. Parameters ---------- axis : class matplotlib.axes.Axes. The axes to plot on. matrix : class numpy.ndarray. The 3x3 matrix to visualize. grid : bool, optional. Whether to plot 3d grid lines, default to True. unit_sphere : bool, optional. Whether to plot unit sphere, default to False. title : str, optional. Title of the plot. 
""" assert matrix.shape == (3,3), "the input matrix must have a shape of (3,3)" xcolor, ycolor, zcolor = '#0084b6', '#d8a322', '#FF3333' linewidth = 0.7 if grid: grid_range = 2 x = numpy.arange(-grid_range, grid_range+1) X, Y, Z = numpy.meshgrid(x,x,x) X_new = matrix[0,0]*X + matrix[0,1]*Y + matrix[0,2]*Z Y_new = matrix[1,0]*X + matrix[1,1]*Y + matrix[1,2]*Z Z_new = matrix[2,0]*X + matrix[2,1]*Y + matrix[2,2]*Z for i in range(x.size): for j in range(x.size): axis.plot(X_new[:,i,j], Y_new[:,i,j], Z_new[:,i,j], color=xcolor, linewidth=linewidth) axis.plot(X_new[i,:,j], Y_new[i,:,j], Z_new[i,:,j], color=ycolor, linewidth=linewidth) axis.plot(X_new[i,j,:], Y_new[i,j,:], Z_new[i,j,:], color=zcolor, linewidth=linewidth) if unit_sphere: u = numpy.linspace(0, 2 * numpy.pi, 100) v = numpy.linspace(0, numpy.pi, 100) X = 1 * numpy.outer(numpy.cos(u), numpy.sin(v)) Y = 1 * numpy.outer(numpy.sin(u), numpy.sin(v)) Z = 1 * numpy.outer(numpy.ones(numpy.size(u)), numpy.cos(v)) X_new =
""" Return container object, with each array entry in the container cast to a list """ def to_list(x, _=''): try: return self._ivy.to_list(x) except (AttributeError, ValueError): return x return self.map(to_list) def reshape_like(self, target_dict, leading_shape=None, return_cont=None): """ Set shapes of container entries to shapes specified by new container with the same key structure :return: new container with values of updated shapes """ leading_shape = self._ivy.default(leading_shape, list()) if return_cont is None: return_cont = self.copy() for (_, v_shape), (k, v) in zip(target_dict.items(), return_cont.items()): if isinstance(v_shape, dict): return_cont[k] = self.reshape_like(v_shape, leading_shape, return_cont[k]) else: return_cont[k] = self._ivy.reshape(v, leading_shape + list(v_shape)) return Container(return_cont, **self._config) def create_if_absent(self, key, value, inplace=True): """ Add a key to the container with corresponding value, if it is not already present. otherwise, do nothing. """ if key in self: return self.set_at_key_chain(key, value, inplace) def if_exists(self, key): """ Returns the sub-container at the following key if it exists, otherwise None. """ try: return self[key] except KeyError: return def try_kc(self, key): """ Tries the following key or key chain, returning self if not present. 
""" try: return self[key] except KeyError: return self def with_print_limit(self, print_limit): return Container(self, **{**self._config, **{'print_limit': print_limit, 'rebuild_child_containers': True}}) # noinspection PyTypeChecker def remove_print_limit(self): return self.with_print_limit(None) def with_print_indent(self, print_indent): return Container(self, **{**self._config, **{'print_indent': print_indent, 'rebuild_child_containers': True}}) def with_print_line_spacing(self, print_line_spacing): return Container(self, **{**self._config, **{'print_line_spacing': print_line_spacing, 'rebuild_child_containers': True}}) # Built-ins # # ----------# def __repr__(self, as_repr=True): indent_str = ' '*self._print_indent def _align_array(array_str_in): array_str_in_split = array_str_in.split('([') leading_str_to_keep = array_str_in_split[0].replace('\\n', '') indented_key_size = len(leading_str_to_keep.replace('"', '').split(': ')[0]) indented_key_str = ' '*(indented_key_size+2) padded = False def _pre_pad_alpha_line(str_in): nonlocal padded padded = True return '\\n' + indent_str + indented_key_str + str_in leading_str_to_keep = ', '.join([_pre_pad_alpha_line(s) if s[0].isalpha() and i != 0 else s for i, s in enumerate(leading_str_to_keep.split(', '))]) local_indent_str = '' if padded else indent_str leading_str = leading_str_to_keep.split('\\n')[-1].replace('"', '') remaining_str = array_str_in_split[1] num_extra_dims = 0 for i, char in enumerate(remaining_str): if char != '[': num_extra_dims = i break extra_indent = (len(leading_str) + 1 + num_extra_dims) * ' ' array_str_in = '(['.join([leading_str_to_keep, remaining_str]) uniform_indent_wo_overflow = array_str_in.replace('\\n[', '\n' + local_indent_str + extra_indent + '[') uniform_indent = '\n'.join([local_indent_str + extra_indent + ' ' + s if (s[0].isnumeric() or s[0] == '-' or s[0:3] == '...' 
                                            or max([ss in s[0:6] for ss in ['nan, ', 'inf, ']]))
                                        else (indent_str + indented_key_str + s
                                              if (not s[0].isspace() and s[0] != '"')
                                              else s)
                                        for s in uniform_indent_wo_overflow.split('\\n')])
            indented = uniform_indent
            # 10 dimensions is a sensible upper bound for the number in a single array
            for i in range(2, 10):
                indented = indented.replace(' '*(i-1) + '['*i, '['*i)
            # Drop blank / whitespace-only lines.
            indented = '\n'.join([s for s in indented.split('\n') if bool(s) and not s.isspace()])
            return indented

        def _align_arrays(str_in):
            # Apply _align_array to every chunk that still contains an escaped newline.
            chunks = str_in.split('\n' + indent_str)
            aligned_array_chunks = {i: _align_array(c) for i, c in enumerate(chunks) if '\\n' in c}
            chunks = [aligned_array_chunks[i] if i in aligned_array_chunks else c_orig
                      for i, c_orig in enumerate(chunks)]
            return ('\n' + indent_str).join(chunks)

        # Build a plain dict of printable representations for every entry.
        new_dict = dict()
        for k, v in self.items():
            if isinstance(v, Container):
                # noinspection PyArgumentList
                rep = v.__repr__(as_repr=False)
            else:
                # Arrays exceeding the print limit are summarised as (type, "shape=", [dims]).
                if self._ivy.is_array(v) and len(list(v.shape)) > 0 and _ivy.exists(self._print_limit) and \
                        _reduce(_mul, v.shape) > self._print_limit:
                    rep = (type(v), "shape=", list(v.shape))
                elif isinstance(v, (list, tuple)) and v and self._ivy.is_array(v[0]):
                    # Lists/tuples of arrays: show length and the first element's shape.
                    rep = ("list[{}]".format(len(v)), type(v[0]), "shape=", list(v[0].shape))
                else:
                    rep = v
            new_dict[k] = rep
        if as_repr:
            # JSON-dump the representation dict, then re-align the array bodies.
            json_dumped_str = _align_arrays(_json.dumps(
                Container(new_dict, **self._config).map(
                    lambda x, kc: x if _is_jsonable(x)
                    else _repr(x).replace(' ', '').replace(',', ', ')).to_dict(),
                indent=self._print_indent))

            def _add_newline(str_in):
                # Insert the configured blank-line spacing before the final segment.
                str_in_split = str_in.split('\n')
                str_split_size = len(str_in_split)
                return '\n'.join([('\n'*self._print_line_spacing + ss) if i == (str_split_size-1)
                                  else ss for i, ss in enumerate(str_in_split)])

            json_dumped_str = '":'.join([_add_newline(s) for s in json_dumped_str.split('":')])
            # improve tf formatting
            if _ivy.framework_stack and _ivy.current_framework_str() == 'tensorflow':
                json_dumped_str_split = json_dumped_str.split("\'Variable:")
                json_dumped_str =
json_dumped_str_split[0] + ', ' + ', '.join(["\'".join(ss.split("\'")[1:])
                                                               for ss in json_dumped_str_split[1:]])
                json_dumped_str = json_dumped_str.replace(':shape', ', shape').replace(')dtype=', '), dtype=').replace(
                    ', ),', ',),')
            # make keys green
            json_dumped_str_split = json_dumped_str.split('":')
            split_size = len(json_dumped_str_split)
            json_dumped_str =\
                '":'.join([' "'.join(sub_str.split(' "')[:-1] +
                                     [termcolor.colored(sub_str.split(' "')[-1], 'green')])
                           if i < split_size - 1 else sub_str
                           for i, sub_str in enumerate(json_dumped_str_split)])
            # remove quotation marks, shape tuple, and color other elements of the dict
            ret = json_dumped_str.replace('"', '').replace(", 'shape=', [", " shape=[").replace(
                ':', termcolor.colored(':', 'magenta')).replace('{', termcolor.colored('{', 'blue')).replace(
                '}', termcolor.colored('}', 'blue')).replace('shape=', termcolor.colored('shape=', 'magenta')).replace(
                'device=', termcolor.colored('device=', 'magenta')).replace("<class'", "<class '").replace(
                "'", "").replace('<class', '<' + termcolor.colored('class', 'blue'))
            # ToDo: make the solution below more elegant
            # Highlight well-known key prefixes in red (diff_0 .. diff_9).
            for i in range(10):
                ret = ret.replace('diff_{}'.format(i), termcolor.colored('diff_{}'.format(i), 'red'))
            for keyword, color in self._keyword_color_dict.items():
                ret = ret.replace(keyword, termcolor.colored(keyword, color))
            return ret
        # as_repr=False: return the raw representation dict for the recursive caller.
        return new_dict

    def __dir__(self):
        # Expose dict keys as attributes for tab-completion.
        # NOTE(review): bare `super` (no call) — `super.__dir__(self)` resolves to
        # object's __dir__ via the super type; works, but `super().__dir__()` would
        # be the conventional spelling. Confirm before changing.
        return list(super.__dir__(self)) + list(self.keys())

    def __getattr__(self, item):
        # Attribute access falls back to dict-key lookup (cont.a == cont['a']).
        try:
            return dict.__getitem__(self, item)
        except KeyError:
            # noinspection PyUnresolvedReferences
            # NOTE(review): bare `super` again — this lookup raises AttributeError
            # for missing keys, which is presumably the intended failure mode; verify.
            return super.__getattr__(item)

    def __setattr__(self, name, value):
        # Public attribute assignment writes a dict entry; names starting with
        # '_' (internal config/state) are stored as real attributes.
        if name[0] != '_':
            self[name] = value
        else:
            super.__setattr__(self, name, value)

    def _get_queue_item(self, query):
        # Normalise the query into the list of flat indices it touches.
        if isinstance(query, int):
            queue_queries = [query]
        elif isinstance(query, slice):
            queue_queries = list(range(query.start, query.stop, _ivy.default(query.step, 1)))
        elif isinstance(query, (list, tuple)):
            queue_queries = list(range(query[0].start,
query[0].stop, _ivy.default(query[0].step, 1)))
        else:
            raise Exception('Invalid slice type, must be one of integer, slice, or sequences of slices.')
        # Map each flat index to the queue that holds it, via the cumulative load sizes.
        queue_idxs = set([_np.sum(q >= self._queue_load_sizes_cum).item() for q in queue_queries])
        conts = list()
        for i in queue_idxs:
            if i not in self._loaded_containers_from_queues:
                # Lazily pull the container off its queue and cache it.
                cont = Container(self._queues[i].get(timeout=self._queue_timeout), **self._config)
                if _ivy.wrapped_mode():
                    cont = cont.to_ivy()
                self._loaded_containers_from_queues[i] = cont
            else:
                cont = self._loaded_containers_from_queues[i]
            conts.append(cont)
        combined_cont = self._container_combine_method(conts)
        # Shift the query so it indexes into the combined container, whose
        # first element corresponds to the first involved queue's offset.
        idx = list(queue_idxs)[0]
        offset = 0 if idx == 0 else self._queue_load_sizes_cum[idx - 1]
        if isinstance(query, int):
            shifted_query = query - offset
        elif isinstance(query, slice):
            shifted_query = slice(query.start-offset, query.stop-offset, query.step)
        elif isinstance(query, (list, tuple)):
            shifted_query = tuple([slice(slc.start-offset, slc.stop-offset, slc.step) for slc in query])
        # noinspection PyUnboundLocalVariable
        return combined_cont[shifted_query]

    def __getitem__(self, query):
        """
        Get slice, key or key chain of container object.

        :param query: slice object, key or key chain to query all container elements.
        :type query: slice or str
        :return: Container object at desired query.
        """
        if isinstance(query, str):
            # '/' or '.' marks a nested key chain rather than a plain key.
            if '/' in query or '.'
in query:
                return self.at_key_chain(query)
            return dict.__getitem__(self, query)
        elif _ivy.exists(self._queues):
            # Queue-backed container: fetch the requested slice lazily.
            return self._get_queue_item(query)
        # Otherwise apply the slice/index recursively to every leaf.
        return_dict = dict()
        for key, value in sorted(self.items()):
            if isinstance(value, Container):
                return_dict[key] = value[query]
            else:
                # noinspection PyBroadException
                if isinstance(value, list) or isinstance(value, tuple):
                    # Empty sequences are returned unchanged (nothing to slice).
                    if len(value) == 0:
                        return_dict[key] = value
                    else:
                        return_dict[key] = value[query]
                elif value is None or hasattr(value, 'shape') and value.shape == ():
                    # None and zero-dimensional arrays cannot be sliced; pass through.
                    return_dict[key] = value
                else:
                    return_dict[key] = value[query]
        return Container(return_dict, **self._config)

    def __setitem__(self, query, val):
        """
        Set key or key chain of container object.

        :param query: slice object, key or key chain at which to set all container elements.
        :type query: slice or str
        :param val: The value to set at the desired query.
        :type val: ivy.Container, array, or other
        :return: New container after updating.
        """
        # '/' or '.' marks a nested key chain; otherwise a plain dict assignment.
        if isinstance(query, str) and ('/' in query or '.' in query):
            return self.set_at_key_chain(query, val, inplace=True)
        else:
            return dict.__setitem__(self, query, val)

    def __contains__(self, key):
        # Key chains ('/' or '.' separated) are resolved recursively.
        if isinstance(key, str) and ('/' in key or '.'
in key):
            return self.has_key_chain(key)
        else:
            return dict.__contains__(self, key)

    # Arithmetic operators: when the other operand is a Container, the two
    # containers are combined leaf-wise via self.reduce; otherwise the scalar /
    # array operand is applied to every leaf via self.map.

    def __pos__(self):
        return self

    def __neg__(self):
        return self.map(lambda x, kc: -x)

    def __pow__(self, power):
        if isinstance(power, Container):
            return self.reduce([self, power], lambda x: _reduce(_pow, x))
        return self.map(lambda x, kc: x ** power)

    def __rpow__(self, power):
        return self.map(lambda x, kc: power ** x)

    def __add__(self, other):
        if isinstance(other, Container):
            return self.reduce([self, other], sum)
        return self.map(lambda x, kc: x + other)

    def __radd__(self, other):
        # Addition commutes leaf-wise, so reuse __add__.
        return self + other

    def __sub__(self, other):
        if isinstance(other, Container):
            # a - b implemented as a + (-b) so the same sum-reduction applies.
            return self.reduce([self, -other], sum)
        return self.map(lambda x, kc: x - other)

    def __rsub__(self, other):
        return -self + other

    def __mul__(self, other):
        if isinstance(other, Container):
            return self.reduce([self, other], lambda x: _reduce(_mul, x))
        return self.map(lambda x, kc: x * other)

    def __rmul__(self, other):
        # Multiplication commutes leaf-wise, so reuse __mul__.
        return self * other

    def __truediv__(self, other):
        if isinstance(other, Container):
            return self.reduce([self, other], lambda x: _reduce(_truediv, x))
        return self.map(lambda x, kc: x / other)

    def __rtruediv__(self, other):
        return self.map(lambda x, kc: other / x)

    def __floordiv__(self, other):
        if isinstance(other, Container):
            return self.reduce([self, other], lambda x: _reduce(_floordiv, x))
        return self.map(lambda x, kc: x // other)

    def __rfloordiv__(self, other):
        return self.map(lambda x, kc: other // x)

    def __abs__(self):
        # Element-wise absolute value via the backend framework.
        return self.map(lambda x, kc: self._ivy.abs(x))

    def __lt__(self, other):
        if isinstance(other, Container):