input
stringlengths
2.65k
237k
output
stringclasses
1 value
Set degrees surf.degree_u = 3 surf.degree_v = 2 # Set control points control_points = [[0, 0, 0], [0, 4, 0], [0, 8, -3], [2, 0, 6], [2, 4, 0], [2, 8, 0], [4, 0, 0], [4, 4, 0], [4, 8, 3], [6, 0, 0], [6, 4, -3], [6, 8, 0]] surf.set_ctrlpts(control_points, 4, 3) # Set knot vectors surf.knotvector_u = [0, 0, 0, 0, 1, 1, 1, 1] surf.knotvector_v = [0, 0, 0, 1, 1, 1] # Set evaluation delta (control the number of surface points) surf.delta = 0.05 # Get surface points (the surface will be automatically evaluated) surface_points = surf.evalpts """ def __init__(self, **kwargs): super(Surface, self).__init__(**kwargs) self._control_points2D = self._init_array() # control points, 2-D array [u][v] self._evaluator = evaluators.SurfaceEvaluator(find_span_func=self._span_func) self._tsl_component = tessellate.TriangularTessellate() @property def ctrlpts2d(self): """ 2-dimensional array of control points. The getter returns a tuple of 2D control points (weighted control points + weights if NURBS) in *[u][v]* format. The rows of the returned tuple correspond to v-direction and the columns correspond to u-direction. The following example can be used to traverse 2D control points: .. 
code-block:: python :linenos: # Create a BSpline surface surf_bs = BSpline.Surface() # Do degree, control points and knot vector assignments here # Each u includes a row of v values for u in surf_bs.ctrlpts2d: # Each row contains the coordinates of the control points for v in u: print(str(v)) # will be something like (1.0, 2.0, 3.0) # Create a NURBS surface surf_nb = NURBS.Surface() # Do degree, weighted control points and knot vector assignments here # Each u includes a row of v values for u in surf_nb.ctrlpts2d: # Each row contains the coordinates of the weighted control points for v in u: print(str(v)) # will be something like (0.5, 1.0, 1.5, 0.5) When using **NURBS.Surface** class, the output of :py:attr:`~ctrlpts2d` property could be confusing since, :py:attr:`~ctrlpts` always returns the unweighted control points, i.e. :py:attr:`~ctrlpts` property returns 3D control points all divided by the weights and you can use :py:attr:`~weights` property to access the weights vector, but :py:attr:`~ctrlpts2d` returns the weighted ones plus weights as the last element. This difference is intentionally added for compatibility and interoperability purposes. To explain this situation in a simple way; * If you need the weighted control points directly, use :py:attr:`~ctrlpts2d` * If you need the control points and the weights separately, use :py:attr:`~ctrlpts` and :py:attr:`~weights` .. note:: Please note that the setter doesn't check for inconsistencies and using the setter is not recommended. Instead of the setter property, please use :func:`.set_ctrlpts()` function. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. 
:getter: Gets the control points as a 2-dimensional array in [u][v] format :setter: Sets the control points as a 2-dimensional array in [u][v] format :type: list """ return self._control_points2D @ctrlpts2d.setter def ctrlpts2d(self, value): if not isinstance(value, (list, tuple)): raise ValueError("The input must be a list or tuple") # Clean up the surface and control points self.reset(evalpts=True, ctrlpts=True) # Assume that the user has prepared the lists correctly size_u = len(value) size_v = len(value[0]) # Estimate dimension by checking the size of the first element self._dimension = len(value[0][0]) # Make sure that all numbers are float type ctrlpts = [[] for _ in range(size_u * size_v)] for u in range(size_u): for v in range(size_v): idx = v + (size_v * u) ctrlpts[idx] = [float(coord) for coord in value[u][v]] # Set control points self.set_ctrlpts(ctrlpts, size_u, size_v) def set_ctrlpts(self, ctrlpts, *args, **kwargs): """ Sets the control points and checks if the data is consistent. This method is designed to provide a consistent way to set control points whether they are weighted or not. It directly sets the control points member of the class, and therefore it doesn't return any values. The input will be an array of coordinates. If you are working in the 3-dimensional space, then your coordinates will be an array of 3 elements representing *(x, y, z)* coordinates. This method also generates 2D control points in *[u][v]* format which can be accessed via :py:attr:`~ctrlpts2d`. .. note:: The v index varies first. That is, a row of v control points for the first u value is found first. Then, the row of v control points for the next u value. 
:param ctrlpts: input control points as a list of coordinates :type ctrlpts: list """ # Call parent function super(Surface, self).set_ctrlpts(ctrlpts, *args, **kwargs) # Generate a 2-dimensional list of control points array_init2d = kwargs.get('array_init2d', [[[] for _ in range(args[1])] for _ in range(args[0])]) ctrlpts_float2d = array_init2d for i in range(0, self.ctrlpts_size_u): for j in range(0, self.ctrlpts_size_v): ctrlpts_float2d[i][j] = self._control_points[j + (i * self.ctrlpts_size_v)] # Set the new 2-dimension control points self._control_points2D = ctrlpts_float2d def reset(self, **kwargs): """ Resets control points and/or evaluated points. Keyword Arguments: * ``evalpts``: if True, then resets evaluated points * ``ctrlpts`` if True, then resets control points """ # Call parent function super(Surface, self).reset(**kwargs) # Reset ctrlpts2d reset_ctrlpts = kwargs.get('ctrlpts', False) if reset_ctrlpts: self._control_points2D = self._init_array() def save(self, file_name): """ Saves the surface as a pickled file. :param file_name: name of the file to be saved :type file_name: str :raises IOError: an error occurred writing the file """ # Create a dictionary from the surface data expdata = {'rational': self.rational, 'degree_u': self.degree_u, 'degree_v': self.degree_v, 'knotvector_u': self.knotvector_u, 'knotvector_v': self.knotvector_v, 'ctrlpts_size_u': self.ctrlpts_size_u, 'ctrlpts_size_v': self.ctrlpts_size_v, 'ctrlpts': self._control_points, 'dimension': self.dimension} save_pickle(expdata, file_name) def load(self, file_name): """ Loads the surface from a pickled file. :param file_name: name of the file to be loaded :type file_name: str :raises IOError: an error occurred reading the file """ impdata = read_pickle(file_name) # Check if we have loaded the correct type of surface if self.rational != impdata['rational']: raise TypeError("Surface types are not compatible (rational vs. 
non-rational mismatch)") # Clean control points and evaluated points self.reset(ctrlpts=True, evalpts=True) # Set the surface data self.degree_u = impdata['degree_u'] self.degree_v = impdata['degree_v'] self.set_ctrlpts(impdata['ctrlpts'], impdata['ctrlpts_size_u'], impdata['ctrlpts_size_v']) self.knotvector_u = impdata['knotvector_u'] self.knotvector_v = impdata['knotvector_v'] def transpose(self): """ Transposes the surface by swapping u and v parametric directions. """ operations.transpose(self, inplace=True) def evaluate(self, **kwargs): """ Evaluates the surface. **The evaluated surface points are stored in :py:attr:`~evalpts` property.** Keyword arguments: * ``start_u``: start parameter on the u-direction * ``stop_u``: stop parameter on the u-direction * ``start_v``: start parameter on the v-direction * ``stop_v``: stop parameter on the v-direction The ``start_u``, ``start_v`` and ``stop_u`` and ``stop_v`` parameters allow evaluation of a surface segment in the range *[start_u, stop_u][start_v, stop_v]* i.e. the surface will also be evaluated at the ``stop_u`` and ``stop_v`` parameter values. The following examples illustrate the usage of the keyword arguments. .. 
code-block:: python :linenos: # Start evaluating in range u=[0, 0.7] and v=[0.1, 1] surf.evaluate(stop_u=0.7, start_v=0.1) # Start evaluating in range u=[0, 1] and v=[0.1, 0.3] surf.evaluate(start_v=0.1, stop_v=0.3) # Get the evaluated points surface_points = surf.evalpts """ # Call parent method super(Surface, self).evaluate(**kwargs) # Find evaluation start and stop parameter values start_u = kwargs.get('start_u', self.knotvector_u[self.degree_u]) stop_u = kwargs.get('stop_u', self.knotvector_u[-(self.degree_u+1)]) start_v = kwargs.get('start_v', self.knotvector_v[self.degree_v]) stop_v = kwargs.get('stop_v', self.knotvector_v[-(self.degree_v+1)]) # Check parameters if self._kv_normalize: utilities.check_params([start_u, stop_u, start_v, stop_v]) # Clean up the surface points self.reset(evalpts=True) # Evaluate spts = self._evaluator.evaluate(start=(start_u, start_v), stop=(stop_u, stop_v), degree=self._degree, knotvector=self._knot_vector, ctrlpts_size=self._control_points_size, ctrlpts=self._control_points, sample_size=self.sample_size, dimension=self._dimension, precision=self._precision) self._eval_points = spts def evaluate_single(self, param): """ Evaluates the surface at the input (u, v) parameter pair. :param param: parameter pair (u, v) :type param: list, tuple :return: evaluated surface point at the given parameter pair :rtype: list """ # Call parent method super(Surface, self).evaluate_single(param) # Evaluate the surface spt = self._evaluator.evaluate_single(parameter=param, degree=self._degree, knotvector=self._knot_vector, ctrlpts_size=self._control_points_size, ctrlpts=self._control_points, dimension=self._dimension, precision=self._precision) return spt def evaluate_list(self, param_list): """ Evaluates the surface for a given list of (u, v) parameters. 
:param param_list: list of parameter pairs (u, v) :type param_list: list, tuple :return: evaluated surface point at the input parameter pairs :rtype: tuple """ # Call parent method super(Surface, self).evaluate_list(param_list) # Evaluate
+ colorname1 + "'\n" else: # rf. https://www.w3schools.com/colors/colors_names.asp user_tab_header += indent + param_name_button + ".style.button_color = '" + colorname2 + "'\n" user_tab_header += "\n" + indent + full_name + " = " + widgets[child.attrib['type']] + "(\n" # Try to calculate and provide a "good" delta step (for the tiny "up/down" arrows on a numeric widget) if child.attrib['type'] == "double": fval_abs = abs(float(child.text)) if (fval_abs > 0.0): if (fval_abs > 1.0): # crop delta_val = pow(10, int(math.log10(abs(float(child.text)))) - 1) else: # round delta_val = pow(10, round(math.log10(abs(float(child.text)))) - 1) else: delta_val = 0.01 # if initial value=0.0, we're totally guessing at what a good delta is if print_var_types: print('double: ',float(child.text),', delta_val=',delta_val) user_tab_header += indent2 + "value=" + child.text + ",\n" # Note: "step" values will advance the value to the nearest multiple of the step value itself :-/ user_tab_header += indent2 + "step=" + str(delta_val) + ",\n" # Integers elif child.attrib['type'] == "int": # warning: math.log(1000,10)=2.99..., math.log10(1000)=3 if (abs(int(child.text)) > 0): delta_val = pow(10,int(math.log10(abs(int(child.text)))) - 1) else: delta_val = 1 # if initial value=0, we're totally guessing at what a good delta is if print_var_types: print('int: ',int(child.text),', delta_val=',delta_val) user_tab_header += indent2 + "value=" + child.text + ",\n" user_tab_header += indent2 + "step=" + str(delta_val) + ",\n" # Booleans elif child.attrib['type'] == "bool": if (child.text.lower() == "true"): child.text = "True" elif (child.text.lower() == "false"): child.text = "False" else: print(" --- ERROR: bool must be True or False, not ", child.text) sys.exit(1) if print_var_types: print('bool: ',child.text) user_tab_header += indent2 + "value=" + child.text + ",\n" # Strings elif child.attrib['type'] == "string": user_tab_header += indent2 + "value='" + child.text + "',\n" # elif 
child.attrib['type'].lower() == 'divider': # divider_flag = True # child.text = "Worker_Parameters" # # user_tab_header += indent2 + "value=" + child.description + ",\n" # user_tab_header += indent2 + "value=" + child.attrib['description'] + ",\n" row_name = "row" + str(param_count) box_name = "box" + str(param_count) if (not divider_flag): # We're processing a "normal" row - typically a name, numeric field, units, description # - append the info at the end of this widget user_tab_header += indent2 + "style=style, layout=widget_layout)\n" row_str += indent + row_name + " = [" + param_name_button + ", " + full_name + ", " + units_btn_name + ", " + desc_row_name + "] \n" box_str += indent + box_name + " = Box(children=" + row_name + ", layout=box_layout)\n" else: # divider box_str += indent + box_name + " = Box(children=" + row_name + ", layout=box_layout)\n" vbox_str += indent2 + box_name + ",\n" if (not divider_flag): # float, int, bool if (type_cast[child.attrib['type']] == "bool"): fill_gui_str += indent + full_name + ".value = ('true' == (uep.find('.//" + child.tag + "').text.lower()) )\n" else: fill_gui_str += indent + full_name + ".value = " + type_cast[child.attrib['type']] + "(uep.find('.//" + child.tag + "').text)\n" fill_xml_str += indent + "uep.find('.//" + child.tag + "').text = str("+ full_name + ".value)\n" vbox_str += indent + "])" # Write the beginning of the Python module for the user parameters tab in the GUI user_tab_file = "user_params.py" print("\n --------------------------------- ") print("Generated a new: ", user_tab_file) print() fp= open(user_tab_file, 'w') fp.write(user_tab_header) fp.write(units_buttons_str) fp.write(desc_buttons_str) fp.write(row_str) fp.write(box_str) fp.write(vbox_str) fp.write(fill_gui_str) fp.write(fill_xml_str) fp.close() #--------------------------------------------------------------------------------------------------- #---------- micronenv 
#--------------------------------------------------------------------------------------------------- microenv_tab_header = """ # This file is auto-generated from a Python script that parses a PhysiCell configuration (.xml) file. # # Edit at your own risk. # import os from ipywidgets import Label,Text,Checkbox,Button,HBox,VBox,FloatText,IntText,BoundedIntText,BoundedFloatText,Layout,Box class MicroenvTab(object): def __init__(self): micron_units = Label('micron') # use "option m" (Mac, for micro symbol) constWidth = '180px' tab_height = '500px' stepsize = 10 #style = {'description_width': '250px'} style = {'description_width': '25%'} layout = {'width': '400px'} name_button_layout={'width':'25%'} widget_layout = {'width': '15%'} units_button_layout ={'width':'15%'} desc_button_layout={'width':'45%'} """ """ self.therapy_activation_time = BoundedFloatText( min=0., max=100000000, step=stepsize, description='therapy_activation_time', style=style, layout=layout, # layout=Layout(width=constWidth), ) self.save_interval_after_therapy_start = BoundedFloatText( min=0., max=100000000, step=stepsize, description='save_interval_after_therapy_start', style=style, layout=layout, ) label_blankline = Label('') self.tab = VBox([HBox([self.therapy_activation_time, Label('min')]), HBox([self.save_interval_after_therapy_start, Label('min')]), ]) """ fill_gui_str= """ # Populate the GUI widgets with values from the XML def fill_gui(self, xml_root): uep = xml_root.find('.//microenvironment_setup') # find unique entry point vp = [] # pointers to <variable> nodes if uep: for var in uep.findall('variable'): vp.append(var) """ fill_xml_str= """ # Read values from the GUI widgets to enable editing XML def fill_xml(self, xml_root): uep = xml_root.find('.//microenvironment_setup') # find unique entry point vp = [] # pointers to <variable> nodes if uep: for var in uep.findall('variable'): vp.append(var) """ # Now parse a configuration file (.xml) and map the user parameters into GUI widgets #tree 
= ET.parse('../config/PhysiCell_settings.xml') try: tree = ET.parse(config_file) except: print("Cannot parse",config_file, "- check it's XML syntax.") sys.exit(1) root = tree.getroot() indent = " " indent2 = " " widgets = {"double":"FloatText", "int":"IntText", "bool":"Checkbox", "string":"Text", "divider":"div"} type_cast = {"double":"float", "int":"int", "bool":"bool", "string":"", "divider":"div"} vbox_str = "\n" + indent + "self.tab = VBox([\n" #param_desc_buttons_str = "\n" #name_buttons_str = "\n" units_buttons_str = "\n" desc_buttons_str = "\n" row_str = "\n" box_str = "\n" + indent + "box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='100%')\n" # box1 = Box(children=row1, layout=box_layout)\n" menv_var_count = 0 # micronenv param_count = 0 divider_count = 0 color_count = 0 #param_desc_count = 0 name_count = 0 units_count = 0 #---------- micronenv # <microenvironment_setup> # <variable name="oxygen" units="mmHg" ID="0"> # <physical_parameter_set> # <diffusion_coefficient units="micron^2/min">100000.000000</diffusion_coefficient> # <decay_rate units="1/min">.1</decay_rate> # </physical_parameter_set> # <initial_condition units="mmHg">38.0</initial_condition> # <Dirichlet_boundary_condition units="mmHg" enabled="true">38.0</Dirichlet_boundary_condition> # </variable> # ... 
# # <options> # <calculate_gradients>False</calculate_gradients> # <track_internalized_substrates_in_each_agent>False</track_internalized_substrates_in_each_agent> # <initial_condition enabled="false" type="matlab"> # <filename>./config/initial.mat</filename> # </initial_condition> # <dirichlet_nodes enabled="false" type="matlab"> # <filename>./config/dirichlet.mat</filename> # </dirichlet_nodes> # </options> # </microenvironment_setup> uep = root.find('.//microenvironment_setup') # find unique entry point (uep) if uep: fill_gui_str += indent + "uep = xml_root.find('.//microenvironment_setup') # find unique entry point\n" fill_xml_str += indent + "uep = xml_root.find('.//microenvironment_setup') # find unique entry point\n" microenv_tab_header += "\n" pp_count = 0 units_buttons_str += indent + " # ------- micronenv info\n" var_idx = -1 # units_buttons_str += indent + " # --- variable info\n" for var in uep.findall('variable'): fill_gui_str += "\n" var_idx += 1 menv_var_count += 1 print('==== new microenv var: ',var.tag, var.attrib) # --- basic widgets: # full_name = "self." + var.attrib['name'] # name_count += 1 # param_name_button = "param_name" + str(name_count) # 1) Variable name + [units] menv_var_name_button = "menv_var" + str(menv_var_count) menv_var_name = var.attrib['name'].replace(" ","_") # e.g., "director signal" --> "director_signal" print('menv_var_name=',menv_var_name) units_str = '' if ('units' in var.attrib) and (var.attrib['units'] != 'dimensionless'): units_str = ' (' + var.attrib['units'] + ')' microenv_tab_header += '\n' + indent + menv_var_name_button + " = " + "Button(description='" + menv_var_name + units_str + "', disabled=True, layout=name_button_layout)\n" if (color_count % 2): microenv_tab_header += indent + menv_var_name_button + ".style.button_color = '" + colorname1 + "'\n" else: # rf. 
https://www.w3schools.com/colors/colors_names.asp microenv_tab_header += indent + menv_var_name_button + ".style.button_color = '" + colorname2 + "'\n" color_count += 1 # color each menv variable block the same, but alternating color # print(microenv_tab_header) # --- row_str: appear AFTER all the basic widgets are defined row_name = "row_" + menv_var_name # str(param_count) # row_str += indent + row_name + " = [" + param_name_button + ", " + full_name + ", " + units_btn_name + ", " + desc_row_name + "] \n" row_str += indent + row_name + " = [" + menv_var_name_button + ", ] \n" # box_name = "box" + str(param_count) box_name = "box_" + menv_var_name box_str += indent + box_name + " = Box(children=" + row_name + ", layout=box_layout)\n" vbox_str += indent2 + box_name + ",\n" for child in var: # print(' child in var-----> ',child.tag, child.attrib) # print(' child.tag.lower() ----> ',child.tag.lower()) # if (child.tag.lower == 'physical_parameter_set'): if ('physical_parameter_set' in child.tag.lower() ): # 2) <physical_parameter_set> variables for pp in var.findall('physical_parameter_set'): for ppchild in pp: # e.g., diffusion_coefficient, decay_rate # print(' -- ppchild in pp: ',ppchild.tag, ppchild.attrib, float(ppchild.text)) pp_button_name = "pp_button" + str(pp_count) pp_units_name = "pp_button_units" + str(pp_count) pp_count += 1 param_count += 1 param_name_button = "menv_param" + str(pp_count) # desc_buttons_str += indent + desc_row_name + " = " + "Button(description='" + describe_str + "', disabled=True, layout=desc_button_layout) \n" # desc_buttons_str += indent + param_name_button + " = " + "Button(description='" + ppchild.tag + "', disabled=True, layout=name_button_layout) \n" #microenv_tab_header += indent + param_name_button + " = " + "Button(description='" + ppchild.tag + "', disabled=True, layout=name_button_layout) \n" name_count += 1 param_name_button = "param_name" + str(name_count) microenv_tab_header += "\n" + indent + param_name_button + "
sys.exit(1) max_flow = 0 path_capacities = [] for path in paths: capacity = path_capacity(links, path) path_capacities.append(capacity) max_flow += capacity for i in range(len(paths)): weight = path_capacities[i] * 1.0 / max_flow all_paths[src, dst].append((paths[i], weight)) # Compute sort information on OD pairs: node_infos = {} for node in data['nodes']: node_infos[node['id']] = {'fanout': 0, 'fanin': 0, 'connectivity': 0, 'nb_paths': 0} for link in data['links']: node_infos[link['destination']]['connectivity'] += 1 node_infos[link['destination']]['fanin'] += link['capacity'] node_infos[link['source']]['connectivity'] += 1 node_infos[link['source']]['fanout'] += link['capacity'] for (src, dst) in all_paths: for (path, _) in all_paths[src, dst]: for node in path[1:-1]: node_infos[node]['nb_paths'] += 1 od_pair_infos = {} for (src, dst) in od_pairs: src_infos = node_infos[src] dst_infos = node_infos[dst] m1 = min(src_infos['fanout'], dst_infos['fanin']) m2 = min(src_infos['connectivity'], dst_infos['connectivity']) if src_infos['nb_paths'] == dst_infos['nb_paths'] == 0: m3 = float("Inf") else: m3 = 1.0 / max(src_infos['nb_paths'], dst_infos['nb_paths']) od_pair_infos[(src, dst)] = {'m1': m1, 'm2': m2, 'm3': m3} # Sort OD pairs: def make_comparator(od_pair_infos): def compare(od1, od2): pair1_infos = od_pair_infos[od1] pair2_infos = od_pair_infos[od2] if pair1_infos['m1'] == pair2_infos['m1']: if pair1_infos['m2'] == pair2_infos['m2']: if pair1_infos['m3'] == pair2_infos['m3']: return 0 elif pair1_infos['m3'] > pair2_infos['m3']: return 1 else: return -1 elif pair1_infos['m2'] > pair2_infos['m2']: return 1 else: return -1 elif pair1_infos['m1'] > pair2_infos['m1']: return 1 else: return -1 return compare flow_rates.sort(reverse=True) od_pairs = sorted(od_pairs, cmp=make_comparator(od_pair_infos), reverse=True) # Route flows between OD pairs: route_flows_multipaths(links, all_paths, od_pairs, flow_rates) # Write link loads to YAML and compute objective value: 
objective = 0 for link in data['links']: link['legit_load'] = links[link['source'], link['destination']]['legit_load'] max_link_load = link['legit_load'] / link['capacity'] if max_link_load > objective: objective = max_link_load return objective """Displays the graph of the network with legitimate loads on edges, in a new window. Args: data: The dictionary containing the topology and the node capacities. """ def display_graph(data): links = [(link['source'], link['destination']) for link in data['links']] edge_labels = {} for link in data['links']: if link['legit_load'] > 0: edge_labels[(link['source'], link['destination'])] = link['legit_load'] G = nx.DiGraph(links) pos = nx.spring_layout(G) nx.draw(G, pos=pos, with_labels=True, arrows=False) nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels) plt.show() """Computes the mean load factor (load / capacity) of links in the network. Args: data: The dictionary containing the topology and the node capacities. Returns: The mean link load factor. """ def compute_mean_link_load(data): tot_link_load = 0 nb_links = 0 for link in data['links']: tot_link_load += link['legit_load'] / link['capacity'] nb_links += 1 return tot_link_load / nb_links """Scales down the legitimate loads of links in the network by factor. Args: data: The dictionary containing the topology and the node capacities. factor: The value link loads are divided by. """ def scale_down_tm(data, factor): for link in data['links']: link['legit_load'] /= factor """Generates node and link items for the output YAML file. For each link (u, v), a back link (v, u) of equal capacity is created. Link capacities are all adapted according to the max_link_capacity. Args: data: The dictionary containing the output information. template: The template information, directly from the YAML file (dictionary). max_node_class: The maximum node class to include in the output network. Used to restrict the network's size. 
max_link_capacity: The capacity of the largest links. Returns: The list of selected nodes, according to the max_node_class argument. """ def generate_topology(data, template, max_node_class, max_link_capacity): classes = {} for class_ in template['classes']: classes[class_['name']] = class_ # Collects neighbors for each node: all_neighbors = {} for node in template['nodes']: neighbors = [] all_neighbors[node['id']] = neighbors for link in template['links']: if link['destination'] == node['id']: neighbors.append((link['source'], link['capacity'])) elif link['source'] == node['id']: neighbors.append((link['destination'], link['capacity'])) neighbors.sort(key=lambda tup: tup[0]) # Collects links: links = {} for link in template['links']: if link['source'] < link['destination']: links[(link['source'], link['destination'])] = link['capacity'] else: links[(link['destination'], link['source'])] = link['capacity'] # Selects the nodes according to the max. node class wanted: nodes = [] i = 0 for node in template['nodes']: if node['class'] <= max_node_class: class_ = classes[node['class']] new_node = {'id': node['id'], 'cpus': class_['cpus'], 'memory': class_['memory']} data['nodes'].append(new_node) nodes.append(node['id']) else: # Removed node, need to bridge the neighbors: nb_neighbors = len(all_neighbors[node['id']]) if nb_neighbors >= 2 and nb_neighbors <= 3: for i in range(nb_neighbors): neighbor1 = all_neighbors[node['id']][i] neighbor2 = all_neighbors[node['id']][(i + 1) % nb_neighbors] if neighbor1[0] == neighbor2[0]: # Only allow edges between different nodes. 
continue # Retrieves the max capacity between the already existing link, if any, and the new: capacity = max(neighbor1[1], neighbor2[1]) if (neighbor1[0], neighbor2[0]) in links: link2_capacity = links[(neighbor1[0], neighbor2[0])] if link2_capacity > capacity: capacity = link2_capacity else: continue link = {'source': neighbor1[0], 'destination': neighbor2[0], 'capacity': capacity} template['links'].insert(0, link) # Removes the current node from neighbor lists: all_neighbors[link['source']] = [(u, cap) for (u, cap) in all_neighbors[link['source']] if u != node['id']] all_neighbors[link['destination']] = [(u, cap) for (u, cap) in all_neighbors[link['destination']] if u != node['id']] # Adds the new neighbors: all_neighbors[link['source']].append((link['destination'], link['capacity'])) all_neighbors[link['destination']].append((link['source'], link['capacity'])) if nb_neighbors == 2: # If we continue we'll add a back-edge between the two neighbors. break i += 1 # Selects the links according to the remaining nodes: cur_max_link_capacity = max(template['links'], key=lambda link: link['capacity'])['capacity'] link_size_factor = max_link_capacity / cur_max_link_capacity for link in template['links']: if link['source'] in nodes and link['destination'] in nodes: already_added = sum([l['source'] == link['source'] and l['destination'] == link['destination'] for l in data['links']]) if already_added == 0: link['capacity'] = link['capacity'] * link_size_factor data['links'].append(link) back_link = dict(link) back_link['source'] = link['destination'] back_link['destination'] = link['source'] data['links'].append(back_link) return nodes """Generates the legitimate link loads according to the Gravity model. Displays a few info messages on stdout. Args: data: The dictionary containing the topology and the node capacities. nodes: The list of node IDs. mipgap: The mipgap argument for the GLPK solver. 
total_traffic: target_mean_link_load: flow_assign_method: The method to use to map generated flow rates into the network. One of 'heuristic', 'ilp', or 'ilp-glpk'. """ def generate_link_loads(data, nodes, mipgap, total_traffic, target_mean_link_load, flow_assign_method): methods = {'heuristic': assign_flow_rates_heuristic, 'ilp': assign_flow_rates_ilp, 'ilp-glpk': assign_flow_rates_ilp_glpk} tm = generate_tm(len(nodes), total_traffic) objective = methods[flow_assign_method](data, tm, mipgap) if objective > 1: print("Scaling down TM by %f to reach feasible routing." % objective, file=sys.stderr) scale_down_tm(data, objective) mean_link_load = compute_mean_link_load(data) factor = mean_link_load / target_mean_link_load if factor < 1: print("Mean link load is at %f." % mean_link_load, file=sys.stderr) else: print("Scaling down TM by %f to reach %f mean link load." % (factor, args.mean_link_load), file=sys.stderr) scale_down_tm(data, factor) """Generates attacks between the given attackers and targets. Attack loads follow an exponential distribution. Attackers and targets are selected randomly. Args: data: The dictionary containing the topology and the node capacities. nodes: The list of node IDs. nb_attackers: The number attackers. 0 for all nodes. nb_targets: The number of targets. mean_attack_load: Mean attack load, in Mbps. """ def generate_attacks(data, nodes, nb_attackers, nb_targets, mean_attack_load): random.shuffle(nodes) if nb_attackers == 0: attackers = nodes else: attackers = nodes[0:nb_attackers] targets = nodes[0:nb_targets] for attacker in attackers: target = targets[random.randint(0, nb_targets - 1)] load = round(np.random.exponential(mean_attack_load)) attack = {'source': attacker, 'destination': target, 'load': load} data['attacks'].append(attack) """Rounds values for the legitimate loads on links. Args: data: The dictionary containing the topology and the node capacities. 
""" def round_link_loads(data): for link in data['links']: link['legit_load'] = round(link['legit_load']) """Format all output information into text format Args: data: The dictionary containing the topology and the node capacities. Returns: The text to display or write to file. """ def format_text(data): nb_nodes = len(data['nodes']) adjacency_matrix = [[0 for x in range(nb_nodes)] for y in range(nb_nodes)] legit_load_matrix = [[0 for x in range(nb_nodes)] for y in range(nb_nodes)] for link in data['links']: adjacency_matrix[link['source'] - 1][link['destination'] - 1] = link['capacity'] legit_load_matrix[link['source'] - 1][link['destination'] - 1] = link['legit_load'] adjacency_matrix_text = "[" legit_load_matrix_text = "[" for i in range(nb_nodes - 1): adjacency_matrix_text += "%s\n " % json.dumps(adjacency_matrix[i]) legit_load_matrix_text += "%s\n " % json.dumps(legit_load_matrix[i]) adjacency_matrix_text += "%s]" % json.dumps(adjacency_matrix[nb_nodes - 1]) legit_load_matrix_text += "%s]" % json.dumps(legit_load_matrix[nb_nodes - 1]) text = "%s\n\n%s" % (adjacency_matrix_text, legit_load_matrix_text) resource_matrix = [[0 for x in range(nb_nodes)] for y in range(2)] for node in data['nodes']: resource_matrix[0][node['id'] - 1] = node['cpus'] resource_matrix[1][node['id'] - 1] = node['memory'] resource_matrix_text = "[%s,\n" % json.dumps(resource_matrix[0]) resource_matrix_text += " %s]" % json.dumps(resource_matrix[1]) text = "%s\n\n%s" % (text, resource_matrix_text) nb_attacks = len(data['attacks']) attack_source_vector = [0 for x in range(nb_attacks)] attack_dest_vector = [0 for x in range(nb_attacks)] attack_load_vector = [0 for x in range(nb_attacks)] i = 0 for attack in data['attacks']: attack_source_vector[i] = attack['source'] attack_dest_vector[i] = attack['destination'] attack_load_vector[i] = attack['load'] i += 1 text = "%s\n\n%s\n%s\n%s" %
def local_constrast_enhancement(img):
    """Normalize local contrast and remap the result into [0, 255].

    The local mean and mean absolute deviation are estimated with a 15x15
    box filter; each pixel is centred, scaled by its local deviation,
    clipped to [-1, 1] and stretched to the 8-bit range.
    (The misspelling "constrast" is kept for caller compatibility.)
    """
    pixels = img.astype(np.float32)
    local_mean = cv2.blur(pixels, (15, 15))
    centered = pixels - local_mean
    # mean absolute deviation over the same window; +10 keeps flat
    # regions from blowing up
    local_dev = cv2.blur(abs(centered), (15, 15))
    scaled = centered / (local_dev + 10) * 0.75
    scaled = np.clip(scaled, -1, 1)
    return (scaled + 1) * 127.5
def fast_enhance_texture(img, sigma=2.5, show=False):
    """Enhance the ridge texture of a grayscale image.

    A low-passed copy of the image is blended with the original using a
    weight derived from the gradient-energy difference between the two,
    then the residual (original minus blend) is stretched into [0, 255].

    Args:
        img: 2-D grayscale image.
        sigma: controls the cut-off of the radial low-pass filter.
        show: when True, display the result with matplotlib.

    Returns:
        float array of shape `img.shape` with values in [0, 255].
    """
    img = img.astype(np.float32)
    height, width = img.shape
    # Work on a square power-of-two grid so the FFT is cheap.
    height_2 = 2 ** nextpow2(height)
    width_2 = 2 ** nextpow2(width)
    fft_size = max(height_2, width_2)
    x, y = np.meshgrid(list(range(int(-fft_size / 2), int(fft_size / 2))),
                       list(range(int(-fft_size / 2), int(fft_size / 2))))
    r = np.sqrt(x * x + y * y) + 0.0001
    r = r / fft_size
    # Radially symmetric 4th-order low-pass response.
    L = 1. / (1 + (2 * math.pi * r * sigma) ** 4)
    img_low = lowpass_filtering(img, L)
    gradim1 = lowpass_filtering(compute_gradient_norm(img), L)
    gradim2 = lowpass_filtering(compute_gradient_norm(img_low), L)
    diff = gradim1 - gradim2
    ar1 = np.abs(gradim1)
    # Normalize the gradient-energy difference; low-energy pixels get 0.
    diff[ar1 > 1] = diff[ar1 > 1] / ar1[ar1 > 1]
    diff[ar1 <= 1] = 0
    cmin = 0.3
    cmax = 0.7
    weight = (diff - cmin) / (cmax - cmin)
    weight[diff < cmin] = 0
    weight[diff > cmax] = 1
    u = weight * img_low + (1 - weight) * img
    temp = img - u
    # Stretch the +/-lim band of the residual to the 8-bit range.
    lim = 20
    v = np.clip((temp + lim) * 255 / (2 * lim), 0, 255)
    if show:
        plt.imshow(v, cmap='gray')
        plt.show()
    return v


def compute_gradient_norm(input_data):
    """Return the per-pixel gradient magnitude of a 2-D array.

    A tiny epsilon is added so downstream divisions are safe.
    """
    input_data = input_data.astype(np.float32)
    g_x, g_y = np.gradient(input_data)
    return np.sqrt(g_x * g_x + g_y * g_y) + 0.000001


def lowpass_filtering(img, L):
    """Filter `img` in the frequency domain with the (centred) response `L`.

    The image is zero-padded up to L's shape, transformed, multiplied by L
    and transformed back; the result is cropped to the original size.
    """
    height, width = img.shape
    height_2, width_2 = L.shape
    # Plain zero padding; replaces cv2.copyMakeBorder(..., BORDER_CONSTANT,
    # value=0) with the identical numpy-native call.
    padded = np.pad(img, ((0, height_2 - height), (0, width_2 - width)),
                    mode='constant', constant_values=0)
    spec = np.fft.fftshift(np.fft.fft2(padded))
    spec = spec * L
    # NOTE: fftshift (not ifftshift) is applied before the inverse, as in
    # the original; identical for even-sized grids, which nextpow2 ensures.
    rec_img = np.real(np.fft.ifft2(np.fft.fftshift(spec)))
    return rec_img[:height, :width]


def nextpow2(x):
    """Return the smallest integer p such that 2**p >= x.

    Exact for positive integers (no float-log rounding); falls back to the
    original log-based formula for non-integer input.
    """
    if isinstance(x, int):
        return max(x - 1, 0).bit_length()
    return int(math.ceil(math.log(x, 2)))


def construct_dictionary(ori_num=30):
    """Build a dictionary of synthetic oriented ridge patches.

    For every (spacing, valley_spacing, orientation, phase offset) combination
    a 32x32 sinusoidal ridge patch is generated, zero-meaned and L2-normalized.

    Args:
        ori_num: number of quantized orientations.

    Returns:
        Tuple (ori_dict, s, dict_all, ori_all, spacing_all) where ori_dict[k]
        stacks the patches of orientation k, s[k] their spacings, dict_all has
        one patch per column, and ori_all / spacing_all hold the per-patch
        orientation and inverse spacing.
    """
    ori_dict = [[] for _ in range(ori_num)]
    s = [[] for _ in range(ori_num)]
    patch_size2 = 16
    patch_size = 32
    dict_all = []
    spacing_all = []
    ori_all = []
    y, x = np.meshgrid(list(range(-patch_size2, patch_size2)),
                       list(range(-patch_size2, patch_size2)))
    for spacing in range(6, 13):
        for valley_spacing in range(3, spacing // 2):
            ridge_spacing = spacing - valley_spacing
            for k in range(ori_num):
                theta = np.pi / 2 - k * np.pi / ori_num
                x_r = x * np.cos(theta) - y * np.sin(theta)
                for offset in range(0, spacing - 1, 2):
                    x_r_offset = np.remainder(x_r + offset + ridge_spacing / 2,
                                              spacing)
                    y_1 = np.zeros((patch_size, patch_size))
                    y_2 = np.zeros((patch_size, patch_size))
                    on_ridge = x_r_offset <= ridge_spacing
                    y_1[on_ridge] = x_r_offset[on_ridge]
                    y_2[~on_ridge] = x_r_offset[~on_ridge] - ridge_spacing
                    element = (-np.sin(2 * math.pi * (y_1 / ridge_spacing / 2))
                               + np.sin(2 * math.pi * (y_2 / valley_spacing / 2)))
                    element = element.reshape(patch_size * patch_size,)
                    element = element - np.mean(element)
                    element = element / np.linalg.norm(element)
                    ori_dict[k].append(element)
                    s[k].append(spacing)
                    dict_all.append(element)
                    spacing_all.append(1.0 / spacing)
                    ori_all.append(theta)
    for i in range(ori_num):
        ori_dict[i] = np.asarray(ori_dict[i])
        # BUG FIX: the original converted s[k] with the stale inner-loop
        # index k, leaving every spacing list except the last as a plain list.
        s[i] = np.asarray(s[i])
    dict_all = np.transpose(np.asarray(dict_all))
    spacing_all = np.asarray(spacing_all)
    ori_all = np.asarray(ori_all)
    return ori_dict, s, dict_all, ori_all, spacing_all
def smooth_dir_map(dir_map, sigma=2.0, mask=None):
    """Smooth a pi-periodic orientation field.

    The field is embedded as its doubled-angle components (cos 2t, sin 2t),
    each component is Gaussian-filtered, and the smoothed angle is
    recovered with arctan2. Pixels where `mask` is 0 are zeroed out before
    filtering so they do not contribute.
    """
    cos2 = np.cos(2 * dir_map)
    sin2 = np.sin(2 * dir_map)
    if mask is not None:
        assert dir_map.shape[0] == mask.shape[0]
        assert dir_map.shape[1] == mask.shape[1]
        invalid = (mask == 0)
        cos2[invalid] = 0
        sin2[invalid] = 0
    # NOTE(review): the `multichannel` keyword was removed in recent
    # scikit-image releases (use channel_axis=None there) -- confirm the
    # pinned skimage version before upgrading.
    cos2 = filters.gaussian(cos2, sigma, multichannel=False, mode='reflect')
    sin2 = filters.gaussian(sin2, sigma, multichannel=False, mode='reflect')
    return 0.5 * np.arctan2(sin2, cos2)
range(blk_width): if local_info[i, j].ori is None: continue dir_map[i, j] = local_info[i, j].ori[0] # + math.pi*0.5 fre_map[i, j] = local_info[i, j].fre[0] return dir_map, fre_map class LocalSTFT: def __init__(self, patch, weight=None, dBPass=None): if weight is not None: patch = patch * weight patch = patch - np.mean(patch) norm = np.linalg.norm(patch) patch = patch / (norm+0.000001) f = np.fft.fft2(patch) fshift = np.fft.fftshift(f) if dBPass is not None: fshift = dBPass * fshift self.patch_fft = fshift self.patch = patch self.ori = None self.fre = None self.confidence = None self.patch_size = patch.shape[0] self.border_wave = None def analysis(self, r, dir_ind_list=None, n=2): assert dir_ind_list is not None energy = np.abs(self.patch_fft) energy = energy / (np.sum(energy)+0.00001) nrof_dirs = len(dir_ind_list) ori_interval = math.pi/nrof_dirs ori_interval2 = ori_interval/2 pad_size = 1 dir_norm = np.zeros((nrof_dirs + 2,)) for i in range(nrof_dirs): tmp = energy[dir_ind_list[i][:, 0], dir_ind_list[i][:, 1]] dir_norm[i + 1] = np.sum(tmp) dir_norm[0] = dir_norm[nrof_dirs] dir_norm[nrof_dirs + 1] = dir_norm[1] # smooth dir_norm smoothed_dir_norm = dir_norm for i in range(1, nrof_dirs + 1): smoothed_dir_norm[i] = (dir_norm[i - 1] + dir_norm[i] * 4 + dir_norm[i + 1]) / 6 smoothed_dir_norm[0] = smoothed_dir_norm[nrof_dirs] smoothed_dir_norm[nrof_dirs + 1] = smoothed_dir_norm[1] den = np.sum(smoothed_dir_norm[1:nrof_dirs + 1]) + 0.00001 # verify if den == 1 smoothed_dir_norm = smoothed_dir_norm/den # normalization if den == 1, this line can be removed ori = [] fre = [] confidence = [] wenergy = energy*r for i in range(1, nrof_dirs+1): if smoothed_dir_norm[i] > smoothed_dir_norm[i-1] and smoothed_dir_norm[i] > smoothed_dir_norm[i+1]: tmp_ori = (i-pad_size)*ori_interval + ori_interval2 + math.pi/2 ori.append(tmp_ori) confidence.append(smoothed_dir_norm[i]) tmp_fre = np.sum(wenergy[dir_ind_list[i-pad_size][:, 0], dir_ind_list[i-pad_size][:, 1]])/dir_norm[i] tmp_fre = 
1/(tmp_fre+0.00001) fre.append(tmp_fre) if len(confidence) > 0: confidence = np.asarray(confidence) fre = np.asarray(fre) ori = np.asarray(ori) ind = confidence.argsort()[::-1] confidence = confidence[ind] fre = fre[ind] ori = ori[ind] if len(confidence) >= 2 and confidence[0]/confidence[1] > 2.0: self.ori = [ori[0]] self.fre = [fre[0]] self.confidence = [confidence[0]] elif len(confidence) > n: fre = fre[:n] ori = ori[:n] confidence = confidence[:n] self.ori = ori self.fre = fre self.confidence = confidence else: self.ori = ori self.fre = fre self.confidence
- 65.41*m.x840 - 53.98*m.x852 - 56.41*m.x863 - 56.41*m.x879 - 64.04*m.x894 - 64.04*m.x931 - 86.37*m.x968 - 20.03*m.x979 - 20.03*m.x995 - 52.86*m.x1021 - 52.86*m.x1027 - 52.86*m.x1049 - 87.05*m.x1063 - 28.6*m.x1084 - 38.28*m.x1113 - 38.72*m.x1137 - 35.81*m.x1168 - 50.75*m.x1187 - 65.41*m.x1197 - 20.03*m.x1237 <= 0) m.c202 = Constraint(expr= - 15.53*m.x92 - 15.53*m.x100 - 15.53*m.x105 - 15.53*m.x114 - 15.53*m.x133 + 13.98*m.x152 + 13.98*m.x171 - 3.23*m.x186 - 3.23*m.x204 - 3.23*m.x209 - 3.23*m.x234 + 34.47*m.x255 + 34.47*m.x260 + 34.47*m.x268 + 34.47*m.x294 - 22.47*m.x305 - 22.47*m.x323 - 22.47*m.x338 + 41.34*m.x347 + 41.34*m.x353 + 41.34*m.x370 + 41.34*m.x396 + 43.43*m.x407 + 43.43*m.x425 + 43.43*m.x430 + 43.43*m.x450 + 42.49*m.x461 + 42.49*m.x479 + 42.49*m.x484 + 42.49*m.x503 + 42.49*m.x523 - 30.94*m.x544 - 30.94*m.x549 - 30.94*m.x584 + 16.63*m.x605 + 16.63*m.x610 + 16.63*m.x619 + 16.63*m.x636 + 16.63*m.x656 + 43.64*m.x667 + 43.64*m.x673 + 43.64*m.x682 + 43.64*m.x702 + 29.34*m.x713 + 29.34*m.x718 + 29.34*m.x727 + 29.34*m.x744 - 11.51*m.x753 - 11.51*m.x771 - 11.51*m.x786 - 21.7*m.x795 - 21.7*m.x811 - 21.7*m.x840 + 31.99*m.x852 + 9.8*m.x863 + 9.8*m.x879 - 13.82*m.x894 - 13.82*m.x931 + 39.71*m.x968 + 24.08*m.x979 + 24.08*m.x995 + 31.04*m.x1021 + 31.04*m.x1027 + 31.04*m.x1049 - 15.53*m.x1063 - 3.23*m.x1084 + 41.34*m.x1113 + 42.49*m.x1137 + 43.64*m.x1168 - 11.51*m.x1187 - 21.7*m.x1197 + 24.08*m.x1237 <= 0) m.c203 = Constraint(expr= - 61.38*m.x92 - 61.38*m.x100 - 61.38*m.x105 - 61.38*m.x114 - 61.38*m.x133 - 60.96*m.x152 - 60.96*m.x171 - 55.54*m.x186 - 55.54*m.x204 - 55.54*m.x209 - 55.54*m.x234 - 27.82*m.x255 - 27.82*m.x260 - 27.82*m.x268 - 27.82*m.x294 - 24.08*m.x305 - 24.08*m.x323 - 24.08*m.x338 - 11.85*m.x347 - 11.85*m.x353 - 11.85*m.x370 - 11.85*m.x396 - 62.9*m.x407 - 62.9*m.x425 - 62.9*m.x430 - 62.9*m.x450 - 15.47*m.x461 - 15.47*m.x479 - 15.47*m.x484 - 15.47*m.x503 - 15.47*m.x523 - 50.04*m.x544 - 50.04*m.x549 - 50.04*m.x584 - 11.04*m.x605 - 11.04*m.x610 - 
11.04*m.x619 - 11.04*m.x636 - 11.04*m.x656 - 41.31*m.x667 - 41.31*m.x673 - 41.31*m.x682 - 41.31*m.x702 - 7.08*m.x713 - 7.08*m.x718 - 7.08*m.x727 - 7.08*m.x744 + 3.66*m.x753 + 3.66*m.x771 + 3.66*m.x786 - 8.88*m.x795 - 8.88*m.x811 - 8.88*m.x840 + 1.35*m.x852 - 30.62*m.x863 - 30.62*m.x879 + 9.84*m.x894 + 9.84*m.x931 - 42.53*m.x968 - 55.85*m.x979 - 55.85*m.x995 - 59.97*m.x1021 - 59.97*m.x1027 - 59.97*m.x1049 - 61.38*m.x1063 - 55.54*m.x1084 - 11.85*m.x1113 - 15.47*m.x1137 - 41.31*m.x1168 + 3.66*m.x1187 - 8.88*m.x1197 - 55.85*m.x1237 <= 0) m.c204 = Constraint(expr= 14.46*m.x92 + 14.46*m.x100 + 14.46*m.x105 + 14.46*m.x114 + 14.46*m.x133 - 24.56*m.x152 - 24.56*m.x171 - 30.47*m.x186 - 30.47*m.x204 - 30.47*m.x209 - 30.47*m.x234 - 39.49*m.x255 - 39.49*m.x260 - 39.49*m.x268 - 39.49*m.x294 - 48.05*m.x305 - 48.05*m.x323 - 48.05*m.x338 - 32.22*m.x347 - 32.22*m.x353 - 32.22*m.x370 - 32.22*m.x396 - 20.6*m.x407 - 20.6*m.x425 - 20.6*m.x430 - 20.6*m.x450 + 1.56*m.x461 + 1.56*m.x479 + 1.56*m.x484 + 1.56*m.x503 + 1.56*m.x523 - 5.19*m.x544 - 5.19*m.x549 - 5.19*m.x584 + 18.16*m.x605 + 18.16*m.x610 + 18.16*m.x619 + 18.16*m.x636 + 18.16*m.x656 + 8.62*m.x667 + 8.62*m.x673 + 8.62*m.x682 + 8.62*m.x702 - 1.66*m.x713 - 1.66*m.x718 - 1.66*m.x727 - 1.66*m.x744 - 14.73*m.x753 - 14.73*m.x771 - 14.73*m.x786 - 44.72*m.x795 - 44.72*m.x811 - 44.72*m.x840 - 44.43*m.x852 - 40.71*m.x863 - 40.71*m.x879 - 45.9*m.x894 - 45.9*m.x931 + 5.14*m.x968 - 27.92*m.x979 - 27.92*m.x995 + 4.32*m.x1021 + 4.32*m.x1027 + 4.32*m.x1049 + 14.46*m.x1063 - 30.47*m.x1084 - 32.22*m.x1113 + 1.56*m.x1137 + 8.62*m.x1168 - 14.73*m.x1187 - 44.72*m.x1197 - 27.92*m.x1237 <= 0) m.c205 = Constraint(expr= - 17.87*m.x92 - 17.87*m.x100 - 17.87*m.x105 - 17.87*m.x114 - 17.87*m.x133 - 16.62*m.x152 - 16.62*m.x171 - 4.51*m.x186 - 4.51*m.x204 - 4.51*m.x209 - 4.51*m.x234 - 59.3*m.x255 - 59.3*m.x260 - 59.3*m.x268 - 59.3*m.x294 - 11.57*m.x305 - 11.57*m.x323 - 11.57*m.x338 - 3.27*m.x347 - 3.27*m.x353 - 3.27*m.x370 - 3.27*m.x396 - 60.92*m.x407 - 
60.92*m.x425 - 60.92*m.x430 - 60.92*m.x450 - 39.79*m.x461 - 39.79*m.x479 - 39.79*m.x484 - 39.79*m.x503 - 39.79*m.x523 - 17.34*m.x544 - 17.34*m.x549 - 17.34*m.x584 + 5.46*m.x605 + 5.46*m.x610 + 5.46*m.x619 + 5.46*m.x636 + 5.46*m.x656 - 23.62*m.x667 - 23.62*m.x673 - 23.62*m.x682 - 23.62*m.x702 - 49.34*m.x713 - 49.34*m.x718 - 49.34*m.x727 - 49.34*m.x744 - 47.93*m.x753 - 47.93*m.x771 - 47.93*m.x786 - 58.66*m.x795 - 58.66*m.x811 - 58.66*m.x840 - 5.92*m.x852 - 27.6*m.x863 - 27.6*m.x879 - 12.63*m.x894 - 12.63*m.x931 + 10.33*m.x968 + 9.91*m.x979 + 9.91*m.x995 - 11.66*m.x1021 - 11.66*m.x1027 - 11.66*m.x1049 - 17.87*m.x1063 - 4.51*m.x1084 - 3.27*m.x1113 - 39.79*m.x1137 - 23.62*m.x1168 - 47.93*m.x1187 - 58.66*m.x1197 + 9.91*m.x1237 <= 0) m.c206 = Constraint(expr= - 49.59*m.x92 - 49.59*m.x100 - 49.59*m.x105 - 49.59*m.x114 - 49.59*m.x133 + 2.33*m.x152 + 2.33*m.x171 - 34.89*m.x186 - 34.89*m.x204 - 34.89*m.x209 - 34.89*m.x234 - 6.77*m.x255 - 6.77*m.x260 - 6.77*m.x268 - 6.77*m.x294 - 17.06*m.x305 - 17.06*m.x323 - 17.06*m.x338 - 50.85*m.x347 - 50.85*m.x353 - 50.85*m.x370 - 50.85*m.x396 + 1.03*m.x407 + 1.03*m.x425 + 1.03*m.x430 + 1.03*m.x450 - 50.15*m.x461 - 50.15*m.x479 - 50.15*m.x484 - 50.15*m.x503 - 50.15*m.x523 - 32.36*m.x544 - 32.36*m.x549 - 32.36*m.x584 - 53.61*m.x605 - 53.61*m.x610 - 53.61*m.x619 - 53.61*m.x636 - 53.61*m.x656 - 4.6*m.x667 - 4.6*m.x673 - 4.6*m.x682 - 4.6*m.x702 + 13.65*m.x713 + 13.65*m.x718 + 13.65*m.x727 + 13.65*m.x744 - 52.99*m.x753 - 52.99*m.x771 - 52.99*m.x786 - 25.58*m.x795 - 25.58*m.x811 - 25.58*m.x840 - 42.39*m.x852 - 64.68*m.x863 - 64.68*m.x879 + 13.13*m.x894 + 13.13*m.x931 - 14.15*m.x968 - 48.9*m.x979 - 48.9*m.x995 - 44.44*m.x1021 - 44.44*m.x1027 - 44.44*m.x1049 - 49.59*m.x1063 - 34.89*m.x1084 - 50.85*m.x1113 - 50.15*m.x1137 - 4.6*m.x1168 - 52.99*m.x1187 - 25.58*m.x1197 - 48.9*m.x1237 <= 0) m.c207 = Constraint(expr= - 25.2*m.x92 - 25.2*m.x100 - 25.2*m.x105 - 25.2*m.x114 - 25.2*m.x133 - 60.05*m.x152 - 60.05*m.x171 - 72.39*m.x186 - 72.39*m.x204 - 
72.39*m.x209 - 72.39*m.x234 - 66.97*m.x255 - 66.97*m.x260 - 66.97*m.x268 - 66.97*m.x294 - 43.03*m.x305 - 43.03*m.x323 - 43.03*m.x338 - 4.26*m.x347 - 4.26*m.x353 - 4.26*m.x370 - 4.26*m.x396 - 65.71*m.x407 - 65.71*m.x425 - 65.71*m.x430 - 65.71*m.x450 - 48.43*m.x461 - 48.43*m.x479 - 48.43*m.x484 - 48.43*m.x503 - 48.43*m.x523 - 64.06*m.x544 - 64.06*m.x549 - 64.06*m.x584 - 17.62*m.x605 - 17.62*m.x610 - 17.62*m.x619 - 17.62*m.x636 - 17.62*m.x656 - 24.02*m.x667 - 24.02*m.x673 - 24.02*m.x682 - 24.02*m.x702 - 48.2*m.x713 - 48.2*m.x718 - 48.2*m.x727 - 48.2*m.x744 - 23.7*m.x753 - 23.7*m.x771 - 23.7*m.x786 - 23.26*m.x795 - 23.26*m.x811 - 23.26*m.x840 - 69.56*m.x852 - 3.87*m.x863 - 3.87*m.x879 - 56*m.x894 - 56*m.x931 - 36.04*m.x968 - 76.69*m.x979 - 76.69*m.x995 - 66.24*m.x1021 - 66.24*m.x1027 - 66.24*m.x1049 - 25.2*m.x1063 - 72.39*m.x1084 - 4.26*m.x1113 - 48.43*m.x1137 - 24.02*m.x1168 - 23.7*m.x1187 - 23.26*m.x1197 - 76.69*m.x1237 <= 0) m.c208 = Constraint(expr= 7.64*m.x92 + 7.64*m.x100 + 7.64*m.x105 + 7.64*m.x114 + 7.64*m.x133 - 48.49*m.x152 - 48.49*m.x171 + 2.87*m.x186 + 2.87*m.x204 + 2.87*m.x209 + 2.87*m.x234 - 67.43*m.x255 - 67.43*m.x260 - 67.43*m.x268 - 67.43*m.x294 - 47.58*m.x305 - 47.58*m.x323 - 47.58*m.x338 - 64.94*m.x347 - 64.94*m.x353 - 64.94*m.x370 - 64.94*m.x396 - 51.7*m.x407 - 51.7*m.x425 - 51.7*m.x430 - 51.7*m.x450 - 5.78*m.x461 - 5.78*m.x479 - 5.78*m.x484 - 5.78*m.x503 - 5.78*m.x523 - 29*m.x544 - 29*m.x549 - 29*m.x584 + 5.1*m.x605 + 5.1*m.x610 + 5.1*m.x619 + 5.1*m.x636 + 5.1*m.x656 - 49.05*m.x667 - 49.05*m.x673 - 49.05*m.x682 - 49.05*m.x702 - 44.44*m.x713 - 44.44*m.x718 - 44.44*m.x727 - 44.44*m.x744 - 53.66*m.x753 - 53.66*m.x771 - 53.66*m.x786 - 54.17*m.x795 - 54.17*m.x811 - 54.17*m.x840 - 10.83*m.x852 - 44.41*m.x863 - 44.41*m.x879 - 62.32*m.x894 - 62.32*m.x931 - 47.29*m.x968 - 26.82*m.x979 - 26.82*m.x995 + 3.15*m.x1021 + 3.15*m.x1027 + 3.15*m.x1049 + 7.64*m.x1063 + 2.87*m.x1084 - 64.94*m.x1113 - 5.78*m.x1137 - 49.05*m.x1168 - 53.66*m.x1187 - 54.17*m.x1197 - 
26.82*m.x1237 <= 0) m.c209 = Constraint(expr= 3.72*m.x92 + 3.72*m.x100 + 3.72*m.x105 + 3.72*m.x114 + 3.72*m.x133 - 15.49*m.x152 - 15.49*m.x171 - 7.43*m.x186 - 7.43*m.x204 - 7.43*m.x209 - 7.43*m.x234 - 34.29*m.x255 - 34.29*m.x260 - 34.29*m.x268 - 34.29*m.x294 - 8.62*m.x305 - 8.62*m.x323 - 8.62*m.x338 - 35.41*m.x347 - 35.41*m.x353 - 35.41*m.x370 - 35.41*m.x396 - 5.46*m.x407 - 5.46*m.x425 - 5.46*m.x430 - 5.46*m.x450 + 8.67*m.x461 + 8.67*m.x479 + 8.67*m.x484 + 8.67*m.x503 + 8.67*m.x523 + 8.86*m.x544 + 8.86*m.x549 + 8.86*m.x584 - 12.61*m.x605 - 12.61*m.x610 - 12.61*m.x619 - 12.61*m.x636 - 12.61*m.x656 - 7.82*m.x667 - 7.82*m.x673 - 7.82*m.x682 - 7.82*m.x702 - 3.31*m.x713 - 3.31*m.x718 - 3.31*m.x727 - 3.31*m.x744 - 55.96*m.x753 - 55.96*m.x771 - 55.96*m.x786 - 5.55*m.x795 - 5.55*m.x811 - 5.55*m.x840 - 42.73*m.x852 + 13.44*m.x863 + 13.44*m.x879 + 0.23*m.x894 + 0.23*m.x931 - 59.75*m.x968 - 57.05*m.x979 - 57.05*m.x995 - 26.49*m.x1021 - 26.49*m.x1027 - 26.49*m.x1049 + 3.72*m.x1063 - 7.43*m.x1084 - 35.41*m.x1113 + 8.67*m.x1137 - 7.82*m.x1168 - 55.96*m.x1187 - 5.55*m.x1197 - 57.05*m.x1237 <= 0) m.c210 = Constraint(expr= - 9.41*m.x92 - 9.41*m.x100 - 9.41*m.x105 - 9.41*m.x114 - 9.41*m.x133 - 6.36*m.x152 - 6.36*m.x171 - 53.02*m.x186 - 53.02*m.x204 - 53.02*m.x209 - 53.02*m.x234 - 4.31*m.x255 - 4.31*m.x260 - 4.31*m.x268 - 4.31*m.x294 - 55.98*m.x305 - 55.98*m.x323 - 55.98*m.x338 - 18.57*m.x347 - 18.57*m.x353 - 18.57*m.x370 - 18.57*m.x396 - 8.18*m.x407 - 8.18*m.x425 - 8.18*m.x430 - 8.18*m.x450 + 2.77*m.x461 +
entity_id) task_key = entity.get('key') self.prefs[entity_key][task_key]['isFolded'] = not self.prefs[entity_key][task_key]['isFolded'] def page_fwd(self, *args, **kwargs): self.prefs['current_page'] += 1 def page_bkw(self, *args, **kwargs): self.prefs['current_page'] = max(self.prefs['current_page'] - 1, 0) def refresh(self, *args, **kwargs): pass class flameMenuPublisher(flameMenuApp): def __init__(self, framework, connector): flameMenuApp.__init__(self, framework) self.connector = connector # app defaults if not self.prefs.master.get(self.name): self.prefs['show_all'] = True self.prefs['current_page'] = 0 self.prefs['menu_max_items_per_page'] = 128 self.prefs['flame_bug_message_shown'] = False self.prefs['templates'] = default_templates # init values from default for entity_type in self.prefs['templates'].keys(): for template in self.prefs['templates'][entity_type].keys(): if isinstance(self.prefs['templates'][entity_type][template], dict): if 'default' in self.prefs['templates'][entity_type][template].keys(): self.prefs['templates'][entity_type][template]['value'] = self.prefs['templates'][entity_type][template]['default'] self.prefs['flame_export_presets'] = default_flame_export_presets self.prefs['poster_frame'] = 1 self.prefs['version_zero'] = False if not self.prefs_global.master.get(self.name): self.prefs_global['temp_files_list'] = [] self.flame_bug_message = False self.selected_clips = [] self.create_export_presets() self.progress = self.publish_progress_dialog() def __getattr__(self, name): def method(*args, **kwargs): entity = self.dynamic_menu_data.get(name) if entity: if entity.get('caller') == 'build_addremove_menu': self.show_bug_message() self.update_loader_list(entity) elif entity.get('caller') == 'flip_assigned_for_entity': self.show_bug_message() self.flip_assigned_for_entity(entity) elif entity.get('caller') == 'fold_step_entity': self.fold_step_entity(entity) elif entity.get('caller') == 'fold_task_entity': self.fold_task_entity(entity) elif 
entity.get('caller') == 'publish': self.publish(entity, args[0]) self.connector.bootstrap_toolkit() self.rescan() self.progress.hide() return method def create_uid(self): import uuid uid = ((str(uuid.uuid1()).replace('-', '')).upper()) return uid[:4] def scope_clip(self, selection): selected_clips = [] visibility = False for item in selection: if isinstance(item, (self.flame.PyClip)): selected_clips.append(item) visibility = True return visibility def build_menu(self): if not self.connector.sg_user: return None if not self.connector.sg_linked_project_id: return None batch_name = self.flame.batch.name.get_value() tasks = [] cached_tasks = self.connector.cache_retrive_result('current_tasks') if not isinstance(cached_tasks, list): return [] for cached_task in cached_tasks: if not cached_task.get('entity'): continue tasks.append(cached_task) entities_id_list = [task.get('entity').get('id') for task in tasks] add_menu_list = [] if (('additional menu ' + batch_name) in self.prefs.keys()) and self.prefs.get('additional menu ' + batch_name): add_menu_list = self.prefs.get('additional menu ' + batch_name) for index, stored_entity in enumerate(add_menu_list): stored_entity_type = stored_entity.get('type', 'Shot') stored_entity_id = stored_entity.get('id', 0) if not stored_entity_id in entities_id_list: add_menu_list.pop(index) if not add_menu_list: entity = {} for task in tasks: current_entity = task.get('entity') if current_entity: if current_entity.get('name') == batch_name: entity = current_entity break if entity: self.update_loader_list(entity) add_menu_list = self.prefs.get('additional menu ' + batch_name) else: self.prefs['additional menu ' + batch_name] = [] entity = {} for task in tasks: current_entity = task.get('entity') if current_entity: if current_entity.get('name') == batch_name: entity = current_entity break if entity: self.update_loader_list(entity) add_menu_list = self.prefs.get('additional menu ' + batch_name) menus = [] add_remove_menu = 
    def build_addremove_menu(self):
        """Build the "<group> Add/Remove" menu for the current Flame batch.

        Returns a menu dict with a 'name' and an 'actions' list (one dict per
        clickable row), or None when the connector has no user or no linked
        project. Entities already stored in this batch's "additional menu"
        preference are marked with ' * '. When the row count exceeds the
        'menu_max_items_per_page' preference, the body is windowed into pages
        with prev/next controls.
        """
        if not self.connector.sg_user:
            return None
        if not self.connector.sg_linked_project:
            return None
        # NOTE(review): flame_project_name is assigned but never used below.
        flame_project_name = self.flame.project.current_project.name
        batch_name = self.flame.batch.name.get_value()
        # ids of entities already added to this batch's menu preference
        entities_to_mark = []
        add_menu_list = self.prefs.get('additional menu ' + batch_name)
        for item in add_menu_list:
            entities_to_mark.append(item.get('id'))
        menu = {'actions': []}
        menu['name'] = self.menu_group_name + ' Add/Remove'
        # fixed controls: rescan + show-all / assigned-only toggle
        menu_item = {}
        menu_item['name'] = '~ Rescan'
        menu_item['execute'] = self.rescan
        menu['actions'].append(menu_item)
        menu_item = {}
        if self.prefs['show_all']:
            menu_item['name'] = '~ Show Assigned Only'
        else:
            menu_item['name'] = '~ Show All'
        menu_item['execute'] = self.flip_assigned
        menu['actions'].append(menu_item)
        user_only = not self.prefs['show_all']
        filter_out = ['Project', 'Sequence']
        found_entities = self.get_entities(user_only, filter_out)
        if len(found_entities) == 0:
            # disabled placeholder row when nothing matched
            menu_item = {}
            if self.prefs['show_all']:
                menu_item['name'] = ' '*4 + 'No tasks found'
            else:
                menu_item['name'] = ' '*4 + 'No assigned tasks found'
            menu_item['execute'] = self.rescan
            menu_item['isEnabled'] = False
            menu['actions'].append(menu_item)
        # NOTE(review): len(menu) counts the dict's keys ('actions', 'name'),
        # not the number of control rows appended above -- presumably meant to
        # approximate the control count; confirm before changing.
        menu_ctrls_len = len(menu)
        menu_lenght = menu_ctrls_len
        menu_lenght += len(found_entities.keys())
        for entity_type in found_entities.keys():
            menu_lenght += len(found_entities.get(entity_type))
        max_menu_lenght = self.prefs.get('menu_max_items_per_page')
        # menu_main_body holds one header row per entity type followed by one
        # row per entity, sorted by entity code
        menu_main_body = []
        for index, entity_type in enumerate(sorted(found_entities.keys())):
            menu_item = {}
            menu_item['name'] = '- [ ' + entity_type + 's ]'
            menu_item['execute'] = self.rescan
            menu_main_body.append(menu_item)
            entities_by_name = {}
            for entity in found_entities[entity_type]:
                entities_by_name[entity.get('code')] = entity
            for entity_name in sorted(entities_by_name.keys()):
                entity = entities_by_name.get(entity_name)
                menu_item = {}
                if entity.get('id') in entities_to_mark:
                    menu_item['name'] = ' * ' + entity.get('code')
                else:
                    menu_item['name'] = ' ' + entity.get('code')
                # route clicks through __getattr__ dispatch: the entity is
                # stashed under its id() and 'caller' records this method name
                entity['caller'] = inspect.currentframe().f_code.co_name
                self.dynamic_menu_data[str(id(entity))] = entity
                menu_item['execute'] = getattr(self, str(id(entity)))
                menu_main_body.append(menu_item)
        if menu_lenght < max_menu_lenght:
            # controls and entites fits within menu size
            # we do not need additional page switch controls
            for menu_item in menu_main_body:
                menu['actions'].append(menu_item)
        else:
            # round up number of pages and get current page
            num_of_pages = ((menu_lenght) + max_menu_lenght - 1) // max_menu_lenght
            curr_page = self.prefs.get('current_page')

            # decorate top with move backward control
            # if we're not on the first page
            if curr_page > 0:
                menu_item = {}
                menu_item['name'] = '<<[ prev page ' + str(curr_page) + ' of ' + str(num_of_pages) + ' ]'
                menu_item['execute'] = self.page_bkw
                menu['actions'].append(menu_item)

            # calculate the start and end position of a window
            # and append items to the list
            menu_used_space = menu_ctrls_len + 2  # two more controls for page flip
            window_size = max_menu_lenght - menu_used_space
            start_index = window_size*curr_page + min(1*curr_page, 1)
            # (curr_page+1) // num_of_pages is 1 only on the final page, so
            # the last page's window extends by one extra row
            end_index = window_size*curr_page+window_size + ((curr_page+1) // num_of_pages)
            for menu_item in menu_main_body[start_index:end_index]:
                menu['actions'].append(menu_item)

            # decorate bottom with move forward control
            # if we're not on the last page
            if curr_page < (num_of_pages - 1):
                menu_item = {}
                menu_item['name'] = '[ next page ' + str(curr_page+2) + ' of ' + str(num_of_pages) + ' ]>>'
                menu_item['execute'] = self.page_fwd
                menu['actions'].append(menu_item)
        return menu
entity.get('code'): entity['code'] = entity.get('name', 'no_name') entity_type = entity.get('type') entity_id = entity.get('id') entity_key = (entity_type, entity_id) if entity_key not in self.prefs.keys(): self.prefs[entity_key] = {} self.prefs[entity_key]['show_all'] = True cached_tasks_query = self.connector.async_cache.get('current_tasks') cached_tasks_by_entity = cached_tasks_query.get('by_entity') if cached_tasks_query else False tasks = cached_tasks_by_entity.get(entity_key, []) if cached_tasks_by_entity else [] cached_versions_query = self.connector.async_cache.get('current_versions') cached_versions_by_entity = cached_versions_query.get('by_entity') if cached_versions_query else False versions = cached_versions_by_entity.get(entity_key, []) if cached_versions_by_entity else [] cached_pbfiles_query = self.connector.async_cache.get('current_pbfiles') cached_pbfiles_by_entity = cached_pbfiles_query.get('by_entity') if cached_pbfiles_query else False pbfiles = cached_pbfiles_by_entity.get(entity_key, []) if cached_pbfiles_by_entity else [] if not self.connector.sg_human_user: human_user = {'id': 0} else: human_user = self.connector.sg_human_user menu = {} menu['name'] = self.menu_group_name + ' Publish ' + entity.get('code') + ':' menu['actions'] = [] menu_item = {} menu_item['name'] = '~ Rescan' menu_item['execute'] = self.rescan menu['actions'].append(menu_item) menu_item = {} show_all_entity = dict(entity) show_all_entity['caller'] = 'flip_assigned_for_entity' if self.prefs[entity_key]['show_all']: menu_item['name'] = '~ Show Assigned Only' else: menu_item['name'] = '~ Show All Tasks' self.dynamic_menu_data[str(id(show_all_entity))] = show_all_entity menu_item['execute'] = getattr(self, str(id(show_all_entity))) menu['actions'].append(menu_item) tasks_by_step = {} for task in tasks: task_assignees = task.get('task_assignees') user_ids = [] if task_assignees: for user in task_assignees: user_ids.append(user.get('id')) if not 
self.prefs[entity_key]['show_all']: if human_user.get('id') not in user_ids: continue step_name = task.get('step.Step.code') if not step_name: step_name = '' step_id = task.get('step.Step.id') if step_name not in tasks_by_step.keys(): tasks_by_step[step_name] = [] tasks_by_step[step_name].append(task) if len(tasks_by_step.values()) == 0: menu_item = {} if self.prefs[entity_key]['show_all']: menu_item['name'] = ' '*4 + 'No tasks found' else: menu_item['name'] = ' '*4 + 'No assigned tasks found' menu_item['execute'] = self.rescan menu_item['isEnabled'] = False menu['actions'].append(menu_item) current_steps = self.connector.async_cache.get('current_steps').get('result', dict()).values() entity_steps = [x for x in current_steps if x.get('entity_type') == entity_type] entity_steps_by_code = {step.get('code'):step for step in entity_steps} current_step_names = tasks_by_step.keys() current_step_order = [] for step in current_step_names: current_step_order.append(entity_steps_by_code.get(step).get('list_order')) for step_name in (x for _, x in sorted(zip(current_step_order, current_step_names))): step_key = ('Step', step_name) if step_key not in self.prefs[entity_key].keys(): self.prefs[entity_key][step_key] = {'isFolded': False} fold_step_entity = dict(entity) fold_step_entity['caller'] = 'fold_step_entity' fold_step_entity['key'] = step_key self.dynamic_menu_data[str(id(fold_step_entity))] = fold_step_entity menu_item = {} menu_item['execute'] = getattr(self, str(id(fold_step_entity))) if self.prefs[entity_key][step_key].get('isFolded') and len(tasks_by_step[step_name]) != 1: menu_item['name'] = '+ [ ' + step_name + ' ]' menu['actions'].append(menu_item) continue elif self.prefs[entity_key][step_key].get('isFolded') and tasks_by_step[step_name][0].get('content') != step_name: menu_item['name'] = '+ [ ' + step_name + ' ]' menu['actions'].append(menu_item) continue if len(tasks_by_step[step_name]) != 1: menu_item['name'] = '- [ ' + step_name + ' ]' 
menu['actions'].append(menu_item) elif tasks_by_step[step_name][0].get('content') != step_name: menu_item['name'] = '- [ ' + step_name + ' ]' menu['actions'].append(menu_item) for task in tasks_by_step[step_name]: task_key = ('Task', task.get('id')) if task_key not in self.prefs[entity_key].keys(): self.prefs[entity_key][task_key] = {'isFolded': False} fold_task_entity = dict(entity) fold_task_entity['caller'] = 'fold_task_entity' fold_task_entity['key'] = task_key self.dynamic_menu_data[str(id(fold_task_entity))] = fold_task_entity # fill in template fields from task task_Sequence = task.get('entity.Shot.sg_sequence', {}) task_Sequence_name = task_Sequence.get('name') task_Shot = entity.get('code') task_Asset = entity.get('code') task_sg_Asset_type = task.get('entity.Asset.sg_asset_type') task_Step = task.get('step.Step.code') task_Step_code = task.get('step.Step.short_name') task_name = task.get('content') menu_item = {} if (task_name == step_name) and (len(tasks_by_step[step_name]) == 1): if self.prefs[entity_key][task_key].get('isFolded'): menu_item['name'] = '+ [ ' + task_name + ' ]' else: menu_item['name'] = '- [ ' + task_name + ' ]' else: if
""" HKEX Simulation Message handle """ import selectors import struct from time import sleep from bitarray import bitarray from datetime import datetime from ocgmock .msg_type import MsgType from ocgmock .exec_type import ExecType from ocgmock .ord_status import OrdStatus from ocgmock .util.utils import Utils from ocgmock .util.crc32c import Crc32c class Message: def __init__(self, selector, sock, addr, keep_running, logger, config_map): self.selector = selector self.sock = sock self.addr = addr self._recv_buffer = b'' self._send_buffer = b'' self.request = b'' self.response_created = False self.send_seq_dict = {} self.receive_exp_next_seq_dict = {} self.order_seq_no = 1 self.exec_id_seq_no = 1 self.report_id_seq_no = 1 self.keep_running = keep_running self.logger = logger self.config_map = config_map self.crc = Crc32c(logger) self.clordid_orderid = {} def _set_selector_events_mask(self, mode): if mode == 'r': events = selectors.EVENT_READ elif mode == 'w': events = selectors.EVENT_WRITE elif mode == 'rw': events = selectors.EVENT_READ | selectors.EVENT_WRITE else: raise ValueError(f'Invalid events mask mode {repr(mode)}.') self.selector.modify(self.sock, events, data=self) def _read(self): try: data = self.sock.recv(4096) except BlockingIOError: self.logger.error('Read BlockingIOError') pass else: if data: self._recv_buffer += data else: self.selector.unregister(self.sock) self.sock.close() self.keep_running = False self.logger.error('Peer closed!') raise RuntimeError('Peer closed.') def _write(self): if self._send_buffer: # print('sending', repr(self._send_buffer), 'to', self.addr) ''' try: sent = self.sock.send(self._send_buffer) except BlockingIOError: self.logger.error('BlockingIOError') pass else: self.logger.info('Send length = %d bytes length = %d', sent, len(self._send_buffer)) self._send_buffer = self._send_buffer[sent:] ''' while len(self._send_buffer) > 3: plan_sent = struct.unpack('<H', self._send_buffer[1:3])[0] 
self.logger.info("@@@@@@@@@@@@@@@@@@@@********************* plan sent = %s, msg length = %d", plan_sent , len(self._send_buffer)) try: sent = self.sock.send(self._send_buffer[0:plan_sent]) sleep(0.01) except BlockingIOError: self.logger.error('BlockingIOError') pass else: self.logger.info('Send length = %d bytes length = %d', sent, len(self._send_buffer)) self._send_buffer = self._send_buffer[sent:] # if sent and not self._send_buffer: # self.close() def process_events(self, mask): self.logger.debug('process_events(), mask = %d', mask) if mask & selectors.EVENT_READ: self.read() if mask & selectors.EVENT_WRITE: self.write() def read(self): self._read() # response message self.process_request() def write(self): if self.request: # if not self.response_created: self.create_response() self._write() def close(self): self.logger.info('closing connection to %s', self.addr) try: if self.sock and (self.sock != -1): self.selector.unregister(self.sock) except ValueError as e1: pass except Exception as e: self.logger.exception('error: selector.unregister() exception for %s', self.addr) finally: self.sock = None def process_request(self): self.logger.info('process_request') data_len = struct.unpack('<H', self._recv_buffer[1:3])[0] if not len(self._recv_buffer) >= data_len: self.logger.info('======== truncate some data =======================') return data = self._recv_buffer[:data_len] self._recv_buffer = self._recv_buffer[data_len:] self.request = data msg_type = self.request[3] self.logger.info('received data from %s - msgType = %s', self.addr, msg_type) # Set selector to listen for write events, we're done reading. 
self._set_selector_events_mask('w') def create_response(self): msg_type = self.request[3] comp_id = self.request[10:22] self.logger.info("MsgType = %s", msg_type) # header 54 bytes, trailer 4 bytes msg_len = 58 comp_id_s = comp_id.decode('utf-8') present_map = bitarray(32 * 8, endian='big') present_map.setall(False) if msg_type == MsgType.LOGON: self.handle_logon(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.HEART_BEAT: self.handle_heartbeat(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.LOGOUT: self.handle_logout(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.TEST_REQUEST: self.handle_test_request(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.RESEND_REQUEST: self.handle_resend_request(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.LOOKUP_REQUEST: self.handle_lookup(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.NEW_ORDER_SINGLE: self.handle_new_order(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.ORDER_CANCEL_REQUEST: self.handle_cancel_request(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.ORDER_CANCEL_REPLACE_REQUEST: self.handle_amend_request(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.OBO_CANCEL_REQUEST: self.handle_obo_cancel_request(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.THROTTLE_ENTITLEMENT_REQUEST: self.handle_throttle_entitlement_request(present_map, msg_len, comp_id, comp_id_s) elif msg_type == MsgType.PARTY_ENTITLEMENT_REQUEST: self.handle_party_entitlement_request(present_map, msg_len, comp_id, comp_id_s) else: self.logger.info('un-implement for msg_type = ', msg_type) self.request = b'' self._set_selector_events_mask('r') return def encode_msg_header(self, msg_type, comp_id, comp_id_s, msg_len, present_map): seq_num = 1 self.logger.info('msg_type=%d comp_id = %s, comp_id type = %s', msg_type, comp_id, type(comp_id)) # ', present_map = ' , 
present_map, ', present_map type = ', type(present_map)) if comp_id_s in self.send_seq_dict: self.send_seq_dict[comp_id_s] += 1 else: self.send_seq_dict[comp_id_s] = seq_num seq_num = self.send_seq_dict[comp_id_s] return struct.pack('<bHBI2B12s32s', 2, msg_len, msg_type, seq_num, 0, 0, comp_id, present_map.tobytes()) def encode_msg_trailer(self, msg): ret = msg + struct.pack('<I', self.crc.calc_checksum(msg) & 0xFFFFFFFF) self.logger.info(Utils.print_binary(msg)) return ret def handle_throttle_entitlement_request(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Throttle Entitlement Response=' self.logger.info('Throttle Entitlement Request') resp_msg_type = MsgType.THROTTLE_ENTITLEMENT_RESPONSE pos = 54 user_req_id = self.request[pos: pos + 20] pos += 20 user_req_type = self.request[pos] pos += 1 user_name = self.request[pos: pos+50] self.logger.info('user_req_id=%s user_req_type = %d, user_name = %s', user_req_id.decode('utf-8'), user_req_type, user_name.decode('utf-8')) # generate the party entitlement report present_map[0], present_map[1], present_map[2] = 1, 1, 1 msg_body = struct.pack('<20s50sH', user_req_id, user_name, 1) msg_len += 20 + 50 + 2 thro_pre_map = bitarray(2 * 8, endian='big') thro_pre_map.setall(False) thro_pre_map[0], thro_pre_map[1], thro_pre_map[2], thro_pre_map[3], thro_pre_map[4] = 1, 1, 1, 1, 1 msg_body += struct.pack('<2sBBHBB', thro_pre_map.tobytes(), 2, 0, 8, 0, 0) msg_len += 2 + 1 + 1 + 2 + 1 + 1 message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... 
' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_party_entitlement_request(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Party Entitlement Report=' self.logger.info('Party Entitlement Request') resp_msg_type = MsgType.PARTY_ENTITLEMENT_REPORT pos = 54 entitle_req_id = self.request[pos: pos + 21] # generate the party entitlement report present_map[0], present_map[1], present_map[2], present_map[3], present_map[4] = 1, 1, 1, 1, 1 present_map[5] = 1 entitle_report_id = datetime.now().strftime('%H%M%S') + format(str(self.report_id_seq_no), 's').zfill(5) self.report_id_seq_no += 1 broker_id = '1122' request_result, total_no_party_list, last_fragment = 0, 1, 1 msg_body = struct.pack('<21s21sHHB12s', entitle_report_id.encode('utf-8'), entitle_req_id, request_result , total_no_party_list, last_fragment, broker_id.encode('utf-8')) msg_len += 21 + 21 + 2 + 2 + 1 + 12 message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... ' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_amend_request(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Execution Report=' self.logger.info('Order Amend Request') resp_msg_type = MsgType.EXECUTION_REPORT msg_body = b'' message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... 
' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_obo_cancel_request(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Execution Report=' self.logger.info('Order OBO Cancel Request') resp_msg_type = MsgType.EXECUTION_REPORT msg_body = b'' message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... ' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r') def handle_cancel_request(self, present_map, msg_len, comp_id, comp_id_s): mm = 'Execution Report=' self.logger.info('Order Cancel Request') resp_msg_type = MsgType.EXECUTION_REPORT utc_now = datetime.utcnow() transact_time = datetime.strftime(utc_now, '%Y%m%d %H:%M:%S.%f')[:-3] pm = bitarray(format(self.request[22], 'b').zfill(8), endian='big') pm.extend(format(self.request[23], 'b').zfill(8)) pm.extend(format(self.request[24], 'b').zfill(8)) pos = 54 cl_ord_id = self.request[pos: pos + 21] pos += 21 submitting_broker_id = self.request[pos: pos + 12] pos += 12 security_id = self.request[pos: pos + 21] pos += 21 security_id_source = self.request[pos] pos += 1 exch, broker_location_id = None, None if pm[4]: exch = self.request[pos: pos + 5] pos += 5 if pm[5]: broker_location_id = self.request[pos: pos + 11] pos += 11 ord_transact_time = self.request[pos: pos + 25] pos += 25 side = self.request[pos] pos += 1 orig_cl_ord_id = self.request[pos: pos + 21] pos += 21 order_id_in_req = None if pm[9]: order_id_in_req = self.request[pos: pos + 21].decode('utf-8') pos += 21 # msg_body = b'' symbol = security_id.decode('utf8').strip('\x00') self.logger.info(' --------------- securityId = %s ------------------', symbol) if symbol == '13': self.generate_business_reject(comp_id, comp_id_s, cl_ord_id, 13, struct.unpack('<I', self.request[4: 8])[0]) 
return present_map[0], present_map[1], present_map[2], present_map[3], present_map[4] = 1, 1, 1, 1, 1 msg_body = struct.pack('<21s12s21sB5s', cl_ord_id, submitting_broker_id, security_id, security_id_source, exch) if orig_cl_ord_id in self.clordid_orderid: order_id, order_qty, broker_location_id = self.clordid_orderid[orig_cl_ord_id] else: if order_id_in_req: order_id = order_id_in_req else: order_id = datetime.now().strftime('%H%M%S') + format(str(self.order_seq_no), 's').zfill(5) order_qty = None broker_location_id = None if broker_location_id: present_map[5] = 1 msg_body += struct.pack('<11s', broker_location_id) msg_len += 11 msg_len += 21 + 12 + 21 + 1 + 5 present_map[6], present_map[7], present_map[8], present_map[9], present_map[11] = 1, 1, 1, 1, 1 self.order_seq_no += 1 msg_body += struct.pack('<25sB21s21sB', transact_time.encode('utf-8'), side, orig_cl_ord_id , order_id.encode('utf-8'), b'2'[0]) msg_len += 25 + 1 + 21 + 21 + 1 if order_qty: present_map[13] = 1 msg_body += struct.pack('<Q', order_qty) msg_len += 8 present_map[21], present_map[22], present_map[23], present_map[24], present_map[25] = 1, 1, 1, 1, 1 exec_id = datetime.now().strftime('%H%M%S') + format(str(self.exec_id_seq_no), 's').zfill(5) self.exec_id_seq_no += 1 msg_body += struct.pack('<21sBcQQ', exec_id.encode('utf-8'), OrdStatus.CANCELLED, ExecType.CANCEL.value.encode('utf-8'), 0, 1000) msg_len += 21 + 1 + 1 + 8 + 8 message = self.encode_msg_trailer(self.encode_msg_header(resp_msg_type, comp_id, comp_id_s, msg_len, present_map) + msg_body) mm += Utils.print_binary(message) self.logger.info(mm) # message = b'Response ... ' self.response_created = True self._send_buffer += message self.request = b'' self._set_selector_events_mask('r')
<gh_stars>1-10
#!/usr/bin/env python
"""
WordAPI.py
Copyright 2014 Wordnik, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
import sys
import os

from .models import *


class WordApi(object):
    """Swagger-generated client for Wordnik's /word.{format}/{word} endpoints.

    Every method follows the same generated pattern: validate **kwargs
    against a whitelist, build the resource path and query string, issue the
    request through ``self.apiClient`` and deserialize the JSON response
    into a model class.
    """

    def __init__(self, apiClient):
        # apiClient provides toPathValue(), callAPI() and deserialize().
        self.apiClient = apiClient

    def getExamples(self, word, **kwargs):
        """Returns examples for a word

        Args:
            word, str: Word to return examples for (required)
            includeDuplicates, str: Show duplicate examples from different sources (optional)
            useCanonical, str: If true will try to return the correct word root ('cats' -&gt; 'cat'). If false returns exactly what was requested. (optional)
            skip, int: Results to skip (optional)
            limit, int: Maximum number of results to return (optional)

        Returns: ExampleSearchResults
        """
        # Reject unexpected kwargs, then fold accepted ones into params.
        allParams = ['word', 'includeDuplicates', 'useCanonical', 'skip', 'limit']

        params = locals()
        for (key, val) in params['kwargs'].items():
            if key not in allParams:
                raise TypeError("Got an unexpected keyword argument '%s' to method getExamples" % key)
            params[key] = val
        del params['kwargs']

        resourcePath = '/word.{format}/{word}/examples'
        resourcePath = resourcePath.replace('{format}', 'json')
        method = 'GET'

        queryParams = {}
        headerParams = {}

        if ('includeDuplicates' in params):
            queryParams['includeDuplicates'] = self.apiClient.toPathValue(params['includeDuplicates'])
        if ('useCanonical' in params):
            queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
        if ('skip' in params):
            queryParams['skip'] = self.apiClient.toPathValue(params['skip'])
        if ('limit' in params):
            queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
        if ('word' in params):
            replacement = str(self.apiClient.toPathValue(params['word']))
            resourcePath = resourcePath.replace('{' + 'word' + '}', replacement)
        # NOTE(review): 'body' is not in allParams, so it can never reach
        # params here -- postData is always None for these GET calls.
        postData = (params['body'] if 'body' in params else None)
        response = self.apiClient.callAPI(resourcePath, method, queryParams, postData, headerParams)
        if not response:
            return None
        responseObject = self.apiClient.deserialize(response, 'ExampleSearchResults')
        return responseObject

    def getWord(self, word, **kwargs):
        """Given a word as a string, returns the WordObject that represents it

        Args:
            word, str: String value of WordObject to return (required)
            useCanonical, str: If true will try to return the correct word root ('cats' -&gt; 'cat'). If false returns exactly what was requested. (optional)
            includeSuggestions, str: Return suggestions (for correct spelling, case variants, etc.) (optional)

        Returns: WordObject
        """
        # Same generated request-building pattern as getExamples.
        allParams = ['word', 'useCanonical', 'includeSuggestions']

        params = locals()
        for (key, val) in params['kwargs'].items():
            if key not in allParams:
                raise TypeError("Got an unexpected keyword argument '%s' to method getWord" % key)
            params[key] = val
        del params['kwargs']

        resourcePath = '/word.{format}/{word}'
        resourcePath = resourcePath.replace('{format}', 'json')
        method = 'GET'

        queryParams = {}
        headerParams = {}

        if ('useCanonical' in params):
            queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
        if ('includeSuggestions' in params):
            queryParams['includeSuggestions'] = self.apiClient.toPathValue(params['includeSuggestions'])
        if ('word' in params):
            replacement = str(self.apiClient.toPathValue(params['word']))
            resourcePath = resourcePath.replace('{' + 'word' + '}', replacement)
        postData = (params['body'] if 'body' in params else None)
        response = self.apiClient.callAPI(resourcePath, method, queryParams, postData, headerParams)
        if not response:
            return None
        responseObject = self.apiClient.deserialize(response, 'WordObject')
        return responseObject

    def getDefinitions(self, word, **kwargs):
        """Return definitions for a word

        Args:
            word, str: Word to return definitions for (required)
            partOfSpeech, str: CSV list of part-of-speech types (optional)
            sourceDictionaries, str: Source dictionary to return definitions from. If 'all' is received, results are returned from all sources. If multiple values are received (e.g. 'century,wiktionary'), results are returned from the first specified dictionary that has definitions. If left blank, results are returned from the first dictionary that has definitions. By default, dictionaries are searched in this order: ahd, wiktionary, webster, century, wordnet (optional)
            limit, int: Maximum number of results to return (optional)
            includeRelated, str: Return related words with definitions (optional)
            useCanonical, str: If true will try to return the correct word root ('cats' -&gt; 'cat'). If false returns exactly what was requested. (optional)
            includeTags, str: Return a closed set of XML tags in response (optional)

        Returns: list[Definition]
        """
        # Same generated request-building pattern as getExamples.
        allParams = ['word', 'partOfSpeech', 'sourceDictionaries', 'limit', 'includeRelated', 'useCanonical', 'includeTags']

        params = locals()
        for (key, val) in params['kwargs'].items():
            if key not in allParams:
                raise TypeError("Got an unexpected keyword argument '%s' to method getDefinitions" % key)
            params[key] = val
        del params['kwargs']

        resourcePath = '/word.{format}/{word}/definitions'
        resourcePath = resourcePath.replace('{format}', 'json')
        method = 'GET'

        queryParams = {}
        headerParams = {}

        if ('limit' in params):
            queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
        if ('partOfSpeech' in params):
            queryParams['partOfSpeech'] = self.apiClient.toPathValue(params['partOfSpeech'])
        if ('includeRelated' in params):
            queryParams['includeRelated'] = self.apiClient.toPathValue(params['includeRelated'])
        if ('sourceDictionaries' in params):
            queryParams['sourceDictionaries'] = self.apiClient.toPathValue(params['sourceDictionaries'])
        if ('useCanonical' in params):
            queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
        if ('includeTags' in params):
            queryParams['includeTags'] = self.apiClient.toPathValue(params['includeTags'])
        if ('word' in params):
            replacement = str(self.apiClient.toPathValue(params['word']))
            resourcePath = resourcePath.replace('{' + 'word' + '}', replacement)
        postData = (params['body'] if 'body' in params else None)
        response = self.apiClient.callAPI(resourcePath, method, queryParams, postData, headerParams)
        if not response:
            return None
        responseObject = self.apiClient.deserialize(response, 'list[Definition]')
        return responseObject

    def getTopExample(self, word, **kwargs):
        """Returns a top example for a word

        Args:
            word, str: Word to fetch examples for (required)
            useCanonical, str: If true will try to return the correct word root ('cats' -&gt; 'cat'). If false returns exactly what was requested. (optional)

        Returns: Example
        """
        # Same generated request-building pattern as getExamples.
        allParams = ['word', 'useCanonical']

        params = locals()
        for (key, val) in params['kwargs'].items():
            if key not in allParams:
                raise TypeError("Got an unexpected keyword argument '%s' to method getTopExample" % key)
            params[key] = val
        del params['kwargs']

        resourcePath = '/word.{format}/{word}/topExample'
        resourcePath = resourcePath.replace('{format}', 'json')
        method = 'GET'

        queryParams = {}
        headerParams = {}

        if ('useCanonical' in params):
            queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
        if ('word' in params):
            replacement = str(self.apiClient.toPathValue(params['word']))
            resourcePath = resourcePath.replace('{' + 'word' + '}', replacement)
        postData = (params['body'] if 'body' in params else None)
        response = self.apiClient.callAPI(resourcePath, method, queryParams, postData, headerParams)
        if not response:
            return None
        responseObject = self.apiClient.deserialize(response, 'Example')
        return responseObject

    def getRelatedWords(self, word, **kwargs):
        """Given a word as a string, returns relationships from the Word Graph

        Args:
            word, str: Word to fetch relationships for (required)
            relationshipTypes, str: Limits the total results per type of relationship type (optional)
            useCanonical, str: If true will try to return the correct word root ('cats' -&gt; 'cat'). If false returns exactly what was requested. (optional)
            limitPerRelationshipType, int: Restrict to the supplied relatinship types (optional)

        Returns: list[Related]
        """
        # Same generated request-building pattern as getExamples.
        allParams = ['word', 'relationshipTypes', 'useCanonical', 'limitPerRelationshipType']

        params = locals()
        for (key, val) in params['kwargs'].items():
            if key not in allParams:
                raise TypeError("Got an unexpected keyword argument '%s' to method getRelatedWords" % key)
            params[key] = val
        del params['kwargs']

        resourcePath = '/word.{format}/{word}/relatedWords'
        resourcePath = resourcePath.replace('{format}', 'json')
        method = 'GET'

        queryParams = {}
        headerParams = {}

        if ('useCanonical' in params):
            queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
        if ('relationshipTypes' in params):
            queryParams['relationshipTypes'] = self.apiClient.toPathValue(params['relationshipTypes'])
        if ('limitPerRelationshipType' in params):
            queryParams['limitPerRelationshipType'] = self.apiClient.toPathValue(params['limitPerRelationshipType'])
        if ('word' in params):
            replacement = str(self.apiClient.toPathValue(params['word']))
            resourcePath = resourcePath.replace('{' + 'word' + '}', replacement)
        postData = (params['body'] if 'body' in params else None)
        response = self.apiClient.callAPI(resourcePath, method, queryParams, postData, headerParams)
        if not response:
            return None
        responseObject = self.apiClient.deserialize(response, 'list[Related]')
        return responseObject

    def getTextPronunciations(self, word, **kwargs):
        """Returns text pronunciations for a given word

        Args:
            word, str: Word to get pronunciations for (required)
            sourceDictionary, str: Get from a single dictionary (optional)
            typeFormat, str: Text pronunciation type (optional)
            useCanonical, str: If true will try to return a correct word root ('cats' -&gt; 'cat'). If false returns exactly what was requested. (optional)
            limit, int: Maximum number of results to return (optional)

        Returns: list[TextPron]
        """
        # Same generated request-building pattern as getExamples.
        # NOTE(review): this method is truncated in the available source; the
        # remainder of the callAPI/deserialize tail is missing from this chunk.
        allParams = ['word', 'sourceDictionary', 'typeFormat', 'useCanonical', 'limit']

        params = locals()
        for (key, val) in params['kwargs'].items():
            if key not in allParams:
                raise TypeError("Got an unexpected keyword argument '%s' to method getTextPronunciations" % key)
            params[key] = val
        del params['kwargs']

        resourcePath = '/word.{format}/{word}/pronunciations'
        resourcePath = resourcePath.replace('{format}', 'json')
        method = 'GET'

        queryParams = {}
        headerParams = {}

        if ('useCanonical' in params):
            queryParams['useCanonical'] = self.apiClient.toPathValue(params['useCanonical'])
        if ('sourceDictionary' in params):
            queryParams['sourceDictionary'] = self.apiClient.toPathValue(params['sourceDictionary'])
        if ('typeFormat' in params):
            queryParams['typeFormat'] = self.apiClient.toPathValue(params['typeFormat'])
        if ('limit' in params):
            queryParams['limit'] = self.apiClient.toPathValue(params['limit'])
        if ('word' in params):
            replacement = str(self.apiClient.toPathValue(params['word']))
            resourcePath = resourcePath.replace('{' + 'word' + '}', replacement)
        postData = (params['body'] if 'body' in params else None)
        response
<reponame>Falsejoey/NabBot import asyncio import calendar import datetime as dt import random import re import time import urllib.parse from operator import attrgetter from typing import Optional import discord from discord.ext import commands from nabbot import NabBot from utils import checks from utils.config import config from utils.context import NabCtx from utils.database import get_server_property, userDatabase from utils.general import get_time_diff, join_list, get_brasilia_time_zone, global_online_list, get_local_timezone, log, \ is_numeric, get_user_avatar from utils.messages import html_to_markdown, get_first_image, split_message from utils.pages import Pages, CannotPaginate, VocationPages from utils.tibia import NetworkError, get_character, tibia_logo, get_share_range, get_voc_emoji, get_voc_abb, get_guild, \ url_house, get_stats, get_map_area, get_tibia_time_zone, get_world, tibia_worlds, get_world_bosses, get_recent_news, \ get_news_article, Character, url_guild, highscore_format, get_character_url, url_character, get_house, \ get_voc_abb_and_emoji, get_world_list, get_highscores, get_highscores_tibiadata from utils.tibiawiki import get_rashid_info FLAGS = {"North America": "🇺🇸", "South America": "🇧🇷", "Europe": "🇬🇧"} PVP = {"Optional PvP": "🕊️", "Hardcore PvP": "💀", "Open PvP": "⚔", "Retro Open PvP": "⚔", "Retro Hardcore PvP": "💀"} TRANSFERS = {"locked": "🔒", "blocked": "⛔"} class Tibia: """Commands related to Tibia, gathered from information present in Tibia.com""" def __init__(self, bot: NabBot): self.bot = bot self.news_announcements_task = self.bot.loop.create_task(self.scan_news()) async def __error(self, ctx: NabCtx, error): if isinstance(error, commands.UserInputError): await self.bot.show_help(ctx) # Commands @commands.command(aliases=['bless']) async def blessings(self, ctx: NabCtx, level: int): """Calculates the price of blessings for a specific level. 
For player over level 100, it will also display the cost of the Blessing of the Inquisition.""" if level < 1: await ctx.send("Very funny... Now tell me a valid level.") return bless_price = max(2000, 200 * (min(level, 120) - 20)) mountain_bless_price = max(2000, 200 * (min(level, 150) - 20)) inquisition = "" if level >= 100: inquisition = f"\nBlessing of the Inquisition costs **{int(bless_price*5*1.1):,}** gold coins." await ctx.send(f"At that level you will pay **{bless_price:,}** gold coins per blessing for a total of " f"**{bless_price*5:,}** gold coins.{inquisition}" f"\nMountain blessings cost **{mountain_bless_price:,}** each, for a total of " f"**{int(mountain_bless_price*2):,}**.") @commands.command() async def bosses(self, ctx: NabCtx, world=None): """Shows predictions for bosses.""" if world is None and not ctx.is_private and ctx.world: world = ctx.world elif world is None: await ctx.send("You need to tell me a world's name.") return world = world.title() if world not in tibia_worlds: await ctx.send("That world doesn't exist.") return bosses = await get_world_bosses(world) if type(bosses) is not dict: await ctx.send("Something went wrong") fields = {"High Chance": "", "Low Chance": "", "No Chance": "", "Unpredicted": ""} for boss, info in bosses.items(): try: if info["days"] > 1000: continue info["name"] = boss.title() fields[info["chance"]] += "{name} - {days:,} days.\n".format(**info) except KeyError: continue embed = discord.Embed(title=f"Bosses for {world}") if fields["High Chance"]: embed.add_field(name="High Chance - Last seen", value=fields["High Chance"]) if fields["Low Chance"]: embed.add_field(name="Low Chance - Last seen", value=fields["Low Chance"]) if ctx.long: if fields["No Chance"]: embed.add_field(name="No Chance - Expect in", value=fields["No Chance"]) if fields["Unpredicted"]: embed.add_field(name="Unpredicted - Last seen", value=fields["Unpredicted"]) else: ask_channel = ctx.ask_channel_name if ask_channel: askchannel_string = " or use 
#" + ask_channel else: askchannel_string = "" embed.set_footer(text="To see more, PM me{0}.".format(askchannel_string)) await ctx.send(embed=embed) @commands.group(aliases=['deathlist'], invoke_without_command=True, case_insensitive=True) async def deaths(self, ctx: NabCtx, *, name: str = None): """Shows a character's recent deaths. If this discord server is tracking a tibia world, it will show deaths registered to the character. Additionally, if no name is provided, all recent deaths will be shown.""" if name is None and ctx.is_lite: return permissions = ctx.bot_permissions if not permissions.embed_links: await ctx.send("Sorry, I need `Embed Links` permission for this command.") return if ctx.is_private: user_guilds = self.bot.get_user_guilds(ctx.author.id) user_worlds = self.bot.get_user_worlds(ctx.author.id) else: user_guilds = [ctx.guild] user_worlds = [self.bot.tracked_worlds.get(ctx.guild.id)] if user_worlds[0] is None and name is None: await ctx.send("This server is not tracking any tibia worlds.") return c = userDatabase.cursor() entries = [] author = None author_icon = discord.Embed.Empty count = 0 now = time.time() show_links = not ctx.long per_page = 20 if ctx.long else 5 try: if name is None: title = "Latest deaths" c.execute("SELECT char_deaths.level, date, name, user_id, byplayer, killer, world, vocation " "FROM char_deaths, chars " "WHERE char_id = id AND char_deaths.level > ? 
" "ORDER BY date DESC", (config.announce_threshold,)) while True: row = c.fetchone() if row is None: break user = self.bot.get_member(row["user_id"], user_guilds) if user is None: continue if row["world"] not in user_worlds: continue count += 1 row["time"] = get_time_diff(dt.timedelta(seconds=now - row["date"])) row["user"] = user.display_name row["emoji"] = get_voc_emoji(row["vocation"]) entries.append("{emoji} {name} (**@{user}**) - At level **{level}** by {killer} - *{time} ago*" .format(**row)) if count >= 100: break else: try: char = await get_character(name) if char is None: await ctx.send("That character doesn't exist.") return except NetworkError: await ctx.send("Sorry, I had trouble checking that character, try it again.") return deaths = char.deaths last_time = now name = char.name voc_emoji = get_voc_emoji(char.vocation) title = "{1} {0} latest deaths:".format(name, voc_emoji) if ctx.guild is not None and char.owner: owner: discord.Member = ctx.guild.get_member(char.owner) if owner is not None: author = owner.display_name author_icon = owner.avatar_url for death in deaths: last_time = death.time.timestamp() death_time = get_time_diff(dt.datetime.now(tz=dt.timezone.utc) - death.time) if death.by_player and show_links: killer = f"[{death.killer}]({Character.get_url(death.killer)})" elif death.by_player: killer = f"**{death.killer}**" else: killer = f"{death.killer}" entries.append("At level **{0.level}** by {name} - *{time} ago*".format(death, time=death_time, name=killer)) count += 1 c.execute("SELECT id, name FROM chars WHERE name LIKE ?", (name,)) result = c.fetchone() if result is not None and not ctx.is_lite: id = result["id"] c.execute("SELECT char_deaths.level, date, byplayer, killer " "FROM char_deaths " "WHERE char_id = ? AND date < ? 
" "ORDER BY date DESC", (id, last_time)) while True: row = c.fetchone() if row is None: break count += 1 row["time"] = get_time_diff(dt.timedelta(seconds=now - row["date"])) entries.append("At level **{level}** by {killer} - *{time} ago*".format(**row)) if count >= 100: break if count == 0: await ctx.send("There are no registered deaths.") return finally: c.close() pages = Pages(ctx, entries=entries, per_page=per_page) pages.embed.title = title pages.embed.set_author(name=author, icon_url=author_icon) try: await pages.paginate() except CannotPaginate as e: await ctx.send(e) @deaths.command(name="monster", aliases=["mob", "killer"]) @checks.is_in_tracking_world() async def deaths_monsters(self, ctx: NabCtx, *, name: str): """Shows the latest deaths caused by a specific monster.""" permissions = ctx.bot_permissions if not permissions.embed_links: await ctx.send("Sorry, I need `Embed Links` permission for this command.") return c = userDatabase.cursor() count = 0 entries = [] now = time.time() per_page = 20 if ctx.long else 5 if name[:1] in ["a", "e", "i", "o", "u"]: name_with_article = "an " + name else: name_with_article = "a " + name try: c.execute("SELECT char_deaths.level, date, name, user_id, byplayer, killer, vocation " "FROM char_deaths, chars " "WHERE char_id = id AND (killer LIKE ? OR killer LIKE ?) 
" "ORDER BY date DESC", (name, name_with_article)) while True: row = c.fetchone() if row is None: break user = self.bot.get_member(row["user_id"], ctx.guild) if user is None: continue count += 1 row["time"] = get_time_diff(dt.timedelta(seconds=now - row["date"])) row["user"] = user.display_name row["emoji"] = get_voc_emoji(row["vocation"]) entries.append("{emoji} {name} (**@{user}**) - At level **{level}** - *{time} ago*".format(**row)) if count >= 100: break if count == 0: await ctx.send("There are no registered deaths by that killer.") return finally: c.close() pages = Pages(ctx, entries=entries, per_page=per_page) pages.embed.title = f"{name.title()} latest kills" try: await pages.paginate() except CannotPaginate as e: await ctx.send(e) @deaths.command(name="user") @checks.is_in_tracking_world() async def deaths_user(self, ctx: NabCtx, *, name: str): """Shows a user's recent deaths on his/her registered characters.""" permissions = ctx.bot_permissions if not permissions.embed_links: await ctx.send("Sorry, I need `Embed Links` permission for this command.") return if ctx.is_private: user_servers = self.bot.get_user_guilds(ctx.author.id) user_worlds = self.bot.get_user_worlds(ctx.author.id) else: user_servers = [ctx.guild] user_worlds = [self.bot.tracked_worlds.get(ctx.guild.id)] if user_worlds[0] is None: await ctx.send("This server is not tracking any tibia worlds.") return user = self.bot.get_member(name, user_servers) if user is None: await ctx.send("I don't see any users with that name.") return c = userDatabase.cursor() count = 0 entries = [] now = time.time() per_page = 20 if ctx.long else 5 try: c.execute("SELECT name, world, char_deaths.level, killer, byplayer, date, vocation " "FROM chars, char_deaths " "WHERE char_id = id AND user_id = ? 
" "ORDER BY date DESC", (user.id,)) while True: row = c.fetchone() if row is None: break if row["world"] not in user_worlds: continue count += 1 row["time"] = get_time_diff(dt.timedelta(seconds=now - row["date"])) row["emoji"] = get_voc_emoji(row["vocation"]) entries.append("{emoji} {name} - At level **{level}** by {killer} - *{time} ago*".format(**row)) if count >= 100: break if count == 0: await ctx.send("There are not registered deaths by this user.") return finally: c.close() title = "{0} latest kills".format(user.display_name) icon_url =
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Feb 22 11:39:43 2018 Demonstration of CPU implementation against the GPU one @authors: <NAME>, <NAME> """ import matplotlib.pyplot as plt import numpy as np import os import timeit from ccpi.filters.regularisers import ROF_TV, FGP_TV, SB_TV, TGV, LLT_ROF, FGP_dTV, NDF, DIFF4th from ccpi.filters.regularisers import PatchSelect from qualitymetrics import rmse ############################################################################### def printParametersToString(pars): txt = r'' for key, value in pars.items(): if key== 'algorithm' : txt += "{0} = {1}".format(key, value.__name__) elif key == 'input': txt += "{0} = {1}".format(key, np.shape(value)) elif key == 'refdata': txt += "{0} = {1}".format(key, np.shape(value)) else: txt += "{0} = {1}".format(key, value) txt += '\n' return txt ############################################################################### filename = os.path.join(".." , ".." , ".." , "data" ,"lena_gray_512.tif") # read image Im = plt.imread(filename) Im = np.asarray(Im, dtype='float32') Im = Im/255 perc = 0.05 u0 = Im + np.random.normal(loc = 0 , scale = perc * Im , size = np.shape(Im)) u_ref = Im + np.random.normal(loc = 0 , scale = 0.01 * Im , size = np.shape(Im)) # map the u0 u0->u0>0 # f = np.frompyfunc(lambda x: 0 if x < 0 else x, 1,1) u0 = u0.astype('float32') u_ref = u_ref.astype('float32') #%% print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print ("____________ROF-TV bench___________________") print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") ## plot fig = plt.figure() plt.suptitle('Comparison of ROF-TV regulariser using CPU and GPU implementations') a=fig.add_subplot(1,4,1) a.set_title('Noisy Image') imgplot = plt.imshow(u0,cmap="gray") # set parameters pars = {'algorithm': ROF_TV, \ 'input' : u0,\ 'regularisation_parameter':0.04,\ 'number_of_iterations': 4500,\ 'time_marching_parameter': 0.00002 } print ("#############ROF TV CPU####################") 
start_time = timeit.default_timer() rof_cpu = ROF_TV(pars['input'], pars['regularisation_parameter'], pars['number_of_iterations'], pars['time_marching_parameter'],'cpu') rms = rmse(Im, rof_cpu) pars['rmse'] = rms txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,2) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(rof_cpu, cmap="gray") plt.title('{}'.format('CPU results')) print ("##############ROF TV GPU##################") start_time = timeit.default_timer() rof_gpu = ROF_TV(pars['input'], pars['regularisation_parameter'], pars['number_of_iterations'], pars['time_marching_parameter'],'gpu') rms = rmse(Im, rof_gpu) pars['rmse'] = rms pars['algorithm'] = ROF_TV txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,3) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(rof_gpu, cmap="gray") plt.title('{}'.format('GPU results')) print ("--------Compare the results--------") tolerance = 1e-05 diff_im = np.zeros(np.shape(rof_cpu)) diff_im = abs(rof_cpu - rof_gpu) diff_im[diff_im > tolerance] = 1 a=fig.add_subplot(1,4,4) imgplot = plt.imshow(diff_im, vmin=0, vmax=1, cmap="gray") plt.title('{}'.format('Pixels larger threshold difference')) if (diff_im.sum() > 1): print ("Arrays do not match!") else: print ("Arrays match") #%% print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print ("____________FGP-TV 
bench___________________") print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") ## plot fig = plt.figure() plt.suptitle('Comparison of FGP-TV regulariser using CPU and GPU implementations') a=fig.add_subplot(1,4,1) a.set_title('Noisy Image') imgplot = plt.imshow(u0,cmap="gray") # set parameters pars = {'algorithm' : FGP_TV, \ 'input' : u0,\ 'regularisation_parameter':0.04, \ 'number_of_iterations' :1200 ,\ 'tolerance_constant':0.00001,\ 'methodTV': 0 ,\ 'nonneg': 0 ,\ 'printingOut': 0 } print ("#############FGP TV CPU####################") start_time = timeit.default_timer() fgp_cpu = FGP_TV(pars['input'], pars['regularisation_parameter'], pars['number_of_iterations'], pars['tolerance_constant'], pars['methodTV'], pars['nonneg'], pars['printingOut'],'cpu') rms = rmse(Im, fgp_cpu) pars['rmse'] = rms txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,2) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(fgp_cpu, cmap="gray") plt.title('{}'.format('CPU results')) print ("##############FGP TV GPU##################") start_time = timeit.default_timer() fgp_gpu = FGP_TV(pars['input'], pars['regularisation_parameter'], pars['number_of_iterations'], pars['tolerance_constant'], pars['methodTV'], pars['nonneg'], pars['printingOut'],'gpu') rms = rmse(Im, fgp_gpu) pars['rmse'] = rms pars['algorithm'] = FGP_TV txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,3) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, 
transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(fgp_gpu, cmap="gray") plt.title('{}'.format('GPU results')) print ("--------Compare the results--------") tolerance = 1e-05 diff_im = np.zeros(np.shape(fgp_cpu)) diff_im = abs(fgp_cpu - fgp_gpu) diff_im[diff_im > tolerance] = 1 a=fig.add_subplot(1,4,4) imgplot = plt.imshow(diff_im, vmin=0, vmax=1, cmap="gray") plt.title('{}'.format('Pixels larger threshold difference')) if (diff_im.sum() > 1): print ("Arrays do not match!") else: print ("Arrays match") #%% print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print ("____________SB-TV bench___________________") print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") ## plot fig = plt.figure() plt.suptitle('Comparison of SB-TV regulariser using CPU and GPU implementations') a=fig.add_subplot(1,4,1) a.set_title('Noisy Image') imgplot = plt.imshow(u0,cmap="gray") # set parameters pars = {'algorithm' : SB_TV, \ 'input' : u0,\ 'regularisation_parameter':0.04, \ 'number_of_iterations' :150 ,\ 'tolerance_constant':1e-05,\ 'methodTV': 0 ,\ 'printingOut': 0 } print ("#############SB-TV CPU####################") start_time = timeit.default_timer() sb_cpu = SB_TV(pars['input'], pars['regularisation_parameter'], pars['number_of_iterations'], pars['tolerance_constant'], pars['methodTV'], pars['printingOut'],'cpu') rms = rmse(Im, sb_cpu) pars['rmse'] = rms txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,2) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(sb_cpu, cmap="gray") plt.title('{}'.format('CPU results')) print ("##############SB TV GPU##################") start_time = timeit.default_timer() sb_gpu = 
SB_TV(pars['input'], pars['regularisation_parameter'], pars['number_of_iterations'], pars['tolerance_constant'], pars['methodTV'], pars['printingOut'],'gpu') rms = rmse(Im, sb_gpu) pars['rmse'] = rms pars['algorithm'] = SB_TV txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,3) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(sb_gpu, cmap="gray") plt.title('{}'.format('GPU results')) print ("--------Compare the results--------") tolerance = 1e-05 diff_im = np.zeros(np.shape(sb_cpu)) diff_im = abs(sb_cpu - sb_gpu) diff_im[diff_im > tolerance] = 1 a=fig.add_subplot(1,4,4) imgplot = plt.imshow(diff_im, vmin=0, vmax=1, cmap="gray") plt.title('{}'.format('Pixels larger threshold difference')) if (diff_im.sum() > 1): print ("Arrays do not match!") else: print ("Arrays match") #%% print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print ("____________TGV bench___________________") print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") ## plot fig = plt.figure() plt.suptitle('Comparison of TGV regulariser using CPU and GPU implementations') a=fig.add_subplot(1,4,1) a.set_title('Noisy Image') imgplot = plt.imshow(u0,cmap="gray") # set parameters pars = {'algorithm' : TGV, \ 'input' : u0,\ 'regularisation_parameter':0.04, \ 'alpha1':1.0,\ 'alpha0':0.7,\ 'number_of_iterations' :250 ,\ 'LipshitzConstant' :12 ,\ } print ("#############TGV CPU####################") start_time = timeit.default_timer() tgv_cpu = TGV(pars['input'], pars['regularisation_parameter'], pars['alpha1'], pars['alpha0'], pars['number_of_iterations'], pars['LipshitzConstant'],'cpu') rms = rmse(Im, tgv_cpu) pars['rmse'] = rms txtstr = printParametersToString(pars) txtstr += "%s = 
%.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,2) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(tgv_cpu, cmap="gray") plt.title('{}'.format('CPU results')) print ("##############TGV GPU##################") start_time = timeit.default_timer() tgv_gpu = TGV(pars['input'], pars['regularisation_parameter'], pars['alpha1'], pars['alpha0'], pars['number_of_iterations'], pars['LipshitzConstant'],'gpu') rms = rmse(Im, tgv_gpu) pars['rmse'] = rms pars['algorithm'] = TGV txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,3) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(tgv_gpu, cmap="gray") plt.title('{}'.format('GPU results')) print ("--------Compare the results--------") tolerance = 1e-05 diff_im = np.zeros(np.shape(tgv_gpu)) diff_im = abs(tgv_cpu - tgv_gpu) diff_im[diff_im > tolerance] = 1 a=fig.add_subplot(1,4,4) imgplot = plt.imshow(diff_im, vmin=0, vmax=1, cmap="gray") plt.title('{}'.format('Pixels larger threshold difference')) if (diff_im.sum() > 1): print ("Arrays do not match!") else: print ("Arrays match") #%% print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print ("____________LLT-ROF bench___________________") print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") ## plot fig = plt.figure() plt.suptitle('Comparison of LLT-ROF regulariser using CPU and GPU implementations') a=fig.add_subplot(1,4,1) a.set_title('Noisy Image') imgplot = 
plt.imshow(u0,cmap="gray") # set parameters pars = {'algorithm' : LLT_ROF, \ 'input' : u0,\ 'regularisation_parameterROF':0.04, \ 'regularisation_parameterLLT':0.01, \ 'number_of_iterations' :4500 ,\ 'time_marching_parameter' :0.00002 ,\ } print ("#############LLT- ROF CPU####################") start_time = timeit.default_timer() lltrof_cpu = LLT_ROF(pars['input'], pars['regularisation_parameterROF'], pars['regularisation_parameterLLT'], pars['number_of_iterations'], pars['time_marching_parameter'],'cpu') rms = rmse(Im, lltrof_cpu) pars['rmse'] = rms txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,2) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(lltrof_cpu, cmap="gray") plt.title('{}'.format('CPU results')) print ("#############LLT- ROF GPU####################") start_time = timeit.default_timer() lltrof_gpu = LLT_ROF(pars['input'], pars['regularisation_parameterROF'], pars['regularisation_parameterLLT'], pars['number_of_iterations'], pars['time_marching_parameter'],'gpu') rms = rmse(Im, lltrof_gpu) pars['rmse'] = rms pars['algorithm'] = LLT_ROF txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,3) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(lltrof_gpu, cmap="gray") plt.title('{}'.format('GPU results')) print ("--------Compare the results--------") tolerance = 1e-05 diff_im = 
np.zeros(np.shape(lltrof_gpu)) diff_im = abs(lltrof_cpu - lltrof_gpu) diff_im[diff_im > tolerance] = 1 a=fig.add_subplot(1,4,4) imgplot = plt.imshow(diff_im, vmin=0, vmax=1, cmap="gray") plt.title('{}'.format('Pixels larger threshold difference')) if (diff_im.sum() > 1): print ("Arrays do not match!") else: print ("Arrays match") #%% print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print ("_______________NDF bench___________________") print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") ## plot fig = plt.figure() plt.suptitle('Comparison of NDF regulariser using CPU and GPU implementations') a=fig.add_subplot(1,4,1) a.set_title('Noisy Image') imgplot = plt.imshow(u0,cmap="gray") # set parameters pars = {'algorithm' : NDF, \ 'input' : u0,\ 'regularisation_parameter':0.06, \ 'edge_parameter':0.04,\ 'number_of_iterations' :1000 ,\ 'time_marching_parameter':0.025,\ 'penalty_type': 1 } print ("#############NDF CPU####################") start_time = timeit.default_timer() ndf_cpu = NDF(pars['input'], pars['regularisation_parameter'], pars['edge_parameter'], pars['number_of_iterations'], pars['time_marching_parameter'], pars['penalty_type'],'cpu') rms = rmse(Im, ndf_cpu) pars['rmse'] = rms txtstr = printParametersToString(pars) txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time) print (txtstr) a=fig.add_subplot(1,4,2) # these are matplotlib.patch.Patch properties props = dict(boxstyle='round', facecolor='wheat', alpha=0.75) # place a text box in upper left in axes coords a.text(0.15, 0.25, txtstr, transform=a.transAxes, fontsize=14, verticalalignment='top', bbox=props) imgplot = plt.imshow(ndf_cpu, cmap="gray") plt.title('{}'.format('CPU results')) print ("##############NDF GPU##################") start_time = timeit.default_timer() ndf_gpu = NDF(pars['input'], pars['regularisation_parameter'], pars['edge_parameter'], pars['number_of_iterations'], pars['time_marching_parameter'], pars['penalty_type'],'gpu') rms = rmse(Im, ndf_gpu) 
# --- NDF GPU run: record quality metric and report parameters/timing ---
pars['rmse'] = rms
# store the function itself; printParametersToString prints its __name__
pars['algorithm'] = NDF
txtstr = printParametersToString(pars)
txtstr += "%s = %.3fs" % ('elapsed time',timeit.default_timer() - start_time)
print (txtstr)
a=fig.add_subplot(1,4,3)
# these are matplotlib.patch.Patch properties
<gh_stars>1-10 from __future__ import print_function, division __all__ = ['KanesMethod'] from sympy import Symbol, zeros, Matrix, diff, solve_linear_system_LU, eye from sympy.core.compatibility import reduce from sympy.utilities import default_sort_key from sympy.physics.mechanics.essential import ReferenceFrame, dynamicsymbols from sympy.physics.mechanics.particle import Particle from sympy.physics.mechanics.point import Point from sympy.physics.mechanics.rigidbody import RigidBody from sympy.physics.mechanics.functions import (inertia_of_point_mass, partial_velocity) class KanesMethod(object): """Kane's method object. This object is used to do the "book-keeping" as you go through and form equations of motion in the way Kane presents in: <NAME>., <NAME>. Dynamics Theory and Applications. 1985 McGraw-Hill The attributes are for equations in the form [M] udot = forcing. Attributes ========== auxiliary : Matrix If applicable, the set of auxiliary Kane's equations used to solve for non-contributing forces. mass_matrix : Matrix The system's mass matrix forcing : Matrix The system's forcing vector mass_matrix_full : Matrix The "mass matrix" for the u's and q's forcing_full : Matrix The "forcing vector" for the u's and q's Examples ======== This is a simple example for a one degree of freedom translational spring-mass-damper. In this example, we first need to do the kinematics. This involves creating generalized speeds and coordinates and their derivatives. Then we create a point and set its velocity in a frame:: >>> from sympy import symbols >>> from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame >>> from sympy.physics.mechanics import Point, Particle, KanesMethod >>> q, u = dynamicsymbols('q u') >>> qd, ud = dynamicsymbols('q u', 1) >>> m, c, k = symbols('m c k') >>> N = ReferenceFrame('N') >>> P = Point('P') >>> P.set_vel(N, u * N.x) Next we need to arrange/store information in the way that KanesMethod requires. 
The kinematic differential equations need to be stored in a dict. A list of forces/torques must be constructed, where each entry in the list is a (Point, Vector) or (ReferenceFrame, Vector) tuple, where the Vectors represent the Force or Torque. Next a particle needs to be created, and it needs to have a point and mass assigned to it. Finally, a list of all bodies and particles needs to be created:: >>> kd = [qd - u] >>> FL = [(P, (-k * q - c * u) * N.x)] >>> pa = Particle('pa', P, m) >>> BL = [pa] Finally we can generate the equations of motion. First we create the KanesMethod object and supply an inertial frame, coordinates, generalized speeds, and the kinematic differential equations. Additional quantities such as configuration and motion constraints, dependent coordinates and speeds, and auxiliary speeds are also supplied here (see the online documentation). Next we form FR* and FR to complete: Fr + Fr* = 0. We have the equations of motion at this point. It makes sense to rearrnge them though, so we calculate the mass matrix and the forcing terms, for E.o.M. in the form: [MM] udot = forcing, where MM is the mass matrix, udot is a vector of the time derivatives of the generalized speeds, and forcing is a vector representing "forcing" terms:: >>> KM = KanesMethod(N, q_ind=[q], u_ind=[u], kd_eqs=kd) >>> (fr, frstar) = KM.kanes_equations(FL, BL) >>> MM = KM.mass_matrix >>> forcing = KM.forcing >>> rhs = MM.inv() * forcing >>> rhs Matrix([[(-c*u(t) - k*q(t))/m]]) >>> KM.linearize()[0] Matrix([ [ 0, 1], [-k, -c]]) Please look at the documentation pages for more information on how to perform linearization and how to deal with dependent coordinates & speeds, and how do deal with bringing non-contributing forces into evidence. 
""" simp = True ___KDEqError = AttributeError('Create an instance of KanesMethod with' + 'kinematic differential equations to use' + 'this method.') def __init__(self, frame, q_ind, u_ind, kd_eqs=None, q_dependent=[], configuration_constraints=[], u_dependent=[], velocity_constraints=[], acceleration_constraints=None, u_auxiliary=[]): """Please read the online documentation. """ # Big storage things if not isinstance(frame, ReferenceFrame): raise TypeError('An intertial ReferenceFrame must be supplied') self._inertial = frame self._forcelist = None self._bodylist = None self._fr = None self._frstar = None self._rhs = None self._aux_eq = None # States self._q = None self._qdep = [] self._qdot = None self._u = None self._udep = [] self._udot = None self._uaux = None # Differential Equations Matrices and Map self._k_d = None self._f_d = None self._k_kqdot = None self._k_ku = None self._f_k = None self._qdot_u_map = None # Constraint Matrices self._f_h = Matrix([]) self._k_nh = Matrix([]) self._f_nh = Matrix([]) self._k_dnh = Matrix([]) self._f_dnh = Matrix([]) self._coords(q_ind, q_dependent, configuration_constraints) self._speeds(u_ind, u_dependent, velocity_constraints, acceleration_constraints, u_auxiliary) if kd_eqs is not None: self._kindiffeq(kd_eqs) def _find_dynamicsymbols(self, inlist, insyms=[]): """Finds all non-supplied dynamicsymbols in the expressions.""" from sympy.core.function import AppliedUndef, Derivative t = dynamicsymbols._t return reduce(set.union, [set([i]) for j in inlist for i in j.atoms(AppliedUndef, Derivative) if i.atoms() == set([t])], set()) - insyms temp_f = set().union(*[i.atoms(AppliedUndef) for i in inlist]) temp_d = set().union(*[i.atoms(Derivative) for i in inlist]) set_f = set([a for a in temp_f if a.args == (t,)]) set_d = set([a for a in temp_d if ((a.args[0] in set_f) and all([i == t for i in a.variables]))]) return list(set.union(set_f, set_d) - set(insyms)) def _find_othersymbols(self, inlist, insyms=[]): """Finds all 
non-dynamic symbols in the expressions.""" return list(reduce(set.union, [i.atoms(Symbol) for i in inlist]) - set(insyms)) def _mat_inv_mul(self, A, B): """Internal Function Computes A^-1 * B symbolically w/ substitution, where B is not necessarily a vector, but can be a matrix. """ r1, c1 = A.shape r2, c2 = B.shape temp1 = Matrix(r1, c1, lambda i, j: Symbol('x' + str(j) + str(r1 * i))) temp2 = Matrix(r2, c2, lambda i, j: Symbol('y' + str(j) + str(r2 * i))) for i in range(len(temp1)): if A[i] == 0: temp1[i] = 0 for i in range(len(temp2)): if B[i] == 0: temp2[i] = 0 temp3 = [] for i in range(c2): temp3.append(temp1.LDLsolve(temp2[:, i])) temp3 = Matrix([i.T for i in temp3]).T return temp3.subs(dict(list(zip(temp1, A)))).subs(dict(list(zip(temp2, B)))) def _coords(self, qind, qdep=[], coneqs=[]): """Supply all the generalized coordinates in a list. If some coordinates are dependent, supply them as part of qdep. Their dependent nature will only show up in the linearization process though. Parameters ========== qind : list A list of independent generalized coords qdep : list List of dependent coordinates coneq : list List of expressions which are equal to zero; these are the configuration constraint equations """ if not isinstance(qind, (list, tuple)): raise TypeError('Generalized coords. must be supplied in a list.') self._q = qind + qdep self._qdot = [diff(i, dynamicsymbols._t) for i in self._q] if not isinstance(qdep, (list, tuple)): raise TypeError('Dependent coordinates and constraints must each be ' 'provided in their own list.') if len(qdep) != len(coneqs): raise ValueError('There must be an equal number of dependent ' 'coordinates and constraints.') coneqs = Matrix(coneqs) self._qdep = qdep self._f_h = coneqs def _speeds(self, uind, udep=[], coneqs=[], diffconeqs=None, u_auxiliary=[]): """Supply all the generalized speeds in a list. If there are motion constraints or auxiliary speeds, they are provided here as well (as well as motion constraints). 
Parameters ========== uind : list A list of independent generalized speeds udep : list Optional list of dependent speeds coneqs : list Optional List of constraint expressions; these are expressions which are equal to zero which define a speed (motion) constraint. diffconeqs : list Optional, calculated automatically otherwise; list of constraint equations; again equal to zero, but define an acceleration constraint. u_auxiliary : list An optional list of auxiliary speeds used for brining non-contributing forces into evidence """ if not hasattr(uind, '__iter__'): raise TypeError('Supply generalized speeds in an iterable.') self._u = uind + udep self._udot = [diff(i, dynamicsymbols._t) for i in self._u] self._uaux = u_auxiliary if not hasattr(udep, '__iter__'): raise TypeError('Supply dependent speeds in an iterable.') if len(udep) != len(coneqs): raise ValueError('There must be an equal number of dependent ' 'speeds and constraints.') if diffconeqs is not None: if len(udep) != len(diffconeqs): raise ValueError('There must be an equal number of dependent ' 'speeds and constraints.') if len(udep) != 0: u = self._u uzero = dict(list(zip(u, [0] * len(u)))) coneqs = Matrix(coneqs) udot = self._udot udotzero = dict(list(zip(udot, [0] * len(udot)))) self._udep = udep self._f_nh = coneqs.subs(uzero) self._k_nh = (coneqs
- m.x542 - m.x554 == 0) m.c615 = Constraint(expr= m.x328 - m.x520 - m.x532 - m.x544 - m.x556 == 0) m.c616 = Constraint(expr= m.x331 - m.x522 - m.x534 - m.x546 - m.x558 == 0) m.c617 = Constraint(expr= - 712.572602172813*m.b2 + m.x417 - m.x837 >= -712.572602172813) m.c618 = Constraint(expr= - 712.572602172813*m.b3 + m.x421 - m.x839 >= -712.572602172813) m.c619 = Constraint(expr= - 712.572602172813*m.b4 + m.x425 - m.x841 >= -712.572602172813) m.c620 = Constraint(expr= - 712.572602172813*m.b5 + m.x429 - m.x843 >= -712.572602172813) m.c621 = Constraint(expr= - 712.572602172813*m.b6 + m.x433 - m.x845 >= -712.572602172813) m.c622 = Constraint(expr= - 712.572602172813*m.b7 + m.x437 - m.x847 >= -712.572602172813) m.c623 = Constraint(expr= - 851.700667228731*m.b8 + m.x443 - m.x837 >= -851.700667228731) m.c624 = Constraint(expr= - 851.700667228731*m.b9 + m.x449 - m.x839 >= -851.700667228731) m.c625 = Constraint(expr= - 851.700667228731*m.b10 + m.x455 - m.x841 >= -851.700667228731) m.c626 = Constraint(expr= - 851.700667228731*m.b11 + m.x461 - m.x843 >= -851.700667228731) m.c627 = Constraint(expr= - 851.700667228731*m.b12 + m.x467 - m.x845 >= -851.700667228731) m.c628 = Constraint(expr= - 851.700667228731*m.b13 + m.x473 - m.x847 >= -851.700667228731) m.c629 = Constraint(expr= - 851.700667228731*m.b14 + m.x479 - m.x837 >= -851.700667228731) m.c630 = Constraint(expr= - 851.700667228731*m.b15 + m.x485 - m.x839 >= -851.700667228731) m.c631 = Constraint(expr= - 851.700667228731*m.b16 + m.x491 - m.x841 >= -851.700667228731) m.c632 = Constraint(expr= - 851.700667228731*m.b17 + m.x497 - m.x843 >= -851.700667228731) m.c633 = Constraint(expr= - 851.700667228731*m.b18 + m.x503 - m.x845 >= -851.700667228731) m.c634 = Constraint(expr= - 851.700667228731*m.b19 + m.x509 - m.x847 >= -851.700667228731) m.c635 = Constraint(expr= - 851.700667228731*m.b20 + m.x515 - m.x837 >= -851.700667228731) m.c636 = Constraint(expr= - 851.700667228731*m.b21 + m.x521 - m.x839 >= -851.700667228731) m.c637 = 
Constraint(expr= - 851.700667228731*m.b22 + m.x527 - m.x841 >= -851.700667228731) m.c638 = Constraint(expr= - 851.700667228731*m.b23 + m.x533 - m.x843 >= -851.700667228731) m.c639 = Constraint(expr= - 851.700667228731*m.b24 + m.x539 - m.x845 >= -851.700667228731) m.c640 = Constraint(expr= - 851.700667228731*m.b25 + m.x545 - m.x847 >= -851.700667228731) m.c641 = Constraint(expr= - 851.700667228731*m.b26 + m.x551 - m.x837 >= -851.700667228731) m.c642 = Constraint(expr= - 851.700667228731*m.b27 + m.x557 - m.x839 >= -851.700667228731) m.c643 = Constraint(expr= - 851.700667228731*m.b28 - m.x841 + m.x915 >= -851.700667228731) m.c644 = Constraint(expr= - 851.700667228731*m.b29 - m.x843 + m.x918 >= -851.700667228731) m.c645 = Constraint(expr= - 851.700667228731*m.b30 - m.x845 + m.x921 >= -851.700667228731) m.c646 = Constraint(expr= - 851.700667228731*m.b31 - m.x847 + m.x924 >= -851.700667228731) m.c647 = Constraint(expr= - 851.700667228731*m.b32 - m.x837 + m.x927 >= -851.700667228731) m.c648 = Constraint(expr= - 851.700667228731*m.b33 - m.x839 + m.x930 >= -851.700667228731) m.c649 = Constraint(expr= - 851.700667228731*m.b34 + m.x93 - m.x841 >= -851.700667228731) m.c650 = Constraint(expr= - 851.700667228731*m.b35 + m.x96 - m.x843 >= -851.700667228731) m.c651 = Constraint(expr= - 851.700667228731*m.b36 + m.x99 - m.x845 >= -851.700667228731) m.c652 = Constraint(expr= - 851.700667228731*m.b37 + m.x102 - m.x847 >= -851.700667228731) m.c653 = Constraint(expr= - 851.700667228731*m.b38 + m.x105 - m.x837 >= -851.700667228731) m.c654 = Constraint(expr= - 851.700667228731*m.b39 + m.x108 - m.x839 >= -851.700667228731) m.c655 = Constraint(expr= - 851.700667228731*m.b40 + m.x111 - m.x841 >= -851.700667228731) m.c656 = Constraint(expr= - 851.700667228731*m.b41 + m.x114 - m.x843 >= -851.700667228731) m.c657 = Constraint(expr= - 851.700667228731*m.b42 + m.x117 - m.x845 >= -851.700667228731) m.c658 = Constraint(expr= - 851.700667228731*m.b43 + m.x120 - m.x847 >= -851.700667228731) m.c659 = 
Constraint(expr= - 712.572602172813*m.b44 + m.x122 - m.x837 >= -712.572602172813) m.c660 = Constraint(expr= - 712.572602172813*m.b45 + m.x124 - m.x839 >= -712.572602172813) m.c661 = Constraint(expr= - 712.572602172813*m.b46 + m.x126 - m.x841 >= -712.572602172813) m.c662 = Constraint(expr= - 712.572602172813*m.b47 + m.x128 - m.x843 >= -712.572602172813) m.c663 = Constraint(expr= - 712.572602172813*m.b48 + m.x130 - m.x845 >= -712.572602172813) m.c664 = Constraint(expr= - 712.572602172813*m.b49 + m.x132 - m.x847 >= -712.572602172813) m.c665 = Constraint(expr= - 925.825187656153*m.b50 + m.x134 - m.x848 >= -925.825187656153) m.c666 = Constraint(expr= - 925.825187656153*m.b51 + m.x136 - m.x849 >= -925.825187656153) m.c667 = Constraint(expr= - 925.825187656153*m.b52 + m.x138 - m.x850 >= -925.825187656153) m.c668 = Constraint(expr= - 925.825187656153*m.b53 + m.x140 - m.x851 >= -925.825187656153) m.c669 = Constraint(expr= - 925.825187656153*m.b54 + m.x142 - m.x852 >= -925.825187656153) m.c670 = Constraint(expr= - 925.825187656153*m.b55 + m.x144 - m.x853 >= -925.825187656153) m.c671 = Constraint(expr= - 925.825187656153*m.b56 + m.x146 - m.x848 >= -925.825187656153) m.c672 = Constraint(expr= - 925.825187656153*m.b57 + m.x148 - m.x849 >= -925.825187656153) m.c673 = Constraint(expr= - 925.825187656153*m.b58 + m.x150 - m.x850 >= -925.825187656153) m.c674 = Constraint(expr= - 925.825187656153*m.b59 + m.x152 - m.x851 >= -925.825187656153) m.c675 = Constraint(expr= - 925.825187656153*m.b60 + m.x154 - m.x852 >= -925.825187656153) m.c676 = Constraint(expr= - 925.825187656153*m.b61 + m.x156 - m.x853 >= -925.825187656153) m.c677 = Constraint(expr= - 925.825187656153*m.b62 + m.x158 - m.x848 >= -925.825187656153) m.c678 = Constraint(expr= - 925.825187656153*m.b63 + m.x160 - m.x849 >= -925.825187656153) m.c679 = Constraint(expr= - 925.825187656153*m.b64 + m.x162 - m.x850 >= -925.825187656153) m.c680 = Constraint(expr= - 925.825187656153*m.b65 + m.x164 - m.x851 >= -925.825187656153) m.c681 
= Constraint(expr= - 925.825187656153*m.b66 + m.x166 - m.x852 >= -925.825187656153) m.c682 = Constraint(expr= - 925.825187656153*m.b67 + m.x168 - m.x853 >= -925.825187656153) m.c683 = Constraint(expr= - 925.825187656502*m.b68 + m.x170 - m.x848 >= -925.825187656502) m.c684 = Constraint(expr= - 925.825187656502*m.b69 + m.x172 - m.x849 >= -925.825187656502) m.c685 = Constraint(expr= - 925.825187656502*m.b70 + m.x174 - m.x850 >= -925.825187656502) m.c686 = Constraint(expr= - 925.825187656502*m.b71 + m.x176 - m.x851 >= -925.825187656502) m.c687 = Constraint(expr= - 925.825187656502*m.b72 + m.x178 - m.x852 >= -925.825187656502) m.c688 = Constraint(expr= - 925.825187656502*m.b73 + m.x180 - m.x853 >= -925.825187656502) m.c689 = Constraint(expr= 447.864247971*m.b2 + m.x417 - m.x837 <= 447.864247971) m.c690 = Constraint(expr= 447.864247971*m.b3 + m.x421 - m.x839 <= 447.864247971) m.c691 = Constraint(expr= 447.864247971*m.b4 + m.x425 - m.x841 <= 447.864247971) m.c692 = Constraint(expr= 447.864247971*m.b5 + m.x429 - m.x843 <= 447.864247971) m.c693 = Constraint(expr= 447.864247971*m.b6 + m.x433 - m.x845 <= 447.864247971) m.c694 = Constraint(expr= 447.864247971*m.b7 + m.x437 - m.x847 <= 447.864247971) m.c695 = Constraint(expr= 672.20455381568*m.b8 + m.x443 - m.x837 <= 672.20455381568) m.c696 = Constraint(expr= 672.20455381568*m.b9 + m.x449 - m.x839 <= 672.20455381568) m.c697 = Constraint(expr= 672.20455381568*m.b10 + m.x455 - m.x841 <= 672.20455381568) m.c698 = Constraint(expr= 672.20455381568*m.b11 + m.x461 - m.x843 <= 672.20455381568) m.c699 = Constraint(expr= 672.20455381568*m.b12 + m.x467 - m.x845 <= 672.20455381568) m.c700 = Constraint(expr= 672.20455381568*m.b13 + m.x473 - m.x847 <= 672.20455381568) m.c701 = Constraint(expr= 672.20455381568*m.b14 + m.x479 - m.x837 <= 672.20455381568) m.c702 = Constraint(expr= 672.20455381568*m.b15 + m.x485 - m.x839 <= 672.20455381568) m.c703 = Constraint(expr= 672.20455381568*m.b16 + m.x491 - m.x841 <= 672.20455381568) m.c704 = 
Constraint(expr= 672.20455381568*m.b17 + m.x497 - m.x843 <= 672.20455381568) m.c705 = Constraint(expr= 672.20455381568*m.b18 + m.x503 - m.x845 <= 672.20455381568) m.c706 = Constraint(expr= 672.20455381568*m.b19 + m.x509 - m.x847 <= 672.20455381568) m.c707 = Constraint(expr= 672.20455381568*m.b20 + m.x515 - m.x837 <= 672.20455381568) m.c708 = Constraint(expr= 672.20455381568*m.b21 + m.x521 - m.x839 <= 672.20455381568) m.c709 = Constraint(expr= 672.20455381568*m.b22 + m.x527 - m.x841 <= 672.20455381568) m.c710 = Constraint(expr= 672.20455381568*m.b23 + m.x533 - m.x843 <= 672.20455381568) m.c711 = Constraint(expr= 672.20455381568*m.b24 + m.x539 - m.x845 <= 672.20455381568) m.c712 = Constraint(expr= 672.20455381568*m.b25 + m.x545 - m.x847 <= 672.20455381568) m.c713 = Constraint(expr= 672.20455381568*m.b26 + m.x551 - m.x837 <= 672.20455381568) m.c714 = Constraint(expr= 672.20455381568*m.b27 + m.x557 - m.x839 <= 672.20455381568) m.c715 = Constraint(expr= 672.20455381568*m.b28 - m.x841 + m.x915 <= 672.20455381568) m.c716 = Constraint(expr= 672.20455381568*m.b29 - m.x843 + m.x918 <= 672.20455381568) m.c717 = Constraint(expr= 672.20455381568*m.b30 - m.x845 + m.x921 <= 672.20455381568) m.c718 = Constraint(expr= 672.20455381568*m.b31 - m.x847 + m.x924 <= 672.20455381568) m.c719 = Constraint(expr= 672.20455381568*m.b32 - m.x837 + m.x927 <= 672.20455381568) m.c720 = Constraint(expr= 672.20455381568*m.b33 - m.x839 + m.x930 <= 672.20455381568) m.c721 = Constraint(expr= 672.20455381568*m.b34 + m.x93 - m.x841 <= 672.20455381568) m.c722 = Constraint(expr= 672.20455381568*m.b35 + m.x96 - m.x843 <= 672.20455381568) m.c723 = Constraint(expr= 672.20455381568*m.b36 + m.x99 - m.x845 <= 672.20455381568) m.c724 = Constraint(expr= 672.20455381568*m.b37 + m.x102 - m.x847 <= 672.20455381568) m.c725 = Constraint(expr= 672.20455381568*m.b38 + m.x105 - m.x837 <= 672.20455381568) m.c726 = Constraint(expr= 672.20455381568*m.b39 + m.x108 - m.x839 <= 672.20455381568) m.c727 = Constraint(expr= 
672.20455381568*m.b40 + m.x111 - m.x841 <= 672.20455381568) m.c728 = Constraint(expr= 672.20455381568*m.b41 + m.x114 - m.x843 <= 672.20455381568) m.c729 = Constraint(expr= 672.20455381568*m.b42 + m.x117 - m.x845 <= 672.20455381568) m.c730 = Constraint(expr= 672.20455381568*m.b43 + m.x120 - m.x847 <= 672.20455381568) m.c731 = Constraint(expr= 447.864247971*m.b44 + m.x122 - m.x837 <= 447.864247971) m.c732 = Constraint(expr= 447.864247971*m.b45 + m.x124 - m.x839 <= 447.864247971) m.c733 = Constraint(expr= 447.864247971*m.b46 + m.x126 - m.x841 <= 447.864247971) m.c734 = Constraint(expr= 447.864247971*m.b47 + m.x128 - m.x843 <= 447.864247971) m.c735 = Constraint(expr= 447.864247971*m.b48 + m.x130 - m.x845 <= 447.864247971) m.c736 = Constraint(expr= 447.864247971*m.b49 + m.x132 - m.x847 <= 447.864247971) m.c737 = Constraint(expr= 1106.777870451*m.b50 + m.x134 - m.x848 <= 1106.777870451) m.c738 = Constraint(expr= 1106.777870451*m.b51 + m.x136 - m.x849 <= 1106.777870451) m.c739 = Constraint(expr= 1106.777870451*m.b52 + m.x138 - m.x850 <= 1106.777870451) m.c740 = Constraint(expr= 1106.777870451*m.b53 + m.x140 - m.x851 <= 1106.777870451) m.c741 = Constraint(expr= 1106.777870451*m.b54 + m.x142 - m.x852 <= 1106.777870451) m.c742 = Constraint(expr= 1106.777870451*m.b55 + m.x144 - m.x853 <= 1106.777870451) m.c743 = Constraint(expr= 1106.777870451*m.b56 + m.x146 - m.x848 <= 1106.777870451) m.c744 = Constraint(expr= 1106.777870451*m.b57 + m.x148 - m.x849 <= 1106.777870451) m.c745 = Constraint(expr= 1106.777870451*m.b58 + m.x150 - m.x850 <= 1106.777870451) m.c746 = Constraint(expr= 1106.777870451*m.b59 + m.x152 - m.x851 <= 1106.777870451) m.c747 = Constraint(expr= 1106.777870451*m.b60 + m.x154 -
# flake8: noqa
"""
Tests copied from cpython test suite:
https://github.com/python/cpython/blob/3.9/Lib/test/test_complex.py
"""
# stdlib
from math import atan2
from math import copysign
from math import isnan
import operator
from random import random
import sys
import unittest

# third party
import pytest

# syft absolute
from syft.lib.python.complex import Complex

INF = float("inf")
NAN = float("nan")

# Literal strings that the complex() constructor must accept / reject.
# NOTE(review): not referenced in the portion of the file visible here —
# presumably consumed by an underscore-literal test further down; verify.
VALID_UNDERSCORE_LITERALS = [
    "0_0_0",
    "4_2",
    "1_0000_0000",
    "0b1001_0100",
    "0xffff_ffff",
    "0o5_7_7",
    "1_00_00.5",
    "1_00_00.5e5",
    "1_00_00e5_1",
    "1e1_0",
    ".1_4",
    ".1_4e1",
    "0b_0",
    "0x_f",
    "0o_5",
    "1_00_00j",
    "1_00_00.5j",
    "1_00_00e5_1j",
    ".1_4j",
    "(1_2.5+3_3j)",
    "(.5_6j)",
]

INVALID_UNDERSCORE_LITERALS = [
    # Trailing underscores:
    "0_",
    "42_",
    "1.4j_",
    "0x_",
    "0b1_",
    "0xf_",
    "0o5_",
    "0 if 1_Else 1",
    # Underscores in the base selector:
    "0_b0",
    "0_xf",
    "0_o5",
    # Old-style octal, still disallowed:
    "0_7",
    "09_99",
    # Multiple consecutive underscores:
    "4_______2",
    "0.1__4",
    "0.1__4j",
    "0b1001__0100",
    "0xffff__ffff",
    "0x___",
    "0o5__77",
    "1e1__0",
    "1e1__0j",
    # Underscore right before a dot:
    "1_.4",
    "1_.4j",
    # Underscore right after a dot:
    "1._4",
    "1._4j",
    "._5",
    "._5j",
    # Underscore right after a sign:
    "1.0e+_1",
    "1.0e+_1j",
    # Underscore right before j:
    "1.4_j",
    "1.4e5_j",
    # Underscore right before e:
    "1_e1",
    "1.4_e1",
    "1.4_e1j",
    # Underscore right after e:
    "1e_1",
    "1.4e_1",
    "1.4e_1j",
    # Complex cases with parens:
    "(1+1.5_j_)",
    "(1+1.5_j)",
]


class ComplexTest(unittest.TestCase):
    # NOTE(review): deliberately shadows TestCase.assertIs with an *equality*
    # check (==, not `is`), so identity-style assertions below tolerate syft's
    # Complex wrapper objects comparing equal to builtin values.
    @staticmethod
    def assertIs(a, b):
        assert a == b

    def assertAlmostEqual(self, a, b):
        # Extends TestCase.assertAlmostEqual to complex operands by comparing
        # real and imaginary parts component-wise (missing parts default 0.0).
        if isinstance(a, complex):
            if isinstance(b, complex):
                unittest.TestCase.assertAlmostEqual(self, a.real, b.real)
                unittest.TestCase.assertAlmostEqual(self, a.imag, b.imag)
            else:
                unittest.TestCase.assertAlmostEqual(self, a.real, b)
                unittest.TestCase.assertAlmostEqual(self, a.imag, 0.0)
        else:
            if isinstance(b, complex):
                unittest.TestCase.assertAlmostEqual(self, a, b.real)
                unittest.TestCase.assertAlmostEqual(self, 0.0, b.imag)
            else:
                unittest.TestCase.assertAlmostEqual(self, a, b)

    def assertCloseAbs(self, x, y, eps=1e-9):
        """Return true iff floats x and y "are close"."""
        # put the one with larger magnitude second
        if abs(x) > abs(y):
            x, y = y, x
        if y == 0:
            return abs(x) < eps
        if x == 0:
            return abs(y) < eps
        # check that relative difference < eps
        self.assertTrue(abs((x - y) / y) < eps)

    def assertFloatsAreIdentical(self, x, y):
        """assert that floats x and y are identical, in the sense that:
        (1) both x and y are nans, or
        (2) both x and y are infinities, with the same sign, or
        (3) both x and y are zeros, with the same sign, or
        (4) x and y are both finite and nonzero, and x == y

        """
        msg = "floats {!r} and {!r} are not identical"

        if isnan(x) or isnan(y):
            if isnan(x) and isnan(y):
                return
        elif x == y:
            if x != 0.0:
                return
            # both zero; check that signs match
            elif copysign(1.0, x) == copysign(1.0, y):
                return
            else:
                msg += ": zeros have different signs"
        self.fail(msg.format(x, y))

    def assertClose(self, x, y, eps=1e-9):
        """Return true iff complexes x and y "are close"."""
        self.assertCloseAbs(x.real, y.real, eps)
        self.assertCloseAbs(x.imag, y.imag, eps)

    def check_div(self, x, y):
        """Compute Complex z=x*y, and check that z/x==y and z/y==x."""
        z = x * y
        if x != 0:
            q = z / x
            self.assertClose(q, y)
            q = z.__truediv__(x)
            self.assertClose(q, y)
        if y != 0:
            q = z / y
            self.assertClose(q, x)
            q = z.__truediv__(y)
            self.assertClose(q, x)

    @pytest.mark.slow
    def test_truediv(self):
        # Exhaustive small-integer grid, then magnitude-extreme and random
        # operands; finally division-by-zero and NaN-denominator behaviour.
        simple_real = [float(i) for i in range(-5, 6)]
        simple_complex = [Complex(x, y) for x in simple_real for y in simple_real]
        for x in simple_complex:
            for y in simple_complex:
                self.check_div(x, y)

        # A naive Complex division algorithm (such as in 2.0) is very prone to
        # nonsense errors for these (overflows and underflows).
        self.check_div(Complex(1e200, 1e200), 1 + 0j)
        self.check_div(Complex(1e-200, 1e-200), 1 + 0j)

        # Just for fun.
        for i in range(100):
            self.check_div(Complex(random(), random()), Complex(random(), random()))

        self.assertRaises(ZeroDivisionError, Complex.__truediv__, 1 + 1j, 0 + 0j)
        self.assertRaises(OverflowError, pow, 1e200 + 1j, 1e200 + 1j)
        self.assertAlmostEqual(Complex.__truediv__(2 + 0j, 1 + 1j), 1 - 1j)
        self.assertRaises(ZeroDivisionError, Complex.__truediv__, 1 + 1j, 0 + 0j)

        # Dividing by a NaN component must propagate NaN to both parts.
        for denom_real, denom_imag in [(0, NAN), (NAN, 0), (NAN, NAN)]:
            z = Complex(0, 0) / Complex(denom_real, denom_imag)
            self.assertTrue(isnan(z.real))
            self.assertTrue(isnan(z.imag))

    def test_floordiv(self):
        # Floor division is unsupported for complex numbers.
        self.assertRaises(TypeError, Complex.__floordiv__, 3 + 0j, 1.5 + 0j)
        self.assertRaises(TypeError, Complex.__floordiv__, 3 + 0j, 0 + 0j)

    def test_richcompare(self):
        # Equality/inequality are defined; ordering comparisons are not.
        self.assertIs(Complex.__eq__(1 + 1j, 1 << 10000), False)
        self.assertIs(Complex.__lt__(1 + 1j, None), NotImplemented)
        self.assertIs(Complex.__eq__(1 + 1j, 1 + 1j), True)
        self.assertIs(Complex.__eq__(1 + 1j, 2 + 2j), False)
        self.assertIs(Complex.__ne__(1 + 1j, 1 + 1j), False)
        self.assertIs(Complex.__ne__(1 + 1j, 2 + 2j), True)
        for i in range(1, 100):
            f = i / 100.0
            self.assertIs(Complex.__eq__(f + 0j, f), True)
            self.assertIs(Complex.__ne__(f + 0j, f), False)
            self.assertIs(Complex.__eq__(Complex(f, f), f), False)
            self.assertIs(Complex.__ne__(Complex(f, f), f), True)
        self.assertIs(Complex.__lt__(1 + 1j, 2 + 2j), NotImplemented)
        self.assertIs(Complex.__le__(1 + 1j, 2 + 2j), NotImplemented)
        self.assertIs(Complex.__gt__(1 + 1j, 2 + 2j), NotImplemented)
        self.assertIs(Complex.__ge__(1 + 1j, 2 + 2j), NotImplemented)
        self.assertRaises(TypeError, operator.lt, 1 + 1j, 2 + 2j)
        self.assertRaises(TypeError, operator.le, 1 + 1j, 2 + 2j)
        self.assertRaises(TypeError, operator.gt, 1 + 1j, 2 + 2j)
        self.assertRaises(TypeError, operator.ge, 1 + 1j, 2 + 2j)
        self.assertIs(operator.eq(1 + 1j, 1 + 1j), True)
        self.assertIs(operator.eq(1 + 1j, 2 + 2j), False)
        self.assertIs(operator.ne(1 + 1j, 1 + 1j), False)
        self.assertIs(operator.ne(1 + 1j, 2 + 2j), True)
    @pytest.mark.slow
    def test_richcompare_boundaries(self):
        # Exercise equality near the limits of exact float representability.
        def check(n, deltas, is_equal, imag=0.0):
            for delta in deltas:
                i = n + delta
                z = Complex(i, imag)
                self.assertIs(Complex.__eq__(z, i), is_equal(delta))
                self.assertIs(Complex.__ne__(z, i), not is_equal(delta))

        # For IEEE-754 doubles the following should hold:
        # x in [2 ** (52 + i), 2 ** (53 + i + 1)] -> x mod 2 ** i == 0
        # where the interval is representable, of course.
        for i in range(1, 10):
            pow = 52 + i  # NOTE(review): shadows builtin pow() locally (copied from CPython)
            mult = 2**i
            check(2**pow, range(1, 101), lambda delta: delta % mult == 0)
            check(2**pow, range(1, 101), lambda delta: False, float(i))
        check(2**53, range(-100, 0), lambda delta: True)

    def test_mod(self):
        # % is no longer supported on Complex numbers
        self.assertRaises(TypeError, (1 + 1j).__mod__, 0 + 0j)
        self.assertRaises(TypeError, lambda: (3.33 + 4.43j) % 0)
        self.assertRaises(TypeError, (1 + 1j).__mod__, 4.3j)

    def test_divmod(self):
        # divmod() is likewise unsupported for complex operands.
        self.assertRaises(TypeError, divmod, 1 + 1j, 1 + 0j)
        self.assertRaises(TypeError, divmod, 1 + 1j, 0 + 0j)

    def test_pow(self):
        self.assertAlmostEqual(pow(1 + 1j, 0 + 0j), 1.0)
        self.assertAlmostEqual(pow(0 + 0j, 2 + 0j), 0.0)
        self.assertRaises(ZeroDivisionError, pow, 0 + 0j, 1j)
        self.assertAlmostEqual(pow(1j, -1), 1 / 1j)
        self.assertAlmostEqual(pow(1j, 200), 1)
        # Three-argument pow (modular) is invalid for complex values.
        self.assertRaises(ValueError, pow, 1 + 1j, 1 + 1j, 1 + 1j)

        a = 3.33 + 4.43j
        self.assertEqual(a**0j, 1)
        # NOTE(review): by operator precedence this is (a**0.0) + 0.0j, not
        # a**(0.0+0.0j); assertion still holds either way (copied as-is from
        # the CPython suite).
        self.assertEqual(a**0.0 + 0.0j, 1)

        self.assertEqual(3j**0j, 1)
        self.assertEqual(3j**0, 1)

        # 0 raised to a power with negative real part must fail.
        try:
            0j**a
        except ZeroDivisionError:
            pass
        else:
            self.fail("should fail 0.0 to negative or Complex power")

        try:
            0j ** (3 - 2j)
        except ZeroDivisionError:
            pass
        else:
            self.fail("should fail 0.0 to negative or Complex power")

        # The following is used to exercise certain code paths
        self.assertEqual(a**105, a**105)
        self.assertEqual(a**-105, a**-105)
        self.assertEqual(a**-30, a**-30)

        self.assertEqual(0.0j**0, 1)

        b = 5.1 + 2.3j
        self.assertRaises(ValueError, pow, a, b, 0)

    def test_boolcontext(self):
        # Any nonzero complex is truthy; exact zero is falsy.
        for i in range(100):
            self.assertTrue(Complex(random() + 1e-6, random() + 1e-6))
        self.assertTrue(not Complex(0.0, 0.0))

    def test_conjugate(self):
        self.assertClose(Complex(5.3, 9.8).conjugate(), 5.3 - 9.8j)

    @pytest.mark.slow
    def test_constructor(self):
        # Objects supplying __complex__ — old-style and new-style spellings.
        class OS:
            def __init__(self, value):
                self.value = value

            def __complex__(self):
                return self.value

        class NS(object):
            def __init__(self, value):
                self.value = value

            def __complex__(self):
                return self.value

        self.assertEqual(Complex(OS(1 + 10j)), 1 + 10j)
        self.assertEqual(Complex(NS(1 + 10j)), 1 + 10j)
        # __complex__ returning a non-complex must be rejected.
        self.assertRaises(TypeError, Complex, OS(None))
        self.assertRaises(TypeError, Complex, NS(None))
        self.assertRaises(TypeError, Complex, {})
        self.assertRaises(TypeError, Complex, NS(1.5))
        self.assertRaises(TypeError, Complex, NS(1))

        # Numeric / string / mixed-argument construction forms.
        self.assertAlmostEqual(Complex("1+10j"), 1 + 10j)
        self.assertAlmostEqual(Complex(10), 10 + 0j)
        self.assertAlmostEqual(Complex(10.0), 10 + 0j)
        self.assertAlmostEqual(Complex(10), 10 + 0j)
        self.assertAlmostEqual(Complex(10 + 0j), 10 + 0j)
        self.assertAlmostEqual(Complex(1, 10), 1 + 10j)
        self.assertAlmostEqual(Complex(1, 10), 1 + 10j)
        self.assertAlmostEqual(Complex(1, 10.0), 1 + 10j)
        self.assertAlmostEqual(Complex(1, 10), 1 + 10j)
        self.assertAlmostEqual(Complex(1, 10), 1 + 10j)
        self.assertAlmostEqual(Complex(1, 10.0), 1 + 10j)
        self.assertAlmostEqual(Complex(1.0, 10), 1 + 10j)
        self.assertAlmostEqual(Complex(1.0, 10), 1 + 10j)
        self.assertAlmostEqual(Complex(1.0, 10.0), 1 + 10j)
        self.assertAlmostEqual(Complex(3.14 + 0j), 3.14 + 0j)
        self.assertAlmostEqual(Complex(3.14), 3.14 + 0j)
        self.assertAlmostEqual(Complex(314), 314.0 + 0j)
        self.assertAlmostEqual(Complex(314), 314.0 + 0j)
        self.assertAlmostEqual(Complex(3.14 + 0j, 0j), 3.14 + 0j)
        self.assertAlmostEqual(Complex(3.14, 0.0), 3.14 + 0j)
        self.assertAlmostEqual(Complex(314, 0), 314.0 + 0j)
        self.assertAlmostEqual(Complex(314, 0), 314.0 + 0j)
        # A complex imag argument contributes its real part times 1j.
        self.assertAlmostEqual(Complex(0j, 3.14j), -3.14 + 0j)
        self.assertAlmostEqual(Complex(0.0, 3.14j), -3.14 + 0j)
        self.assertAlmostEqual(Complex(0j, 3.14), 3.14j)
        self.assertAlmostEqual(Complex(0.0, 3.14), 3.14j)
        # String parsing: signs, parens, whitespace, bare j / J forms.
        self.assertAlmostEqual(Complex("1"), 1 + 0j)
        self.assertAlmostEqual(Complex("1j"), 1j)
        self.assertAlmostEqual(Complex(), 0)
        self.assertAlmostEqual(Complex("-1"), -1)
        self.assertAlmostEqual(Complex("+1"), +1)
        self.assertAlmostEqual(Complex("(1+2j)"), 1 + 2j)
        self.assertAlmostEqual(Complex("(1.3+2.2j)"), 1.3 + 2.2j)
        self.assertAlmostEqual(Complex("3.14+1J"), 3.14 + 1j)
        self.assertAlmostEqual(Complex(" ( +3.14-6J )"), 3.14 - 6j)
        self.assertAlmostEqual(Complex(" ( +3.14-J )"), 3.14 - 1j)
        self.assertAlmostEqual(Complex(" ( +3.14+j )"), 3.14 + 1j)
        self.assertAlmostEqual(Complex("J"), 1j)
        self.assertAlmostEqual(Complex("( j )"), 1j)
        self.assertAlmostEqual(Complex("+J"), 1j)
        self.assertAlmostEqual(Complex("( -j)"), -1j)
        self.assertAlmostEqual(Complex("1e-500"), 0.0 +
ID: %s' % routerId)
        # Tail of sendPimV4JoinNgpf (definition starts above this chunk):
        # verify the router ID exists in this Device Group, then translate the
        # requested multicast addresses into 1-based indexes and send the
        # join/leave action.
        routerDataObj = deviceGroupObj.RouterData.find()
        routerIdMultivalue = routerDataObj.RouterId
        routerIdList = self.getMultivalueValues(routerIdMultivalue)
        if routerId in routerIdList:
            pimObj = deviceGroupObj.Ethernet.find().Ipv4.find().PimV4Interface.find()
            pimV4JoinPruneList = pimObj.PimV4JoinPruneList
            startMcastAddrMultivalue = pimV4JoinPruneList.groupV4Address
            listOfConfiguredMcastIpAddresses = self.ixnObj.getMultivalueValues(startMcastAddrMultivalue)
            self.ixnObj.logInfo('sendPimV4JoinNgpf: List of configured Mcast IP addresses: %s' % listOfConfiguredMcastIpAddresses)
            if not listOfConfiguredMcastIpAddresses:
                raise IxNetRestApiException('sendPimV4JoinNgpf: No Mcast IP address configured')
            if multicastIpAddress == 'all':
                listOfMcastAddresses = listOfConfiguredMcastIpAddresses
            else:
                # assumes multicastIpAddress is a list when not 'all' — TODO confirm
                listOfMcastAddresses = multicastIpAddress
            # Note: Index position is not zero based.
            indexListToSend = []
            for eachMcastAddress in listOfMcastAddresses:
                index = listOfConfiguredMcastIpAddresses.index(eachMcastAddress)
                indexListToSend.append(index + 1)
            self.ixnObj.logInfo('\t%s' % multicastIpAddress)
            if action == 'join':
                pimV4JoinPruneList.Join(indexListToSend)
            if action == 'leave':
                pimV4JoinPruneList.Leave(indexListToSend)

    def sendMldJoinNgpf(self, mldObj, ipv6AddressList):
        """
        Description
            For IPv6 only. This API will take the MLD object and loop through all the
            configured ports looking for the specified ipv6Address to send a join.

        Parameter
            ipv6AddressList: 'all' or a list of IPv6 addresses that must be EXACTLY
                how it is configured on the GUI.
        """
        mldMcastIPv6GroupListObj = mldObj.MldMcastIPv6GroupList
        startMcastAddrMultivalue = mldMcastIPv6GroupListObj.StartMcastAddr
        listOfConfiguredGroupIpAddresses = self.getMultivalueValues(startMcastAddrMultivalue)
        if ipv6AddressList == 'all':
            listOfGroupAddresses = listOfConfiguredGroupIpAddresses
        else:
            listOfGroupAddresses = ipv6AddressList
        # Translate addresses to the protocol's 1-based index positions.
        indexListToSend = []
        for eachSpecifiedIpv6Addr in listOfGroupAddresses:
            index = listOfConfiguredGroupIpAddresses.index(eachSpecifiedIpv6Addr)
            indexListToSend.append(index + 1)
        mldMcastIPv6GroupListObj.MldJoinGroup(indexListToSend)

    def sendMldLeaveNgpf(self, mldObj, ipv6AddressList):
        """
        Description
            For IPv6 only. This API will take the mld sessionUrl object and loop through
            all the configured ports looking for the specified ipv6Address to send a leave.

        Parameters
            mldObj
            ipv6AddressList: 'all' or a list of IPv6 addresses that must be EXACTLY
                how it is configured on the GUI.
        """
        # Mirrors sendMldJoinNgpf, but issues MldLeaveGroup instead of MldJoinGroup.
        mldMcastIPv6GroupListObj = mldObj.MldMcastIPv6GroupList
        startMcastAddrMultivalue = mldMcastIPv6GroupListObj.StartMcastAddr
        listOfConfiguredGroupIpAddresses = self.getMultivalueValues(startMcastAddrMultivalue)
        if ipv6AddressList == 'all':
            listOfGroupAddresses = listOfConfiguredGroupIpAddresses
        else:
            listOfGroupAddresses = ipv6AddressList
        indexListToSend = []
        for eachSpecifiedIpv6Addr in listOfGroupAddresses:
            index = listOfConfiguredGroupIpAddresses.index(eachSpecifiedIpv6Addr)
            indexListToSend.append(index + 1)
        mldMcastIPv6GroupListObj.MldLeaveGroup(indexListToSend)

    def getSessionStatus(self, protocolObj):
        """
        Description
            Get the object's session status.

        Parameter
            protocolObj: (str): The protocol object.

        Returns
            Success: A list of up|down session status.
            Failed: An empty list
        """
        return protocolObj.SessionStatus

    def getIpAddresses(self, ipObj):
        """
        Description
            Get the configured ipv4|ipv6 addresses in a list.
Parameter
            ipObj
        """
        multivalueObj = ipObj.Address
        response = self.getMultivalueValues(multivalueObj)
        return response

    def showTopologies(self):
        """
        Description
            Show the NGPF configuration: Topology Groups, Device Groups, Mac Addreseses,
            VLAN ID, IPv4, IPv6, protocol sessions.
        """
        # Pure display helper: walks Topology -> DeviceGroup -> Ethernet ->
        # IPv4/IPv6 -> per-protocol objects and logs a formatted report.
        self.ixnObj.logInfo('Display all configs from the topology', timestamp=False)
        for topoObj in self.ixNetwork.Topology.find():
            self.ixnObj.logInfo('TopologyGroup: {0} Name: {1}'.format(
                topoObj.href.split('/')[-1], topoObj.DescriptiveName), timestamp=False)
            self.ixnObj.logInfo(' Status: {0}'.format(topoObj.Status), timestamp=False)
            for vportObj in self.ixNetwork.Vport.find():
                self.ixnObj.logInfo(' VportId: {0} Name: {1} AssignedTo: {2} State: {3}'.
                                    format(vportObj.href.split('/')[-1], vportObj.Name,
                                           vportObj.AssignedTo, vportObj.State), timestamp=False)
                self.ixnObj.logInfo('\n', end='', timestamp=False)
            for deviceGroup in topoObj.DeviceGroup.find():
                self.ixnObj.logInfo(' DeviceGroup:{0} Name:{1}'.format(
                    deviceGroup.href.split('/')[-1], deviceGroup.DescriptiveName),
                    timestamp=False)
                self.ixnObj.logInfo('\tStatus: {0}'.format(deviceGroup.Status), end='\n\n',
                                    timestamp=False)
                for ethernet in deviceGroup.Ethernet.find():
                    ethernetSessionStatus = ethernet.Status
                    self.ixnObj.logInfo('\tEthernet:{0} Name:{1}'.format(
                        ethernet.href.split('/')[-1], ethernet.Name), timestamp=False)
                    self.ixnObj.logInfo('\t Status: {0}'.format(ethernet.Status),
                                        timestamp=False)
                    # ipv6Obj holds [None] when no IPv6 stack exists so the
                    # zip() below still yields one tuple per vlan/ipv4 pair.
                    ipv6Obj = []
                    if not (ethernet.Ipv6.find()):
                        ipv6Obj.insert(0, None)
                    else:
                        ipv6Obj = ethernet.Ipv6.find()
                    for vlan, ipv4, ipv6 in zip(ethernet.Vlan.find(), ethernet.Ipv4.find(),
                                                ipv6Obj):
                        self.ixnObj.logInfo('\tIPv4:{0} Status: {1}'.format(
                            ipv4.href.split('/')[-1], ipv4.Status), timestamp=False)
                        ipv4SessionStatus = ipv4.Status
                        index = 1
                        # Table header for the per-interface IPv4 rows.
                        self.ixnObj.logInfo('\t {0:8} {1:14} {2:7} {3:9} {4:12} {5:16} {6:12} '
                                            '{7:7} {8:7}'.format('Index', 'MacAddress', 'VlanId',
                                                                 'VlanPri', 'EthSession',
                                                                 'IPv4Address', 'Gateway',
                                                                 'Prefix', 'Ipv4Session'),
                                            timestamp=False)
                        self.ixnObj.logInfo('\t {0}'.format('-' * 104), timestamp=False)
                        for mac, vlanId, vlanPriority, ip, gateway, prefix, in zip(
                                ethernet.Mac.Values, vlan.VlanId.Values, vlan.Priority.Values,
                                ipv4.Address.Values, ipv4.GatewayIp.Values, ipv4.Prefix.Values):
                            self.ixnObj.logInfo('\t {0:^5} {1:18} {2:^6} {3:^9} {4:13} {5:<15} '
                                                '{6:<13} {7:6} {8:7}'.format(index, mac, vlanId,
                                                                             vlanPriority,
                                                                             ethernetSessionStatus,
                                                                             ip, gateway, prefix,
                                                                             ipv4SessionStatus),
                                                timestamp=False)
                            index += 1

                        # IPv6
                        if None not in ipv6Obj:
                            self.ixnObj.logInfo('\tIPv6:{0} Status: {1}'.format(
                                ipv6.href.split('/')[-1], ipv6.Status), timestamp=False)
                            self.ixnObj.logInfo('\t {0:8} {1:14} {2:7} {3:9} {4:12} {5:19} '
                                                '{6:18} {7:7} {8:7}'.format('Index', 'MacAddress',
                                                                            'VlanId', 'VlanPri',
                                                                            'EthSession',
                                                                            'IPv6Address',
                                                                            'Gateway', 'Prefix',
                                                                            'Ipv6Session'),
                                                timestamp=False)
                            self.ixnObj.logInfo('\t %s' % '-' * 113)
                            # NOTE(review): index is NOT reset to 1 here, so the
                            # IPv6 rows continue numbering after the IPv4 rows —
                            # confirm this is intentional.
                            for mac, vlanId, vlanPriority, ip, gateway, prefix, in zip(
                                    ethernet.Mac.Values, vlan.VlanId.Values,
                                    vlan.Priority.Values, ipv6.Address.Values,
                                    ipv6.GatewayIp.Values, ipv6.Prefix.Values):
                                self.ixnObj.logInfo('\t {0:^5} {1:18} {2:^6} {3:^9} {4:13} '
                                                    '{5:<15} {6:<13} {7:8} {8:7}'.
                                                    format(index, mac, vlanId, vlanPriority,
                                                           ethernetSessionStatus, ip, gateway,
                                                           prefix, ipv6.Status), timestamp=False)
                                index += 1
                        self.ixnObj.logInfo('\n', end='', timestamp=False)

                        # Per-IPv4 protocol stacks.
                        if ipv4.BgpIpv4Peer.find() != []:
                            for bgpIpv4Peer in ipv4.BgpIpv4Peer.find():
                                self.ixnObj.logInfo('\tBGPIpv4Peer:{0} Name:{1}'.format(
                                    bgpIpv4Peer.href.split('/')[-1], bgpIpv4Peer.Name),
                                    timestamp=False)
                                bgpType = bgpIpv4Peer.Type.Values
                                localAs2Bytes = bgpIpv4Peer.LocalAs2Bytes.Values
                                self.ixnObj.logInfo('\t Type: {0} localAs2Bytes: {1}'.format(
                                    bgpType[0], localAs2Bytes[0]), timestamp=False)
                                self.ixnObj.logInfo('\t Status: {0}'.format(bgpIpv4Peer.Status),
                                                    timestamp=False)
                                index = 1
                                for dutIp, bgpSession, flap, uptime, downtime in zip(
                                        bgpIpv4Peer.DutIp.Values, bgpIpv4Peer.SessionStatus,
                                        bgpIpv4Peer.Flap.Values, bgpIpv4Peer.UptimeInSec.Values,
                                        bgpIpv4Peer.DowntimeInSec.Values):
                                    self.ixnObj.logInfo('\t\t{0}: DutIp:{1} SessionStatus:{2} '
                                                        'Flap:{3} upTime:{4} downTime:{5}'.
                                                        format(index, dutIp, bgpSession, flap,
                                                               uptime, downtime), timestamp=False)
                                    index += 1
                        for ospfv2 in ipv4.Ospfv2.find():
                            self.ixnObj.logInfo('\t OSPFv2:{0} Name:{1}'.format(
                                ospfv2.href.split('/')[-1], ospfv2.Name), timestamp=False)
                            self.ixnObj.logInfo('\t\tStatus: {0}'.format(ospfv2.Status),
                                                end='\n\n', timestamp=False)
                        for igmpHost in ipv4.IgmpHost.find():
                            self.ixnObj.logInfo('\t igmpHost:{0} Name:{1}'.format(
                                igmpHost.href.split('/')[-1], igmpHost.Name), timestamp=False)
                            self.ixnObj.logInfo('\t\tStatus: {0}'.format(igmpHost.Status),
                                                end='\n\n', timestamp=False)
                        for igmpQuerier in ipv4.IgmpQuerier.find():
                            self.ixnObj.logInfo('\t igmpQuerier:{0} Name:{1}'.format(
                                igmpQuerier.href.split('/')[-1], igmpQuerier.Name),
                                timestamp=False)
                            self.ixnObj.logInfo('\t\tStatus: {0}'.format(igmpQuerier.Status),
                                                end='\n\n', timestamp=False)
                        for vxlan in ipv4.Vxlan.find():
                            self.ixnObj.logInfo('\t vxlan:{0} Name:{1}'.format(
                                vxlan.href.split('/')[-1], vxlan.Name), timestamp=False)
                            self.ixnObj.logInfo('\tStatus: {0}'.format(vxlan.Status),
                                                end='\n\n', timestamp=False)
                # Route-range (NetworkGroup) summaries; each pool type is probed
                # with try/except because a group carries only one pool flavor.
                for networkGroup in deviceGroup.NetworkGroup.find():
                    self.ixnObj.logInfo('\n\tNetworkGroup:{0} Name:{1}'.format(
                        networkGroup.href.split('/')[-1], networkGroup.Name), timestamp=False)
                    self.ixnObj.logInfo('\t Multiplier: {0}'.format(networkGroup.Multiplier),
                                        timestamp=False)
                    try:
                        startingAddress = \
                            networkGroup.Ipv4PrefixPools.find()[0].NetworkAddress.Values[0]
                        endingAddress = \
                            networkGroup.Ipv4PrefixPools.find()[0].NetworkAddress.Values[-1]
                        prefixPoolLength = \
                            networkGroup.Ipv4PrefixPools.find()[0].PrefixLength.Values[0]
                        self.ixnObj.logInfo('\t NetworkGroup Type: ipv4PrefixPools',
                                            timestamp=False)
                        self.ixnObj.logInfo('\t StartingAddress:{0} EndingAddress:{1} '
                                            'Prefix:{2}'.format(startingAddress, endingAddress,
                                                                prefixPoolLength),
                                            timestamp=False)
                    except Exception as e:
                        print(e)
                        pass
                    try:
                        startingAddress = \
                            networkGroup.Ipv6PrefixPools.find()[0].NetworkAddress.Values[0]
                        endingAddress = \
                            networkGroup.Ipv6PrefixPools.find()[0].NetworkAddress.Values[-1]
                        prefixPoolLength = \
                            networkGroup.Ipv6PrefixPools.find()[0].PrefixLength.Values[0]
                        self.ixnObj.logInfo('\t NetworkGroup Type: ipv6PrefixPools',
                                            timestamp=False)
                        self.ixnObj.logInfo('\t StartingAddress:{0} EndingAddress:{1} '
                                            'Prefix:{2}'.format(startingAddress, endingAddress,
                                                                prefixPoolLength),
                                            timestamp=False)
                    except Exception as e:
                        print(e)
                        pass
                if None not in ipv6Obj:
                    # NOTE(review): lowercase `ethernet.ipv6` here vs `ethernet.Ipv6`
                    # everywhere else — likely an AttributeError at runtime; confirm
                    # against the ixnetwork_restpy attribute name.
                    for ipv6 in ethernet.ipv6.find():
                        self.ixnObj.logInfo('\t IPv6:{0} Name:{1}'.format(
                            ipv6.href.split('/')[-1], ipv6.Name), timestamp=False)
                        for bgpIpv6Peer in ipv6.BgpIpv6Peer.find():
                            self.ixnObj.logInfo('\t BGPIpv6Peer:{0} Name:{1}'.format(
                                bgpIpv6Peer.href.split('/')[-1], bgpIpv6Peer.Name),
                                timestamp=False)
                        for ospfv3 in ipv6.Ospfv3.find():
                            self.ixnObj.logInfo('\t OSPFv3:{0} Name:{1}'.format(
                                ospfv3.href.split('/')[-1], ospfv3.Name), timestamp=False)
                        for mldHost in ipv6.MldHost.find():
                            self.ixnObj.logInfo('\t mldHost:{0} Name:{1}'.format(
                                mldHost.href.split('/')[-1], mldHost.Name), timestamp=False)
                        for mldQuerier in ipv6.MldQuerier.find():
                            self.ixnObj.logInfo('\t mldQuerier:{0} Name:{1}'.format(
                                mldQuerier.href.split('/')[-1], mldQuerier.Name),
                                timestamp=False)
        self.ixnObj.logInfo('\n', timestamp=False)

    def getBgpObject(self, topologyName=None, bgpAttributeList=None):
        """
        Description
            Get the BGP object from the specified Topology Group name and return the
            specified attributes

        Parameters
            topologyName: The Topology Group name
            bgpAttributeList: The BGP attributes to get.

        Example:
            bgpAttributeMultivalue = restObj.getBgpObject(topologyName='Topo1',
                                        bgpAttributeList=['flap', 'uptimeInSec',
                                                          'downtimeInSec'])
            restObj.configMultivalue(bgpAttributeMultivalue['flap'],
                                     multivalueType='valueList',
                                     data={'values': ['true', 'true']})
            restObj.configMultivalue(bgpAttributeMultivalue['uptimeInSec'],
                                     multivalueType='singleValue', data={'value': '60'})
            restObj.configMultivalue(bgpAttributeMultivalue['downtimeInSec'],
                                     multivalueType='singleValue', data={'value': '30'})
        """
        bgpAttributeDict = {}
        if (self.ixNetwork.Topology.find(Name=topologyName).DeviceGroup.find().Ethernet.find().
                Ipv4.find().BgpIpv4Peer.find()):
            # Only the first BGP peer object is inspected.
            bgpObj = self.ixNetwork.Topology.find(Name=topologyName).DeviceGroup.find() \
                .Ethernet.find().Ipv4.find().BgpIpv4Peer.find()[0]
            if bgpAttributeList is not None:
                for attribute in bgpAttributeList:
                    # Attribute names are camelCase; restpy properties are CapWords.
                    newattribute = attribute[0].upper() + attribute[1:]
                    bgpAttributeDict[attribute] = getattr(bgpObj, newattribute)
                return bgpAttributeDict
            # NOTE(review): returns None implicitly when bgpAttributeList is None.
        else:
            raise Exception("No bgp config found on the specified topology {}".format(topologyName))

    def isRouterIdInDeviceGroupObj(self, routerId, deviceGroupObj):
        # True when routerId appears in the Device Group's RouterData values.
        routerIdList = deviceGroupObj.RouterData.find().RouterId.find().RouterId.Values
        if routerId in routerIdList:
            return True
        else:
            return False

    def configBgpNumberOfAs(self, routerId, numberOfAs):
        """
        Description
            Set the total number of BGP AS # List.
            In the GUI, under NetworkGroup, BGP Route Range tab, bottom tab
            ASPathSegments, enter number of AS Segments.

        NOTE!
Currently, this API will get the first Network Group object even if there are
            multiple Network Groups. Network Groups could be filtered by the name or by
            the first route range address. Haven't decided yet. Don't want to filter by
            name because in a situation where customers are also using Spirent, Spirent
            doesn't go by name.

        Parameters
            routerId: The Device Group router ID
            numberOfAs: The total number of AS list to create.

        Requirements
            getDeviceGroupByRouterId()
        """
        deviceGroupObj = self.getDeviceGroupByRouterId(routerId=routerId)
        if deviceGroupObj is None:
            raise Exception('No Device Group found for router ID: %s' % routerId)
        try:
            # First try the IPv4 prefix pools on this Device Group.
            for bgpSegObj in deviceGroupObj.NetworkGroup.find().Ipv4PrefixPools.find() \
                    .BgpIPRouteProperty.find().BgpAsPathSegmentList.find():
                bgpSegObj.NumberOfAsNumberInSegment = numberOfAs
        except Exception as e:
            print(e)
            # NOTE(review): IPv6 fallback assumed to live inside the except
            # clause (indentation was ambiguous in the original) — confirm.
            for bgpSegObj in deviceGroupObj.NetworkGroup.find().Ipv6PrefixPools.find() \
                    .BgpIPRouteProperty.find().BgpAsPathSegmentList.find():
                bgpSegObj.NumberOfAsNumberInSegment = numberOfAs

    def configBgpAsPathSegmentListNumber(self, routerId, asNumber, indexAndAsNumber):
        """
        Description
            Set BGP AS numbers in the route range.
            If there are 5 AS# created under "Number of AS# In Segment-1", the
            asNumberList is the AS# that you want to modify for all route ranges
            (Device Group multiplier).
            The indexAndAsNumber is the route range index and value: [3, 300].
            3 = the 2nd route range (zero based) and 300 is the value.

        NOTE!
            Currently, this API will get the first Network Group object even if there
            are multiple Network Groups. Network Groups could be filtered by the name
            or by the first route range address. Haven't decided yet. Don't want to
            filter by name because in a situation where customers are also using
            Spirent, Spirent doesn't go by name.

        Parameters
            routerId: The Device Group router ID where the BGP is configured.
            asListNumber: 1|2|3|...|6|..: The AS# to modify.
                (On GUI, click NetworkGroup, on bottom tab asPathSegment, and on top
                tab, use the "Number of AS# In Segment-1" to set number of AS#1 or AS#2
<filename>Pymongo/prod/mylib.py
#!/usr/bin/python
# MongoDB log-archiving helpers: find documents in a date range, back them up
# to .bson files, delete them from the collection, then re-arrange and
# compress the backup files.
# NOTE(review): the bare "print;print;print;" statements and the binary-mode
# ('wb') writes of str objects suggest this script targets Python 2 --
# confirm before running it under Python 3, where both would misbehave.
# NOTE(review): indentation below is reconstructed from a collapsed source;
# confirm loop-level placement of the PART_NUM updates and trailing prints.
from bson import BSON, Binary, Code, decode_all
from bson.json_util import loads, dumps
from bson.objectid import ObjectId
from pymongo import InsertOne, DeleteOne, ReplaceOne
import pprint, datetime, time, calendar, sys, os, errno, glob, shutil
import tarfile, gzip

"""Time range validator"""
def timerange_validate (BEGIN_DAY, BEGIN_MONTH, BEGIN_YEAR, END_DAY, END_MONTH, END_YEAR):
    """Validate time range to cut log"""
    # Returns 0 when the BEGIN date is on/before the END date, 1 when the
    # range is inverted.
    # NOTE(review): RESULT starts at the sentinel 2 and the year comparison
    # has no final else, so BEGIN_YEAR < END_YEAR appears to return 2 rather
    # than 0 -- verify against the callers' expectations.
    RESULT = 2
    if BEGIN_YEAR > END_YEAR:
        print ("Invalid time range. END YEAR is lower than BEGIN YEAR")
        RESULT = 1
    elif BEGIN_YEAR == END_YEAR:
        if BEGIN_MONTH > END_MONTH:
            print ("Invalid time range. END MONTH is lower than BEGIN MONTH")
            RESULT = 1
        elif BEGIN_MONTH == END_MONTH:
            if BEGIN_DAY > END_DAY:
                print ("Invalid time range. END DAY is lower than BEGIN DAY")
                RESULT = 1
            else:
                RESULT = 0
        else:
            RESULT = 0
    return RESULT
"""*******************End of def*****************************************"""

"""Main archive engine, find, backup and delete document. For special ObjectId created by Mondra code."""
def backup_delete_docs_customid(BACKUP_PATTERN, INDEX_PATTERN, BEGIN_DAY, BEGIN_MONTH, BEGIN_YEAR, DAY, MONTH, YEAR, collection, BEGIN_PART_NUM):
    # Batches of up to 50,000 docs are dumped to <pattern>_<d>_<m>_<y>_<part>.bson
    # and their string _id values listed in a matching .txt file; the loop
    # repeats (incrementing PART_NUM) until a search produces an empty list.
    # The _id is used as-is (a plain string), unlike the *_stdid variant below.
    """Convert normal DATE to EPOCH DATE"""
    BEGIN_DATE = datetime.datetime(BEGIN_YEAR,BEGIN_MONTH,BEGIN_DAY,0,0,0)
    END_DATE = datetime.datetime(YEAR,MONTH,DAY,0,0,0)
    EPOCH = datetime.datetime.utcfromtimestamp(0)
    # Millisecond epoch timestamps, matching the stored index field's units.
    E_BEGIN_DATE = (BEGIN_DATE - EPOCH).total_seconds() * 1000
    E_END_DATE = (END_DATE - EPOCH).total_seconds() * 1000
    FLAG = 0
    PART_NUM = BEGIN_PART_NUM
    while FLAG == 0:
        """Generate filename to backup/delete"""
        OUTPUT_FILENAME = str(BACKUP_PATTERN)+'_'+str(DAY)+'_'+str(MONTH)+'_'+str(YEAR)+'_'+str(PART_NUM)+'.bson'
        DELETELIST_FILENAME = str(BACKUP_PATTERN)+'_'+str(DAY)+'_'+str(MONTH)+'_'+str(YEAR)+'_'+str(PART_NUM)+'.txt'
        print ("Current filename: ",DELETELIST_FILENAME)
        """Generate cursor to find document"""
        print ("Searching 50.000 docs to delete...")
        # NOTE(review): Cursor.max_scan() was deprecated in PyMongo 3.7 and
        # removed in 4.0 -- this pins the script to an old driver version.
        cursor = collection.find( { '$and': [ {INDEX_PATTERN: { '$lt': E_END_DATE } }, \
            {INDEX_PATTERN: { '$gte': E_BEGIN_DATE } } \
            ] } )\
            .max_scan(50000)
        # pprint.pprint(cursor.explain()) # leave here to debug of neccessary
        """Backup, list docs to delete"""
        total = 0
        totaldel = 0
        with open(DELETELIST_FILENAME, 'wb') as lf:
            with open(OUTPUT_FILENAME, 'wb') as tf:
                for item in cursor:
                    total += 1
                    # print ("Add docs to delete: "+ str(item['_id']))
                    tf.write(BSON.encode(item))
                    # NOTE(review): writing str to a 'wb' file only works on
                    # Python 2 -- would raise TypeError on Python 3.
                    lf.write(str(item['_id'])+'\n')
        print (OUTPUT_FILENAME+" .Total documents: ", total)
        if total > 0:
            """Sleep to reduce memory stress on Mongo server"""
            print ("Search completed. Waiting 2 seconds for Mongo server...")
            time.sleep(2)
        else:
            print ("No docs found. Skip waiting.")
        """Decide either stop or continue to search"""
        """FLAG 0: continue, FLAG 1: stop"""
        """DELETELIST_FILENAME = 0 bytes mean no record to delete, then stop"""
        if (os.stat(DELETELIST_FILENAME).st_size == 0) == True:
            FLAG = 1
            print (DELETELIST_FILENAME+": No more docs to delete")
        else:
            FLAG = 0
            """Else, Delete docs"""
            print (DELETELIST_FILENAME+": BEGIN DELETING DOCS!")
            with open(DELETELIST_FILENAME, 'rb') as lf:
                for line in lf:
                    ID = line.rstrip()
                    # Custom ids are stored as plain strings, so no ObjectId()
                    # wrapping here.
                    RESULT = collection.delete_one({'_id': ID})
                    if int(RESULT.deleted_count) > 0:
                        totaldel += 1
                    # print("Deleting: ",ID," Return: ",RESULT.deleted_count)
                    # time.sleep(0.001)
        PART_NUM += 1
        print(DELETELIST_FILENAME+". Total deleted documents: ", totaldel)
        print ("Current FLAG: ",FLAG)
        print ("Last filename: ",DELETELIST_FILENAME)
    # Python 2 bare print statements: emit three blank lines for readability.
    print;print;print;
"""*******************End of def**************************************"""

"""Main archive engine, find, backup and delete document. For standard ObjectId."""
def backup_delete_docs_stdid(BACKUP_PATTERN, INDEX_PATTERN, BEGIN_DAY, BEGIN_MONTH, BEGIN_YEAR, DAY, MONTH, YEAR, collection, BEGIN_PART_NUM):
    # Identical to backup_delete_docs_customid except that the delete phase
    # wraps each listed id in bson.ObjectId before issuing delete_one.
    """Convert normal DATE to EPOCH DATE"""
    BEGIN_DATE = datetime.datetime(BEGIN_YEAR,BEGIN_MONTH,BEGIN_DAY,0,0,0)
    END_DATE = datetime.datetime(YEAR,MONTH,DAY,0,0,0)
    EPOCH = datetime.datetime.utcfromtimestamp(0)
    E_BEGIN_DATE = (BEGIN_DATE - EPOCH).total_seconds() * 1000
    E_END_DATE = (END_DATE - EPOCH).total_seconds() * 1000
    FLAG = 0
    PART_NUM = BEGIN_PART_NUM
    while FLAG == 0:
        """Generate filename to backup/delete"""
        OUTPUT_FILENAME = str(BACKUP_PATTERN)+'_'+str(DAY)+'_'+str(MONTH)+'_'+str(YEAR)+'_'+str(PART_NUM)+'.bson'
        DELETELIST_FILENAME = str(BACKUP_PATTERN)+'_'+str(DAY)+'_'+str(MONTH)+'_'+str(YEAR)+'_'+str(PART_NUM)+'.txt'
        print ("Current filename: ",DELETELIST_FILENAME)
        """Generate cursor to find document"""
        print ("Searching 50.000 docs to delete...")
        # NOTE(review): max_scan() deprecated/removed in modern PyMongo (see
        # note in backup_delete_docs_customid).
        cursor = collection.find( { '$and': [ {INDEX_PATTERN: { '$lt': E_END_DATE } }, \
            {INDEX_PATTERN: { '$gte': E_BEGIN_DATE } } \
            ] } )\
            .max_scan(50000)
        # pprint.pprint(cursor.explain()) # leave here to debug of neccessary
        """Backup, list docs to delete"""
        total = 0
        totaldel = 0
        with open(DELETELIST_FILENAME, 'wb') as lf:
            with open(OUTPUT_FILENAME, 'wb') as tf:
                for item in cursor:
                    total += 1
                    # print ("Add docs to delete: "+ str(item['_id']))
                    tf.write(BSON.encode(item))
                    lf.write(str(item['_id'])+'\n')
        print (OUTPUT_FILENAME+" .Total documents: ", total)
        if total > 0:
            """Sleep to reduce memory stress on Mongo server"""
            print ("Search completed. Waiting 2 seconds for Mongo server...")
            time.sleep(2)
        else:
            print ("No docs found. Skip waiting.")
        """Decide either stop or continue to search"""
        """FLAG 0: continue, FLAG 1: stop"""
        """DELETELIST_FILENAME = 0 bytes mean no record to delete, then stop"""
        if (os.stat(DELETELIST_FILENAME).st_size == 0) == True:
            FLAG = 1
            print (DELETELIST_FILENAME+": No more docs to delete")
        else:
            FLAG = 0
            """Else, Delete docs"""
            print (DELETELIST_FILENAME+": BEGIN DELETING DOCS!")
            with open(DELETELIST_FILENAME, 'rb') as lf:
                for line in lf:
                    ID = line.rstrip()
                    # Standard ids must be rebuilt as ObjectId for the query
                    # to match.
                    RESULT = collection.delete_one({'_id': ObjectId(ID)})
                    if int(RESULT.deleted_count) > 0:
                        totaldel += 1
                    # print("Deleting: ",ID," Return: ",RESULT.deleted_count)
                    # time.sleep(0.001)
        PART_NUM += 1
        print(DELETELIST_FILENAME+". Total deleted documents: ", totaldel)
        print ("Current FLAG: ",FLAG)
        print ("Last filename: ",DELETELIST_FILENAME)
    print;print;print;
"""*******************End of def**************************************"""

"""Move backed up file to new location"""
def re_arrange(BACKUP_PATTERN):
    # Create (if needed) BACKUP_PATTERN/ and BACKUP_PATTERN_removelist/
    # directories, delete zero-byte .bson/.txt leftovers in the working
    # directory, and move the non-empty ones into their directories.
    """ Check if we have existing directory for BSON and TXT file """
    DIR = ""
    for file in glob.glob(BACKUP_PATTERN):
        DIR = file
    if DIR == "":
        print("No existing BSON directory found. Create new directory.")
        os.makedirs(BACKUP_PATTERN,mode=0o755)
    else:
        print("Found BSON directory: ", DIR)
    DIR = ""
    for file in glob.glob(BACKUP_PATTERN+"_removelist"):
        DIR = file
    if DIR == "":
        print("No existing TXT directory found. Create new directory.")
        os.makedirs(BACKUP_PATTERN+'_removelist',mode=0o755)
    else:
        print("Found TXT directory: ", DIR)
    print("Begin cleaning up files.")
    """Remove empty file, then Move all .bson, .txt file to directory"""
    DST = BACKUP_PATTERN
    for file in glob.glob(BACKUP_PATTERN+"*.bson"):
        if (os.stat(file).st_size == 0) == True:
            print("Delete empty BSON file ",file)
            os.remove(file)
        else:
            print("Move ",file)
            shutil.move(file, DST)
    DST = BACKUP_PATTERN+'_removelist'
    for file in glob.glob(BACKUP_PATTERN+"*.txt"):
        if (os.stat(file).st_size == 0) == True:
            print("Delete empty LIST ",file)
            os.remove(file)
        else:
            print("Move ",file)
            shutil.move(file, DST)
"""*******************End of def**************************************"""

""" Restore docs from BSON file """
def restore_docs(DIR, BACKUP_PATTERN, DAY, MONTH, YEAR, collection, BEGIN_PART_NUM):
    # Re-insert every matching <pattern>_<d>_<m>_<y>*.bson dump for the given
    # date back into the collection.  BEGIN_PART_NUM is accepted but unused
    # here (the glob matches every part).
    # NOTE(review): DIR + BACKUP_PATTERN is string concatenation, not a path
    # join -- DIR presumably ends with a path separator; confirm at call site.
    FULLPATH = os.path.join(DIR + BACKUP_PATTERN, BACKUP_PATTERN + "_" + str(DAY) + \
        "_" + str(MONTH) + "_" + str(YEAR))
    print ("Restoring ",BACKUP_PATTERN,DAY,MONTH,YEAR)
    for file in glob.glob(FULLPATH + "*.bson"):
        print ("Restoring from ",file)
        with open(file, 'rb') as f:
            # NOTE(review): Collection.insert() is the deprecated bulk form;
            # modern PyMongo uses insert_many().
            collection.insert(decode_all(f.read()))
"""*******************End of def**************************************"""

""" Compress BSON file """
def compress_docs(DIR, BACKUP_PATTERN, MONTH, YEAR):
    # For each first-level child directory of DIR matching BACKUP_PATTERN:
    # bundle that month's .bson dumps into one .tar.gz and delete the
    # originals; also wipe the *_removelist scratch directories entirely.
    LISTDIR = []
    ### Get list of child directories inside backed up directory.
    ### Break: only decend 1 level
    for (root, dirs, files) in os.walk(DIR, topdown=True, followlinks=False):
        LISTDIR = dirs
        break
    ### Filter child directories list. Only scan .BSON file inside directories which have BACKUP_PATTERN
    for CHILD_DIR in LISTDIR:
        if BACKUP_PATTERN in CHILD_DIR and '_removelist' not in CHILD_DIR:
            ### Get FULLPATH, use for glob scan later
            FULLPATH = os.path.join(DIR , CHILD_DIR)
            ### Create archive
            TARFILE = os.path.join(DIR, CHILD_DIR, BACKUP_PATTERN + '_' + str(MONTH) + '_' + str(YEAR) + '.tar.gz')
            if not os.path.isfile(TARFILE):
                try:
                    with tarfile.TarFile.gzopen(TARFILE, mode='w', compresslevel=9) as targz:
                        print("Create archive: ", TARFILE)
                        for FILE in glob.glob(FULLPATH + '/' + BACKUP_PATTERN + '_*_' + str(MONTH) + '_' + str(YEAR) + '_*' + ".bson"):
                            try:
                                ### Split head, tail to avoid adding fullpath to tarfile
                                head, tail = os.path.split(FILE)
                                targz.add(FILE, arcname=tail)
                                # print("Compressed BSON file: ", tail)
                            except:
                                print("Error adding file ", FILE, "Error: ", sys.exc_info())
                                raise
                except:
                    print("Unexpected error:", sys.exc_info())
                    raise
                ### Delete compressed .BSON files
                for FILE in glob.glob(FULLPATH + '/' + BACKUP_PATTERN + '_*_' + str(MONTH) + '_' + str(YEAR) + '_*' + ".bson"):
                    # print("Delete compressed BSON file: ", FILE)
                    os.remove(FILE)
            else:
                print("File existed, will not create new archive: ",TARFILE)
            # A tar.gz below 100 bytes is treated as an empty archive and
            # discarded.
            if (os.stat(TARFILE).st_size < 100) == True:
                # print("Delete empty archive file ",TARFILE)
                os.remove(TARFILE)
            ### Uncomment this block to check content inside tar file
            # print
            # print('Contents:')
            # t = tarfile.open(TARFILE, 'r')
            # for member_info in t.getmembers():
            #     print(member_info.name)
            # print
            ###########################################################
        ### Remove all contents inside BACKUP_PATTERN_removelist directory
        ### NOTE: NEVER put any data files inside this directory.
        ### Files inside this directory is temporary file, and are used to store temporary pointer's address only
        if BACKUP_PATTERN in CHILD_DIR and '_removelist' in CHILD_DIR:
            FULLPATH = os.path.join(DIR , CHILD_DIR)
            shutil.rmtree(FULLPATH)
"""*******************End of def**************************************"""

"""Resume from last interuptted find/delete"""
def resume_stdid(BACKUP_PATTERN, INDEX_PATTERN, collection):
    # Clean up zero-byte files left behind by an interrupted run, then locate
    # the most recent backup files to continue from.
    """Delete empty files, left by interuptted operation"""
    for file in glob.glob(BACKUP_PATTERN+"*.bson"):
        if (os.stat(file).st_size == 0) == True:
            print("Delete empty BSON file ",file)
            os.remove(file)
    for file in glob.glob(BACKUP_PATTERN+"*.txt"):
        if (os.stat(file).st_size == 0) == True:
            print("Delete empty TXT file ",file)
            os.remove(file)
    """Find the last BSON and TXT file to resume"""
    LAST_MTIME
              A length
              :meth:`~DiscreteQuadraticModel.num_interactions` array. If the
              case interactions were defined in a sparse matrix, these would
              be the row indices.

            - `icol`: A length
              :meth:`~DiscreteQuadraticModel.num_interactions` array. If the
              case interactions were defined in a sparse matrix, these would
              be the column indices.

            - `quadratic_biases`: A length
              :meth:`~DiscreteQuadraticModel.num_interactions` array. If the
              case interactions were defined in a sparse matrix, these would
              be the values.

            labels (list, optional):
                The variable labels. Defaults to index-labeled.

        Example:

            >>> dqm = dimod.DiscreteQuadraticModel()
            >>> u = dqm.add_variable(5)
            >>> v = dqm.add_variable(3, label='3var')
            >>> dqm.set_quadratic(u, v, {(0, 2): 1})
            >>> vectors = dqm.to_numpy_vectors()
            >>> new = dimod.DiscreteQuadraticModel.from_numpy_vectors(*vectors)

        See Also:
            :meth:`~DiscreteQuadraticModel.to_numpy_vectors`

        """
        obj = cls()

        # Delegate reconstruction to the cython implementation.
        obj._cydqm = cyDiscreteQuadraticModel.from_numpy_vectors(
            case_starts, linear_biases, quadratic)

        if labels is not None:
            if len(labels) != obj._cydqm.num_variables():
                raise ValueError(
                    "labels does not match the length of the DQM"
                    )

            for v in labels:
                obj.variables._append(v)
        else:
            # No labels given: fall back to index labels.
            for v in range(obj._cydqm.num_variables()):
                obj.variables._append()

        return obj

    def get_linear(self, v):
        """The linear biases associated with variable `v`.

        Args:
            v: A variable in the discrete quadratic model.

        Returns:
            :class:`~numpy.ndarray`: The linear biases in an array.

        """
        return self._cydqm.get_linear(self.variables.index(v))

    def get_linear_case(self, v, case):
        """The linear bias associated with case `case` of variable `v`.

        Args:
            v: A variable in the discrete quadratic model.

            case (int): The case of `v`.

        Returns:
            The linear bias.

        """
        return self._cydqm.get_linear_case(self.variables.index(v), case)

    def get_quadratic(self, u, v, array=False):
        """The biases associated with the interaction between `u` and `v`.

        Args:
            u: A variable in the discrete quadratic model.

            v: A variable in the discrete quadratic model.

            array (bool, optional, default=False): If True, a dense array is
                returned rather than a dict.

        Returns:
            The quadratic biases. If `array=False`, returns a dictionary of
            the form `{case_u, case_v: bias, ...}`
            If `array=True`, returns a
            :meth:`~DiscreteQuadraticModel.num_cases(u)` by
            :meth:`~DiscreteQuadraticModel.num_cases(v)` numpy array.

        """
        return self._cydqm.get_quadratic(
            self.variables.index(u), self.variables.index(v), array=array)

    def get_quadratic_case(self, u, u_case, v, v_case):
        """The bias associated with the interaction between two cases of `u`
        and `v`.

        Args:
            u: A variable in the discrete quadratic model.

            u_case (int): The case of `u`.

            v: A variable in the discrete quadratic model.

            v_case (int): The case of `v`.

        Returns:
            The quadratic bias.

        """
        return self._cydqm.get_quadratic_case(
            self.variables.index(u), u_case, self.variables.index(v), v_case)

    def num_cases(self, v=None):
        """If v is provided, the number of cases associated with v, otherwise
        the total number of cases in the DQM.
        """
        if v is None:
            return self._cydqm.num_cases()
        return self._cydqm.num_cases(self.variables.index(v))

    def num_case_interactions(self):
        """The total number of case interactions."""
        return self._cydqm.num_case_interactions()

    def num_variable_interactions(self):
        """The total number of variable interactions"""
        return self._cydqm.num_variable_interactions()

    def num_variables(self):
        """The number of variables in the discrete quadratic model."""
        return self._cydqm.num_variables()

    def relabel_variables(self, mapping, inplace=True):
        """Relabel the variables of the DQM according to `mapping`.

        If `inplace` is False the relabeling is applied to a copy, which is
        returned; otherwise the model is modified and returns itself.
        """
        if not inplace:
            return self.copy().relabel_variables(mapping, inplace=True)
        self.variables._relabel(mapping)
        return self

    def relabel_variables_as_integers(self, inplace=True):
        """Relabel the variables of the DQM to integers.

        Args:
            inplace (bool, optional, default=True):
                If True, the discrete quadratic model is updated in-place;
                otherwise, a new discrete quadratic model is returned.

        Returns:
            tuple: A 2-tuple containing:

                A discrete quadratic model with the variables relabeled. If
                `inplace` is set to True, returns itself.

                dict: The mapping that will restore the original labels.

        """
        if not inplace:
            return self.copy().relabel_variables_as_integers(inplace=True)
        return self, self.variables._relabel_as_integers()

    def set_linear(self, v, biases):
        """Set the linear biases associated with `v`.

        Args:
            v: A variable in the discrete quadratic model.

            biases (array-like): The linear biases in an array.

        """
        self._cydqm.set_linear(self.variables.index(v), np.asarray(biases))

    def set_linear_case(self, v, case, bias):
        """Set the linear bias associated with case `case` of variable `v`.

        Args:
            v: A variable in the discrete quadratic model.

            case (int): The case of `v`.

            bias (float): The linear bias.

        """
        self._cydqm.set_linear_case(self.variables.index(v), case, bias)

    def set_quadratic(self, u, v, biases):
        """Set biases associated with the interaction between `u` and `v`.

        Args:
            u: A variable in the discrete quadratic model.

            v: A variable in the discrete quadratic model.

            biases (array-like/dict):
                The quadratic biases. If a dict, then a dictionary of the
                form `{case_u, case_v: bias, ...}`. Otherwise, then should be,
                a :meth:`~DiscreteQuadraticModel.num_cases(u)` by
                :meth:`~DiscreteQuadraticModel.num_cases(v)` array-like.

        """
        self._cydqm.set_quadratic(
            self.variables.index(u), self.variables.index(v), biases)

    def set_quadratic_case(self, u, u_case, v, v_case, bias):
        """Set the bias associated with the interaction between two cases of
        `u` and `v`.

        Args:
            u: A variable in the discrete quadratic model.

            u_case (int): The case of `u`.

            v: A variable in the discrete quadratic model.

            v_case (int): The case of `v`.

            bias (float): The quadratic bias.

        """
        self._cydqm.set_quadratic_case(
            self.variables.index(u), u_case, self.variables.index(v), v_case,
            bias)

    def _to_file_numpy(self, file, compressed):
        # the biases etc, saved using numpy

        # we'd like to just let numpy handle the header etc, but it doesn't
        # do a good job of cleaning up after itself in np.load, so we record
        # the section length ourselves
        file.write(DATA_MAGIC_PREFIX)
        # 4-byte placeholder, overwritten below with the little-endian
        # section length once it is known (see file.seek(start - 4)).
        file.write(b'    ')  # will be replaced by the length
        start = file.tell()

        vectors = self.to_numpy_vectors()

        if compressed:
            save = np.savez_compressed
        else:
            save = np.savez

        save(file, case_starts=vectors.case_starts,
             linear_biases=vectors.linear_biases,
             quadratic_row_indices=vectors.quadratic.row_indices,
             quadratic_col_indices=vectors.quadratic.col_indices,
             quadratic_biases=vectors.quadratic.biases,
             )

        # record the length
        end = file.tell()
        file.seek(start-4)
        file.write(np.dtype('<u4').type(end - start).tobytes())
        file.seek(end)

    def to_file(self, compressed=False, ignore_labels=False,
                spool_size=int(1e9)):
        """Convert the DQM to a file-like object.

        Args:
            compressed (bool, optional default=False):
                If True, most of the data will be compressed.

            ignore_labels (bool, optional, default=False):
                Treat the DQM as unlabeled. This is useful for large DQMs to
                save on space.

            spool_size (int, optional, default=int(1e9)):
                Defines the `max_size` passed to the constructor of
                :class:`tempfile.SpooledTemporaryFile`. Determines whether
                the returned file-like's contents will be kept on disk or in
                memory.

        Returns:
            :class:`tempfile.SpooledTemporaryFile`: A file-like object
            that can be used to construct a copy of the DQM.

        Format Specification (Version 1.0):

            This format is inspired by the `NPY format`_

            **Header**

            The first 8 bytes are a magic string: exactly `"DIMODDQM"`.

            The next 1 byte is an unsigned byte: the major version of the
            file format.

            The next 1 byte is an unsigned byte: the minor version of the
            file format.

            The next 4 bytes form a little-endian unsigned int, the length of
            the header data `HEADER_LEN`.

            The next `HEADER_LEN` bytes form the header data. This is a
            json-serialized dictionary. The dictionary is exactly:

            .. code-block:: python

                dict(num_variables=dqm.num_variables(),
                     num_cases=dqm.num_cases(),
                     num_case_interactions=dqm.num_case_interactions(),
                     num_variable_interactions=dqm.num_variable_interactions(),
                     variables=not (ignore_labels or dqm.variables.is_range),
                     )

            it is padded with spaces to make the entire length of the header
            divisible by 64.

            **DQM Data**

            The first 4 bytes are exactly `"BIAS"`

            The next 4 bytes form a little-endian unsigned int, the length of
            the DQM data `DATA_LEN`.

            The next `DATA_LEN` bytes are the vectors as returned by
            :meth:`DiscreteQuadraticModel.to_numpy_vectors` saved using
            :func:`numpy.save`.

            **Variable Data**

            The first 4 bytes are exactly "VARS".

            The next 4 bytes form a little-endian unsigned int, the length of
            the variables array `VARIABLES_LENGTH`.

            The next VARIABLES_LENGTH bytes are a json-serialized array. As
            constructed by `json.dumps(list(bqm.variables))`.

        .. _NPY format:
            https://docs.scipy.org/doc/numpy/reference/generated/numpy.lib.format.html

        See Also:
            :meth:`DiscreteQuadraticModel.from_file`

        """
        file = tempfile.SpooledTemporaryFile(max_size=spool_size)

        # attach the header
        header_parts = [DQM_MAGIC_PREFIX,
                        VERSION,
                        bytes(4),  # placeholder for HEADER_LEN
                        ]

        index_labeled = ignore_labels or self.variables.is_range

        header_data = json.dumps(
            dict(num_variables=self.num_variables(),
                 num_cases=self.num_cases(),
                 num_case_interactions=self.num_case_interactions(),
                 num_variable_interactions=self.num_variable_interactions(),
                 variables=not index_labeled,
                 ),
            sort_keys=True).encode('ascii')

        header_parts.append(header_data)

        # make the entire header length divisible by 64
        length = sum(len(part) for part in header_parts)
        if length % 64:
            padding = b' '*(64 - length % 64)
        else:
            padding = b''
        header_parts.append(padding)

        HEADER_LEN = len(padding) + len(header_data)
        header_parts[2] = np.dtype('<u4').type(HEADER_LEN).tobytes()

        for part in header_parts:
            file.write(part)

        # the section containing most of the data, encoded with numpy
        self._to_file_numpy(file, compressed)

        if not index_labeled:
            file.write(VariablesSection(self.variables).dumps())

        file.seek(0)

        return file

    def to_numpy_vectors(self):
        """Convert the DQM to five numpy vectors and the labels.

        Returns:
            :class:`DQMVectors`: A named tuple with fields `['case_starts',
            'linear_biases', 'quadratic', 'labels'].

            - `case_starts`: A length
              :meth:`~DiscreteQuadraticModel.num_variables` array. The cases
              associated with variable `v` are in the range `[case_starts[v],
              cases_starts[v+1])`.

            - `linear_biases`: A length
              :meth:`~DiscreteQuadraticModel.num_cases` array. The linear
              biases.

            - `quadratic`: A named tuple with fields `['row_indices',
              'col_indices', 'biases']`.

              * `row_indices`: A length
                :meth:`~DiscreteQuadraticModel.num_case_interactions` array.
                If the case interactions were defined in a sparse matrix,
                these would be the row indices.

              * `col_indices`: A length
                :meth:`~DiscreteQuadraticModel.num_case_interactions` array.
                If the case interactions were defined in a sparse matrix,
                these would be the column indices.

              * `biases`: A length
# Kivy application module for a Dungeons & Dragons board-game campaign
# generator.  Resolution/settings handling, global popups and the main UI
# layout class are all built at import time.
# NOTE(review): indentation and statement grouping below are reconstructed
# from a collapsed source; confirm against the original file.
from kivy.config import Config
Config.set('graphics','resizable',0)
from kivy.app import App
from kivy.metrics import cm,dp,sp,Metrics
from kivy.uix.scrollview import ScrollView
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.checkbox import CheckBox
from random import shuffle, randrange, choice, choices
from kivy.storage.jsonstore import JsonStore
from kivy.uix.popup import Popup
from kivy.uix.actionbar import ActionBar,ActionPrevious,ActionView,ActionButton
from kivy.graphics import Color, Rectangle, BorderImage
from kivy.core.audio import SoundLoader
import time
from kivy.uix.image import Image,AsyncImage
from kivy.atlas import Atlas
from kivy.base import runTouchApp
from kivy.uix.dropdown import DropDown
from kivy.core.window import Window
# Config.set('graphics', 'width', '1080')
# Config.set('graphics', 'height', '2340')
from kivy.uix.accordion import Accordion, AccordionItem
from kivy.animation import Animation
from kivy.clock import Clock

'''Changelog: 1) Added Wrath of Ashardalon, 2) Changes to tools and deck changes, 3) Added Treasure Tokens, 4) Monster Tokens and level up system changed, 5) Sound effects added, 6) Much more... 7) Experimental TOEE/Ravenloft Added'''

# Default window resolution; may be overridden by settings.json below.
RESOLUTION_CHOICE = (1280, 720)

def get_resolution():
    # Load the saved resolution from settings.json into RESOLUTION_CHOICE;
    # silently keeps the default if the file or key is missing.
    try:
        global RESOLUTION_CHOICE
        preferences = JsonStore('settings.json')
        RESOLUTION_CHOICE = tuple(preferences.get('resolution')['resolution'])
    except:
        pass

#Get resolution from choice
get_resolution()

#Metrics/Window
# Scale density/fontscale buckets by window height.
Window.size = RESOLUTION_CHOICE
if Window.height <= 480:
    Metrics.density = 1
    Metrics.fontscale = .8
elif Window.height <= 720:
    Metrics.density = 1.5
    Metrics.fontscale = 0.8
elif Window.height <= 2040:
    Metrics.density = 2
    Metrics.fontscale = .9

#Change Resolution
def resolution_changer(instance,value):
    # DropDown on_select handler: record the chosen resolution and ask the
    # user (via a popup) to restart so it takes effect.
    # NOTE(review): references `mainbutton`, `save_preference` and
    # `DnDGenerator`, all defined later in the module -- resolved at call
    # time, but confirm DnDGenerator exists before this can fire.
    global RESOLUTION_CHOICE
    resolution_pop = Popup(title='Restart?', separator_color=[0, 100 / 255, 0, 1], title_size='18sp',title_align='center',
                           size_hint=(.4, .25),auto_dismiss=False)
    resolution_grid = BoxLayout(orientation='horizontal')
    resolution_grid_second = GridLayout(cols=1)
    yes_btn = Button(text='YES')
    no_btn = Button(text='NO')
    res_label = Label(text='To apply the changes you need to restart the application. Restart now?',
                      size_hint_y=None,markup=True,halign='left', valign='bottom')
    res_label.text_size = ((Window.width * .35), None)
    res_label.texture_update()
    res_label.height = res_label.texture_size[1]
    resolution_grid_second.add_widget(res_label)
    resolution_grid.add_widget(yes_btn)
    resolution_grid.add_widget(no_btn)
    resolution_grid_second.add_widget(resolution_grid)
    resolution_pop.content = resolution_grid_second
    resolution_pop.open()
    if value == '1920x1080':
        RESOLUTION_CHOICE = (1920,1080)
    if value == '1280x720':
        RESOLUTION_CHOICE = (1280, 720)
    if value == '720x480':
        RESOLUTION_CHOICE = (720, 480)
    mainbutton.text = 'Resolution: {}'.format(str(RESOLUTION_CHOICE[0]) + 'x' + str(RESOLUTION_CHOICE[1]))
    no_btn.bind(on_release=resolution_pop.dismiss)
    # YES saves preferences and stops the running app.
    yes_btn.bind(on_release=save_preference)
    yes_btn.bind(on_release=DnDGenerator().stop)

# Background images are shuffled so each launch shows a random one.
backImages_list = ['images/dnd_background.jpg', 'images/dnd_background_2.jpg', 'images/dnd_background_3.jpg']
shuffle(backImages_list)
fireworks = Image(source='images/fireworks.zip', allow_stretch=True, anim_delay=.1,
                  anim_loop=3, keep_ratio=True, keep_data=False)
win_sound = SoundLoader.load('music/win.ogg')
firework_sound = SoundLoader.load('music/fireworks.ogg')

#Theme Music
# theme = SoundLoader.load('theme.mp3')
# if theme:
#     print("Sound found at %s" % theme.source)
#     print("Sound is %.3f seconds" % theme.length)
#     theme.loop = True
#     theme.play()

#Settings Pop and Values
#Save Preferences
def save_preference(instance):
    # Persist music/autosave checkboxes and the chosen resolution to
    # settings.json, then close the settings popup.
    global RESOLUTION_CHOICE
    preferences = JsonStore('settings.json')
    preferences.put('music', check=check_music.active)
    preferences.put('save', check=check_save.active)
    preferences.put('resolution', resolution=RESOLUTION_CHOICE)
    settings.dismiss()

settings = Popup(title='Settings', separator_color=[0, 100 / 255, 0, 1], size_hint=(.5, .5),auto_dismiss=False)
grid_settings = GridLayout(cols=1)
grid_settings_second = GridLayout(cols=2,size=(settings.width, settings.height))
check_music = CheckBox(active=True)
check_save = CheckBox(active=False)

# Making a resolution button
resolution = DropDown()
resolutions = ['1920x1080', '1280x720', '720x480']
resolution_choice = Window.size
for item in resolutions:
    # When adding widgets, we need to specify the height manually
    # (disabling the size_hint_y) so the dropdown can calculate
    # the area it needs.
    btn = Button(text=item, size_hint_y=None, height=30)

    # for each button, attach a callback that will call the select() method
    # on the dropdown. We'll pass the text of the button as the data of the
    # selection.
    btn.bind(on_release=lambda btn: resolution.select(btn.text))

    # then add the button inside the dropdown
    resolution.add_widget(btn)

# create a big main button
mainbutton = Button(text='Resolution: {}'.format(str(Window.width) + 'x' + str(Window.height)),size_hint_y=None,height=60)

# show the dropdown menu when the main button is released
# note: all the bind() calls pass the instance of the caller (here, the
# mainbutton instance) as the first argument of the callback (here,
# dropdown.open.).
mainbutton.bind(on_release=resolution.open)

# one last thing, listen for the selection in the dropdown list and
# assign the data to the button text.
# resolution.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x))
resolution.bind(on_select=resolution_changer)
grid_settings.add_widget(mainbutton)
grid_settings_second.add_widget(Label(text='Autosave'))
grid_settings_second.add_widget(check_save)
grid_settings_second.add_widget(Label(text='Sound'))
grid_settings_second.add_widget(check_music)
#End
grid_settings.add_widget(grid_settings_second)
btn_ok =Button(text='SAVE SETTINGS',font_size='18sp', background_color=(1,0,0,1),size_hint_y=.15)
grid_settings.add_widget(btn_ok)
settings.content = grid_settings
btn_ok.bind(on_release=save_preference)

#Levels Pop
level_pop = Popup(title='Give a level to: ', title_size='18sp',title_align='center',separator_color=[0, 100 / 255, 0, 1], size_hint=(.4, .4))

#Choose heroes pop
prompt_heroes = Popup(title='Choose Heroes or don\'t for random heroes.(1-4)', title_size='18sp',title_align='center',separator_color=[0, 100 / 255, 0, 1], size_hint=(1, 1),auto_dismiss=False)
# prompt_heroes.content = BoxLayout(orientation='vertical')
gridforpop = GridLayout(cols=1)
prompt_heroes.content = gridforpop

#Help Pop and values
# NOTE(review): file handles from open(...).read() are never closed, and
# `help` shadows the builtin of the same name.
text = open('help_tab.txt', 'r').read()
help = Popup(title='Help', separator_color=[0, 100 / 255, 0, 1], size_hint=(.75, .75))
# help_text = TextInput(text=text, multiline=True, disabled=True, background_color=[0,0,0,0], disabled_foreground_color=[1,1,1,1], size_hint_y=None)
help_text = Label(text=text,size_hint_y=None,markup=True,halign='left', valign='bottom')
help_text.text_size = ((Window.width*.7), None)
help_text.texture_update()
help_text.height = help_text.texture_size[1]
some_scroll = ScrollView()
some_scroll.add_widget(help_text)
help.content = some_scroll

#Villain Manual Pop-up
villain = Popup(title='Villains Manual', separator_color=[0, 100 / 255, 0, 1], size_hint=(.75, .75), title_size= '16sp')
some_text = open('villains_tab_a.txt', 'r').read()
some_other_text = open('villains_tab_b.txt', 'r').read()
villain_grid = GridLayout(cols=2)
villain_label = Label(text=some_text,size_hint_y=None,markup=True,halign='left', valign='bottom')
villain_label.text_size = ((Window.width*.6)/2, None)
villain_label.texture_update()
villain_label.height = villain_label.texture_size[1]
villain_label_second = Label(text=some_other_text,size_hint_y=None,markup=True,halign='left', valign='bottom')
villain_label_second.text_size = ((Window.width*.6)/2, None)
villain_label_second.texture_update()
villain_label_second.height = villain_label_second.texture_size[1]
other_scroll = ScrollView()
other_scroll.add_widget(villain_label)
other_scroll_second = ScrollView()
other_scroll_second.add_widget(villain_label_second)
villain_grid.add_widget(other_scroll)
villain_grid.add_widget(other_scroll_second)
villain.content = villain_grid

#Choose Board games Pop-up
board_games = Popup(title='Select Board Games:', separator_color=[0, 100 / 255, 0, 1], size_hint=(.75, .75),title_align='center')
castle_raven = ToggleButton(text='Castle Ravenloft',background_color=(0,1,1,1))
wrath_ashard = ToggleButton(text='Wrath of Ashardalon',background_color=(0,1,1,1))
legend_drizzt = ToggleButton(text='The Legend of Drizzt',background_color=(0,1,1,1))
temple_evil = ToggleButton(text='Temple of Elemental Evil',background_color=(0,1,1,1))
tomb_an = ToggleButton(text='Tomb of Annihilation',background_color=(0,1,1,1),disabled=True)
mad_mage = ToggleButton(text='Waterdeep - Dungeon of the Mad Mage',background_color=(0,1,1,1))
grid_board = GridLayout(cols=1)
grid_second_board = GridLayout(cols=2,size=(board_games.width, board_games.height))
grid_second_board.add_widget(castle_raven)
grid_second_board.add_widget(wrath_ashard)
grid_second_board.add_widget(legend_drizzt)
grid_second_board.add_widget(temple_evil)
grid_second_board.add_widget(tomb_an)
grid_second_board.add_widget(mad_mage)
grid_board.add_widget(grid_second_board)
confirm_btn = Button(text='CONFIRM',font_size='18sp', background_color=(1,0,0,1),size_hint_y=.15)
grid_board.add_widget(confirm_btn)
board_games.content = grid_board

#Tools Pop-up
toolbox = Popup(title='Tools:', separator_color=[0, 100 / 255, 0, 1], size_hint=(.4, .4),title_align='center')
gridbig_tools = BoxLayout(orientation='vertical')
gridtools = GridLayout(cols=2)
toolbox.content = gridbig_tools

#Difficulty Pop-up
difficulty_sel = Popup(title='Select Difficulty:', separator_color=[0, 100 / 255, 0, 1], size_hint=(.4, .3),title_align='center')
grid_diff = GridLayout(cols=2)
normal_diff = Button(text='Normal',background_color=(0,1,1,1))
hard_diff = Button(text='Hard',background_color=(0,1,1,1))
grid_diff.add_widget(normal_diff)
grid_diff.add_widget(hard_diff)
difficulty_sel.content = grid_diff

#Load preferences
def load_preference():
    # Restore checkbox states from settings.json; silently ignores a missing
    # or malformed file.
    try:
        preferences = JsonStore('settings.json')
        check_music.active = preferences.get('music')['check']
        check_save.active = preferences.get('save')['check']
    except:
        pass

#Load
load_preference()

class MyUI(FloatLayout):
    # Root layout: background image, action bar, status labels/inputs and the
    # main campaign-control buttons.
    def __init__(self, **kwargs):
        super(MyUI, self).__init__(**kwargs)
        with self.canvas.before:
            Color(1, 1, 1, .6)  # green; colors range from 0-1 instead of 0-255
            self.rect = Rectangle(size=self.size, pos=self.pos,source=backImages_list[0])
        # Keep the background rectangle glued to the layout geometry.
        self.bind(pos=self.update_rect, size=self.update_rect)
        # ActionBar
        self.menubar = ActionBar(pos_hint={'top': 1})
        self.menuview = ActionView()
        self.menubar.add_widget(self.menuview)
        self.menuAcPrevious = ActionPrevious(id='ap',with_previous=False,title='Dungeons And Dragons Campaign Generator',size_hint_x=None,width=0,app_icon_width=.1)
        # self.menuAcPrevious.bind(on_release=test)
        self.menuview.add_widget(self.menuAcPrevious)
        self.settings = ActionButton(text='Settings')
        self.menuview.add_widget(self.settings)
        self.villain_manual = ActionButton(text='Villains Manual')
        self.menuview.add_widget(self.villain_manual)
        self.help = ActionButton(text='Help')
        self.menuview.add_widget(self.help)
        self.add_widget(self.menubar)
        # self.menuAcPrevious.clear_widgets()
        # Labels/Texts
        self.add_widget(Label(text='Tavern', size_hint=(.1, .05), pos_hint={'x': 0, 'y': .85}))
        # self.tavern = TextInput(multiline=False, disabled=True, size_hint=(.16, .05), pos_hint={'x': .02, 'y': .80},
        #                         disabled_foreground_color=[1,1,1,1], background_disabled_normal='',
        #                         font_size='14sp',background_color=[0,0,0,0])
        self.tavern = Label(size_hint=(None, None),width=Window.width*.16,height=Window.height*.05,
                            pos_hint={'x': .02, 'y': .8},font_size='14sp',valign='top')
        # self.tavern = Label(size_hint=(.1, .07), pos_hint={'x': .02, 'y': .8},font_size='14sp',valign='top')
        self.tavern.text_size = self.tavern.width, self.tavern.height
        self.add_widget(self.tavern)
        self.add_widget(Label(text='Heroes', size_hint=(.1, .05), pos_hint={'x': .15, 'y': .85}))
        # self.heroes = TextInput(multiline=True, size_hint=(.12, .3), pos_hint={'x': .15, 'y': .55}, disabled=True,
        #                         disabled_foreground_color=[1, 1, 1, 1], background_disabled_normal='',background_color=[0,0,0,0])
        self.heroes = Label(size_hint=(None, None), width=Window.width * .12, height=Window.height * .3,
                            pos_hint={'x': .15, 'y': .54}, font_size='15sp', valign='top')
        # self.heroes = Label(size_hint=(.1, .45), pos_hint={'x': .15, 'y': .54}, font_size='15sp', valign='top')
        self.heroes.text_size = self.heroes.width, self.heroes.height
        self.add_widget(self.heroes)
        self.add_widget(Label(text='Gold', size_hint=(.1, .05), pos_hint={'x': .36, 'y': .85}))
        self.gold = TextInput(multiline=True, size_hint=(.12, .26), pos_hint={'x': .36, 'y': .59},
                              input_filter='int',foreground_color=[1,1,1,1],background_color=[1,1,1,.1],font_size='15sp')
        self.add_widget(self.gold)
        self.add_widget(Label(text='LVL', size_hint=(.1, .05), pos_hint={'x': .23, 'y': .85}))
        # self.lvl = TextInput(multiline=True, size_hint=(.03, .3), pos_hint={'x': .26, 'y': .55}, input_filter='int',foreground_color=[1,1,1,1],background_color=[0,0,0,0],disabled=True,
        #                      disabled_foreground_color=[1, 1, 1, 1], background_disabled_normal='')
        self.lvl = Label(size_hint=(None, None), width=Window.width * .03, height=Window.height * .3,
                         pos_hint={'x': .26, 'y': .54}, valign='top', font_size='15sp')
        self.lvl.text_size = self.lvl.width, self.lvl.height
        self.add_widget(self.lvl)
        self.add_widget(Label(text='Items', size_hint=(.1, .05), pos_hint={'x': .48, 'y': .85}))
        self.itembox = TextInput(multiline=True, size_hint=(.12, .26), pos_hint={'x': .48, 'y': .59},
                                 foreground_color=[1,1,1,1],background_color=[1,1,1,.1],font_size='15sp')
        self.add_widget(self.itembox)
        self.add_widget(Label(text='Deck Changes', size_hint=(.1, .05), pos_hint={'x': .7, 'y': .85}))
        # self.deck_change = TextInput(multiline=True, size_hint=(.24, .45), pos_hint={'x': .64, 'y': .4},disabled=True,
        #                              disabled_foreground_color=[1, 1, 1, 1], background_disabled_normal='',
        #                              font_size='14sp',foreground_color=[1,1,1,1],background_color=[0,0,0,0])
        self.deck_change = Label(size_hint=(None, None), width=Window.width * .24, height=Window.height * .45,
                                 pos_hint={'x': .64, 'y': .4}, valign='top')
        self.deck_change.text_size = self.deck_change.width, self.deck_change.height
        self.add_widget(self.deck_change)
        self.add_widget(Label(text='Difficulty', size_hint=(.1, .05), pos_hint={'x': .88, 'y': .85}))
        # self.diff = TextInput(multiline=True, size_hint=(.05, .05), pos_hint={'x': .9, 'y': .78}, disabled=True,
        #                       disabled_foreground_color=[1, 1, 1, 1], background_disabled_normal='',foreground_color=[1,1,1,1],background_color=[0,0,0,0])
        self.diff = Label(size_hint=(None, None), width=Window.width * .05, height=Window.height * .05,
                          pos_hint={'x': .9, 'y': .79}, valign='top')
        self.diff.text_size = self.diff.width, self.diff.height
        self.add_widget(self.diff)
        self.add_widget(Label(text='Adventure No', size_hint=(.1, .05), pos_hint={'x': .02, 'y': .76}))
        # self.advno = TextInput(multiline=False, size_hint=(.05, .05), pos_hint={'x': .03, 'y': .71},disabled=True,
        #                        disabled_foreground_color=[1, 1, 1, 1], background_disabled_normal='',foreground_color=[1,1,1,1],background_color=[0,0,0,0])
        self.advno = Label(size_hint=(None, None), width=Window.width * .05, height=Window.height * .05,
                           pos_hint={'x': .03, 'y': .71}, valign='top')
        self.advno.text_size = self.advno.width, self.advno.height
        self.add_widget(self.advno)
        # self.advno.text = '1'
        # self.diff.text = '10'
        self.add_widget(
            Label(text='Adventure Case', size_hint=(.1, .05), pos_hint={'x': .1, 'y': .35}, font_size='14sp'))
        self.adv_case = Label(size_hint=(None, None), width=Window.width / 2, height=Window.height / 2.5,
                              pos_hint={'x': .05, 'y': .04}, font_size='18sp')
        self.adv_case.text_size = self.adv_case.width, self.adv_case.height - sp(10)
        self.add_widget(self.adv_case)
        self.add_widget(Label(text='Tiles', size_hint=(.1, .05), pos_hint={'x': .7, 'y': .35}))
        self.tiles = Label(size_hint=(None, None), width=Window.width / 2, height=Window.height / 3.8,
                           pos_hint={'x': .5, 'y': .15}, font_size='14sp')
        # NOTE(review): this reassigns adv_case.text_size rather than
        # tiles.text_size -- looks like a copy/paste slip; confirm intent.
        self.adv_case.text_size = self.adv_case.width, self.adv_case.height
        self.add_widget(self.tiles)
        # Buttons
        self.create = Button(text='Create Campaign', font_size='14sp', size_hint=(.15, .1), pos_hint={'x': .2, 'y': .4},
                             background_color=[70/255,70/255,70/255,.8],background_normal='')
        self.add_widget(self.create)
        self.create.bind(on_release=difficulty_sel.open)
        self.next_adv = Button(text='Next Adventure', font_size='14sp', size_hint=(.15, .1), pos_hint={'x': .45, 'y': .48},
                               background_normal='', background_color=[145/255, 23/255, 60/255, .8], disabled=True)
        self.next_adv.bind(on_release=self.next_adventure)
        self.saving = Button(text='Save', font_size='14sp', size_hint=(.1, .07), pos_hint={'x': .03, 'y': .5},
                             background_color=[0, .2, 1,.7], disabled=True, background_normal='')
        self.add_widget(self.saving)
        self.saving.bind(on_release=self.save_state)
        self.loading = Button(text='Load', font_size='14sp', size_hint=(.1, .07), pos_hint={'x': .03, 'y': .42},
                              background_color=[0, .2, 1, .7], background_normal='')
        self.add_widget(self.loading)
        self.loading.bind(on_release=self.load_state)
        # Monster Tokens
        self.monster_tokens
= Button(text='Monster Tokens', font_size='14sp', background_color=[.2, 1, .8, 1]) gridtools.add_widget(self.monster_tokens) self.monster_tokens.bind(on_release=self.monster_tokens_menu) # toolbox.bind(on_dismiss=self.restore_tools) # PopUp Button self.help.bind(on_release=help.open) self.settings.bind(on_release=settings.open) self.villain_manual.bind(on_release=villain.open) #Toolbox open self.tools = Button(text='Tools', font_size='14sp', size_hint=(.15, .1), pos_hint={'x': .45, 'y':
# Copyright (c) 2021, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import os import threading import time import unittest import mysqlx import tests @unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 25), "XPlugin not compatible") class CollectionAddTests(tests.MySQLxTests): """Tests for collection.find().""" def _drop_collection_if_exists(self, name): collection = self.schema.get_collection(name) if collection.exists_in_database(): self.schema.drop_collection(name) @tests.foreach_session() def test_collection_find1(self): """Test collection.find.fields.""" self._drop_collection_if_exists("mycoll5") collection = self.schema.create_collection("mycoll5") collection.add( {"_id": 1, "name": "a", "age": 21}, {"_id": 2, "name": "b"}, {"_id": 3, "name": "c"}, ).execute() result = ( collection.find().fields("sum($.age)").group_by("$.age").execute() ).fetch_all() self.assertEqual(len(result), 2) self.schema.drop_collection("mycoll5") @unittest.skip("TODO: Fix me") @tests.foreach_session() def test_collection_find2(self): """Test the collection.find.groupby and having.""" self._drop_collection_if_exists("mycoll6") collection = self.schema.create_collection("mycoll6") collection.add( {"_id": 1, "a": 1, "b": 2, "c": 100}, {"_id": 2, "a": 2, "b": 1, "c": 200}, {"_id": 3, "a": 3, "b": 2, "c": 300}, ).execute() result = ( collection.find() .fields("$.a, $.b") .group_by("$.b") .having("$.a > 1") .execute() ).fetch_all() self.assertEqual(len(result), 2) self.schema.drop_collection("mycoll6") @tests.foreach_session() def test_collection_find3(self): """Test collection.find with sort.""" self._drop_collection_if_exists("mycoll9") collection = self.schema.create_collection("mycoll9") collection.add( {"_id": 1, "a": 1, "b": 2, "c": 100}, {"_id": 2, "a": 2, "b": 1, "c": 200}, {"_id": 3, "a": 3, "b": 2, "c": 300}, ).execute() result = 
collection.find().fields("$.a, $b").sort("a DESC").execute() row = result.fetch_all() self.assertEqual(row[0]["$.a"], 3) self.schema.drop_collection("mycoll9") @tests.foreach_session() def test_collection_find4(self): """Test collection.find with limit with offset.""" self._drop_collection_if_exists("mycoll10") collection = self.schema.create_collection("mycoll10") collection.add( {"_id": 1, "a": 1, "b": 2, "c": 100}, {"_id": 2, "a": 2, "b": 1, "c": 200}, {"_id": 3, "a": 3, "b": 2, "c": 300}, ).execute() result = ( collection.find("$.a > 1") .fields("$.a") .limit(2) .offset(1) .execute() ) row = result.fetch_all() self.schema.drop_collection("mycoll10") @unittest.skip("TODO: Fix me") @tests.foreach_session() def test_collection_find5(self): """Test collection.find with like.""" self._drop_collection_if_exists("mycoll11") collection = self.schema.create_collection("mycoll11") collection.add( {"_id": 1, "name": "Sana"}, {"_id": 2, "name": "Sam"}, {"_id": 3, "name": "amr"}, ).execute() result = collection.find("$.name like S*").execute() row = result.fetch_all() self.assertEqual(len(row), 2) self.assertEqual(row[1]["name"], "Sam") self.schema.drop_collection("mycoll11") @tests.foreach_session() def test_collection_find6(self): """Test collection.find with bind.""" self._drop_collection_if_exists("mycoll11") collection = self.schema.create_collection("mycoll11") collection.add( {"_id": 1, "name": "Sana"}, {"_id": 2, "name": "Sam"}, {"_id": 3, "name": "amr"}, ).execute() result = ( collection.find("$.name == :name").bind("name", "Sana").execute() ) row = result.fetch_all()[0] self.assertEqual(row["_id"], 1) self.schema.drop_collection("mycoll11") @tests.foreach_session() def test_collection_find7(self): """Test collection.find with parameter list.""" self._drop_collection_if_exists("mycoll19") collection = self.schema.create_collection("mycoll19") collection.add( {"_id": 1, "name": "Sana"}, {"_id": 2, "name": "Sam"}, {"_id": 3, "name": "amr"}, ).execute() result = ( 
collection.find("$._id > 1").fields("$._id", "$.name").execute() ).fetch_all() self.assertEqual(len(result), 2) self.schema.drop_collection("mycoll19") @unittest.skipIf( tests.MYSQL_EXTERNAL_SERVER, "Test not available for external MySQL servers", ) @tests.foreach_session() def test_collection_find8(self): """Test collection.find.groupby with parameter list.""" self._drop_collection_if_exists("mycoll20") collection = self.schema.create_collection( "mycol20", ) collection.add( {"_id": 1, "a": 1, "b": 2, "c": 100}, {"_id": 2, "a": 2, "b": 2, "c": 300}, {"_id": 3, "a": 3, "b": 2, "c": 100}, ).execute() result = ( collection.find() .fields("$a,$.b,$.c") .group_by("$.b", "$.c") .execute() ).fetch_all() self.assertEqual(len(result), 2) self.schema.drop_collection("mycol20") @tests.foreach_session() def test_collection_find9(self): """Test collection.find.sort with param list.""" self._drop_collection_if_exists("mycoll21") collection = self.schema.create_collection("mycoll21") collection.add( {"_id": 1, "a": 1, "b": 10, "c": 100}, {"_id": 2, "a": 1, "b": 11, "c": 200}, {"_id": 3, "a": 2, "b": 10, "c": 300}, ).execute() result = ( collection.find() .fields("$.a, $.b") .sort("a ASC", "b DESC") .execute() ).fetch_all() self.assertEqual(result[0]["$.b"], 11) self.schema.drop_collection("mycoll21") @tests.foreach_session() def test_collection_find10(self): """Test collection.find using where() condition.""" self._drop_collection_if_exists("newcoll1") collection = self.schema.create_collection("newcoll1") collection.add( {"_id": 1, "a": 1, "b": 10, "c": 100}, {"_id": 2, "a": 1, "b": 11, "c": 200}, {"_id": 3, "a": 2, "b": 10, "c": 300}, ).execute() result = collection.find().where("$.c >= 200").execute() self.assertEqual(len(result.fetch_all()), 2) self.schema.drop_collection("newcoll1") @unittest.skipUnless( tests.ARCH_64BIT, "Test available only for 64 bit platforms" ) @unittest.skipIf(os.name == "nt", "Test not available for Windows") @tests.foreach_session() def 
test_collection_find11(self): """Test collection.find with offset as large positive number.""" self._drop_collection_if_exists("newcoll2") collection = self.schema.create_collection( "newcoll2", ) collection.add( {"_id": 1, "a": 1, "b": 10, "c": 100}, {"_id": 2, "a": 1, "b": 11, "c": 200}, {"_id": 3, "a": 2, "b": 10, "c": 300}, ).execute() result = collection.find().limit(2).offset(92898832378723).execute() self.assertEqual(len(result.fetch_all()), 0) self.schema.drop_collection("newcoll2") @tests.foreach_session() def test_collection_find12(self): """Test collection.find with offset as negative number.""" self._drop_collection_if_exists("mycoll3") collection = self.schema.create_collection("newcoll3") collection.add( {"_id": 1, "a": 1, "b": 10, "c": 100}, {"_id": 2, "a": 1, "b": 11, "c": 200}, {"_id": 3, "a": 2, "b": 10, "c": 300}, ).execute() self.assertRaises( ValueError, collection.find().limit(2).offset, -2378723, ) self.schema.drop_collection("newcoll3") @tests.foreach_session() def test_operator1(self): """Test binary operator and.""" self._drop_collection_if_exists("mycoll1") collection = self.schema.create_collection("mycoll1") collection.add( {"_id": 1, "name": "Sana"}, {"_id": 2, "name": "Sam"}, {"_id": 3, "name": "amr"}, ).execute() result = ( collection.find("$.name == :name and $._id == :id") .bind('{"name":"Sana" ,"id":1}') .execute() ) row = result.fetch_all()[0] self.assertEqual(row["name"], "Sana") self.schema.drop_collection("mycoll1") @tests.foreach_session() def test_operator4(self): """Test 'between' operator.""" self._drop_collection_if_exists("mycoll2") collection = self.schema.create_collection("mycoll2") collection.add( {"_id": 1, "name": "Sana"}, {"_id": 2, "name": "Sam"}, {"_id": 3, "name": "amr"}, {"_id": 4, "name": "abc"}, {"_id": 5, "name": "def"}, ).execute() result = collection.find("$._id between 2 and 4").execute() self.assertEqual(len(result.fetch_all()), 3) self.schema.drop_collection("mycoll2") # Testing the contains operator 
with single operand on both sides @tests.foreach_session() def test_contains_operator_test1(self): """Test IN operator with string on both sides - With LHS in RHS.""" self._drop_collection_if_exists("mycoll1") collection = self.schema.create_collection("mycoll1") collection.add({"name": "a"}, {"name": "b"}).execute() result = collection.find("'a' IN $.name").execute() self.assertEqual(len(result.fetch_all()), 1) self.schema.drop_collection("mycoll1") @tests.foreach_session() def test_contains_operator2(self): """Test IN operator with int as operand - With LHS in RHS.""" self._drop_collection_if_exists("mycoll2") collection = self.schema.create_collection("mycoll2") collection.add( {"name": "a", "age": 21}, {"name": "b", "age": 21} ).execute() result = collection.find("21 IN $.age").execute() self.assertEqual(len(result.fetch_all()), 2) self.schema.drop_collection("mycoll2") @unittest.skip("TODO: Fix me") @tests.foreach_session() def test_contains_operator3(self): """Test IN operator with boolean as operand - With LHS in RHS.""" self._drop_collection_if_exists("mycoll3") collection = self.schema.create_collection("mycoll3") collection.add( {"name": "a", "age": 21, "ARR": [1, 4]}, {"name": "b", "age": 21, "ARR": 2}, ).execute() result = collection.find("(!false && true) IN [true]").execute() self.assertEqual(len(result.fetch_all()), 2) self.schema.drop_collection("mycoll3") @tests.foreach_session() def test_contains_operator4(self): """Test NOT IN operator with string operand - With LHS not in RHS.""" self._drop_collection_if_exists("mycoll4") collection = self.schema.create_collection("mycoll4") collection.add({"name": "a"}, {"name": "b"}, {"name": "c"}).execute() result = collection.find("$.name NOT IN 'a'").execute() self.assertEqual(len(result.fetch_all()), 2) self.schema.drop_collection("mycoll4") @tests.foreach_session() def test_contains_operator5(self): """Test NOT IN operator with int as operand - With LHS not in RHS.""" 
self._drop_collection_if_exists("mycoll5") collection = self.schema.create_collection("mycoll5") collection.add( {"name": "a", "age": 21}, {"name": "b", "age": 22} ).execute() result = collection.find("21 NOT IN $.age").execute() self.assertEqual(len(result.fetch_all()), 1) self.schema.drop_collection("mycoll5") @tests.foreach_session() def test_contains_operator6(self): self._drop_collection_if_exists("mycoll6") collection = self.schema.create_collection("mycoll6") collection.add( {"name": "a", "age": 21}, {"name": "b", "age": 21} ).execute() result = collection.find("'b' NOT IN $.name").execute() self.assertEqual(len(result.fetch_all()), 1) self.schema.drop_collection("mycoll6") @tests.foreach_session() def test_contains_operator7(self): """Test IN operator with different datatypes as operands.""" self._drop_collection_if_exists("mycoll7") collection = self.schema.create_collection("mycoll7") collection.add( {"name": "a", "age": 21}, {"name": "b", "age": 22} ).execute() result = collection.find("21 IN $.name").execute() result1 = collection.find("'b' IN $.age").limit(1).execute() self.assertEqual(len(result.fetch_all()), 0) self.assertEqual(len(result1.fetch_all()), 0), self.schema.drop_collection("mycoll7") @tests.foreach_session() def test_contains_operator8(self): """Test IN operator with single element on LHS and array/list on RHS and vice versa.""" self._drop_collection_if_exists("mycoll8") collection = self.schema.create_collection("mycoll8") collection.add( {"_id": 1, "name": "a", "age": 21, "prof": ["x", "y"]}, {"_id": 2, "name": "b", "age": 24, "prof": ["p", "q"]}, {"_id": 3, "name": "c", "age": 26}, ).execute() result = collection.find("$.age IN [21,23,24,28]").execute() result1 = collection.find("$.name IN ['a','b','c','d','e']").execute() result2 = collection.find("$.age IN (21,23)").execute() result3 = ( collection.find() .fields("21 IN (22,23) as test") .limit(1) .execute() ) result4 = collection.find('["p","q"] IN $.prof').execute() 
self.assertEqual(len(result.fetch_all()), 2) self.assertEqual(len(result1.fetch_all()), 3) self.assertEqual(len(result2.fetch_all()), 1) self.assertEqual(result3.fetch_all()[0].test, False) self.assertEqual(len(result4.fetch_all()), 1) self.schema.drop_collection("mycoll8") @tests.foreach_session() def test_contains_operator9(self): """Test IN operator with single element on LHS and dict on RHS and vice versa."""
from __future__ import division import re import unittest import mock import pytest from ddtrace.constants import AUTO_KEEP from ddtrace.constants import AUTO_REJECT from ddtrace.constants import SAMPLE_RATE_METRIC_KEY from ddtrace.constants import SAMPLING_AGENT_DECISION from ddtrace.constants import SAMPLING_LIMIT_DECISION from ddtrace.constants import SAMPLING_PRIORITY_KEY from ddtrace.constants import SAMPLING_RULE_DECISION from ddtrace.internal.compat import iteritems from ddtrace.internal.rate_limiter import RateLimiter from ddtrace.sampler import AllSampler from ddtrace.sampler import DatadogSampler from ddtrace.sampler import RateByServiceSampler from ddtrace.sampler import RateSampler from ddtrace.sampler import SamplingRule from ddtrace.span import Span from ..utils import DummyTracer from ..utils import override_env @pytest.fixture def dummy_tracer(): return DummyTracer() def assert_sampling_decision_tags(span, agent=None, limit=None, rule=None): assert span.get_metric(SAMPLING_AGENT_DECISION) == agent assert span.get_metric(SAMPLING_LIMIT_DECISION) == limit assert span.get_metric(SAMPLING_RULE_DECISION) == rule def create_span(tracer=None, name="test.span", service=""): tracer = tracer or DummyTracer() span = tracer.trace(name=name, service=service) span.finish() return span class RateSamplerTest(unittest.TestCase): def test_set_sample_rate(self): sampler = RateSampler() assert sampler.sample_rate == 1.0 for rate in [0.001, 0.01, 0.1, 0.25, 0.5, 0.75, 0.99999999, 1.0, 1]: sampler.set_sample_rate(rate) assert sampler.sample_rate == float(rate) sampler.set_sample_rate(str(rate)) assert sampler.sample_rate == float(rate) def test_set_sample_rate_str(self): sampler = RateSampler() sampler.set_sample_rate("0.5") assert sampler.sample_rate == 0.5 def test_sample_rate_deviation(self): for sample_rate in [0.1, 0.25, 0.5, 1]: tracer = DummyTracer() tracer.sampler = RateSampler(sample_rate) iterations = int(1e4 / sample_rate) for i in range(iterations): span = 
tracer.trace(str(i)) span.finish() samples = tracer.pop() # We must have at least 1 sample, check that it has its sample rate properly assigned assert samples[0].get_metric(SAMPLE_RATE_METRIC_KEY) == sample_rate # Less than 5% deviation when 'enough' iterations (arbitrary, just check if it converges) deviation = abs(len(samples) - (iterations * sample_rate)) / (iterations * sample_rate) assert deviation < 0.05, "Deviation too high %f with sample_rate %f" % (deviation, sample_rate) def test_deterministic_behavior(self): """Test that for a given trace ID, the result is always the same""" tracer = DummyTracer() tracer.sampler = RateSampler(0.5) for i in range(10): span = tracer.trace(str(i)) span.finish() samples = tracer.pop() assert len(samples) <= 1, "there should be 0 or 1 spans" sampled = 1 == len(samples) for j in range(10): other_span = Span(tracer, str(i), trace_id=span.trace_id) assert sampled == tracer.sampler.sample( other_span ), "sampling should give the same result for a given trace_id" def test_negative_sample_rate_raises_error(self): tracer = DummyTracer() with pytest.raises(ValueError, match="sample_rate of -0.5 is negative"): tracer.sampler = RateSampler(sample_rate=-0.5) def test_sample_rate_0_does_not_reset_to_1(self): # Regression test for case where a sample rate of 0 caused the sample rate to be reset to 1 tracer = DummyTracer() tracer.sampler = RateSampler(sample_rate=0) assert tracer.sampler.sample_rate == 0 class RateByServiceSamplerTest(unittest.TestCase): def test_default_key(self): assert ( "service:,env:" == RateByServiceSampler._default_key ), "default key should correspond to no service and no env" def test_key(self): assert RateByServiceSampler._default_key == RateByServiceSampler._key() assert "service:mcnulty,env:" == RateByServiceSampler._key(service="mcnulty") assert "service:,env:test" == RateByServiceSampler._key(env="test") assert "service:mcnulty,env:test" == RateByServiceSampler._key(service="mcnulty", env="test") assert 
"service:mcnulty,env:test" == RateByServiceSampler._key("mcnulty", "test") def test_sample_rate_deviation(self): for sample_rate in [0.1, 0.25, 0.5, 1]: tracer = DummyTracer() writer = tracer.writer tracer.configure(sampler=AllSampler()) # We need to set the writer because tracer.configure overrides it, # indeed, as we enable priority sampling, we must ensure the writer # is priority sampling aware and pass it a reference on the # priority sampler to send the feedback it gets from the agent assert writer is not tracer.writer, "writer should have been updated by configure" tracer.priority_sampler.set_sample_rate(sample_rate) iterations = int(1e4 / sample_rate) for i in range(iterations): span = tracer.trace(str(i)) span.finish() samples = tracer.writer.pop() samples_with_high_priority = 0 for sample in samples: if sample.get_metric(SAMPLING_PRIORITY_KEY) is not None: if sample.get_metric(SAMPLING_PRIORITY_KEY) > 0: samples_with_high_priority += 1 else: assert 0 == sample.get_metric( SAMPLING_PRIORITY_KEY ), "when priority sampling is on, priority should be 0 when trace is to be dropped" assert_sampling_decision_tags(sample, agent=sample_rate) # We must have at least 1 sample, check that it has its sample rate properly assigned assert samples[0].get_metric(SAMPLE_RATE_METRIC_KEY) is None # Less than 5% deviation when 'enough' iterations (arbitrary, just check if it converges) deviation = abs(samples_with_high_priority - (iterations * sample_rate)) / (iterations * sample_rate) assert deviation < 0.05, "Deviation too high %f with sample_rate %f" % (deviation, sample_rate) def test_update_rate_by_service_sample_rates(self): cases = [ { "service:,env:": 1, }, { "service:,env:": 1, "service:mcnulty,env:dev": 0.33, "service:postgres,env:dev": 0.7, }, { "service:,env:": 1, "service:mcnulty,env:dev": 0.25, "service:postgres,env:dev": 0.5, "service:redis,env:prod": 0.75, }, ] tracer = DummyTracer() tracer.configure(sampler=AllSampler()) priority_sampler = 
tracer.priority_sampler for case in cases: priority_sampler.update_rate_by_service_sample_rates(case) rates = {} for k, v in iteritems(priority_sampler._by_service_samplers): rates[k] = v.sample_rate assert case == rates, "%s != %s" % (case, rates) # It's important to also test in reverse mode for we want to make sure key deletion # works as well as key insertion (and doing this both ways ensures we trigger both cases) cases.reverse() for case in cases: priority_sampler.update_rate_by_service_sample_rates(case) rates = {} for k, v in iteritems(priority_sampler._by_service_samplers): rates[k] = v.sample_rate assert case == rates, "%s != %s" % (case, rates) @pytest.mark.parametrize( "sample_rate,allowed", [ # Min/max allowed values (0.0, True), (1.0, True), # Accepted boundaries (0.000001, True), (0.999999, True), # Outside the bounds (-0.000000001, False), (1.0000000001, False), ] + [ # Try a bunch of decimal values between 0 and 1 (1 / i, True) for i in range(1, 50) ] + [ # Try a bunch of decimal values less than 0 (-(1 / i), False) for i in range(1, 50) ] + [ # Try a bunch of decimal values greater than 1 (1 + (1 / i), False) for i in range(1, 50) ], ) def test_sampling_rule_init_sample_rate(sample_rate, allowed): if allowed: rule = SamplingRule(sample_rate=sample_rate) assert rule.sample_rate == sample_rate else: with pytest.raises(ValueError): SamplingRule(sample_rate=sample_rate) def test_sampling_rule_init_defaults(): rule = SamplingRule(sample_rate=1.0) assert rule.sample_rate == 1.0 assert rule.service == SamplingRule.NO_RULE assert rule.name == SamplingRule.NO_RULE def test_sampling_rule_init(): name_regex = re.compile(r"\.request$") rule = SamplingRule( sample_rate=0.0, # Value service="my-service", # Regex name=name_regex, ) assert rule.sample_rate == 0.0 assert rule.service == "my-service" assert rule.name == name_regex def test_sampling_rule_init_via_env(): # Testing single sampling rule with 
override_env(dict(DD_TRACE_SAMPLING_RULES='[{"sample_rate":1.0,"service":"xyz","name":"abc"}]')): sampling_rule = DatadogSampler().rules assert sampling_rule[0].sample_rate == 1.0 assert sampling_rule[0].service == "xyz" assert sampling_rule[0].name == "abc" assert len(sampling_rule) == 1 # Testing multiple sampling rules with override_env( dict( DD_TRACE_SAMPLING_RULES='[{"sample_rate":1.0,"service":"xyz","name":"abc"}, \ {"sample_rate":0.5,"service":"my-service","name":"my-name"}]' ) ): sampling_rule = DatadogSampler().rules assert sampling_rule[0].sample_rate == 1.0 assert sampling_rule[0].service == "xyz" assert sampling_rule[0].name == "abc" assert sampling_rule[1].sample_rate == 0.5 assert sampling_rule[1].service == "my-service" assert sampling_rule[1].name == "my-name" assert len(sampling_rule) == 2 # Testing for only Sample rate being set with override_env(dict(DD_TRACE_SAMPLING_RULES='[{"sample_rate":1.0}]')): sampling_rule = DatadogSampler().rules assert sampling_rule[0].sample_rate == 1.0 assert sampling_rule[0].service == SamplingRule.NO_RULE assert sampling_rule[0].name == SamplingRule.NO_RULE assert len(sampling_rule) == 1 # Testing for no name being set with override_env(dict(DD_TRACE_SAMPLING_RULES='[{"sample_rate":1.0,"service":"xyz"}]')): sampling_rule = DatadogSampler().rules assert sampling_rule[0].sample_rate == 1.0 assert sampling_rule[0].service == "xyz" assert sampling_rule[0].name == SamplingRule.NO_RULE assert len(sampling_rule) == 1 # Testing for no service being set with override_env(dict(DD_TRACE_SAMPLING_RULES='[{"sample_rate":1.0,"name":"abc"}]')): sampling_rule = DatadogSampler().rules assert sampling_rule[0].sample_rate == 1.0 assert sampling_rule[0].service == SamplingRule.NO_RULE assert sampling_rule[0].name == "abc" assert len(sampling_rule) == 1 # The Following error handling test use assertions on the json items instead of asserting on # the returned stringdue to older version of python not keeping load order in dictionaires # 
Testing for Sample rate greater than 1.0 with pytest.raises(ValueError) as excinfo: with override_env(dict(DD_TRACE_SAMPLING_RULES='[{"sample_rate":2.0,"service":"xyz","name":"abc"}]')): sampling_rule = DatadogSampler().rules assert str(excinfo.value).endswith( "SamplingRule(sample_rate=2.0) must be greater than or equal to 0.0 and less than or equal to 1.0" ) assert '"sample_rate": 2.0' in str(excinfo.value) assert '"service": "xyz"' in str(excinfo.value) assert '"name": "abc"' in str(excinfo.value) # Testing for no Sample rate with pytest.raises(KeyError) as excinfo: with override_env(dict(DD_TRACE_SAMPLING_RULES='[{"service":"xyz","name":"abc"}]')): sampling_rule = DatadogSampler().rules assert str(excinfo.value).startswith("'No sample_rate provided for sampling rule: ") assert '"service": "xyz"' in str(excinfo.value) assert '"name": "abc"' in str(excinfo.value) # Testing for Invalid JSON with pytest.raises(ValueError) as excinfo: with override_env(dict(DD_TRACE_SAMPLING_RULES='["sample_rate":1.0,"service":"xyz","name":"abc"]')): sampling_rule = DatadogSampler().rules assert 'Unable to parse DD_TRACE_SAMPLING_RULES=["sample_rate":1.0,"service":"xyz","name":"abc"]' == str( excinfo.value ) # # Testing invalid rule with multiple rules defined with pytest.raises(KeyError) as excinfo: with override_env( dict( DD_TRACE_SAMPLING_RULES='[{"sample_rate":1.0,"service":"xyz","name":"abc"},' + '{"service":"my-service","name":"my-name"}]' ) ): sampling_rule = DatadogSampler().rules assert str(excinfo.value).startswith("'No sample_rate provided for sampling rule: ") assert '"service": "my-service"' in str(excinfo.value) assert '"name": "my-name"' in str(excinfo.value) @pytest.mark.parametrize( "span,rule,expected", [ # DEV: Use sample_rate=1 to ensure SamplingRule._sample always returns True (create_span(name=name), SamplingRule(sample_rate=1, name=pattern), expected) for name, pattern, expected in [ ("test.span", SamplingRule.NO_RULE, True), # DEV: `span.name` cannot be 
`None` ("test.span", None, False), ("test.span", "test.span", True), ("test.span", "test_span", False), ("test.span", re.compile(r"^test\.span$"), True), ("test_span", re.compile(r"^test.span$"), True), ("test.span", re.compile(r"^test_span$"), False), ("test.span", re.compile(r"test"), True), ("test.span", re.compile(r"test\.span|another\.span"), True), ("another.span", re.compile(r"test\.span|another\.span"), True), ("test.span", lambda name: "span" in name, True), ("test.span", lambda name: "span" not in name, False), ("test.span", lambda name: 1 / 0, False), ] ], ) def test_sampling_rule_matches_name(span, rule, expected):
import math

import numpy as np
import pandas as pd

from analysis.osu.mania.map_data import ManiaMapData
from osu.local.hitobject.mania.mania import Mania
from misc.numpy_utils import NumpyUtils


class ManiaActionData():
    """Converts mania beatmap hitobjects and replay key events into per-column
    finger action-state data, and provides helpers for querying that data.

    Action data is a ``pandas.DataFrame`` indexed by time (ms) with one column
    per key; each cell holds one of the action-state constants below.
    """

    FREE    = 0  # Finger free to float
    PRESS   = 1  # Finger must impart force to press key
    HOLD    = 2  # Finger must keep imparting force to keep key down
    RELEASE = 3  # Finger must depart force to unpress key

    @staticmethod
    def get_map_data(hitobjects, min_press_duration=1):
        """Builds action data from beatmap hitobjects.

        Parameters
        ----------
        hitobjects : list
            Hitobjects to process via ``ManiaMapData.get_hitobject_data``
        min_press_duration : int
            Minimum press duration in ms. Single (non-hold) notes are given
            this press length so every PRESS has a matching RELEASE.

        Returns
        -------
        pandas.DataFrame
            Rows are unique timings, columns are keys, cells are action states::

                [
                    [ col1_state, col2_state, ..., colN_state ],
                    [ col1_state, col2_state, ..., colN_state ],
                    ...
                ]
        """
        # It's easier to deal with start and end timings
        hitobject_data = ManiaMapData.get_hitobject_data(hitobjects)
        num_columns = len(hitobject_data)

        # Record data via dictionary to identify unique timings
        action_data = {}

        for col in range(num_columns):
            for hitobject in hitobject_data[col]:
                # Extract note timings
                note_start = hitobject[0]
                note_end   = hitobject[1]

                # Adjust note ending based on whether it is a single or hold
                # note (determined via min_press_duration)
                if note_end - note_start < min_press_duration:
                    note_end = note_start + min_press_duration

                # Record press state for this column at the note's start.
                # NOTE(review): if a RELEASE and a PRESS land on the same
                # timing in the same column, the states add up (3 + 1 = 4);
                # preserved from the original behavior — confirm upstream
                # timings never coincide like that.
                if note_start not in action_data:
                    action_data[note_start] = np.zeros(num_columns)
                action_data[note_start][col] += ManiaActionData.PRESS

                # Record release state for this column at the note's end
                if note_end not in action_data:
                    action_data[note_end] = np.zeros(num_columns)
                action_data[note_end][col] += ManiaActionData.RELEASE

        # Sort data by timings
        action_data = dict(sorted(action_data.items()))

        # Convert the dictionary of recorded timings and states into a pandas
        # DataFrame indexed by time
        action_data = pd.DataFrame.from_dict(action_data, orient='index')
        action_data.index.name = 'time'

        # Fill in HOLD data between PRESS and RELEASE
        action_data = ManiaActionData.fill_holds(action_data)
        return action_data

    @staticmethod
    def get_replay_data(replay_events, cols):
        """Builds action data from replay key events.

        Parameters
        ----------
        replay_events : list
            Replay events; each event carries a time ``t`` and a key bitfield
            in ``x`` (bit i set = key i is down)
        cols : int
            Number of keys

        Returns
        -------
        pandas.DataFrame
            Rows are unique timings, columns are keys, cells are action states::

                [
                    [ col1_state, col2_state, ..., colN_state ],
                    [ col1_state, col2_state, ..., colN_state ],
                    ...
                ]
        """
        cols = int(cols)

        # Record data via dictionary to identify unique timings
        replay_data = {}

        # Previous state of whether finger is holding key down
        hold_state = np.zeros(cols, dtype=bool)

        for replay_event in replay_events:
            # Decode per-key "is held" flags from the event's bitfield
            is_key_hold = np.asarray([ ((int(replay_event.x) & (1 << col)) > 0) for col in range(cols) ])

            # No change in key state -> nothing to record for this event
            if np.equal(hold_state, is_key_hold).all():
                continue

            # Derive the per-key transition from (previous, current) states
            data = np.full(cols, ManiaActionData.FREE)
            data[~hold_state &  is_key_hold] = ManiaActionData.PRESS
            data[ hold_state &  is_key_hold] = ManiaActionData.HOLD
            data[ hold_state & ~is_key_hold] = ManiaActionData.RELEASE
            replay_data[replay_event.t] = data

            hold_state = is_key_hold

        # Sort data by timings
        replay_data = dict(sorted(replay_data.items()))

        # Convert the dictionary of recorded timings and states into a pandas
        # DataFrame indexed by time
        replay_data = pd.DataFrame.from_dict(replay_data, orient='index')
        replay_data.index.name = 'time'
        return replay_data

    @staticmethod
    def num_keys(action_data):
        """Gets the number of keys according to the given ``action_data``.

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``

        Returns
        -------
        int
            Number of keys
        """
        return action_data.shape[1]

    @staticmethod
    def press_times(action_data, col):
        """Gets press timings in ``action_data`` for column ``col``.

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``
        col : int
            Column to get timings for

        Returns
        -------
        pandas.Index
            Press timings
        """
        return action_data[col].index[action_data[col] == ManiaActionData.PRESS]

    @staticmethod
    def release_times(action_data, col):
        """Gets release timings in ``action_data`` for column ``col``.

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``
        col : int
            Column to get timings for

        Returns
        -------
        pandas.Index
            Release timings
        """
        return action_data[col].index[action_data[col] == ManiaActionData.RELEASE]

    @staticmethod
    def filter_free(action_data):
        """Removes timings that have no actions in any column (anything but FREE).

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``

        Returns
        -------
        pandas.DataFrame
            Filtered ``action_data``
        """
        return action_data[~np.all(action_data == ManiaActionData.FREE, 1)]

    @staticmethod
    def fill_holds(action_data):
        """Fills HOLD press states where they need to be.

        For example, FREE entries between where PRESS and RELEASE occur are
        replaced with HOLD. Also validates the press/release pairing.

        Thanks: DeltaEpsilon#7787

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``

        Returns
        -------
        pandas.DataFrame
            The ``action_data`` with holds filled in (also modified in place)

        Raises
        ------
        ValueError
            If a PRESS occurs while a hold is already active, or a RELEASE
            occurs without a preceding PRESS
        """
        data = action_data.values.T
        for col in range(len(data)):
            hold_flag = False
            for row in range(len(data[col])):
                elem = data[col, row]
                if hold_flag:
                    if elem == ManiaActionData.PRESS:
                        raise ValueError(f'Two consecutive hold starts: ({col}, {row})')
                    elif elem == ManiaActionData.RELEASE:
                        hold_flag = False
                    elif elem == ManiaActionData.FREE:
                        data[col, row] = ManiaActionData.HOLD
                else:
                    if elem == ManiaActionData.PRESS:
                        hold_flag = True
                    elif elem == ManiaActionData.RELEASE:
                        raise ValueError(f'Hold ended before it started: ({col}, {row})')

        # ``DataFrame.values`` may return a copy rather than a view (mixed
        # dtypes, pandas copy-on-write), so write the result back explicitly
        # instead of relying on in-place mutation through the array. Also
        # return the frame — the original returned None despite its docstring
        # promising a return value.
        action_data.loc[:, :] = data.T
        return action_data

    @staticmethod
    def split_by_hand(action_data, left_handed=True):
        """Splits ``action_data`` into left and right hands.

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``
        left_handed : bool
            Whether to prefer splitting the middle key of an odd key count to
            the left hand (``True``) or the right hand (``False``)

        Returns
        -------
        (pandas.DataFrame, pandas.DataFrame)
            A tuple of ``action_data`` for left hand and right hand
        """
        # Assumes columns are labeled 0..keys-1 (as produced by the builders
        # above); ``.loc`` slicing is label-based and inclusive of the bound.
        keys = action_data.shape[1]
        left_half = math.ceil(keys / 2) if left_handed else math.floor(keys / 2)
        return action_data.loc[:, :left_half - 1], action_data.loc[:, left_half:]

    @staticmethod
    def mask_actions(action_data, actions, index_start=None, index_end=None, filter_free=False):
        """Masks ``action_data`` between ``index_start`` and ``index_end``.

        If ``filter_free`` is ``True``, also filters out entries in the range
        where there are no actions occurring.

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``
        actions : list
            A list of actions to mask (a single action is also accepted)
        index_start : int
            Starting index of data in action data to mask actions for.
            ``None`` by default.
        index_end : int
            Ending index of data in action data to mask actions for.
            ``None`` by default.
        filter_free : bool
            Whether to filter out entries where no actions occur.
            Doesn't filter by default.

        Returns
        -------
        pandas.DataFrame
            ``masked_action_data`` boolean mask of the actions specified
        """
        if not isinstance(actions, list):
            actions = [ actions ]

        masked_action_data = action_data.loc[index_start : index_end].isin(actions)
        if filter_free:
            masked_action_data = ManiaActionData.filter_free(masked_action_data)
        return masked_action_data

    @staticmethod
    def count_actions(action_data, actions, index_start=None, index_end=None):
        """Counts specified ``actions`` between ``index_start`` and
        ``index_end`` throughout all timings.

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``
        actions : list
            A list of actions to count
        index_start : int
            Starting index of data in action data to count actions for.
            ``None`` by default.
        index_end : int
            Ending index of data in action data to count actions for.
            ``None`` by default.

        Returns
        -------
        int
            ``action_count`` number of matching actions in the data
        """
        action_mask = ManiaActionData.mask_actions(action_data, actions, index_start, index_end).to_numpy()
        return np.sum(action_mask)

    '''
    @staticmethod
    def count_actions_per_timing(action_data, actions, index_start=None, index_end=None):
        """
        Gets number of specified ``actions`` between ``index_start`` and ``index_end`` per timing.

        Parameters
        ----------
        action_data : numpy.array
            Action data from ``ManiaActionData.get_action_data``

        index_start : int
            Starting index of data in action data in which to count number of actions for. ``None`` by default.

        index_end : int
            Ending index of data in action data in which to count number of actions for. ``None`` by default

        actions : list
            A list of actions which to count

        Returns
        -------
        numpy.array
            ``action_data_count`` Action data representing number of actions specified for each entry
        """
        action_mask = ManiaActionData.mask_actions(action_data, actions, index_start, index_end)
        count = np.zeros(action_mask[:, :2].shape)
        count[:, 0] = action_mask[:, 0]
        count[:, 1] = np.sum(count[:, 1:], axis=1)
        return count
    '''

    @staticmethod
    def get_actions_between(action_data, ms_start, ms_end):
        """Gets a slice of ``action_data`` between ``ms_start`` and
        ``ms_end``, inclusively.

        Parameters
        ----------
        action_data : pandas.DataFrame
            Action data from ``get_map_data`` or ``get_replay_data``
        ms_start : int
            Starting time in milliseconds of data in action data to get
            actions for
        ms_end : int
            Ending time in milliseconds of data in action data to get
            actions for

        Returns
        -------
        pandas.DataFrame
            ``action_data`` slice of data between the times specified
        """
        return action_data.loc[ms_start : ms_end]
---------- action_data : numpy.array Action data from ``ManiaActionData.get_action_data`` index_start : int Starting index to look actions for index_start : int Ending index to look actions for actions : list A list of actions to look for columns : list A list of columns to look at, where first column is 1, second column is 2, etc Returns ------- bool ``found_actions`` Whether the ``actions`` have been found """ if type(columns) != list: columns =
# create filter options argument group group = optparse.OptionGroup(p, "Filter Options") group.add_option('--uuid', '-u', type="string", action="append", help="Service UUID(s) to match", metavar="UUID") group.add_option('--mac', '-m', type="string", action="append", help="MAC address(es) to match format :: xxXXxxXXxx", metavar="ADDRESS") group.add_option('--instr', '-S', type="string", action="append", help="Search match 'Payload data' for match ::format :: xxXXxxXXxx", metavar="payload_instr") group.add_option('--byte', '-y', type="string", action="append", help="Select single byte from 'Payload data' when there is a match from '--instr search' '--byte=byte_position ' output out in byte column , !! first character position is Zero !! ", metavar="byte_position") group.add_option('--rssi', '-R', type="int", help="RSSI minimum filter (-110 to -20), omit to disable", metavar="RSSI") group.add_option('--install', '-I' ,action="store_true", help="Guide of how to install ") p.add_option_group(group) # create output options argument group group = optparse.OptionGroup(p, "Output Options") group.add_option('--switch', '-Z', action="store_true", help="If options'--instr search' and '--byte=byte_position ' selected. 
Put byte value in RSSI column") group.add_option('--quiet', '-q', action="store_true", help="Quiet mode (suppress initial scan parameter display)") group.add_option('--time_in_ms', '-z', action="store_true", help="time_in_ms (Display time in milliseconds)") group.add_option('--csv', '-k', action="store_true", help="CVS mode (If options -q and -f are set output in direclty excel csv file friendly format)") group.add_option('--comma', '-c', action="store_true", help="Comma mode (If options -q and -f are set output in basic excel csv file not friendly format)") group.add_option('--plot', '-x', action="store_true", help="Plot mode , If options '-q -f -c -x --time_in_ms -d tr' are set use live plot graph of rssi verses time )") group.add_option('--plotbyte', '-X', action="store_true", help="Plot mode , If options '-q -f -c -x --time_in_ms -d tr' are set use live plot graph of payload selected byte verses time )") group.add_option('--friendly', '-f', action="store_true", help="Friendly mode (output in human-readable format)") group.add_option('--display', '-d', type="string", help="Display fields and order (default '%default')\n" " t = Unix time, with milliseconds\n" " r = RSSI measurement (signed integer)\n" " p = Packet type (0 = normal, 4 = scan response)\n" " s = Sender MAC address (hexadecimal)\n" " a = Address type (0 = public, 1 = random)\n" " b = Bonding status (255 = no bond, else bond handle)\n" " d = Advertisement data payload (hexadecimal)", metavar="FIELDS") p.add_option_group(group) # actually parse all of the arguments options, arguments = p.parse_args() # validate any supplied MAC address filters for arg in options.mac: if re.search('[^a-fA-F0-9:]', arg): p.print_help() print("\n================================================================") print("Invalid MAC filter argument '%s'\n-->must be in the form AA:BB:CC:DD:EE:FF" % arg) print("================================================================") exit(1) arg2 = arg.replace(":", "").upper() if 
(len(arg2) % 2) == 1: p.print_help() print("\n================================================================") print("Invalid MAC filter argument '%s'\n--> must be 1-6 full bytes in 0-padded hex form (00:01:02:03:04:05)" % arg) print("================================================================") exit(1) mac = [] for i in range(0, len(arg2), 2): mac.append(int(arg2[i : i + 2], 16)) filter_mac.append(mac) # validate any supplied UUID filters for arg in options.uuid: if re.search('[^a-fA-F0-9:]', arg): p.print_help() print("\n================================================================") print("Invalid UUID filter argument '%s'\n--> must be 2 or 16 full bytes in 0-padded hex form (180B or 0123456789abcdef0123456789abcdef)" % arg) print("================================================================") exit(1) arg2 = arg.replace(":", "").upper() if len(arg2) != 4 and len(arg2) != 32: p.print_help() print("\n================================================================") print("Invalid UUID filter argument '%s'\n--> must be 2 or 16 full bytes in 0-padded hex form (180B or 0123456789abcdef0123456789abcdef)" % arg) print("================================================================") exit(1) uuid = [] for i in range(0, len(arg2), 2): uuid.append(int(arg2[i : i + 2], 16)) filter_uuid.append(uuid) # validate RSSI filter argument filter_rssi = abs(int(options.rssi)) if filter_rssi > 0 and (filter_rssi < 20 or filter_rssi > 110): p.print_help() print("\n================================================================") print("Invalid RSSI filter argument '%s'\n--> must be between 20 and 110" % filter_rssi) print("================================================================") exit(1) # validate field output options options.display = options.display.lower() if re.search('[^trpsabd]', options.display): p.print_help() print("\n================================================================") print("Invalid display options '%s'\n--> must be some combination 
of 't', 'r', 'p', 's', 'a', 'b', 'd'" % options.display) print("================================================================") exit(1) if options.install : print("================================================================") print("Install for BLED112 Scanner for Python v%s" % __version__) print("================================================================") print(" ") Print("Program is designed to use Activestate ActivePython and not regular Python from www.python.org ") print(" ") print(" Go to https://www.activestate.com and download and install the latest version of activepython for your operating system ") print(" ") print(" Once ActivePython is install in a command window shell type the follow") print(" ") print(" pip3 install pyserial future pandas matplotlib ") print(" ") exit(2) # display scan parameter summary, if not in quiet mode if not(options.quiet) : print("================================================================") print("BLED112 Scanner for Python v%s" % __version__) print("================================================================") #p.set_defaults(port="/dev/ttyACM0", baud=115200, interval=0xC8, window=0xC8, display="trpsabd", uuid=[], mac=[], rssi=0, active=False, quiet=False, friendly=False) print("Serial port:\t%s" % options.port) print("Baud rate:\t%s" % options.baud) print("Scan interval:\t%d (%.02f ms)" % (options.interval, options.interval * 1.25)) print("Scan window:\t%d (%.02f ms)" % (options.window, options.window * 1.25)) print("Scan type:\t%s" % ['Passive', 'Active'][options.active]) print("UUID filters:\t",) if len(filter_uuid) > 0: print("0x%s" % ", 0x".join([''.join(['%02X' % b for b in uuid]) for uuid in filter_uuid])) else: print("None") print("MAC filter(s):\t",) if len(filter_mac) > 0: print(", ".join([':'.join(['%02X' % b for b in mac]) for mac in filter_mac])) else: print("None") print("RSSI filter:\t",) if filter_rssi > 0: print("-%d dBm minimum"% filter_rssi) else: print("None") print("Display 
fields:\t-",) field_dict = { 't':'Time', 'r':'RSSI', 'p':'Packet type', 's':'Sender MAC', 'a':'Address type', 'b':'Bond status', 'd':'Payload data' } print("\n\t\t- ".join([field_dict[c] for c in options.display])) print("Friendly mode:\t%s" % ['Disabled', 'Enabled'][options.friendly]) print("----------------------------------------------------------------") print("Starting scan for BLE advertisements...") # open serial port for BGAPI access try: ser = serial.Serial(port=options.port, baudrate=options.baud, timeout=1) except serial.SerialException as e: print("\n================================================================") print("Port error (name='%s', baud='%ld'): %s" % (options.port, options.baud, e)) print("================================================================") exit(2) #========================================================================================================= # # Make initial communications with the BLE112 USB device and set up the comms process # #========================================================================================================= # flush buffers #print "Flushing serial I/O buffers..." ser.flushInput() ser.flushOutput() # disconnect if we are connected already #print "Disconnecting if connected..." ble_cmd_connection_disconnect(ser, 0) response = ser.read(7) # 7-byte response #for b in response: print '%02X' % ord(b), # stop advertising if we are advertising already #print "Exiting advertising mode if advertising..." ble_cmd_gap_set_mode(ser, 0, 0) response = ser.read(6) # 6-byte response #for b in response: print '%02X' % ord(b), # stop scanning if we are scanning already #print "Exiting scanning mode if scanning..." ble_cmd_gap_end_procedure(ser) response = ser.read(6) # 6-byte response #for b in response: print '%02X' % ord(b), # set scan parameters #print "Setting scanning parameters..." 
ble_cmd_gap_set_scan_parameters(ser, options.interval, options.window, options.active) response = ser.read(6) # 6-byte response #for b in response: print '%02X' % ord(b), # start scanning now #print "Entering scanning mode for general discoverable..." ble_cmd_gap_discover(ser, 1) #========================================================================================================= #========================================================================================================= if not(options.byte) : if options.quiet and options.friendly and options.csv and options.time_in_ms : print("\"Time_in_Milliseconds\";\"RSSI\";\"Packet_type\";\"Sender_MAC\";\"Address_type\";\"Bond_status\";\"Payload_status\"") if options.quiet and options.friendly and options.csv and not(options.time_in_ms) : print("\"Time\";\"RSSI\";\"Packet_type\";\"Sender_MAC\";\"Address_type\";\"Bond_status\";\"Payload_status\"") if options.quiet and options.friendly and options.csv and options.time_in_ms and options.instr and options.byte : print("\"Time_in_Milliseconds\";\"RSSI\";\"Packet_type\";\"Sender_MAC\";\"Address_type\";\"Bond_status\";\"Payload_status\";\"Selected_Byte(Dec)\"") if options.quiet and options.friendly and options.csv and not(options.time_in_ms) and options.instr and options.byte : print("\"Time\";\"RSSI\";\"Packet_type\";\"Sender_MAC\";\"Address_type\";\"Bond_status\";\"Payload_status\";\"Selected_Byte(Dec) \"") if options.instr : instr_search = str(options.instr) instr_search=instr_search[2:len(instr_search)] # Original "['FFFE96B6E511']" strip and remove [' '] bits instr_search=instr_search[0:(len(instr_search)-2)] else : instr_search = "" if options.byte and (len(str(options.byte)) > 4) : byte_str=(str(options.byte))[2:len((str(options.byte)))] byte_str=byte_str[0:(len(byte_str)-2)] byte_position_=abs(int(byte_str)) byte_position_=(byte_position_ -1 ) *2 if (byte_position_ < 0) : byte_position_ = 0 # print("byte to pick up from payload_status is :: " + 
str(byte_position_)) #Debug else : byte_position_ = -1 if options.instr and options.byte and options.switch : byte_switch = True #------------------------------------------------------------------------------------------------------------------- # # Real time graph plotting routine setup section # # #------------------------------------------------------------------------------------------------------------------- if options.plot : # create plot plt.ion() # <-- work in "interactive mode" fig, ax = plt.subplots() fig.canvas.set_window_title('Live BLE RSSI level Chart') ax.set_title("Primary RSSI level in dB verse time in Milliseconds") # create an empty pandas dataframe that will store streaming data df = pd.DataFrame() if options.instr and options.byte and options.plotbyte : # create plot plt.ion() # <-- work in "interactive mode" fig, bx = plt.subplots() fig.canvas.set_window_title('Live BLE Payload data selected Byte Chart [ Byte position in Payload data = ' + byte_str + ' ] ') bx.set_title("Selected Byte value (0-255) verse time in Milliseconds") # create an empty pandas dataframe that will store streaming data df_byte = pd.DataFrame() #------------------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------------------- while (1): # catch all incoming data # if options.quiet and options.friendly and options.plot : # while (ser.inWaiting()): bgapi_parse_plot(ord(ser.read())); #else: # if options.quiet and options.friendly and options.comma : #
resources = p.run() self.assertEqual(len(resources), 1) def test_config_source(self): factory = self.replay_flight_data("test_security_group_config_source") p = self.load_policy( { "name": "sg-test", "resource": "security-group", "filters": [{"GroupId": "sg-6c7fa917"}], }, session_factory=factory, ) d_resources = p.run() self.assertEqual(len(d_resources), 1) p = self.load_policy( { "name": "sg-test", "source": "config", "resource": "security-group", "filters": [{"type": "default-vpc"}, {"GroupId": "sg-6c7fa917"}], }, session_factory=factory, ) c_resources = p.run() self.assertEqual(len(c_resources), 1) self.assertEqual(c_resources[0]["GroupId"], "sg-6c7fa917") self.maxDiff = None self.assertEqual(c_resources, d_resources) p = self.load_policy( { "name": "sg-test", "resource": "security-group", "filters": [ {"type": "ingress", "Cidr": {"value": "172.16.58.3/32"}} ], }, session_factory=factory, ) c_resources = p.run() self.assertEqual(len(c_resources), 1) self.assertEqual(c_resources[0]["GroupId"], "sg-6c7fa917") def test_config_rule(self): factory = self.replay_flight_data("test_security_group_config_rule") p = self.load_policy( { "name": "sg-test", "mode": {"type": "config-rule"}, "resource": "security-group", "filters": [{"type": "ingress", "Cidr": {"value": "0.0.0.0/0"}}], }, session_factory=factory, ) mode = p.get_execution_mode() event = event_data("event-config-rule-security-group.json") resources = mode.run(event, None) self.assertEqual(len(resources), 1) self.assertEqual(resources[0]["GroupId"], "sg-e2fb6999") def test_only_ports_ingress(self): p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [{"type": "ingress", "OnlyPorts": [80]}], } ) resources = [ { "Description": "Typical Internet-Facing Security Group", "GroupId": "sg-abcd1234", "GroupName": "TestInternetSG", "IpPermissions": [ { "FromPort": 53, "IpProtocol": "tcp", "IpRanges": ["10.0.0.0/8"], "PrefixListIds": [], "ToPort": 53, "UserIdGroupPairs": [], } ], 
"IpPermissionsEgress": [], "OwnerId": "123456789012", "Tags": [ {"Key": "Value", "Value": "InternetSecurityGroup"}, {"Key": "Key", "Value": "Name"}, ], "VpcId": "vpc-1234abcd", } ] manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 1) @functional def test_only_ports_and_cidr_ingress(self): factory = self.replay_flight_data("test_only_ports_and_cidr_ingress") client = factory().client("ec2") vpc_id = client.create_vpc(CidrBlock="10.4.0.0/16")["Vpc"]["VpcId"] self.addCleanup(client.delete_vpc, VpcId=vpc_id) sg_id = client.create_security_group( GroupName="c7n-only-ports-and-cidr-test", VpcId=vpc_id, Description="cloud-custodian test SG" )["GroupId"] self.addCleanup(client.delete_security_group, GroupId=sg_id) client.authorize_security_group_ingress( GroupId=sg_id, IpProtocol="tcp", FromPort=0, ToPort=62000, CidrIp="10.2.0.0/16", ) client.authorize_security_group_ingress( GroupId=sg_id, IpProtocol="tcp", FromPort=80, ToPort=80, CidrIp="0.0.0.0/0", ) client.authorize_security_group_ingress( GroupId=sg_id, IpProtocol="tcp", FromPort=1234, ToPort=4321, CidrIp="0.0.0.0/0", ) client.authorize_security_group_ingress( GroupId=sg_id, IpProtocol="tcp", FromPort=443, ToPort=443, CidrIp="0.0.0.0/0", ) client.authorize_security_group_ingress( GroupId=sg_id, IpProtocol="tcp", FromPort=8080, ToPort=8080, CidrIp="0.0.0.0/0", ) p = self.load_policy( { "name": "sg-find", "resource": "security-group", "filters": [ {"VpcId": vpc_id}, {"GroupName": "c7n-only-ports-and-cidr-test"}, { "type": "ingress", "OnlyPorts": [80, 443], "Cidr": {"value": "0.0.0.0/0"} } ], "actions": [ {"type": "remove-permissions", "ingress": "matched"} ] }, session_factory=factory, ) resources = p.run() self.assertEqual(len(resources), 1) self.assertEqual(resources[0]["GroupId"], sg_id) self.assertEqual(resources[0]['IpPermissions'], [ { u'PrefixListIds': [], u'FromPort': 80, u'IpRanges': [{u'CidrIp': '0.0.0.0/0'}], u'ToPort': 80, u'IpProtocol': 'tcp', 
u'UserIdGroupPairs': [], u'Ipv6Ranges': [] }, { u'PrefixListIds': [], u'FromPort': 8080, u'IpRanges': [{u'CidrIp': '0.0.0.0/0'}], u'ToPort': 8080, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] }, { u'PrefixListIds': [], u'FromPort': 0, u'IpRanges': [{u'CidrIp': '10.2.0.0/16'}], u'ToPort': 62000, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] }, { u'PrefixListIds': [], u'FromPort': 1234, u'IpRanges': [{u'CidrIp': '0.0.0.0/0'}], u'ToPort': 4321, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] }, { u'PrefixListIds': [], u'FromPort': 443, u'IpRanges': [{u'CidrIp': '0.0.0.0/0'}], u'ToPort': 443, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] } ]) self.assertEqual( resources[0]['c7n:MatchedFilters'], [u'VpcId', u'GroupName'] ) self.assertEqual( resources[0]['MatchedIpPermissions'], [ { u'FromPort': 8080, u'IpRanges': [{u'CidrIp': '0.0.0.0/0'}], u'PrefixListIds': [], u'ToPort': 8080, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] }, { u'FromPort': 1234, u'IpRanges': [{u'CidrIp': '0.0.0.0/0'}], u'PrefixListIds': [], u'ToPort': 4321, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] } ] ) group_info = client.describe_security_groups( GroupIds=[sg_id] )["SecurityGroups"][0] self.assertEqual(group_info.get("IpPermissions", []), [ { u'PrefixListIds': [], u'FromPort': 80, u'IpRanges': [{u'CidrIp': '0.0.0.0/0'}], u'ToPort': 80, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] }, { u'PrefixListIds': [], u'FromPort': 0, u'IpRanges': [{u'CidrIp': '10.2.0.0/16'}], u'ToPort': 62000, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] }, { u'PrefixListIds': [], u'FromPort': 443, u'IpRanges': [{u'CidrIp': '0.0.0.0/0'}], u'ToPort': 443, u'IpProtocol': 'tcp', u'UserIdGroupPairs': [], u'Ipv6Ranges': [] } ]) def test_multi_attribute_ingress(self): p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ {"type": "ingress", 
"Cidr": {"value": "10.0.0.0/8"}, "Ports": [53]} ], } ) resources = [ { "Description": "Typical Internet-Facing Security Group", "GroupId": "sg-abcd1234", "GroupName": "TestInternetSG", "IpPermissions": [ { "FromPort": 53, "IpProtocol": "tcp", "IpRanges": [{"CidrIp": "10.0.0.0/8"}], "PrefixListIds": [], "ToPort": 53, "UserIdGroupPairs": [], } ], "IpPermissionsEgress": [], "OwnerId": "123456789012", "Tags": [ {"Key": "Value", "Value": "InternetSecurityGroup"}, {"Key": "Key", "Value": "Name"}, ], "VpcId": "vpc-1234abcd", } ] manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 1) def test_ports_ingress(self): p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [{"type": "ingress", "Ports": [53]}], } ) resources = [ { "Description": "Typical Internet-Facing Security Group", "GroupId": "sg-abcd1234", "GroupName": "TestInternetSG", "IpPermissions": [ { "FromPort": 53, "IpProtocol": "tcp", "IpRanges": ["10.0.0.0/8"], "PrefixListIds": [], "ToPort": 53, "UserIdGroupPairs": [], } ], "IpPermissionsEgress": [], "OwnerId": "123456789012", "Tags": [ {"Key": "Value", "Value": "InternetSecurityGroup"}, {"Key": "Key", "Value": "Name"}, ], "VpcId": "vpc-1234abcd", } ] manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 1) def test_self_reference_ingress_false_positives(self): resources = [ { "Description": "Typical Security Group", "GroupId": "sg-abcd1234", "GroupName": "TestSG", "IpPermissions": [ { "FromPort": 22, "IpProtocol": "tcp", "IpRanges": [], "PrefixListIds": [], "ToPort": 22, "UserIdGroupPairs": [ {"UserId": "123456789012", "GroupId": "sg-abcd1234"} ], } ], "IpPermissionsEgress": [], "OwnerId": "123456789012", "Tags": [ {"Key": "Value", "Value": "TypicalSecurityGroup"}, {"Key": "Key", "Value": "Name"}, ], "VpcId": "vpc-1234abcd", } ] p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", 
"Ports": [22], "match-operator": "and", "SelfReference": True, } ], } ) manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 1) p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", "Ports": [22], "match-operator": "and", "SelfReference": False, } ], } ) manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 0) p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", "Ports": [22], "match-operator": "and", "Cidr": { "value": "0.0.0.0/0", "op": "eq", "value_type": "cidr" }, } ], } ) manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 0) resources = [ { "Description": "Typical Security Group", "GroupId": "sg-abcd1234", "GroupName": "TestSG", "IpPermissions": [ { "FromPort": 22, "IpProtocol": "tcp", "IpRanges": [ {"CidrIp": "10.42.2.0/24"}, {"CidrIp": "10.42.4.0/24"} ], "PrefixListIds": [], "ToPort": 22, "UserIdGroupPairs": [ {"UserId": "123456789012", "GroupId": "sg-abcd5678"} ], } ], "IpPermissionsEgress": [], "OwnerId": "123456789012", "Tags": [ {"Key": "Value", "Value": "TypicalSecurityGroup"}, {"Key": "Key", "Value": "Name"}, ], "VpcId": "vpc-1234abcd", } ] p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", "Ports": [22], "Cidr": { "value": "10.42.4.0/24", "op": "eq", "value_type": "cidr" }, } ], } ) manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 1) p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", "Ports": [22], "match-operator": "and", "Cidr": { "value": "10.42.3.0/24", "op": "eq", "value_type": "cidr" }, } ], } ) manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 0) p = self.load_policy( { "name": 
"ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", "Ports": [22], "Cidr": { "value": "10.42.3.0/24", "op": "ne", "value_type": "cidr" }, } ], } ) manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 1) p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", "Ports": [22], "Cidr": { "value": "0.0.0.0/0", "op": "in", "value_type": "cidr" }, } ], } ) manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 1) def test_egress_ipv6(self): p = self.load_policy({ "name": "ipv6-test", "resource": "security-group", "filters": [{ "type": "egress", "CidrV6": { "value": "::/0"}}] }) resources = [{ "IpPermissionsEgress": [ { "IpProtocol": "-1", "PrefixListIds": [], "IpRanges": [ { "CidrIp": "0.0.0.0/0" } ], "UserIdGroupPairs": [], "Ipv6Ranges": [ { "CidrIpv6": "::/0" } ] } ], "Description": "default VPC security group", "IpPermissions": [ { "IpProtocol": "-1", "PrefixListIds": [], "IpRanges": [], "UserIdGroupPairs": [ { "UserId": "644160558196", "GroupId": "sg-b744bafc" } ], "Ipv6Ranges": [] } ], "GroupName": "default", "VpcId": "vpc-f8c6d983", "OwnerId": "644160558196", "GroupId": "sg-b744bafc" }] manager = p.load_resource_manager() self.assertEqual(len(manager.filter_resources(resources)), 1) def test_permission_expansion(self): factory = self.replay_flight_data("test_security_group_perm_expand") client = factory().client("ec2") vpc_id = client.create_vpc(CidrBlock="10.42.0.0/16")["Vpc"]["VpcId"] self.addCleanup(client.delete_vpc, VpcId=vpc_id) sg_id = client.create_security_group( GroupName="allow-some-ingress", VpcId=vpc_id, Description="inbound access" )[ "GroupId" ] sg2_id = client.create_security_group( GroupName="allowed-reference", VpcId=vpc_id, Description="inbound ref access", )[ "GroupId" ] self.addCleanup(client.delete_security_group, GroupId=sg2_id) self.addCleanup(client.delete_security_group, 
GroupId=sg_id) client.authorize_security_group_ingress( GroupId=sg_id, IpPermissions=[ { "IpProtocol": "tcp", "FromPort": 443, "ToPort": 443, "IpRanges": [{"CidrIp": "10.42.1.0/24"}], } ], ) client.authorize_security_group_ingress( GroupId=sg_id, IpPermissions=[ { "IpProtocol": "tcp", "FromPort": 443, "ToPort": 443, "IpRanges": [{"CidrIp": "10.42.2.0/24"}], } ], ) client.authorize_security_group_ingress( GroupId=sg_id, IpPermissions=[ { "IpProtocol": "tcp", "FromPort": 443, "ToPort": 443, "UserIdGroupPairs": [{"GroupId": sg2_id}], } ], ) p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", "Cidr": { "value": "10.42.1.1", "op": "in", "value_type": "cidr" }, } ], }, session_factory=factory, ) resources = p.run() self.assertEqual(len(resources), 1) self.assertEqual(len(resources[0].get("MatchedIpPermissions", [])), 1) self.assertEqual( resources[0].get("MatchedIpPermissions", []), [ { u"FromPort": 443, u"IpProtocol": u"tcp", u"Ipv6Ranges": [], u"PrefixListIds": [], u"UserIdGroupPairs": [], u"IpRanges": [{u"CidrIp": u"10.42.1.0/24"}], u"ToPort": 443, } ], ) @functional def test_cidr_ingress(self): factory = self.replay_flight_data("test_security_group_cidr_ingress") client = factory().client("ec2") vpc_id = client.create_vpc(CidrBlock="10.42.0.0/16")["Vpc"]["VpcId"] self.addCleanup(client.delete_vpc, VpcId=vpc_id) sg_id = client.create_security_group( GroupName="allow-https-ingress", VpcId=vpc_id, Description="inbound access" )[ "GroupId" ] self.addCleanup(client.delete_security_group, GroupId=sg_id) client.authorize_security_group_ingress( GroupId=sg_id, IpPermissions=[ { "IpProtocol": "tcp", "FromPort": 443, "ToPort": 443, "IpRanges": [{"CidrIp": "10.42.1.0/24"}], } ], ) p = self.load_policy( { "name": "ingress-access", "resource": "security-group", "filters": [ { "type": "ingress", "Cidr": { "value": "10.42.1.239", "op": "in", "value_type": "cidr" }, } ], }, session_factory=factory, ) resources = p.run() 
self.assertEqual(len(resources), 1)
<reponame>int-brain-lab/ibllib ''' Computes task related output ''' import numpy as np from scipy.stats import ranksums, wilcoxon, ttest_ind, ttest_rel from ._statsmodels import multipletests from sklearn.metrics import roc_auc_score import pandas as pd from brainbox.population.decode import get_spike_counts_in_bins def responsive_units(spike_times, spike_clusters, event_times, pre_time=[0.5, 0], post_time=[0, 0.5], alpha=0.05): """ Determine responsive neurons by doing a Wilcoxon Signed-Rank test between a baseline period before a certain task event (e.g. stimulus onset) and a period after the task event. Parameters ---------- spike_times : 1D array spike times (in seconds) spike_clusters : 1D array cluster ids corresponding to each event in `spikes` event_times : 1D array times (in seconds) of the events from the two groups pre_time : two-element array time (in seconds) preceding the event to get the baseline (e.g. [0.5, 0.2] would be a window starting 0.5 seconds before the event and ending at 0.2 seconds before the event) post_time : two-element array time (in seconds) to follow the event times alpha : float alpha to use for statistical significance Returns ------- significant_units : ndarray an array with the indices of clusters that are significatly modulated stats : 1D array the statistic of the test that was performed p_values : ndarray the p-values of all the clusters cluster_ids : ndarray cluster ids of the p-values """ # Get spike counts for baseline and event timewindow baseline_times = np.column_stack(((event_times - pre_time[0]), (event_times - pre_time[1]))) baseline_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, baseline_times) times = np.column_stack(((event_times + post_time[0]), (event_times + post_time[1]))) spike_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, times) # Do statistics p_values = np.empty(spike_counts.shape[0]) stats = np.empty(spike_counts.shape[0]) for i in 
range(spike_counts.shape[0]): if np.sum(baseline_counts[i, :] - spike_counts[i, :]) == 0: p_values[i] = 1 stats[i] = 0 else: stats[i], p_values[i] = wilcoxon(baseline_counts[i, :], spike_counts[i, :]) # Perform FDR correction for multiple testing sig_units, p_values, _, _ = multipletests(p_values, alpha, method='fdr_bh') significant_units = cluster_ids[sig_units] return significant_units, stats, p_values, cluster_ids def differentiate_units(spike_times, spike_clusters, event_times, event_groups, pre_time=0, post_time=0.5, test='ranksums', alpha=0.05): """ Determine units which significantly differentiate between two task events (e.g. stimulus left/right) by performing a statistical test between the spike rates elicited by the two events. Default is a Wilcoxon Rank Sum test. Parameters ---------- spike_times : 1D array spike times (in seconds) spike_clusters : 1D array cluster ids corresponding to each event in `spikes` event_times : 1D array times (in seconds) of the events from the two groups event_groups : 1D array group identities of the events as either 0 or 1 pre_time : float time (in seconds) to precede the event times to get the baseline post_time : float time (in seconds) to follow the event times test : string which statistical test to use, options are: 'ranksums' Wilcoxon Rank Sums test 'signrank' Wilcoxon Signed Rank test (for paired observations) 'ttest' independent samples t-test 'paired_ttest' paired t-test alpha : float alpha to use for statistical significance Returns ------- significant_units : 1D array an array with the indices of clusters that are significatly modulated stats : 1D array the statistic of the test that was performed p_values : 1D array the p-values of all the clusters cluster_ids : ndarray cluster ids of the p-values """ # Check input assert test in ['ranksums', 'signrank', 'ttest', 'paired_ttest'] if (test == 'signrank') or (test == 'paired_ttest'): assert np.sum(event_groups == 0) == np.sum(event_groups == 1), \ 'For paired tests 
the number of events in both groups needs to be the same' # Get spike counts for the two events times_1 = np.column_stack(((event_times[event_groups == 0] - pre_time), (event_times[event_groups == 0] + post_time))) counts_1, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, times_1) times_2 = np.column_stack(((event_times[event_groups == 1] - pre_time), (event_times[event_groups == 1] + post_time))) counts_2, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, times_2) # Do statistics p_values = np.empty(len(cluster_ids)) stats = np.empty(len(cluster_ids)) for i in range(len(cluster_ids)): if (np.sum(counts_1[i, :]) == 0) and (np.sum(counts_2[i, :]) == 0): p_values[i] = 1 stats[i] = 0 else: if test == 'ranksums': stats[i], p_values[i] = ranksums(counts_1[i, :], counts_2[i, :]) elif test == 'signrank': stats[i], p_values[i] = wilcoxon(counts_1[i, :], counts_2[i, :]) elif test == 'ttest': stats[i], p_values[i] = ttest_ind(counts_1[i, :], counts_2[i, :]) elif test == 'paired_ttest': stats[i], p_values[i] = ttest_rel(counts_1[i, :], counts_2[i, :]) # Perform FDR correction for multiple testing sig_units, p_values, _, _ = multipletests(p_values, alpha, method='fdr_bh') significant_units = cluster_ids[sig_units] return significant_units, stats, p_values, cluster_ids def roc_single_event(spike_times, spike_clusters, event_times, pre_time=[0.5, 0], post_time=[0, 0.5]): """ Determine how well neurons respond to a certain task event by calculating the area under the ROC curve between a baseline period before the event and a period after the event. Values of > 0.5 indicate the neuron respons positively to the event and < 0.5 indicate a negative response. 
Parameters ---------- spike_times : 1D array spike times (in seconds) spike_clusters : 1D array cluster ids corresponding to each event in `spikes` event_times : 1D array times (in seconds) of the events from the two groups pre_time : two-element array time (in seconds) preceding the event to get the baseline (e.g. [0.5, 0.2] would be a window starting 0.5 seconds before the event and ending at 0.2 seconds before the event) post_time : two-element array time (in seconds) to follow the event times Returns ------- auc_roc : 1D array the area under the ROC curve cluster_ids : 1D array cluster ids of the p-values """ # Get spike counts for baseline and event timewindow baseline_times = np.column_stack(((event_times - pre_time[0]), (event_times - pre_time[1]))) baseline_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, baseline_times) times = np.column_stack(((event_times + post_time[0]), (event_times + post_time[1]))) spike_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, times) # Calculate area under the ROC curve per neuron auc_roc = np.empty(spike_counts.shape[0]) for i in range(spike_counts.shape[0]): auc_roc[i] = roc_auc_score(np.concatenate((np.zeros(baseline_counts.shape[1]), np.ones(spike_counts.shape[1]))), np.concatenate((baseline_counts[i, :], spike_counts[i, :]))) return auc_roc, cluster_ids def roc_between_two_events(spike_times, spike_clusters, event_times, event_groups, pre_time=0, post_time=0.25): """ Calcluate area under the ROC curve that indicates how well the activity of the neuron distiguishes between two events (e.g. movement to the right vs left). A value of 0.5 indicates the neuron cannot distiguish between the two events. A value of 0 or 1 indicates maximum distinction. Significance is determined by bootstrapping the ROC curves. If 0.5 is not included in the 95th percentile of the bootstrapped distribution, the neuron is deemed to be significant. 
Parameters ---------- spike_times : 1D array spike times (in seconds) spike_clusters : 1D array cluster ids corresponding to each event in `spikes` event_times : 1D array times (in seconds) of the events from the two groups event_groups : 1D array group identities of the events as either 0 or 1 pre_time : float time (in seconds) to precede the event times post_time : float time (in seconds) to follow the event times Returns ------- auc_roc : 1D array an array of the area under the ROC curve for every neuron cluster_ids : 1D array cluster ids of the AUC values """ # Get spike counts times = np.column_stack(((event_times - pre_time), (event_times + post_time))) spike_counts, cluster_ids = get_spike_counts_in_bins(spike_times, spike_clusters, times) # Calculate area under the ROC curve per neuron auc_roc = np.empty(spike_counts.shape[0]) for i in range(spike_counts.shape[0]): auc_roc[i] = roc_auc_score(event_groups, spike_counts[i, :]) return auc_roc, cluster_ids def _get_biased_probs(n: int, idx: int = -1, prob: float = 0.5) -> list: n_1 = n - 1 z = n_1 + prob p = [1 / z] * (n_1 + 1) p[idx] *= prob return p def _draw_contrast( contrast_set: list, prob_type: str = "biased", idx: int = -1, idx_prob: float = 0.5 ) -> float: if prob_type == "biased": p = _get_biased_probs(len(contrast_set), idx=idx, prob=idx_prob) return np.random.choice(contrast_set, p=p) elif prob_type == "uniform": return np.random.choice(contrast_set) def _draw_position(position_set, stim_probability_left): return int( np.random.choice( position_set, p=[stim_probability_left, 1 - stim_probability_left] ) ) def generate_pseudo_blocks(n_trials, factor=60, min_=20, max_=100, first5050=90): """ Generate a pseudo block structure Parameters ---------- n_trials : int how many trials to generate factor : int factor of the exponential min_ : int minimum number of trials per block max_ : int maximum number of trials per block first5050 : int amount of trials with 50/50 left right probability at the beginning 
Returns
""" Copyright © 2021-2022 The Johns Hopkins University Applied Physics Laboratory LLC Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import abc import functools import inspect import itertools import sys import typing import warnings import yaml import numpy as np import gym # key: curriculum name, value: AbstractCurriculum class object or factory curriculum_registry = {} class ValidationError(ValueError): """Raised when there is a problem with a curriculum.""" pass class TaskVariant: """ A TaskVariant represents a fixed number of steps or episodes in a single type of :class:`gym.Env`. 
""" def __init__( self, task_cls: typing.Type[gym.Env], *, rng_seed: int, params: typing.Optional[typing.Dict] = None, task_label: typing.Optional[str] = None, variant_label: typing.Optional[str] = "Default", num_episodes: typing.Optional[int] = None, num_steps: typing.Optional[int] = None, ) -> None: """ :param task_cls: the :class:`gym.Env` of this task variant :param rng_seed: An integer seed used to repeatably instantiate the environment :param params: An optional dict of parameters to be passed as constructor arguments to the environment. :param task_label: The name of the task which describes this environment. :param variant_label: The name of the variant (of task_label) which describes this environment. :param num_episodes: The length limit of this experience in number of episodes. :param num_steps: The length limit of this experience in number of steps. :raises ValidationError: if neither `num_episodes` or `num_steps` is provided. :raises ValidationError: if both `num_episodes` and `num_steps` are provided. """ if params is None: params = {} if task_label is None: task_label = task_cls.__name__ self.task_cls = task_cls self.params = params self.task_label = task_label self.variant_label = variant_label self.rng_seed = rng_seed if num_episodes is None and num_steps is None: raise ValidationError("Neither num_episodes nor num_steps provided") if num_episodes is not None and num_steps is not None: raise ValidationError("Both num_episodes and num_steps provided") self.num_episodes = num_episodes self.num_steps = num_steps def validate(self) -> None: """ A method to validate that the experience is set up properly. :raises ValidationError: if the experience is not set up properly. 
""" validate_params(self.task_cls, list(self.params.keys())) def make_env(self) -> gym.Env: """ Initializes the gym environment object """ return self.task_cls(**self.params) class TaskBlock: """ A TaskBlock represents a sequence of one or more :class:`TaskVariant` which all share the same general task. """ def __init__( self, task_label: str, task_variants: typing.Iterable[TaskVariant] ) -> None: """ :param task_label: The name of the task which describes the environment for all contained variants. :param task_variants: A sequence of one or more :class:`TaskVariant` """ super().__init__() self.task_label = task_label self._task_variants = task_variants def task_variants(self) -> typing.Iterable[TaskVariant]: """ :return: An Iterable of :class:`TaskVariant`. """ self._task_variants, task_variants = itertools.tee(self._task_variants, 2) return task_variants class Block: """ Represents a sequence of one or more :class:`TaskBlock` """ @property @abc.abstractmethod def is_learning_allowed(self) -> bool: """ :return: Bool indicating if this block is intended for learning or evaluation """ raise NotImplementedError def __init__(self, task_blocks: typing.Iterable[TaskBlock]) -> None: """ :param task_blocks: A sequence of one or more :class:`TaskBlock` """ self._task_blocks = task_blocks def task_blocks(self) -> typing.Iterable[TaskBlock]: """ :return: An Iterable of :class:`TaskBlock`. """ self._task_blocks, task_blocks = itertools.tee(self._task_blocks, 2) return task_blocks class LearnBlock(Block): """ A :class:`Block` where the data can be used for learning. """ is_learning_allowed = True class EvalBlock(Block): """ A :class:`Block` where the data can NOT be used for learning. """ is_learning_allowed = False class AbstractCurriculum: """ Represents a lifelong/continual learning curriculum. A curriculum is simply a sequence of one or more :class:`Block`. 
""" def __init__(self, rng_seed: int, config_file: typing.Optional[str] = None): """ :param rng_seed: The seed to be used in setting random number generators. :param config_file: Path to a config file for the curriculum or None if no config. """ self.rng_seed = rng_seed self.rng = np.random.default_rng(rng_seed) if config_file is not None: with open(config_file) as file: self.config = yaml.safe_load(file) else: self.config = {} self.config_file = config_file def copy(self) -> "AbstractCurriculum": """ :return: A new instance of this curriculum, initialized with the same arguments. .. NOTE:: Curriculum authors will need to overwrite this method for subclasses with different arguments. """ return self.__class__(self.rng_seed, self.config_file) @abc.abstractmethod def learn_blocks_and_eval_blocks(self) -> typing.Iterable[Block]: """ Generate the learning and eval blocks of this curriculum. :return: An Iterable of :class:`Block`. """ raise NotImplementedError def validate(self) -> None: """ A method to validate that the curriculum is set up properly. This is an optional capability for curriculum authors to implement. :raises ValidationError: if the curriculum is not set up properly. """ pass class InterleavedEvalCurriculum(AbstractCurriculum): """ A common curriculum format where a single evaluation block is repeated before, between, and after the curriculum's learning blocks. This class implements :meth:`AbstractCurriculum.learn_blocks_and_eval_blocks()`, and expects the user to implement two new methods: 1. learn_blocks(), which returns the sequence of :class:`LearnBlock`. 2. eval_block(), which returns the single :class:`EvalBlock` to be interleaved between each :class:`LearnBlock`. 
""" def __init__(self, rng_seed: int, config_file: typing.Optional[str] = None): super().__init__(rng_seed, config_file) # Also save a fixed eval_rng_seed so that eval environments are the same in each block self.eval_rng_seed = self.rng.bit_generator.random_raw() @abc.abstractmethod def learn_blocks(self) -> typing.Iterable[LearnBlock]: """ :return: An iterable of :class:`LearnBlock`. """ raise NotImplementedError @abc.abstractmethod def eval_block(self) -> EvalBlock: """ :return: The single :class:`EvalBlock` to interleave between each individual :class:`LearnBlock` returned from :meth:`InterleavedEvalCurriculum.learn_blocks`. """ raise NotImplementedError def learn_blocks_and_eval_blocks(self) -> typing.Iterable[Block]: yield self.eval_block() for block in self.learn_blocks(): yield block yield self.eval_block() def split_task_variants( task_variants: typing.Iterable[TaskVariant], ) -> typing.Iterable[TaskBlock]: """ Divides task variants into one or more blocks of matching tasks :param task_variants: The iterable of :class:`TaskVariant` to be placed into task blocks. :return: An iterable of one or more :class:`TaskBlock` which contain the provided task variants. """ for task_label, variants_in_block in itertools.groupby( task_variants, key=lambda task: task.task_label ): yield TaskBlock(task_label, variants_in_block) def simple_learn_block(task_variants: typing.Iterable[TaskVariant]) -> LearnBlock: """ Constructs a :class:`LearnBlock` with the task variants passed in. :class:`TaskBlock` are divided as needed. :param task_variants: The iterable of :class:`TaskVariant` to include in the :class:`LearnBlock`. :return: A :class:`LearnBlock` with one or more :class:`TaskBlock` which contain the provided task variants. """ return LearnBlock(split_task_variants(task_variants)) def simple_eval_block(task_variants: typing.Iterable[TaskVariant]) -> EvalBlock: """ Constructs a :class:`EvalBlock` with the task variants passed in. :class:`TaskBlock` are divided as needed. 
:param task_variants: The iterable of :class:`TaskVariant` to include in the :class:`EvalBlock`. :return: A :class:`EvalBlock` with one or more :class:`TaskBlock` which contain the provided task variants. """ return EvalBlock(split_task_variants(task_variants)) Observation = typing.TypeVar("Observation") #: Observation of environment state Action = typing.TypeVar("Action") #: Action taken in environment Reward = float #: Float reward from environment Done = bool #: Bool, True if episode has ended class Transition(typing.NamedTuple): """ A named tuple containing data from a single step in an MDP: (observation, action, reward, done, next_observation) """ observation: Observation action: Action reward: typing.Optional[Reward] done: Done next_observation: Observation def summarize_curriculum( curriculum: AbstractCurriculum, ) -> str: """ Generate a detailed string summarizing the contents of the curriculum. :return: A string that would print as a formatted outline of this curriculum's contents. """ def maybe_plural(num: int, label: str): return f"{num} {label}" + ("" if num == 1 else "s") block_summaries = [] for i_block, block in enumerate(curriculum.learn_blocks_and_eval_blocks()): task_summaries = [] for i_task, task_block in enumerate(block.task_blocks()): variant_summaries = [] for i_variant, task_variant in enumerate(task_block.task_variants()): variant_summary = ( f"\n\t\t\tTask variant {i_variant+1}, " f"{task_variant.task_label} - {task_variant.variant_label}: " + ( f"{maybe_plural(task_variant.num_episodes, 'episode')}." if task_variant.num_episodes is not None else f"{maybe_plural(task_variant.num_steps, 'step')}." ) ) variant_summaries.append(variant_summary) task_summary = ( f"\n\t\tTask {i_task+1}, {task_block.task_label}: " f"{maybe_plural(len(variant_summaries), 'variant')}" ) task_summaries.append(task_summary + "".join(variant_summaries)) block_summary = ( f"\n\n\tBlock {i_block+1}, " f"{'learning' if
is this value negative?""" return self._negative @property def isinf(self): """Is this value infinite?""" return self._isinf @property def isnan(self): """Is this value NaN?""" return self._isnan # rounding envelopes and inexactness _inexact : bool = None # approximate bit _interval_full : bool = None # envelope interval size _interval_sided : bool = None # envelope interval position _interval_open_top : bool = None # is the top bound exclusive? _interval_open_bottom : bool = None # ditto for the bottom bound _rc : int = None # as MPFR result code. 0 if value is exact, -1 if rounded up, 1 if rounded down. # views for interval properties @property def inexact(self): """Is this value inexact?""" return self._inexact @property def interval_full(self): """Does the rounding envelope for this number extend a full ulp on each side? (if false, it is a half ulp) """ return self._interval_full @property def interval_sided(self): """Does the rounding envelope only extend away from zero? (if False, it is symmetric on both sides) """ return self._interval_sided @property def interval_open_top(self): """Is the top of the rounding envelope exclusive? (if False, it is inclusive, or closed) """ return self._interval_open_top @property def interval_open_bottom(self): """Is the bottom of the rounding envelope exclusive? (if False, it is inclusive, or closed) """ return self._interval_open_bottom @property def rc(self): """Result code. 1 if this value was rounded toward 0, -1 if it was rounded away. 
""" return self._rc # other useful properties def is_exactly_zero(self): return self._c == 0 and not self._inexact def is_zero(self): return self._c == 0 def is_integer(self): return self._exp >= 0 or self._c & bitmask(-self._exp) == 0 def is_identical_to(self, x): return ( self._c == x._c and self._exp == x._exp and self._negative == x._negative and self._isinf == x._isinf and self._isnan == x._isnan and self._inexact == x._inexact and self._interval_full == x._interval_full and self._interval_sided == x._interval_sided and self._interval_open_top == x._interval_open_top and self._interval_open_bottom == x._interval_open_bottom and self._rc == x._rc ) def __init__(self, # The base value of the sink, either as a sink to copy # or a string / float / mpfr to parse. base=None, # value information about the sink to construct m=None, exp=None, # either m must be specified, or c and negative must be specified c=None, # negative can be specified alone to change the sign negative=None, # inf and nan can be set independently of other properties of the # number, though having both set at once is not well defined inf=None, nan=None, # inexactness information can be specified or modified independently inexact=None, full=None, sided=None, open_top=None, open_bottom=None, rc=None, # rounding properties; ignored unless parsing a string max_p=None, min_n=None, rm=conversion.ROUND_NEAREST_EVEN ): """Create a new sinking point number. The value can be specified in 3 ways: If base is None, then the new number must have its value specified by exp and either m, or c and negative. Note that since integer 0 in Python does not have a sign, a signed zero must be specified with c and negative. If base is an existing Sink, then that number is copied, and its fields can be updated individually. If base is a numeric type or a string, then that number is converted to a sink with the closest possible value, as per the rounding specification. In practice, rounding will only occur for strings. 
If the specified rounding is impossible (i.e. rm is None, or both max_p and min_n are unspecified for a value such as Pi with no finite representation) then an exception will be raised. """ # raw, non-converting forms if base is None or isinstance(base, Sink): # create from mantissa / exponent form if base is None: if not ((m is not None and (c is None and negative is None)) or (m is None and (c is not None and negative is not None))): raise ValueError('must specify either m, or c and negative') elif inf and nan: raise ValueError('number cannot be simultaneously inf and nan') if m is not None: self._c = abs(m) self._negative = (m < 0) else: self._c = c self._negative = negative self._exp = exp self._isinf = bool(inf) self._isnan = bool(nan) self._inexact = bool(inexact) self._interval_full = bool(full) self._interval_sided = bool(sided) self._interval_open_top = bool(open_top) self._interval_open_bottom = bool(open_bottom) if rc is None: self._rc = 0 else: self._rc = rc # copy from existing sink else: if m is not None and (c is not None or negative is not None): raise ValueError('cannot specify c or negative if m is specified') if m is not None: self._c = abs(m) self._negative = (m < 0) else: self._c = c if c is not None else base.c self._negative = negative if negative is not None else base.negative self._exp = exp if exp is not None else base.exp self._isinf = inf if inf is not None else base.isinf self._isnan = nan if nan is not None else base.isnan if self.isnan and self.isinf: raise ValueError('cannot update number to simultaneously be inf and nan') self._inexact = inexact if inexact is not None else base.inexact self._interval_full = full if full is not None else base.interval_full self._interval_sided = sided if sided is not None else base.interval_sided self._interval_open_top = open_top if open_top is not None else base.interval_open_top self._interval_open_bottom = open_bottom if open_bottom is not None else base.interval_open_bottom self._rc = rc if rc 
is not None else base.rc # convert another representation into sinking point else: if not (m is None and exp is None and c is None and negative is None and inf is None and nan is None): raise ValueError('cannot specify numeric properties when converting another numeric type') if isinstance(base, str): # TODO unimplemented base = float(base) # TODO does not support inf and nan negative, c, exp = conversion.numeric_to_signed_mantissa_exp(base) self._c = c self._negative = negative self._exp = exp self._isinf = False self._isnan = False # TODO conflict with rounding self._inexact = bool(inexact) self._interval_full = bool(full) self._interval_sided = bool(sided) self._interval_open_top = bool(open_top) self._interval_open_bottom = bool(open_bottom) # round to specified precision def __repr__(self): return 'Sink({}, c={}, exp={}, negative={}, inexact={}, full={}, sided={}, rc={})'.format( repr(self.to_mpfr()), self.c, self.exp, self.negative, self.inexact, self.interval_full, self.interval_sided, self.rc, ) def __str__(self): """yah""" if self.c == 0: sgn = '-' if self.negative else '' if self._inexact: return '{}0~@{:d}'.format(sgn, self.n) else: #print(repr(self)) return '{}0'.format(sgn) else: rep = re.search(r"'(.*)'", repr(self.to_mpfr())).group(1).split('e') s = rep[0] sexp = '' if len(rep) > 1: sexp = 'e' + 'e'.join(rep[1:]) return '{}{}{}'.format(s, '~' if self._inexact else '', sexp) # return '{}{}'.format(rep, '~@{:d}'.format(self.n) if self._inexact else '') def round_m(self, max_p, min_n=None, rm=RM.RNE): """Round the mantissa to at most max_p precision, or a least absolute digit in position min_n, whichever is less precise. Exact numbers can always be rounded to any precision, but rounding will fail if it would attempt to increase the precision of an inexact number. Rounding respects the rc, and sets it accordingly for the rounded result. Rounding can use any specified rounding mode, defaulting to IEEE 754 style nearest even. 
""" # some values cannot be rounded; return unchanged if self.is_zero() or self.isinf or self.isnan: return Sink(self) # determine where we're rounding to if min_n is None: n = self.e - max_p else: n = max(min_n, self.e - max_p) offset = n - self.n if offset < 0: if self.inexact: # If this number is inexact, then we'd have to make up bits to # extend the precision. raise PrecisionError('rounding inexact number cannot produce more precise result') else: # If the number is exact, then we can always extend with zeros. This is independent # of the rounding mode. return Sink(self, c=self.c << -offset, exp=self.exp + offset) # Break up the significand lost_bits = self.c & bitmask(offset) left_bits = self.c >> offset if offset > 0: offset_m1 = offset - 1 low_bits = lost_bits & bitmask(offset_m1) half_bit = lost_bits >> offset_m1 else: # Rounding to the same precision is equivalent to having zero in the # lower
.redirect_incompatible_row_settings_py3 import RedirectIncompatibleRowSettings from .staging_settings_py3 import StagingSettings from .tabular_translator_py3 import TabularTranslator from .copy_translator_py3 import CopyTranslator from .salesforce_sink_py3 import SalesforceSink from .dynamics_sink_py3 import DynamicsSink from .odbc_sink_py3 import OdbcSink from .azure_search_index_sink_py3 import AzureSearchIndexSink from .azure_data_lake_store_sink_py3 import AzureDataLakeStoreSink from .oracle_sink_py3 import OracleSink from .polybase_settings_py3 import PolybaseSettings from .sql_dw_sink_py3 import SqlDWSink from .sql_sink_py3 import SqlSink from .document_db_collection_sink_py3 import DocumentDbCollectionSink from .file_system_sink_py3 import FileSystemSink from .blob_sink_py3 import BlobSink from .azure_table_sink_py3 import AzureTableSink from .azure_queue_sink_py3 import AzureQueueSink from .sap_cloud_for_customer_sink_py3 import SapCloudForCustomerSink from .copy_sink_py3 import CopySink from .copy_activity_py3 import CopyActivity from .execution_activity_py3 import ExecutionActivity from .append_variable_activity_py3 import AppendVariableActivity from .set_variable_activity_py3 import SetVariableActivity from .filter_activity_py3 import FilterActivity from .until_activity_py3 import UntilActivity from .wait_activity_py3 import WaitActivity from .for_each_activity_py3 import ForEachActivity from .if_condition_activity_py3 import IfConditionActivity from .execute_pipeline_activity_py3 import ExecutePipelineActivity from .control_activity_py3 import ControlActivity from .linked_integration_runtime_py3 import LinkedIntegrationRuntime from .self_hosted_integration_runtime_node_py3 import SelfHostedIntegrationRuntimeNode from .self_hosted_integration_runtime_status_py3 import SelfHostedIntegrationRuntimeStatus from .managed_integration_runtime_operation_result_py3 import ManagedIntegrationRuntimeOperationResult from .managed_integration_runtime_error_py3 import 
ManagedIntegrationRuntimeError from .managed_integration_runtime_node_py3 import ManagedIntegrationRuntimeNode from .managed_integration_runtime_status_py3 import ManagedIntegrationRuntimeStatus from .linked_integration_runtime_rbac_authorization_py3 import LinkedIntegrationRuntimeRbacAuthorization from .linked_integration_runtime_key_authorization_py3 import LinkedIntegrationRuntimeKeyAuthorization from .linked_integration_runtime_type_py3 import LinkedIntegrationRuntimeType from .self_hosted_integration_runtime_py3 import SelfHostedIntegrationRuntime from .integration_runtime_custom_setup_script_properties_py3 import IntegrationRuntimeCustomSetupScriptProperties from .integration_runtime_ssis_catalog_info_py3 import IntegrationRuntimeSsisCatalogInfo from .integration_runtime_ssis_properties_py3 import IntegrationRuntimeSsisProperties from .integration_runtime_vnet_properties_py3 import IntegrationRuntimeVNetProperties from .integration_runtime_compute_properties_py3 import IntegrationRuntimeComputeProperties from .managed_integration_runtime_py3 import ManagedIntegrationRuntime from .integration_runtime_node_ip_address_py3 import IntegrationRuntimeNodeIpAddress from .ssis_object_metadata_py3 import SsisObjectMetadata from .ssis_object_metadata_list_response_py3 import SsisObjectMetadataListResponse from .integration_runtime_node_monitoring_data_py3 import IntegrationRuntimeNodeMonitoringData from .integration_runtime_monitoring_data_py3 import IntegrationRuntimeMonitoringData from .integration_runtime_auth_keys_py3 import IntegrationRuntimeAuthKeys from .integration_runtime_regenerate_key_parameters_py3 import IntegrationRuntimeRegenerateKeyParameters from .integration_runtime_connection_info_py3 import IntegrationRuntimeConnectionInfo except (SyntaxError, ImportError): from .resource import Resource from .sub_resource import SubResource from .expression import Expression from .secure_string import SecureString from .linked_service_reference import 
LinkedServiceReference from .azure_key_vault_secret_reference import AzureKeyVaultSecretReference from .secret_base import SecretBase from .factory_identity import FactoryIdentity from .factory_repo_configuration import FactoryRepoConfiguration from .factory import Factory from .integration_runtime import IntegrationRuntime from .integration_runtime_resource import IntegrationRuntimeResource from .integration_runtime_reference import IntegrationRuntimeReference from .integration_runtime_status import IntegrationRuntimeStatus from .integration_runtime_status_response import IntegrationRuntimeStatusResponse from .integration_runtime_status_list_response import IntegrationRuntimeStatusListResponse from .update_integration_runtime_request import UpdateIntegrationRuntimeRequest from .update_integration_runtime_node_request import UpdateIntegrationRuntimeNodeRequest from .linked_integration_runtime_request import LinkedIntegrationRuntimeRequest from .create_linked_integration_runtime_request import CreateLinkedIntegrationRuntimeRequest from .parameter_specification import ParameterSpecification from .linked_service import LinkedService from .linked_service_resource import LinkedServiceResource from .dataset_folder import DatasetFolder from .dataset import Dataset from .dataset_resource import DatasetResource from .activity_dependency import ActivityDependency from .user_property import UserProperty from .activity import Activity from .variable_specification import VariableSpecification from .pipeline_folder import PipelineFolder from .pipeline_resource import PipelineResource from .trigger import Trigger from .trigger_resource import TriggerResource from .create_run_response import CreateRunResponse from .factory_vsts_configuration import FactoryVSTSConfiguration from .factory_git_hub_configuration import FactoryGitHubConfiguration from .factory_repo_update import FactoryRepoUpdate from .git_hub_access_token_request import GitHubAccessTokenRequest from 
.git_hub_access_token_response import GitHubAccessTokenResponse from .user_access_policy import UserAccessPolicy from .access_policy_response import AccessPolicyResponse from .pipeline_reference import PipelineReference from .trigger_pipeline_reference import TriggerPipelineReference from .factory_update_parameters import FactoryUpdateParameters from .dataset_reference import DatasetReference from .run_query_filter import RunQueryFilter from .run_query_order_by import RunQueryOrderBy from .run_filter_parameters import RunFilterParameters from .pipeline_run_invoked_by import PipelineRunInvokedBy from .pipeline_run import PipelineRun from .pipeline_runs_query_response import PipelineRunsQueryResponse from .activity_run import ActivityRun from .activity_runs_query_response import ActivityRunsQueryResponse from .trigger_run import TriggerRun from .trigger_runs_query_response import TriggerRunsQueryResponse from .rerun_tumbling_window_trigger_action_parameters import RerunTumblingWindowTriggerActionParameters from .rerun_tumbling_window_trigger import RerunTumblingWindowTrigger from .rerun_trigger_resource import RerunTriggerResource from .operation_display import OperationDisplay from .operation_log_specification import OperationLogSpecification from .operation_metric_availability import OperationMetricAvailability from .operation_metric_dimension import OperationMetricDimension from .operation_metric_specification import OperationMetricSpecification from .operation_service_specification import OperationServiceSpecification from .operation import Operation from .get_ssis_object_metadata_request import GetSsisObjectMetadataRequest from .ssis_object_metadata_status_response import SsisObjectMetadataStatusResponse from .exposure_control_request import ExposureControlRequest from .exposure_control_response import ExposureControlResponse from .self_dependency_tumbling_window_trigger_reference import SelfDependencyTumblingWindowTriggerReference from .trigger_reference import 
TriggerReference from .tumbling_window_trigger_dependency_reference import TumblingWindowTriggerDependencyReference from .trigger_dependency_reference import TriggerDependencyReference from .dependency_reference import DependencyReference from .retry_policy import RetryPolicy from .tumbling_window_trigger import TumblingWindowTrigger from .blob_events_trigger import BlobEventsTrigger from .blob_trigger import BlobTrigger from .recurrence_schedule_occurrence import RecurrenceScheduleOccurrence from .recurrence_schedule import RecurrenceSchedule from .schedule_trigger_recurrence import ScheduleTriggerRecurrence from .schedule_trigger import ScheduleTrigger from .multiple_pipeline_trigger import MultiplePipelineTrigger from .azure_function_linked_service import AzureFunctionLinkedService from .responsys_linked_service import ResponsysLinkedService from .azure_databricks_linked_service import AzureDatabricksLinkedService from .azure_data_lake_analytics_linked_service import AzureDataLakeAnalyticsLinkedService from .script_action import ScriptAction from .hd_insight_on_demand_linked_service import HDInsightOnDemandLinkedService from .salesforce_marketing_cloud_linked_service import SalesforceMarketingCloudLinkedService from .netezza_linked_service import NetezzaLinkedService from .vertica_linked_service import VerticaLinkedService from .zoho_linked_service import ZohoLinkedService from .xero_linked_service import XeroLinkedService from .square_linked_service import SquareLinkedService from .spark_linked_service import SparkLinkedService from .shopify_linked_service import ShopifyLinkedService from .service_now_linked_service import ServiceNowLinkedService from .quick_books_linked_service import QuickBooksLinkedService from .presto_linked_service import PrestoLinkedService from .phoenix_linked_service import PhoenixLinkedService from .paypal_linked_service import PaypalLinkedService from .marketo_linked_service import MarketoLinkedService from .maria_db_linked_service 
import MariaDBLinkedService from .magento_linked_service import MagentoLinkedService from .jira_linked_service import JiraLinkedService from .impala_linked_service import ImpalaLinkedService from .hubspot_linked_service import HubspotLinkedService from .hive_linked_service import HiveLinkedService from .hbase_linked_service import HBaseLinkedService from .greenplum_linked_service import GreenplumLinkedService from .google_big_query_linked_service import GoogleBigQueryLinkedService from .eloqua_linked_service import EloquaLinkedService from .drill_linked_service import DrillLinkedService from .couchbase_linked_service import CouchbaseLinkedService from .concur_linked_service import ConcurLinkedService from .azure_postgre_sql_linked_service import AzurePostgreSqlLinkedService from .amazon_mws_linked_service import AmazonMWSLinkedService from .sap_hana_linked_service import SapHanaLinkedService from .sap_bw_linked_service import SapBWLinkedService from .sftp_server_linked_service import SftpServerLinkedService from .ftp_server_linked_service import FtpServerLinkedService from .http_linked_service import HttpLinkedService from .azure_search_linked_service import AzureSearchLinkedService from .custom_data_source_linked_service import CustomDataSourceLinkedService from .amazon_redshift_linked_service import AmazonRedshiftLinkedService from .amazon_s3_linked_service import AmazonS3LinkedService from .sap_ecc_linked_service import SapEccLinkedService from .sap_cloud_for_customer_linked_service import SapCloudForCustomerLinkedService from .salesforce_linked_service import SalesforceLinkedService from .azure_data_lake_store_linked_service import AzureDataLakeStoreLinkedService from .mongo_db_linked_service import MongoDbLinkedService from .cassandra_linked_service import CassandraLinkedService from .web_client_certificate_authentication import WebClientCertificateAuthentication from .web_basic_authentication import WebBasicAuthentication from .web_anonymous_authentication 
import WebAnonymousAuthentication from .web_linked_service_type_properties import WebLinkedServiceTypeProperties from .web_linked_service import WebLinkedService from .odata_linked_service import ODataLinkedService from .hdfs_linked_service import HdfsLinkedService from .odbc_linked_service import OdbcLinkedService from .azure_ml_linked_service import AzureMLLinkedService from .teradata_linked_service import TeradataLinkedService from .db2_linked_service import Db2LinkedService from .sybase_linked_service import SybaseLinkedService from .postgre_sql_linked_service import PostgreSqlLinkedService from .my_sql_linked_service import MySqlLinkedService from .azure_my_sql_linked_service import AzureMySqlLinkedService from .oracle_linked_service import OracleLinkedService from .file_server_linked_service import FileServerLinkedService from .hd_insight_linked_service import HDInsightLinkedService from .dynamics_linked_service import DynamicsLinkedService from .cosmos_db_linked_service import CosmosDbLinkedService from .azure_key_vault_linked_service import AzureKeyVaultLinkedService from .azure_batch_linked_service import AzureBatchLinkedService from .azure_sql_database_linked_service import AzureSqlDatabaseLinkedService from .sql_server_linked_service import SqlServerLinkedService from .azure_sql_dw_linked_service import AzureSqlDWLinkedService from .azure_table_storage_linked_service import AzureTableStorageLinkedService from .azure_blob_storage_linked_service import AzureBlobStorageLinkedService from .azure_storage_linked_service import AzureStorageLinkedService from .responsys_object_dataset import ResponsysObjectDataset from .salesforce_marketing_cloud_object_dataset import SalesforceMarketingCloudObjectDataset from .vertica_table_dataset import VerticaTableDataset from .netezza_table_dataset import NetezzaTableDataset from .zoho_object_dataset import ZohoObjectDataset from .xero_object_dataset import XeroObjectDataset from .square_object_dataset import 
SquareObjectDataset from .spark_object_dataset import SparkObjectDataset from .shopify_object_dataset import ShopifyObjectDataset from .service_now_object_dataset import ServiceNowObjectDataset from .quick_books_object_dataset import QuickBooksObjectDataset from .presto_object_dataset import PrestoObjectDataset from .phoenix_object_dataset import PhoenixObjectDataset from .paypal_object_dataset import PaypalObjectDataset from .marketo_object_dataset import MarketoObjectDataset from .maria_db_table_dataset import MariaDBTableDataset from .magento_object_dataset import MagentoObjectDataset from .jira_object_dataset import JiraObjectDataset from .impala_object_dataset import ImpalaObjectDataset from .hubspot_object_dataset import HubspotObjectDataset from .hive_object_dataset import HiveObjectDataset from .hbase_object_dataset import HBaseObjectDataset from .greenplum_table_dataset import GreenplumTableDataset from .google_big_query_object_dataset import GoogleBigQueryObjectDataset from .eloqua_object_dataset import EloquaObjectDataset from .drill_table_dataset import DrillTableDataset from .couchbase_table_dataset import CouchbaseTableDataset from .concur_object_dataset import ConcurObjectDataset from .azure_postgre_sql_table_dataset import AzurePostgreSqlTableDataset from .amazon_mws_object_dataset import AmazonMWSObjectDataset from .dataset_zip_deflate_compression import DatasetZipDeflateCompression from .dataset_deflate_compression import DatasetDeflateCompression from .dataset_gzip_compression import DatasetGZipCompression from .dataset_bzip2_compression import DatasetBZip2Compression from .dataset_compression import DatasetCompression from .parquet_format import ParquetFormat from .orc_format import OrcFormat from .avro_format import AvroFormat from .json_format import JsonFormat from .text_format import TextFormat from .dataset_storage_format import DatasetStorageFormat from .http_dataset import HttpDataset from .azure_search_index_dataset import 
AzureSearchIndexDataset from .web_table_dataset import WebTableDataset from .sql_server_table_dataset import SqlServerTableDataset from .sap_ecc_resource_dataset import SapEccResourceDataset from .sap_cloud_for_customer_resource_dataset import SapCloudForCustomerResourceDataset from .salesforce_object_dataset import SalesforceObjectDataset from .relational_table_dataset import RelationalTableDataset from .azure_my_sql_table_dataset import AzureMySqlTableDataset from .oracle_table_dataset import OracleTableDataset from .odata_resource_dataset import ODataResourceDataset from .mongo_db_collection_dataset import MongoDbCollectionDataset from .file_share_dataset import FileShareDataset from .azure_data_lake_store_dataset import AzureDataLakeStoreDataset from .dynamics_entity_dataset import DynamicsEntityDataset from .document_db_collection_dataset import DocumentDbCollectionDataset from .custom_dataset import CustomDataset from .cassandra_table_dataset import CassandraTableDataset from .azure_sql_dw_table_dataset import AzureSqlDWTableDataset from .azure_sql_table_dataset import AzureSqlTableDataset from .azure_table_dataset import AzureTableDataset from .azure_blob_dataset import AzureBlobDataset from .amazon_s3_dataset import AmazonS3Dataset from .activity_policy import ActivityPolicy from .azure_function_activity import AzureFunctionActivity from .databricks_spark_python_activity import DatabricksSparkPythonActivity from .databricks_spark_jar_activity import DatabricksSparkJarActivity from .databricks_notebook_activity import DatabricksNotebookActivity from .data_lake_analytics_usql_activity import DataLakeAnalyticsUSQLActivity from .azure_ml_update_resource_activity import AzureMLUpdateResourceActivity from .azure_ml_web_service_file import AzureMLWebServiceFile from .azure_ml_batch_execution_activity import AzureMLBatchExecutionActivity from .get_metadata_activity import GetMetadataActivity from .web_activity_authentication import WebActivityAuthentication from 
.web_activity import WebActivity from .redshift_unload_settings import RedshiftUnloadSettings from .amazon_redshift_source import AmazonRedshiftSource from .responsys_source import ResponsysSource from .salesforce_marketing_cloud_source import SalesforceMarketingCloudSource from .vertica_source import VerticaSource from .netezza_source import NetezzaSource from .zoho_source import ZohoSource from .xero_source import XeroSource from .square_source import SquareSource from .spark_source import SparkSource from .shopify_source import ShopifySource from .service_now_source import ServiceNowSource from .quick_books_source import QuickBooksSource from .presto_source import PrestoSource from .phoenix_source import PhoenixSource from .paypal_source import PaypalSource from .marketo_source import MarketoSource from .maria_db_source import MariaDBSource from .magento_source import MagentoSource from .jira_source import JiraSource from .impala_source import ImpalaSource from .hubspot_source import HubspotSource from .hive_source import HiveSource from .hbase_source import HBaseSource from .greenplum_source import GreenplumSource from .google_big_query_source import
str, slc_type: int, slc_state: int, slc_resource_type: str, properties, slc_graph_id: str = None, lease_start: datetime = None, lease_end: datetime = None): """ Update a slice @param slc_guid slice id @param slc_name slice name @param slc_type slice type @param slc_state slice state @param slc_resource_type slice resource type @param lease_start Lease Start time @param lease_end Lease End time @param properties pickled instance @param slc_graph_id slice graph id """ try: with session_scope(self.db_engine) as session: slc_obj = session.query(Slices).filter(Slices.slc_guid == slc_guid).first() if slc_obj is not None: slc_obj.properties = properties slc_obj.slc_name = slc_name slc_obj.slc_type = slc_type slc_obj.slc_resource_type = slc_resource_type slc_obj.slc_state = slc_state slc_obj.lease_start = lease_start slc_obj.lease_end = lease_end if slc_graph_id is not None: slc_obj.slc_graph_id = slc_graph_id else: raise DatabaseException(self.OBJECT_NOT_FOUND.format("Slice", slc_guid)) except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e def remove_slice(self, *, slc_guid: str): """ Remove Slice @param slc_guid slice id """ try: with session_scope(self.db_engine) as session: session.query(Slices).filter(Slices.slc_guid == slc_guid).delete() except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e @staticmethod def generate_slice_dict_from_row(row) -> dict: """ Generate dictionary representing a slice row read from database """ if row is None: return None slice_obj = {'slc_id': row.slc_id, 'slc_guid': row.slc_guid, 'slc_name': row.slc_name, 'slc_type': row.slc_type, 'slc_resource_type': row.slc_resource_type, 'properties': row.properties, 'slc_state': row.slc_state} if row.slc_graph_id is not None: slice_obj['slc_graph_id'] = row.slc_graph_id return slice_obj def get_slice_ids(self) -> list: """ Get slice ids for an actor @param act_id actor id @return list of slice ids """ result = [] try: with 
session_scope(self.db_engine) as session: for row in session.query(Slices).all(): result.append(row.slc_id) except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e return result @staticmethod def create_slices_filter(*, slice_id: str = None, slice_name: str = None, project_id: str = None, email: str = None, oidc_sub: str = None) -> dict: filter_dict = {} if slice_id is not None: filter_dict['slc_guid'] = slice_id if slice_name is not None: filter_dict['slc_name'] = slice_name if project_id is not None: filter_dict['project_id'] = project_id if email is not None: filter_dict['email'] = email if oidc_sub is not None: filter_dict['oidc_claim_sub'] = oidc_sub return filter_dict def get_slices(self, *, slice_id: str = None, slice_name: str = None, project_id: str = None, email: str = None, state: list[int] = None, oidc_sub: str = None, slc_type: list[int] = None) -> list: """ Get slices for an actor @param slice_id actor id @param slice_name slice name @param project_id project id @param email email @param state state @param oidc_sub oidc claim sub @param slc_type slice type @return list of slices """ result = [] try: filter_dict = self.create_slices_filter(slice_id=slice_id, slice_name=slice_name, project_id=project_id, email=email, oidc_sub=oidc_sub) with session_scope(self.db_engine) as session: rows = session.query(Slices).filter_by(**filter_dict) if state is not None: rows = rows.filter(Slices.slc_state.in_(state)) if slc_type is not None: rows = rows.filter(Slices.slc_type.in_(slc_type)) for row in rows.all(): slice_obj = self.generate_slice_dict_from_row(row) result.append(slice_obj.copy()) slice_obj.clear() except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e return result def get_slice_by_id(self, *, slc_id: int) -> dict: """ Get slice by id for an actor @param slc_id slice id @return slice dictionary """ result = {} try: with session_scope(self.db_engine) as session: slc_obj = 
session.query(Slices).filter(Slices.slc_id == slc_id).first() if slc_obj is not None: result = self.generate_slice_dict_from_row(slc_obj) else: raise DatabaseException(self.OBJECT_NOT_FOUND.format("Slice", slc_id)) except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e return result def add_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int, rsv_state: int, rsv_pending: int, rsv_joining: int, properties, lease_start: datetime = None, lease_end: datetime = None, rsv_graph_node_id: str = None, oidc_claim_sub: str = None, email: str = None, project_id: str = None): """ Add a reservation @param slc_guid slice guid @param rsv_resid reservation guid @param rsv_category category @param rsv_state state @param rsv_pending pending state @param rsv_joining join state @param properties pickled instance @param lease_start Lease start time @param lease_end Lease end time @param rsv_graph_node_id graph id @param oidc_claim_sub OIDC Sub claim @param email User email @param project_id Project id """ try: slc_id = self.get_slc_id_by_slice_id(slice_id=slc_guid) rsv_obj = Reservations(rsv_slc_id=slc_id, rsv_resid=rsv_resid, rsv_category=rsv_category, rsv_state=rsv_state, rsv_pending=rsv_pending, rsv_joining=rsv_joining, lease_start=lease_start, lease_end=lease_end, properties=properties, oidc_claim_sub=oidc_claim_sub, email=email, project_id=project_id) if rsv_graph_node_id is not None: rsv_obj.rsv_graph_node_id = rsv_graph_node_id with session_scope(self.db_engine) as session: session.add(rsv_obj) except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e def update_reservation(self, *, slc_guid: str, rsv_resid: str, rsv_category: int, rsv_state: int, rsv_pending: int, rsv_joining: int, properties, lease_start: datetime = None, lease_end: datetime = None, rsv_graph_node_id: str = None): """ Update a reservation @param slc_guid slice guid @param rsv_resid reservation guid @param rsv_category category 
@param rsv_state state @param rsv_pending pending state @param rsv_joining join state @param properties pickled instance @param lease_start Lease start time @param lease_end Lease end time @param rsv_graph_node_id graph id """ try: slc_id = self.get_slc_id_by_slice_id(slice_id=slc_guid) with session_scope(self.db_engine) as session: rsv_obj = session.query(Reservations).filter(Reservations.rsv_slc_id == slc_id).filter( Reservations.rsv_resid == rsv_resid).first() if rsv_obj is not None: rsv_obj.rsv_category = rsv_category rsv_obj.rsv_state = rsv_state rsv_obj.rsv_pending = rsv_pending rsv_obj.rsv_joining = rsv_joining rsv_obj.properties = properties rsv_obj.lease_end = lease_end rsv_obj.lease_start = lease_start if rsv_graph_node_id is not None: rsv_obj.rsv_graph_node_id = rsv_graph_node_id else: raise DatabaseException(self.OBJECT_NOT_FOUND.format("Reservation", rsv_resid)) except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e def remove_reservation(self, *, rsv_resid: str): """ Remove a reservation @param rsv_resid reservation guid """ try: with session_scope(self.db_engine) as session: session.query(Reservations).filter(Reservations.rsv_resid == rsv_resid).delete() except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e @staticmethod def generate_reservation_dict_from_row(row) -> dict: """ Generate a dictionary representing a reservation row read from database @param row row """ if row is None: return None rsv_obj = {'rsv_id': row.rsv_id, 'rsv_slc_id': row.rsv_slc_id, 'rsv_resid': row.rsv_resid, 'rsv_category': row.rsv_category, 'rsv_state': row.rsv_state, 'rsv_pending': row.rsv_pending, 'rsv_joining': row.rsv_joining, 'properties': row.properties, 'rsv_graph_node_id': row.rsv_graph_node_id} return rsv_obj def create_reservation_filter(self, *, slice_id: str = None, graph_node_id: str = None, project_id: str = None, email: str = None, oidc_sub: str = None, rid: str = None) -> dict: filter_dict = 
{} if slice_id is not None: slc_id = self.get_slc_id_by_slice_id(slice_id=slice_id) filter_dict['rsv_slc_id'] = slc_id if graph_node_id is not None: filter_dict['rsv_graph_node_id'] = graph_node_id if project_id is not None: filter_dict['project_id'] = project_id if email is not None: filter_dict['email'] = email if oidc_sub is not None: filter_dict['oidc_claim_sub'] = oidc_sub if rid is not None: filter_dict['rsv_resid'] = rid return filter_dict def get_reservations(self, *, slice_id: str = None, graph_node_id: str = None, project_id: str = None, email: str = None, oidc_sub: str = None, rid: str = None, state: list[int] = None, category: list[int] = None) -> list: """ Get Reservations for an actor @param slice_id slice id @param graph_node_id graph node id @param project_id project id @param email email @param oidc_sub oidc sub @param rid reservation id @param state reservation state @param category reservation category @return list of reservations """ result = [] try: filter_dict = self.create_reservation_filter(slice_id=slice_id, graph_node_id=graph_node_id, project_id=project_id, email=email, oidc_sub=oidc_sub, rid=rid) with session_scope(self.db_engine) as session: rows = session.query(Reservations).filter_by(**filter_dict) if state is not None: rows = rows.filter(Reservations.rsv_state.in_(state)) if category is not None: rows = rows.filter(Reservations.rsv_category.in_(category)) for row in rows.all(): rsv_obj = self.generate_reservation_dict_from_row(row) result.append(rsv_obj.copy()) rsv_obj.clear() except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e return result def get_reservations_by_rids(self, *, rsv_resid_list: list) -> list: """ Get Reservations for an actor by reservation ids @param act_id actor id @param rsv_resid_list reservation guid list @return list of reservations """ result = [] try: with session_scope(self.db_engine) as session: for row in 
session.query(Reservations).filter(Reservations.rsv_resid.in_(rsv_resid_list)).all(): rsv_obj = self.generate_reservation_dict_from_row(row) result.append(rsv_obj.copy()) rsv_obj.clear() except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e return result def add_proxy(self, *, act_id: int, prx_name: str, properties): """ Add a proxy @param act_id actor id @param prx_name proxy name @param properties pickled instance """ try: prx_obj = Proxies(prx_act_id=act_id, prx_name=prx_name, properties=properties) with session_scope(self.db_engine) as session: session.add(prx_obj) except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e def update_proxy(self, *, act_id: int, prx_name: str, properties): """ Update a proxy @param act_id actor id @param prx_name proxy name @param properties pickled instance """ try: with session_scope(self.db_engine) as session: prx_obj = session.query(Proxies).filter(Proxies.prx_act_id == act_id).filter( Proxies.prx_name == prx_name).first() if prx_obj is not None: prx_obj.properties = properties else: raise DatabaseException(self.OBJECT_NOT_FOUND.format("Proxy", prx_name)) except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e def remove_proxy(self, *, act_id: int, prx_name: str): """ Remove a proxy @param act_id actor id @param prx_name proxy name """ try: with session_scope(self.db_engine) as session: session.query(Proxies).filter(Proxies.prx_act_id == act_id).filter( Proxies.prx_name == prx_name).delete() except Exception as e: self.logger.error(Constants.EXCEPTION_OCCURRED.format(e)) raise e @staticmethod def generate_proxy_dict_from_row(row) -> dict: """ Generate dictionary representing a proxy row read from database """ if row is None: return None prx_obj = {'prx_id': row.prx_id, 'prx_act_id': row.prx_act_id, 'prx_name': row.prx_name, 'properties': row.properties}
# -*- coding: utf-8 -*-
"""
    flaskbb.cli.commands
    ~~~~~~~~~~~~~~~~~~~~

    This module contains the main commands.

    :copyright: (c) 2016 by the FlaskBB Team.
    :license: BSD, see LICENSE for more details.
"""
import sys
import os
import time
import requests
import binascii
from datetime import datetime

import click
from werkzeug.utils import import_string, ImportStringError
from jinja2 import Environment, FileSystemLoader
from flask import current_app
from flask.cli import FlaskGroup, ScriptInfo, with_appcontext
from sqlalchemy_utils.functions import (database_exists, create_database,
                                        drop_database)
from flask_alembic import alembic_click

from flaskbb import create_app
from flaskbb._compat import iteritems
from flaskbb.extensions import db, whooshee, celery, alembic
from flaskbb.cli.utils import (prompt_save_user, prompt_config_path,
                               write_config, get_version, FlaskBBCLIError,
                               EmailType)
from flaskbb.utils.populate import (create_test_data, create_welcome_forum,
                                    create_default_groups,
                                    create_default_settings, insert_bulk_data,
                                    update_settings_from_fixture)
from flaskbb.utils.translations import compile_translations


def make_app(script_info):
    """App factory used by the CLI group.

    Resolves the configuration in this order: an explicit ``--config``
    path or dotted module, then a ``flaskbb.cfg`` in flaskbb's root
    folder, and finally the built-in default config.
    """
    config_file = getattr(script_info, "config_file")
    if config_file is not None:
        # check if config file exists
        if os.path.exists(os.path.abspath(config_file)):
            click.secho("[+] Using config from: {}".format(
                        os.path.abspath(config_file)), fg="cyan")
        # config file doesn't exist, maybe it's a module
        else:
            try:
                import_string(config_file)
                click.secho("[+] Using config from: {}".format(config_file),
                            fg="cyan")
            except ImportStringError:
                click.secho("[~] Config '{}' doesn't exist. "
                            "Using default config.".format(config_file),
                            fg="red")
                config_file = None
    else:
        # lets look for a config file in flaskbb's root folder
        # TODO: are there any other places we should look for the config?
        #       Like somewhere in /etc/?

        # this walks back to flaskbb/ from flaskbb/flaskbb/cli/main.py
        # can't use current_app.root_path because it's not (yet) available
        config_dir = os.path.dirname(
            os.path.dirname(os.path.dirname(__file__))
        )
        config_file = os.path.join(config_dir, "flaskbb.cfg")
        if os.path.exists(config_file):
            click.secho("[+] Found config file 'flaskbb.cfg' in {}"
                        .format(config_dir), fg="yellow")
            click.secho("[+] Using config from: {}".format(config_file),
                        fg="cyan")
        else:
            config_file = None
            click.secho("[~] Using default config.", fg="yellow")

    return create_app(config_file)


def set_config(ctx, param, value):
    """This will pass the config file to the create_app function."""
    ctx.ensure_object(ScriptInfo).config_file = value


@click.group(cls=FlaskGroup, create_app=make_app, add_version_option=False)
@click.option("--config", expose_value=False, callback=set_config,
              required=False, is_flag=False, is_eager=True, metavar="CONFIG",
              help="Specify the config to use in dotted module notation "
                   "e.g. flaskbb.configs.default.DefaultConfig")
@click.option("--version", expose_value=False, callback=get_version,
              is_flag=True, is_eager=True, help="Show the FlaskBB version.")
def flaskbb():
    """This is the commandline interface for flaskbb."""
    pass


flaskbb.add_command(alembic_click, "db")


@flaskbb.command()
@click.option("--welcome", "-w", default=True, is_flag=True,
              help="Disable the welcome forum.")
@click.option("--force", "-f", default=False, is_flag=True,
              help="Doesn't ask for confirmation.")
@click.option("--username", "-u", help="The username of the user.")
@click.option("--email", "-e", type=EmailType(),
              help="The email address of the user.")
# BUGFIX: the short option was mangled to '<PASSWORD>"' (unbalanced quote,
# syntax error); restored to "-p" in line with -u/-e/-g above.
@click.option("--password", "-p", help="The password of the user.")
@click.option("--group", "-g", help="The group of the user.",
              type=click.Choice(["admin", "super_mod", "mod", "member"]))
def install(welcome, force, username, email, password, group):
    """Installs flaskbb. If no arguments are used, an interactive setup
    will be run.
    """
    click.secho("[+] Installing FlaskBB...", fg="cyan")
    if database_exists(db.engine.url):
        if force or click.confirm(click.style(
            "Existing database found. Do you want to delete the old one and "
            "create a new one?", fg="magenta")
        ):
            drop_database(db.engine.url)
        else:
            sys.exit(0)
    create_database(db.engine.url)
    alembic.upgrade()

    click.secho("[+] Creating default settings...", fg="cyan")
    create_default_groups()
    create_default_settings()

    click.secho("[+] Creating admin user...", fg="cyan")
    prompt_save_user(username, email, password, group)

    if welcome:
        click.secho("[+] Creating welcome forum...", fg="cyan")
        create_welcome_forum()

    click.secho("[+] Compiling translations...", fg="cyan")
    compile_translations()

    click.secho("[+] FlaskBB has been successfully installed!",
                fg="green", bold=True)


@flaskbb.command()
@click.option("--test-data", "-t", default=False, is_flag=True,
              help="Adds some test data.")
@click.option("--bulk-data", "-b", default=False, is_flag=True,
              help="Adds a lot of data.")
@click.option("--posts", default=100,
              help="Number of posts to create in each topic (default: 100).")
@click.option("--topics", default=100,
              help="Number of topics to create (default: 100).")
@click.option("--force", "-f", is_flag=True,
              help="Will delete the database before populating it.")
@click.option("--initdb", "-i", is_flag=True,
              help="Initializes the database before populating it.")
def populate(bulk_data, test_data, posts, topics, force, initdb):
    """Creates the necessary tables and groups for FlaskBB."""
    if force:
        click.secho("[+] Recreating database...", fg="cyan")
        drop_database(db.engine.url)

        # do not initialize the db if -i is passed
        if not initdb:
            alembic.upgrade()

    if initdb:
        click.secho("[+] Initializing database...", fg="cyan")
        alembic.upgrade()

    if test_data:
        click.secho("[+] Adding some test data...", fg="cyan")
        create_test_data()

    if bulk_data:
        timer = time.time()
        topic_count, post_count = insert_bulk_data(int(topics), int(posts))
        elapsed = time.time() - timer
        click.secho("[+] It took {} seconds to create {} topics and {} posts"
                    .format(elapsed, topic_count, post_count), fg="cyan")

    # this just makes the most sense for the command name; use -i to
    # init the db as well
    if not test_data:
        click.secho("[+] Populating the database with some defaults...",
                    fg="cyan")
        create_default_groups()
        create_default_settings()


@flaskbb.command()
def reindex():
    """Reindexes the search index."""
    click.secho("[+] Reindexing search index...", fg="cyan")
    whooshee.reindex()


@flaskbb.command()
@click.option("all_latest", "--all", "-a", default=False, is_flag=True,
              help="Upgrades migrations AND fixtures to the latest version.")
@click.option("--fixture/", "-f", default=None,
              help="The fixture which should be upgraded or installed.")
@click.option("--force", default=False, is_flag=True,
              help="Forcefully upgrades the fixtures.")
def upgrade(all_latest, fixture, force):
    """Updates the migrations and fixtures."""
    if all_latest:
        click.secho("[+] Upgrading migrations to the latest version...",
                    fg="cyan")
        alembic.upgrade()

    if fixture or all_latest:
        try:
            settings = import_string(
                "flaskbb.fixtures.{}".format(fixture)
            )
            settings = settings.fixture
        except ImportError:
            raise FlaskBBCLIError("{} fixture is not available"
                                  .format(fixture), fg="red")

        click.secho("[+] Updating fixtures...", fg="cyan")
        count = update_settings_from_fixture(
            fixture=settings, overwrite_group=force, overwrite_setting=force
        )
        click.secho("[+] {} groups and {} settings updated.".format(
            len(count.keys()), len(count.values())), fg="green"
        )


@flaskbb.command("download-emojis")
@with_appcontext
def download_emoji():
    """Downloads emojis from emoji-cheat-sheet.com.
    This command is probably going to be removed in future version.
    """
    click.secho("[+] Downloading emojis...", fg="cyan")
    HOSTNAME = "https://api.github.com"
    REPO = "/repos/arvida/emoji-cheat-sheet.com/contents/public/graphics/emojis"  # noqa
    FULL_URL = "{}{}".format(HOSTNAME, REPO)
    DOWNLOAD_PATH = os.path.join(current_app.static_folder, "emoji")
    response = requests.get(FULL_URL)

    cached_count = 0
    count = 0
    for image in response.json():
        if not os.path.exists(os.path.abspath(DOWNLOAD_PATH)):
            raise FlaskBBCLIError(
                "{} does not exist.".format(os.path.abspath(DOWNLOAD_PATH)),
                fg="red")

        full_path = os.path.join(DOWNLOAD_PATH, image["name"])
        if not os.path.exists(full_path):
            count += 1
            # use a context manager so the handle is closed even on errors
            with open(full_path, 'wb') as f:
                f.write(requests.get(image["download_url"]).content)
            # progress message every 50 downloads
            if count == cached_count + 50:
                cached_count = count
                click.secho("[+] {} out of {} Emojis downloaded...".format(
                            cached_count, len(response.json())), fg="cyan")

    click.secho("[+] Finished downloading {} Emojis.".format(count),
                fg="green")


@flaskbb.command("celery",
                 context_settings=dict(ignore_unknown_options=True,))
@click.argument('celery_args', nargs=-1, type=click.UNPROCESSED)
@click.option("show_help", "--help", "-h", is_flag=True,
              help="Shows this message and exits")
@click.option("show_celery_help", "--help-celery", is_flag=True,
              help="Shows the celery help message")
@click.pass_context
@with_appcontext
def start_celery(ctx, show_help, show_celery_help, celery_args):
    """Preconfigured wrapper around the 'celery' command.

    Additional CELERY_ARGS arguments are passed to celery."""
    if show_help:
        click.echo(ctx.get_help())
        sys.exit(0)

    if show_celery_help:
        click.echo(celery.start(argv=["--help"]))
        sys.exit(0)

    default_args = ['celery']
    default_args = default_args + list(celery_args)
    celery.start(argv=default_args)


@flaskbb.command()
@click.option("--server", "-s", default="gunicorn",
              type=click.Choice(["gunicorn", "gevent"]),
              help="The WSGI Server to run FlaskBB on.")
@click.option("--host", "-h", default="127.0.0.1",
              help="The interface to bind FlaskBB to.")
@click.option("--port", "-p", default="8000", type=int,
              help="The port to bind FlaskBB to.")
@click.option("--workers", "-w", default=4,
              help="The number of worker processes for handling requests.")
@click.option("--daemon", "-d", default=False, is_flag=True,
              help="Starts gunicorn as daemon.")
@click.option("--config", "-c",
              help="The configuration file to use for FlaskBB.")
def start(server, host, port, workers, config, daemon):
    """Starts a production ready wsgi server.
    TODO: Figure out a way how to forward additional args to gunicorn
          without causing any errors.
    """
    if server == "gunicorn":
        try:
            from gunicorn.app.base import Application

            class FlaskBBApplication(Application):
                def __init__(self, app, options=None):
                    self.options = options or {}
                    self.application = app
                    super(FlaskBBApplication, self).__init__()

                def load_config(self):
                    # only forward options gunicorn actually understands
                    config = dict([
                        (key, value) for key, value in iteritems(self.options)
                        if key in self.cfg.settings and value is not None
                    ])
                    for key, value in iteritems(config):
                        self.cfg.set(key.lower(), value)

                def load(self):
                    return self.application

            options = {
                "bind": "{}:{}".format(host, port),
                "workers": workers,
                "daemon": daemon,
            }
            FlaskBBApplication(create_app(config=config), options).run()
        except ImportError:
            raise FlaskBBCLIError("Cannot import gunicorn. "
                                  "Make sure it is installed.", fg="red")

    elif server == "gevent":
        try:
            from gevent import __version__
            from gevent.pywsgi import WSGIServer

            click.secho("* Starting gevent {}".format(__version__))
            click.secho("* Listening on http://{}:{}/".format(host, port))
            http_server = WSGIServer((host, port), create_app(config=config))
            http_server.serve_forever()
        except ImportError:
            raise FlaskBBCLIError("Cannot import gevent. "
                                  "Make sure it is installed.", fg="red")


@flaskbb.command("shell", short_help="Runs a shell in the app context.")
@with_appcontext
def shell_command():
    """Runs an interactive Python shell in the context of a given
    Flask application.  The application will populate the default
    namespace of this shell according to it"s configuration.
    This is useful for executing small snippets of management code
    without having to manually configuring the application.

    This code snippet is taken from Flask"s cli module and modified to
    run IPython and falls back to the normal shell if IPython is not
    available.
    """
    import code

    banner = "Python %s on %s\nInstance Path: %s" % (
        sys.version,
        sys.platform,
        current_app.instance_path,
    )
    ctx = {"db": db}

    # Support the regular Python interpreter startup script if someone
    # is using it.
    startup = os.environ.get("PYTHONSTARTUP")
    if startup and os.path.isfile(startup):
        with open(startup, "r") as f:
            eval(compile(f.read(), startup, "exec"), ctx)

    ctx.update(current_app.make_shell_context())

    try:
        import IPython
        IPython.embed(banner1=banner, user_ns=ctx)
    except ImportError:
        code.interact(banner=banner, local=ctx)


@flaskbb.command("urls", short_help="Show routes for the app.")
@click.option("--route", "-r", "order_by", flag_value="rule", default=True,
              help="Order by route")
@click.option("--endpoint", "-e", "order_by", flag_value="endpoint",
              help="Order by endpoint")
@click.option("--methods", "-m", "order_by", flag_value="methods",
              help="Order by methods")
@with_appcontext
def list_urls(order_by):
    """Lists all available routes."""
    from flask import current_app

    rules = sorted(
        current_app.url_map.iter_rules(),
        key=lambda rule: getattr(rule, order_by)
    )

    # column widths: widest value per column, but never narrower than
    # the column header itself
    max_rule_len = max(len(rule.rule) for rule in rules)
    max_rule_len = max(max_rule_len, len("Route"))

    max_endpoint_len = max(len(rule.endpoint) for rule in rules)
    max_endpoint_len = max(max_endpoint_len, len("Endpoint"))

    max_method_len = max(len(", ".join(rule.methods)) for rule in rules)
    max_method_len = max(max_method_len, len("Methods"))

    column_header_len = max_rule_len + max_endpoint_len + max_method_len + 4
    column_template = "{:<%s} {:<%s} {:<%s}"
    # NOTE(review): the visible chunk ends here mid-function; the
    # remainder of list_urls is not part of this view.
216, 1875, 1885, 217, 1876, 1886, 1895, 218, 59, 60, 61, 62, 15, 1613, 2471, 1679, 27, 1623, 3328, 3548, 1689, 2482, 3768, 2603, 1745, 1755, 39, 1632, 3337, 3557, 1698, 3382, 6186, 3602, 3777, 3822, 1764, 2493, 3988, 2614, 3997, 4042, 2724, 1811, 1821, 1830, 51, 1640, 3345, 3565, 1706, 3390, 6194, 3610, 3785, 3830, 1772, 3426, 6230, 3646, 6350, 1601, 3866, 4005, 4050, 4086, 1838, 2504, 4208, 2625, 4217, 4262, 2735, 4225, 4270, 4306, 2834, 1877, 1887, 1896, 1904, 63, 171, 2512, 2633, 183, 2513, 4426, 2634, 2743, 2744, 195, 2514, 4427, 2635, 4436, 4481, 2745, 2842, 2843, 2844, 207, 2515, 4428, 2636, 4437, 4482, 2746, 4445, 4490, 4526, 2845, 2930, 2931, 2932, 2933, 219, 227, 1940, 228, 1941, 1951, 229, 1942, 1952, 1961, 230, 1943, 1953, 1962, 1970, 231, 71, 72, 73, 74, 75, 16, 1614, 2472, 1680, 28, 1624, 3329, 3549, 1690, 2483, 3769, 2604, 1746, 1756, 40, 1633, 3338, 3558, 1699, 3383, 6187, 3603, 3778, 3823, 1765, 2494, 3989, 2615, 3998, 4043, 2725, 1812, 1822, 1831, 52, 1641, 3346, 3566, 1707, 3391, 6195, 3611, 3786, 3831, 1773, 3427, 6231, 3647, 6351, 6679, 3867, 4006, 4051, 4087, 1839, 2505, 4209, 2626, 4218, 4263, 2736, 4226, 4271, 4307, 2835, 1878, 1888, 1897, 1905, 64, 1648, 3353, 3573, 1714, 3398, 6202, 3618, 3793, 3838, 1780, 3434, 6238, 3654, 6358, 6686, 3874, 4013, 4058, 4094, 1846, 3462, 6266, 3682, 6386, 6714, 3902, 6470, 6798, 1602, 4122, 4233, 4278, 4314, 4342, 1912, 2516, 4429, 2637, 4438, 4483, 2747, 4446, 4491, 4527, 2846, 4453, 4498, 4534, 4562, 2934, 1944, 1954, 1963, 1971, 1978, 76, 172, 2523, 2644, 184, 2524, 4646, 2645, 2754, 2755, 196, 2525, 4647, 2646, 4656, 4701, 2756, 2853, 2854, 2855, 208, 2526, 4648, 2647, 4657, 4702, 2757, 4665, 4710, 4746, 2856, 2941, 2942, 2943, 2944, 220, 2527, 4649, 2648, 4658, 4703, 2758, 4666, 4711, 4747, 2857, 4673, 4718, 4754, 4782, 2945, 3018, 3019, 3020, 3021, 3022, 232, 239, 2006, 240, 2007, 2017, 241, 2008, 2018, 2027, 242, 2009, 2019, 2028, 2036, 243, 2010, 2020, 2029, 2037, 2044, 244, 83, 84, 85, 86, 87, 88, 
17, 1615, 2473, 1681, 29, 1625, 3330, 3550, 1691, 2484, 3770, 2605, 1747, 1757, 41, 1634, 3339, 3559, 1700, 3384, 6188, 3604, 3779, 3824, 1766, 2495, 3990, 2616, 3999, 4044, 2726, 1813, 1823, 1832, 53, 1642, 3347, 3567, 1708, 3392, 6196, 3612, 3787, 3832, 1774, 3428, 6232, 3648, 6352, 6680, 3868, 4007, 4052, 4088, 1840, 2506, 4210, 2627, 4219, 4264, 2737, 4227, 4272, 4308, 2836, 1879, 1889, 1898, 1906, 65, 1649, 3354, 3574, 1715, 3399, 6203, 3619, 3794, 3839, 1781, 3435, 6239, 3655, 6359, 6687, 3875, 4014, 4059, 4095, 1847, 3463, 6267, 3683, 6387, 6715, 3903, 6471, 6799, 7008, 4123, 4234, 4279, 4315, 4343, 1913, 2517, 4430, 2638, 4439, 4484, 2748, 4447, 4492, 4528, 2847, 4454, 4499, 4535, 4563, 2935, 1945, 1955, 1964, 1972, 1979, 77, 1655, 3360, 3580, 1721, 3405, 6209, 3625, 3800, 3845, 1787, 3441, 6245, 3661, 6365, 6693, 3881, 4020, 4065, 4101, 1853, 3469, 6273, 3689, 6393, 6721, 3909, 6477, 6805, 7014, 4129, 4240, 4285, 4321, 4349, 1919, 3490, 6294, 3710, 6414, 6742, 3930, 6498, 6826, 7035, 4150, 6554, 6882, 7091, 1603, 4370, 4460, 4505, 4541, 4569, 4590, 1985, 2528, 4650, 2649, 4659, 4704, 2759, 4667, 4712, 4748, 2858, 4674, 4719, 4755, 4783, 2946, 4680, 4725, 4761, 4789, 4810, 3023, 2011, 2021, 2030, 2038, 2045, 2051, 89, 173, 2534, 2655, 185, 2535, 4866, 2656, 2765, 2766, 197, 2536, 4867, 2657, 4876, 4921, 2767, 2864, 2865, 2866, 209, 2537, 4868, 2658, 4877, 4922, 2768, 4885, 4930, 4966, 2867, 2952, 2953, 2954, 2955, 221, 2538, 4869, 2659, 4878, 4923, 2769, 4886, 4931, 4967, 2868, 4893, 4938, 4974, 5002, 2956, 3029, 3030, 3031, 3032, 3033, 233, 2539, 4870, 2660, 4879, 4924, 2770, 4887, 4932, 4968, 2869, 4894, 4939, 4975, 5003, 2957, 4900, 4945, 4981, 5009, 5030, 3034, 3095, 3096, 3097, 3098, 3099, 3100, 245, 251, 2072, 252, 2073, 2083, 253, 2074, 2084, 2093, 254, 2075, 2085, 2094, 2102, 255, 2076, 2086, 2095, 2103, 2110, 256, 2077, 2087, 2096, 2104, 2111, 2117, 257, 95, 96, 97, 98, 99, 100, 101, 18, 1616, 2474, 1682, 30, 1626, 3331, 3551, 1692, 2485, 3771, 
2606, 1748, 1758, 42, 1635, 3340, 3560, 1701, 3385, 6189, 3605, 3780, 3825, 1767, 2496, 3991, 2617, 4000, 4045, 2727, 1814, 1824, 1833, 54, 1643, 3348, 3568, 1709, 3393, 6197, 3613, 3788, 3833, 1775, 3429, 6233, 3649, 6353, 6681, 3869, 4008, 4053, 4089, 1841, 2507, 4211, 2628, 4220, 4265, 2738, 4228, 4273, 4309, 2837, 1880, 1890, 1899, 1907, 66, 1650, 3355, 3575, 1716, 3400, 6204, 3620, 3795, 3840, 1782, 3436, 6240, 3656, 6360, 6688, 3876, 4015, 4060, 4096, 1848, 3464, 6268, 3684, 6388, 6716, 3904, 6472, 6800, 7009, 4124, 4235, 4280, 4316, 4344, 1914, 2518, 4431, 2639, 4440, 4485, 2749, 4448, 4493, 4529, 2848, 4455, 4500, 4536, 4564, 2936, 1946, 1956, 1965, 1973, 1980, 78, 1656, 3361, 3581, 1722, 3406, 6210, 3626, 3801, 3846, 1788, 3442, 6246, 3662, 6366, 6694, 3882, 4021, 4066, 4102, 1854, 3470, 6274, 3690, 6394, 6722, 3910, 6478, 6806, 7015, 4130, 4241, 4286, 4322, 4350, 1920, 3491, 6295, 3711, 6415, 6743, 3931, 6499, 6827, 7036, 4151, 6555, 6883, 7092, 7217, 4371, 4461, 4506, 4542, 4570, 4591, 1986, 2529, 4651, 2650, 4660, 4705, 2760, 4668, 4713, 4749, 2859, 4675, 4720, 4756, 4784, 2947, 4681, 4726, 4762, 4790, 4811, 3024, 2012, 2022, 2031, 2039, 2046, 2052, 90, 1661, 3366, 3586, 1727, 3411, 6215, 3631, 3806, 3851, 1793, 3447, 6251, 3667, 6371, 6699, 3887, 4026, 4071, 4107, 1859, 3475, 6279, 3695, 6399, 6727, 3915, 6483, 6811, 7020, 4135, 4246, 4291, 4327, 4355, 1925, 3496, 6300, 3716, 6420, 6748, 3936, 6504, 6832, 7041, 4156, 6560, 6888, 7097, 7222, 4376, 4466, 4511, 4547, 4575, 4596, 1991, 3511, 6315, 3731, 6435, 6763, 3951, 6519, 6847, 7056, 4171, 6575, 6903, 7112, 7237, 4391, 1605, 1605, 1605, 1605, 1604, 1605, 4686, 4731, 4767, 4795, 4816, 1605, 2057, 2540, 4871, 2661, 4880, 4925, 2771, 4888, 4933, 4969, 2870, 4895, 4940, 4976, 5004, 2958, 4901, 4946, 4982, 5010, 5031, 3035, 4906, 4951, 4987, 5015, 5036, 1605, 3101, 2078, 2088, 2097, 2105, 2112, 2118, 2123, 102, 174, 2545, 2666, 186, 2546, 5086, 2667, 2776, 2777, 198, 2547, 5087, 2668, 5096, 5141, 2778, 
2875, 2876, 2877, 210, 2548, 5088, 2669, 5097, 5142, 2779, 5105, 5150, 5186, 2878, 2963, 2964, 2965, 2966, 222, 2549, 5089, 2670, 5098, 5143, 2780, 5106, 5151, 5187, 2879, 5113, 5158, 5194, 5222, 2967, 3040, 3041, 3042, 3043, 3044, 234, 2550, 5090, 2671, 5099, 5144, 2781, 5107, 5152, 5188, 2880, 5114, 5159, 5195, 5223, 2968, 5120, 5165, 5201, 5229, 5250, 3045, 3106, 3107, 3108, 3109, 3110, 3111, 246, 2551, 5091, 2672, 5100, 5145, 2782, 5108, 5153, 5189, 2881, 5115, 5160, 5196, 5224, 2969, 5121, 5166, 5202, 5230, 5251, 3046, 5126, 5171, 5207, 5235, 5256, 1605, 3112, 3161, 3162, 3163, 3164, 3165, 3166, 3167, 258, 263, 2138, 264, 2139, 2149, 265, 2140, 2150, 2159, 266, 2141, 2151, 2160, 2168, 267, 2142, 2152, 2161, 2169, 2176, 268, 2143, 2153, 2162, 2170, 2177, 2183, 269, 2144, 2154, 2163, 2171, 2178, 2184, 2189, 270, 107, 108, 109, 110, 111, 112, 113, 114, 19, 175, 2475, 187, 31, 175, 2556, 2677, 187, 2486, 2787, 2607, 199, 199, 43, 175, 2556, 2677, 187, 2557, 5306, 2678, 2787, 2788, 199, 2497, 2886, 2618, 2886, 2887, 2728, 211, 211, 211, 55, 175, 2556, 2677, 187, 2557, 5306, 2678, 2787, 2788, 199, 2558, 5307, 2679, 5316, 5361, 2789, 2886, 2887, 2888, 211, 2508, 2974, 2629, 2974, 2975, 2739, 2974, 2975, 2976, 2838, 223, 223, 223, 223, 67, 175, 2556, 2677, 187, 2557, 5306, 2678, 2787, 2788, 199, 2558, 5307, 2679, 5316, 5361, 2789, 2886, 2887, 2888, 211, 2559, 5308, 2680, 5317, 5362, 2790, 5325, 5370, 5406, 2889, 2974, 2975, 2976, 2977, 223, 2519, 3051, 2640, 3051, 3052, 2750,
<reponame>aixiwang/iot_data_svr # -*- coding=gbk -*- #----------------------------------------------------------- # Copyright (c) 2015 by <NAME> <<EMAIL>> #----------------------------------------------------------- import random, time import os import sys import thread, threading, subprocess #import mosquitto import leveldb RPC_VER = '0.4' print 'RPC_VER:',RPC_VER #import memcacheq #q_rt_data = memcacheq.connect('q_rt_data') #------------------------------ # set_kv #------------------------------ def set_kv(db,k,v): db.Put(k,v) #------------------------------ # get_kv #------------------------------ def get_kv(db,k): #print 'k:',k try: v = db.Get(k) return v except: return '' #------------------------------ # rm_kv #------------------------------ def rm_kv(db,k): db.Delete(k) return #------------------------------ # get_search_keys_old #------------------------------ def get_search_keys_old(db,tag,name,t1,t2): if name != '*': prefix = tag + ':' + name else: prefix = tag + ':' datas = [] f_t1 = float(t1) f_t2 = float(t2) try: if name != '*': # to be optimized kv = list(db.RangeIter(key_from = None, key_to = None)) else: kv = list(db.RangeIter(key_from = None, key_to = None)) for record in kv: try: #print record if record[1][0:len(prefix)] == prefix: f_s = record[1].split(':') #print 'f_s:',f_s t = float(f_s[2]) if t > (f_t2): break if t >= f_t1 and t <= f_t2: #print 'append key:',record[1] datas.append(record[1]) except: pass return {'code' : 0,'data':datas} except Exception as e: print '@' return {'code' : -1,'data':'exception:' + str(e)} #------------------------------ # get_search_keys #------------------------------ def get_search_keys(db,tag,name,t1,t2): if name != '*': prefix = tag + ':' + name else: prefix = tag + ':' datas = [] f_t1 = float(t1) f_t2 = float(t2) k_t1 = int(f_t1)/60 k_t2 = int(f_t2)/60 try: for k in range(k_t1,k_t2 + 1): keys_value = get_kv(db,str(k)) #print 'keys_value:',keys_value if keys_value != '': keys_array = keys_value.split('#') for 
key_str in keys_array: try: #print 'key_str:',key_str if key_str[0:len(prefix)] == prefix: f_s = key_str.split(':') #print 'f_s:',f_s t = float(f_s[2]) if t > (f_t2): break if t >= f_t1 and t <= f_t2: #print 'append key:',record[1] datas.append(key_str) except Exception as e: print 'search keys exception:', str(e) pass #print 'search_keys:',datas return {'code' : 0,'data':datas} except Exception as e: print 'get_search_keys exception: ' + str(e) return {'code' : -1,'data':'exception:' + str(e)} #------------------------------ # delete_search_key #------------------------------ def delete_search_key(db,key): rm_kv(db,key) #------------------------------ # opendb #------------------------------ def opendb(dbname): db = leveldb.LevelDB(dbname) return db class RpcHandlerNew: def __init__(self,auth_key,opendb): self.rpc_auth_key = auth_key self.key = '' self.count = 0 self.opendb = opendb self.userdb = None pass def shutdown(self): try: print 'rpc_handler shutdown' self.db.Close() self.index_db.Close() self.stats_db.Close() self.index_stats_db.Close() except: print 'rpc_handler shutdown exception' pass #------------------------------ # handle_json #------------------------------ def handle_json(self, request): #print 'request:',request try: #if 1: key = request["key"] if self.userdb == None: self.user_db = self.opendb('./db/user.db') self.sensor_key = self.user_db.Get(key) #print 'sensor_key:',sensor_key if key != self.rpc_auth_key and self.sensor_key != 'sensor': print 'key:',key time.sleep(5) return {'code' : -1, 'data' : 'invalid key'} if key != self.rpc_auth_key: if key.find(':') > 0: key = key.split(':')[0] if self.key != key: self.key = key self.dbname = './db/' + key + '.db' self.index_dbname = './db/' + key + '.idx.db' self.stats_dbname = './db/' + key + '-stats' + '.db' self.index_stats_dbname = './db/' + key + '-stats' + '.idx.db' #print 'init level db start, key=',self.key self.db = self.opendb(self.dbname) self.index_db = self.opendb(self.index_dbname) 
self.stats_db = self.opendb(self.stats_dbname) self.index_stats_db = self.opendb(self.index_stats_dbname) #sensor_key = get_kv('./db/user.db',key) #print 'init level db end' cmd = request["cmd"] if (cmd.find('bsddb_') == 0): cmd = cmd[6:] if cmd == 'get_rpc_ver': return self.handle_get_rpc_ver(request) if cmd == 'echo': return self.handle_echo(request) if cmd == 'setfile': return self.handle_setfile(request) if cmd == 'getfile': return self.handle_getfile(request) elif cmd == 'set': return self.handle_set(request) elif cmd == 'get': return self.handle_get(request) elif cmd == 'delete': return self.handle_delete(request) elif cmd == 'set_ts_data': return self.handle_set_ts_data(request) elif cmd == 'get_ts_datas': return self.handle_get_ts_datas(request) elif cmd == 'get_ts_keys': return self.handle_get_ts_keys(request) elif cmd == 'set_stats_data': return self.handle_set_stats_data(request) elif cmd == 'delete_stats': return self.handle_delete_stats(request) elif cmd == 'get_stats_datas': return self.handle_get_stats_datas(request) elif cmd == 'get_stats_keys': return self.handle_get_stats_keys(request) elif cmd == 'mqtt_pub': return self.handle_mqtt_pub(request) else: return {'code' : -1, 'data' : 'unkonwn command'} except Exception as e: print 'rpc_handler exception:',str(e) return {'code' : -1,'data':str(e)} #------------------------------ # handle_get_rpc_ver #------------------------------ def handle_get_rpc_ver(self, request): return {'code' : 0, 'data' : RPC_VER} #------------------------------ # handle_echo #------------------------------ def handle_echo(self, request): data = request['in'] + '-echo' return {'code' : 0, 'data' : data} #------------------------------ # handle_setfile #------------------------------ def handle_setfile(self, request): d = './files/' + self.key + '/' if os.path.exists(d) == False: os.system('mkdir ' + d) fname = d + request['filename'] f_content = request['content'].decode('hex') f = open(fname,'wb') f.write(f_content) 
f.close() return {'code' : 0, 'data':None} #------------------------------ # handle_getfile #------------------------------ def handle_getfile(self, request): d = './files/' + self.key + '/' if os.path.exists(d) == False: os.system('mkdir ' + d) fname = d + request['filename'] f = open(fname,'rb') f_content = f.read().encode('hex') f.close() return {'code' : 0,'data': f_content} #------------------------------ # handle_set #------------------------------ def handle_set(self, request): k = request['k'].encode('utf8') v = request['v'].encode('utf8') set_kv(self.db,k,v) return {'code' : 0,'data':None} #------------------------------ # handle_get #------------------------------ def handle_get(self, request): k = request['k'].encode('utf8') v = get_kv(self.db,k) return {'code' : 0,'data':{'k':k,'v':v}} #------------------------------ # handle_delete #------------------------------ def handle_delete(self, request): k = request['k'].encode('utf8') rm_kv(self.db,k) return {'code' : 0,'data':None} #------------------------------ # handle_set_ts_data #------------------------------ def handle_set_ts_data(self, request): tag = request['tag'] name = request['name'] v = request['v'] if request.has_key('t'): t_str = request['t'] else: #self.count = self.count + 1 #if self.count > t_str = str(time.time()) #idx = t_str.find('.') #if idx > 0 and (len(s_str)-idx) == 3: # t_str += '0' k = str(tag) + ':' + str(name) + ':' + t_str s = name + '\n' + t_str + '\n' + v #if (tag == 'data'): # q_rt_data.add(s) set_kv(self.db,k,v) index_key = str(int(float(t_str))/60) index_value = get_kv(self.index_db,index_key) if index_value == '': index_value = k else: index_value += '#' + k #print 'save index_db:',index_key,index_value set_kv(self.index_db,index_key,index_value) set_kv(self.db,k,v) return {'code' : 0,'data':None} #------------------------------ # handle_get_ts_datas #------------------------------ def handle_get_ts_datas(self, request): t1 = float(request['t1']) t2 = float(request['t2']) 
tag = request['tag'] name = request['name'] keys = get_search_keys(self.index_db,tag,name,t1,t2) #print 'keys:',keys if keys['code'] != 0: return {'code':-1,'data':'search key error!'} datas =[] for k in keys['data']: #print 'k:',i,' v:',self.db[i] v = get_kv(self.db,k) rec = [k,v] datas.append(rec) return {'code':0,'data':datas} #------------------------------ # handle_get_ts_keys #------------------------------ def handle_get_ts_keys(self, request): t1 = float(request['t1']) t2 = float(request['t2']) tag = request['tag'] name = request['name'] keys = get_search_keys(self.index_db,tag,name,t1,t2) return keys #------------------------------ # handle_set_stats_data #------------------------------ def handle_set_stats_data(self, request): tag = request['tag'] name = request['name'] time = request['time'] v = request['v'] k = '%s:%s:%s' % (str(tag),str(name),str(time)) set_kv(self.stats_db,k,v) set_kv(self.index_stats_db,str(time),k) return {'code' : 0,'data':None} #------------------------------ # handle_delete_stats #------------------------------ def handle_delete_stats(self, request): k = str(request['k']) rm_kv(self.stats_db,k) return {'code' : 0,'data':None} #------------------------------ # handle_get_stats_datas #------------------------------ def handle_get_stats_datas(self, request): t1 = float(request['t1']) t2 = float(request['t2']) tag = request['tag'] name = request['name'] keys = get_search_keys(self.index_stats_db,tag,name,t1,t2) if keys['code'] != 0: return {'code':-1,'data':'search key error!'} datas =[] for k in keys['data']: #print 'k:',i,' v:',self.db[i] v = get_kv(self.stats_db,k) rec = [k,v] datas.append(rec) return {'code':0,'data':datas} #------------------------------ # handle_get_stats_keys #------------------------------ def handle_get_stats_keys(self, request): t1 = float(request['t1']) t2 = float(request['t2']) tag = request['tag'] name = request['name'] keys = get_search_keys(self.index_stats_db,tag,name,t1,t2) return keys 
#------------------- # handle_mqtt_pub #------------------- def handle_mqtt_pub(self,request): server_addr = request['server_addr'] server_port = request['server_port'] username = request['username'] password = request['password'] topic = request['topic'] message = request['message'] mqttc = mosquitto.Mosquitto("gateway-" + str(time.time())) mqttc.username_pw_set(username,password) mqttc.connect(server_addr, server_port, 60) result, mid = mqttc.publish(str(topic), str(message),0) if (result == mosquitto.MOSQ_ERR_SUCCESS): retcode = 0 else: retcode = -1 res = { "code" : retcode, "data" : None } return res #---------------------- # main #---------------------- if __name__ == "__main__": rpc = RpcHandlerNew('1234-5678',opendb) # echo json_in = { 'key':'1234-5678', 'cmd':'echo', 'in':'asdfasdfasdf', } json_out = rpc.handle_json(json_in) print json_out # setfile print '1.test setfile' json_in = { 'key':'1234-5678', 'cmd':'setfile', 'filename': './test.txt', 'content': '343434343434343434343434343434', } json_out = rpc.handle_json(json_in) print json_out # getfile print '2.test getfile' json_in = { 'key':'1234-5678', 'cmd':'getfile', 'filename': './test.txt', } json_out = rpc.handle_json(json_in) print json_out # test set print '3.test set' json_in = { 'key':'1234-5678', 'cmd':'set', 'k':'asdfasdfasdfasd', 'v':'adsfasdfasd==============asdfa', } json_out = rpc.handle_json(json_in) print json_out #------------------------------------------- # test get print '4.test get' json_in = { 'key':'1234-5678', 'cmd':'get', 'k':'asdfasdfasdfasd', } json_out = rpc.handle_json(json_in) print json_out #------------------------------------------- # test delete print '5.test delete' json_in = { 'key':'1234-5678', 'cmd':'delete', 'k':'asdfasdfasdfasd', } json_out =
date_all: index = line.index(data) first_line = line[0: index] sec_line = line[index + len(data):] # 计算时差,获取目标时间 data = re.sub(r"[个]?小时前", '', data) hours_delta = int(data) target_time = now - timedelta(hours=hours_delta) time_str = '“' + time_to_str(target_time) + "”" placeholder = random_str() placeholders_list[placeholder] = (time_str, [target_time]) line = first_line + placeholder + sec_line # 中文后 10-99 date_all = re.findall(r"([二三四五六七八九]?[十]?[零一二三四五六七八九十半][个]?小时后)", line) for data in date_all: index = line.index(data) first_line = line[0: index] sec_line = line[index + len(data):] # 计算时差,获取目标时间 data = re.sub(r"[个]?小时后", '', data) hour = data try: # 解析小时 if len(hour) > 1: if '十' not in hour: raise ValueError else: pre_hour = hour[0:hour.index('十')] post_hour = hour[hour.index('十') + 1:] # 10 if len(pre_hour) == 0 and len(post_hour) == 0: hour = 10 # 11 ~ 19 elif len(pre_hour) == 0 and len(post_hour) != 0: hour = 10 + int(time_map[post_hour]) # 20, 30 elif len(pre_hour) != 0 and len(post_hour) == 0: hour = int(time_map[pre_hour]) * 10 else: # 21 ~ 29 hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour]) else: hour = int(time_map[hour]) except ValueError: # 识别不出来就认为是错误,后续不再识别 time_str = data placeholder = random_str() placeholders_list[placeholder] = (time_str, [None]) line = first_line + placeholder + sec_line continue target_time = now + timedelta(hours=hour) time_str = '“' + time_to_str(target_time) + "”" placeholder = random_str() placeholders_list[placeholder] = (time_str, [target_time]) line = first_line + placeholder + sec_line # 数字后 date_all = re.findall(r"(\d{1,2}[个]?小时后)", line) for data in date_all: index = line.index(data) first_line = line[0: index] sec_line = line[index + len(data):] # 计算时差,获取目标时间 data = re.sub(r"[个]?小时后", '', data) hours_delta = int(data) target_time = now + timedelta(hours=hours_delta) time_str = '“' + time_to_str(target_time) + "”" placeholder = random_str() placeholders_list[placeholder] = (time_str, [target_time]) line 
= first_line + placeholder + sec_line # 半 前 date_all = re.findall(r"(半[个]?小时前)", line) for data in date_all: index = line.index(data) first_line = line[0: index] sec_line = line[index + len(data):] # 计算时差,获取目标时间 minute_delta = 30 target_time = now - timedelta(minutes=minute_delta) time_str = '“' + time_to_str(target_time) + "”" placeholder = random_str() placeholders_list[placeholder] = (time_str, [target_time]) line = first_line + placeholder + sec_line # 半 后 date_all = re.findall(r"(半[个]?小时后)", line) for data in date_all: index = line.index(data) first_line = line[0: index] sec_line = line[index + len(data):] # 计算时差,获取目标时间 minute_delta = 30 target_time = now + timedelta(minutes=minute_delta) time_str = '“' + time_to_str(target_time) + "”" placeholder = random_str() placeholders_list[placeholder] = (time_str, [target_time]) line = first_line + placeholder + sec_line return line def hour_absolute_comfirm(line, placeholders_list: dict): time_map = {'零': '0', '一': '1', '二': '2', '三': '3', '四': '4', '五': '5', '六': '6', '七': '7', '八': '8', '九': '9', '十': '10', '两': '2'} connect_char = ['-', '~', '到', '至'] now = date.today() # 数字 上午 date_all_morning_1 = re.findall(r"(上午\d{1,2}[点:])", line) date_all_morning_2 = re.findall(r"(早上\d{1,2}[点:])", line) for data in date_all_morning_1 + date_all_morning_2: index = line.rindex(data) if index + len(data) == len(line): line = line[:index] hour = re.sub(r'(上午)', '', data) hour = re.sub(r'(早上)', '', hour) hour = re.sub(r'([点:])', '', hour) # 确定年月日 line, year, month, day = day_relative_comfirm(line, placeholders_list) if year is None: line, year, month, day = day_absolute_comfirm(line, placeholders_list) year = now.year if year is None else year month = now.month if month is None else month day = now.day if day is None else day hour = int(hour) return line, year, month, day, hour # 数字 下午, 傍晚,晚上 date_all_afternoon = re.findall(r"(下午\d{1,2}[点:])", line) date_all_nightfall = re.findall(r"(傍晚\d{1,2}[点:])", line) date_all_night = 
re.findall(r"(晚上\d{1,2}[点:])", line) for data in date_all_afternoon + date_all_nightfall + date_all_night: index = line.rindex(data) if index + len(data) == len(line): line = line[:index] hour = re.sub(r'(下午)', '', data) hour = re.sub(r'(傍晚)', '', hour) hour = re.sub(r'(晚上)', '', hour) hour = re.sub(r'([点:])', '', hour) # 确定年月日 line, year, month, day = day_relative_comfirm(line, placeholders_list) if year is None: line, year, month, day = day_absolute_comfirm(line, placeholders_list) year = now.year if year is None else year month = now.month if month is None else month day = now.day if day is None else day # 确定小时 和 分钟 hour = int(hour) if hour <= 12: hour += 12 return line, year, month, day, hour # 数字 中午 date_all_1 = re.findall(r"(中午[01]?[123][点:])", line) date_all_2 = re.findall(r"(正午[01]?[123][点:])", line) for data in date_all_1 + date_all_2: index = line.rindex(data) if index + len(data) == len(line): line = line[:index] hour = re.sub(r'(中午)', '', data) hour = re.sub(r'(正午)', '', hour) hour = re.sub(r'([点:])', '', hour) # 确定年月日 line, year, month, day = day_relative_comfirm(line, placeholders_list) if year is None: line, year, month, day = day_absolute_comfirm(line, placeholders_list) year = now.year if year is None else year month = now.month if month is None else month day = now.day if day is None else day # 确定小时 和 分钟 hour = int(hour) return line, year, month, day, hour # 数字 凌晨 date_all_early = re.findall(r"(凌晨[0]?[123456][点:])", line) for data in date_all_early: index = line.rindex(data) if index + len(data) == len(line): line = line[:index] hour = re.sub(r'(凌晨)', '', data) hour = re.sub(r'([点:])', '', hour) # 确定年月日 line, year, month, day = day_relative_comfirm(line, placeholders_list) if year is None: line, year, month, day = day_absolute_comfirm(line, placeholders_list) year = now.year if year is None else year month = now.month if month is None else month day = now.day if day is None else day # 确定小时 和 分钟 hour = int(hour) return line, year, month, day, hour 
# 数字 date_all = re.findall(r"(\d{1,2}[点:])", line) for data in date_all: index = line.rindex(data) if index + len(data) == len(line): line = line[:index] hour = re.sub(r'([点:])', '', data) # 确定年月日 line, year, month, day = day_relative_comfirm(line, placeholders_list) if year is None: line, year, month, day = day_absolute_comfirm(line, placeholders_list) year = now.year if year is None else year month = now.month if month is None else month day = now.day if day is None else day # 确定小时 和 分钟 hour = int(hour) return line, year, month, day, hour # 中文 上午 date_all_morning_1 = re.findall(r"(上午[二]?[十]?[零两一二三四五六七八九十][点:])", line) date_all_morning_2 = re.findall(r"(早上[二]?[十]?[零两一二三四五六七八九十][点:])", line) for data in date_all_morning_1 + date_all_morning_2: index = line.rindex(data) if index + len(data) == len(line): line = line[:index] hour = re.sub(r'(上午)', '', data) hour = re.sub(r'(早上)', '', hour) hour = re.sub(r'(分)', '', hour) hour = re.sub(r'(钟)', '', hour) hour = re.sub(r'([点:])', '', hour) # 确定年月日 line, year, month, day = day_relative_comfirm(line, placeholders_list) if year is None: line, year, month, day = day_absolute_comfirm(line, placeholders_list) year = now.year if year is None else year month = now.month if month is None else month day = now.day if day is None else day # 解析小时 if len(hour) > 1: if '十' not in hour: hour = 25 else: pre_hour = hour[0:hour.index('十')] post_hour = hour[hour.index('十') + 1:] # 10 if len(pre_hour) == 0 and len(post_hour) == 0: hour = 10 # 11 ~ 19 elif len(pre_hour) == 0 and len(post_hour) != 0: hour = 10 + int(time_map[post_hour]) # 20, 30 elif len(pre_hour) != 0 and len(post_hour) == 0: hour = int(time_map[pre_hour]) * 10 else: # 21 ~ 29 hour = int(time_map[pre_hour]) * 10 + int(time_map[post_hour]) else: hour = int(time_map[hour]) return line, year, month, day, hour # 中文 下午 date_all_afternoon = re.findall(r"(下午[二]?[十]?[两一二三四五六七八九十][点:])", line) date_all_nightfall = re.findall(r"(傍晚[二]?[十]?[两一二三四五六七八九十][点:])", line) date_all_night = 
re.findall(r"(晚上[二]?[十]?[两一二三四五六七八九十][点:])", line) for data in date_all_afternoon + date_all_nightfall + date_all_night: index = line.rindex(data) if index + len(data) == len(line): line = line[:index] hour = re.sub(r'(下午)', '', data) hour = re.sub(r'(傍晚)', '', hour) hour = re.sub(r'(晚上)', '', hour) hour = re.sub(r'(分)', '', hour) hour = re.sub(r'(钟)', '', hour) hour = re.sub(r'([点:])', '', hour) # 确定年月日 line, year, month, day = day_relative_comfirm(line, placeholders_list) if year is None: line, year, month, day = day_absolute_comfirm(line, placeholders_list) year = now.year if year is None else year month = now.month if month is None else month day = now.day if day is None else day # 解析小时 if len(hour) > 1: if '十' not in hour: hour = 25 else: pre_hour = hour[0:hour.index('十')] post_hour = hour[hour.index('十') + 1:] # 10 if len(pre_hour) == 0 and len(post_hour) == 0: hour = 10 # 11 ~ 19 elif len(pre_hour) == 0 and len(post_hour) != 0:
#!/usr/bin/env python
import numpy as np
import inspect
import functools
from collections.abc import Iterable
from abc import abstractmethod
from dash.development.base_component import Component as DashComponentBase
from dash.development.base_component import ComponentMeta as DashComponentMeta
import dash_bootstrap_components as dbc
from dataclasses import dataclass

from .idtree import IdTree


__all__ = [
    'Template', 'NullComponent', 'WrappedComponent', 'ComponentTemplate',
    'DashComponentInfo']


class Template(IdTree):
    """An abstract base class that encapsulates layout and callbacks into
    a reusable structure.

    The core of this class is the :meth:`child` method which creates and
    maintains a tree of instances whose nodes have unique ids assigned.

    Actual Dash components are typically not instantiated when template
    instances are created. Instead, the template instances hold the
    factories and the arguments, and they only get called when the
    :attr:`layout` property is queried. This call will be propagated down
    to all the children of the calling instance, resulting in a tree of
    Dash component objects ready to be consumed by the Dash ``app``
    instance.

    Complex layout can be built in a declarative fashion with repeated
    calls to the :meth:`child` factory function from any node in the
    tree. All the nodes have their unique id (the Dash component id)
    managed automatically and they can be accessed with the :attr:`id`
    property, which can then be consumed in callback definitions.

    Subclasses of this base class shall implement method
    :meth:`setup_layout`, within which one can declare a layout using
    :meth:`child`, and define any callbacks for the components.
    Subclasses defined this way will act as re-usable templates that can
    be plugged anywhere in other trees of templates. Through this
    mechanism, very complex layout can be built in a fully modularized
    fashion.
    """

    @property
    def id(self):
        # A manually assigned static id (set through the setter below)
        # takes precedence over the tree-managed id from IdTree.
        return getattr(self, '_static_id', IdTree.id.fget(self))

    # The id setter allows overwriting id manually for an object.
    @id.setter
    def id(self, value):
        self._static_id = value

    @property
    @abstractmethod
    def layout(self):
        """Implement this to return a valid Dash layout object."""
        return NotImplemented

    def before_setup_layout(self, app):
        """Hook that runs before the `setup_layout` function call."""
        pass

    def after_setup_layout(self, app):
        """Hook that runs after the `setup_layout` function call."""
        pass

    @abstractmethod
    def setup_layout(self, app):
        """Implement this to declare layout components and their callbacks.

        This base implementation has to be called in the subclass
        implementation to ensure any child templates also get properly
        setup. This is particularly important for templates that contain
        templates in their descendants.

        The convention is to structure the implementation in the
        following way::

            def setup_layout(self, app):

                child0 = self.child(some_dash_type, ...)
                child1 = child0.child(some_template_cls, ...)

                # This will trigger `setup_layout` call to all the children,
                # which may make available some attributes
                super().setup_layout(app)

                @app.callback(...)
                def some_callback(...):
                    return
        """
        # Depth-first propagation bracketed by the two hooks above.
        self.before_setup_layout(app)
        for child in self.children:
            child.setup_layout(app)
        self.after_setup_layout(app)

    def child(self, factory, *args, **kwargs):
        """Return a child template instance.

        The actual type of child template is delegated to the appropriate
        subclass based on the type of `factory`:

        1. `factory` is a `Template` instance. The instance is added
           as-is as the child of this object. `ValueError` is raised if
           `args` or `kwargs` are passed.

        2. `factory` is a Dash component class, (e.g.,
           `~dash_html_components.Div`). A `ComponentTemplate` object is
           created and returned. `args` and `kwargs` are passed to the
           constructor of the ComponentTemplate, which is passed down to
           the Dash component class when the actual Dash component is
           created.

        3. `factory` is a Dash component instance. The instance is
           wrapped in a `WrappedComponent` object and returned.
           `ValueError` is raised if `args` or `kwargs` are passed.

        `ValueError` is raised if `factory` does not conform to the cases
        listed above.
        """
        def ensure_no_extra_args():
            if args or kwargs:
                raise ValueError(
                    f"child call arguments and keyword arguments shall not"
                    f" be specified for factory {factory}")

        if isinstance(factory, Template):
            ensure_no_extra_args()
            factory.parent = self
            return factory
        if isinstance(factory, DashComponentMeta):
            # dash component class
            child_cls = _make_component_template_cls(factory)
        elif isinstance(factory, DashComponentBase):
            # dash component instance
            ensure_no_extra_args()
            args = (factory, )
            child_cls = WrappedComponent
        else:
            raise ValueError(
                f"unable to create child template"
                f" from type {type(factory)}")
        return child_cls(*args, **kwargs, parent=self)

    def grid(self, nrows, ncols, squeeze=True):
        """Return a dash bootstrap component grid.

        .. note::
            User is responsible to create a wrapping container for the
            rows and cols to behave as expected.

        Parameters
        ----------
        nrows : int
            The number of rows.
        ncols : int
            The number of cols per row.
        squeeze : bool, optional
            If True, insignificant dimensions are removed from the
            returned array.
        """
        # note we need to check the current type and see if it is container
        # if not, a container object needs to be created to make
        # the grid functional correctly, per the documentation of dbc.
        result = np.full((nrows, ncols), None, dtype=object)
        container = self
        # if hasattr(self, 'dash_component_info') and (
        #         self.dash_component_info.type is dbc.Container):
        #     container = self
        # else:
        #     container = self.child(dbc.Container, fluid=True)
        current_row = None
        for i in range(nrows):
            for j in range(ncols):
                # Start a new Row at the beginning of each grid row; every
                # cell becomes a Col child of the current Row.
                if j == 0:
                    current_row = container.child(dbc.Row)
                result[i, j] = current_row.child(dbc.Col)
        if squeeze:
            result = np.squeeze(result)
        return result

    def colgrid(
            self, nrows, ncols, squeeze=True, gx=None, gy=None,
            width_ratios=None):
        """Return a dash bootstrap component grid, with only cols.

        .. note::
            User is responsible to create a wrapping container for the
            cols to behave as expected. A row is created to hold all the
            columns.

        Parameters
        ----------
        nrows : int
            The number of rows.
        ncols : int
            The number of cols per row.
        squeeze : bool, optional
            If True, insignificant dimensions are removed from the
            returned array.
        gx : int, optional
            The horizontal gutter.
        gy : int, optional
            The vertical gutter.
        width_ratios : array-like of length ncols, optional
            The relative widths of the columns. Each column gets a
            relative width of width_ratios[i] / sum(width_ratios). If not
            given, all columns will have the same width.
        """
        # make sure ncols are factors of 12 (the Bootstrap grid width)
        if ncols > 12 or 12 % ncols != 0:
            raise ValueError("ncols has to be integer division of 12.")
        result = np.full((nrows, ncols), None, dtype=object)
        if width_ratios is None:
            colwidth = np.full(result.shape, 12 // ncols)
        else:
            width_ratios = np.asanyarray(width_ratios)
            if width_ratios.shape != (ncols, ):
                raise ValueError(
                    f"width_ratios shape {width_ratios.shape} "
                    f"does not match ncols {ncols} ")
            # scale the width_ratios to make it sum to 12
            # and round to int
            width_ratios = np.round(
                width_ratios * (12 / np.sum(width_ratios))).astype(int)
            # check they sum to 12 (rounding may have broken the total)
            if np.sum(width_ratios) != 12:
                raise ValueError(
                    f"unable to calculate widths for cols "
                    f"with ratios {width_ratios}.")
            colwidth = np.tile(width_ratios, (nrows, 1))
        container = self
        # Bootstrap 5 gutter utility classes, e.g. "gx-2 gy-3".
        row_className = list()
        if gx is not None:
            row_className.append(f'gx-{gx}')
        if gy is not None:
            row_className.append(f'gy-{gy}')
        row = container.child(dbc.Row, className=' '.join(row_className))
        for i in range(nrows):
            for j in range(ncols):
                result[i, j] = row.child(dbc.Col, width=colwidth[i, j])
        if squeeze:
            result = np.squeeze(result)
        return result


class NullComponent(Template):
    """A component template that does not represent an actual component.

    This is useful to serve as a root to build standalone layout that
    is not attached immediately to the main Dash app layout tree.
""" def __init__(self, id, parent=None): self._node_id = id super().__init__(parent=parent) @property def idbase(self): self._node_id @property def id(self): return self._node_id @id.setter def id(self, id): self._node_id = id def setup_layout(self, app): super().setup_layout(app) @property def layout(self): return [c.layout for c in self.children] @dataclass class DashComponentInfo(object): """A class to hold Dash component class related info.""" type: type """The Dash component class.""" prop_names: list """The property names of the Dash component class.""" @staticmethod def _get_component_prop_names(component_cls): return inspect.getfullargspec(component_cls.__init__).args[1:] @classmethod def from_component_cls(cls, component_cls): """Return the Dash component info from `component_cls`.""" prop_names = cls._get_component_prop_names(component_cls) return cls(type=component_cls, prop_names=prop_names) @functools.lru_cache(maxsize=None) def _get_component_info(component_cls): return DashComponentInfo.from_component_cls(component_cls) _missing_defualt = object() """A value used to indicate a missing default value.""" def _get_class_meta_attr(cls, attr, default=_missing_defualt): """A helper function to get class Meta attribute. This is a convention we use for defining attributes that are consumed during class construction:: class A: class Meta: some_option = 1 This example defines a meta attribute ``some_option`` for class ``A``. """ Meta = getattr(cls, 'Meta', None) if Meta is None: if default is _missing_defualt: raise AttributeError(f"meta attribute {attr} not found.") return default if default is _missing_defualt: return getattr(Meta, attr) return getattr(Meta, attr, default) class ComponentFactoryMixin(object): """A mixin class that holds and processes info
    the idea is we just need to abort the wrapped function call.

    <Side Effects>
      A new reference is created to every non-simple type of object. That is,
      everything except objects of type str, unicode, int, etc.

    <Returns>
      The deep copy of obj with circular/recursive references preserved.
    """
    try:
      # If this is a top-level call to _copy, create a new objectmap for use
      # by recursive calls to _copy.
      if objectmap is None:
        objectmap = {}
      # If this is a circular reference, use the copy we already made.
      elif _saved_id(obj) in objectmap:
        return objectmap[_saved_id(obj)]

      # types.InstanceType is included because the user can provide an instance
      # of a class of their own in the list of callback args to settimer.
      if _is_in(type(obj), [str, unicode, int, long, float, complex, bool,
                            frozenset, types.NoneType, types.FunctionType,
                            types.LambdaType, types.MethodType,
                            types.InstanceType]):
        # Immutable (or deliberately shared) types are returned as-is.
        return obj

      elif type(obj) is list:
        temp_list = []
        # Need to save this in the objectmap before recursing because lists
        # might have circular references.
        objectmap[_saved_id(obj)] = temp_list
        for item in obj:
          temp_list.append(self._copy(item, objectmap))
        return temp_list

      elif type(obj) is tuple:
        temp_list = []
        for item in obj:
          temp_list.append(self._copy(item, objectmap))
        # I'm not 100% confident on my reasoning here, so feel free to point
        # out where I'm wrong: There's no way for a tuple to directly contain
        # a circular reference to itself. Instead, it has to contain, for
        # example, a dict which has the same tuple as a value. In that
        # situation, we can avoid infinite recursion and properly maintain
        # circular references in our copies by checking the objectmap right
        # after we do the copy of each item in the tuple. The existence of the
        # dictionary would keep the recursion from being infinite because those
        # are properly handled. That just leaves making sure we end up with
        # only one copy of the tuple. We do that here by checking to see if we
        # just made a copy as a result of copying the items above. If so, we
        # return the one that's already been made.
        if _saved_id(obj) in objectmap:
          return objectmap[_saved_id(obj)]
        retval = tuple(temp_list)
        objectmap[_saved_id(obj)] = retval
        return retval

      elif type(obj) is set:
        temp_list = []
        # We can't just store this list object in the objectmap because it isn't
        # a set yet. If it's possible to have a set contain a reference to
        # itself, this could result in infinite recursion. However, sets can
        # only contain hashable items so I believe this can't happen.
        for item in obj:
          temp_list.append(self._copy(item, objectmap))
        retval = set(temp_list)
        objectmap[_saved_id(obj)] = retval
        return retval

      elif type(obj) is dict:
        temp_dict = {}
        # Need to save this in the objectmap before recursing because dicts
        # might have circular references.
        objectmap[_saved_id(obj)] = temp_dict
        for key, value in obj.items():
          temp_key = self._copy(key, objectmap)
          temp_dict[temp_key] = self._copy(value, objectmap)
        return temp_dict

      # We don't copy certain objects. This is because copying an emulated file
      # object, for example, will cause the destructor of the original one to
      # be invoked, which will close the actual underlying file. As the object
      # is wrapped and the client does not have access to it, it's safe to not
      # wrap it.
      elif isinstance(obj, (NamespaceObjectWrapper, emulfile.emulated_file,
                            emulcomm.emulated_socket, thread.LockType,
                            virtual_namespace.VirtualNamespace)):
        return obj

      else:
        raise TypeError("_copy is not implemented for objects of type " +
                        str(type(obj)))

    except Exception, e:
      # Any failure during copying is treated as a namespace violation and
      # aborts the wrapped call.
      self._handle_violation("_copy failed on " + str(obj) +
                             " with message " + str(e))



  def _check_arguments(self, *args, **kwargs):
    """
    <Purpose>
      Check the arguments against the arg_checking_func provided to the
      constructor.

    <Arguments>
      self
      *args
      **kwargs
        The arguments that will ultimately be passed to the wrapped function.

    <Exceptions>
      TypeError
        If the arguments aren't valid. That is, if they fail the arg checking.

    <Side Effects>
      None

    <Returns>
      None
    """
    try:
      self.__arg_checking_func(*args, **kwargs)
    except NamespaceRequirementError, e:
      if type(self.__target_func) is str:
        name = self.__target_func
      else:
        name = self.__target_func.__name__
      raise TypeError("Function '" + name + "' called with incorrect arguments. " +
                      str(e) + " Arguments were args:" + str(args) +
                      ", kwargs:" + str(kwargs))
    # We catch a TypeError as some of the argument checking functions don't
    # accept variable args, so python will raise a TypeError if the correct
    # number of args/kwargs hasn't been passed. We don't show the exception
    # string in this case as it will name the argument checking function,
    # which is bound to confuse the user. Of course, confusion will result
    # if we have a bug in our code that is raising a TypeError.
    except TypeError:
      if type(self.__target_func) is str:
        name = self.__target_func
      else:
        name = self.__target_func.__name__
      raise TypeError("Function '" + name + "' called with incorrect arguments. " +
                      " Arguments were args:" + str(args) +
                      ", kwargs:" + str(kwargs))



  def _check_return_value(self, retval):
    """
    <Purpose>
      Check the return value against the return_checking_func provided to the
      constructor.

    <Arguments>
      self
      retval
        The return value that will ultimately be returned to the calling code
        if it is acceptable.

    <Exceptions>
      NamespaceViolationError
        If the return value isn't acceptable.

    <Side Effects>
      None

    <Returns>
      None
    """
    try:
      self.__return_checking_func(retval)
    except NamespaceRequirementError, e:
      if type(self.__target_func) is str:
        name = self.__target_func
      else:
        name = self.__target_func.__name__
      self._handle_violation("Function '" + name +
                             "' returned with unallowed return type " +
                             str(type(retval)) + " : " + str(e))



  def _check_raised_exception(self, raised_exception):
    """
    <Purpose>
      Check a raised exception against the exception_checking_func provided
      to the constructor.

    <Arguments>
      self
      raised_exception
        The exception that will ultimately be raised to the calling code if
        it is acceptable.

    <Exceptions>
      NamespaceViolationError
        If the exception isn't allowed.

    <Side Effects>
      None

    <Returns>
      None
    """
    try:
      self.__exception_checking_func(raised_exception)
    except NamespaceRequirementError:
      # We include the exception message because it might be a huge pain to
      # debug an error in our code without this.
      # TODO: this will lose the traceback info of the original exception.
      # NOTE(review): the message below is missing a space before "is not",
      # producing e.g. "<type ...>is not an allowed exception type."
      self._handle_violation("Exception of type " + str(type(raised_exception)) +
                             "is not an allowed exception type. " +
                             "Exception message was: " + str(raised_exception))



  def __init__(self, func_dict):
    """
    <Purpose>
      Constructor.

    <Arguments>
      self
      func_dict
        A dictionary with the following keys whose values are the
        corresponding function:
          target_func (required) -- a function or a string of the name
            of the method on the underlying object.
          arg_checking_func (required)
          return_checking_func (required)
          exception_checking_func (optional)
          arg_wrapping_func (optional)
          arg_unwrapping_func (optional)
          return_wrapping_func (optional)

    <Exceptions>
      None

    <Side Effects>
      None

    <Returns>
      None
    """
    # Required in func_dict.
    self.__target_func = func_dict["target_func"]
    self.__arg_checking_func = func_dict["arg_checking_func"]
    self.__return_checking_func = func_dict["return_checking_func"]

    # Optional in func_dict.
    self.__exception_checking_func = func_dict.get("exception_checking_func", allow_all)
    self.__arg_wrapping_func = func_dict.get("arg_wrapping_func", None)
    self.__arg_unwrapping_func = func_dict.get("arg_unwrapping_func", None)
    self.__return_wrapping_func = func_dict.get("return_wrapping_func", None)

    # Make sure that the __target_func really is a function or a string
    # indicating a function by that name on the underlying object should
    # be called.
    if not _saved_callable(self.__target_func) and type(self.__target_func) is not str:
      raise TypeError("The target_func was neither callable nor a string when " +
                      "constructing a namespace-wrapped function. The object " +
                      "used for target_func was: " + repr(self.__target_func))



  def wrapped_function(self, *args, **kwargs):
    """
    <Purpose>
      Act as the function that is wrapped but perform all required
      sanitization and checking of data that goes into and comes out of the
      underlying function.

    <Arguments>
      self
      *args
      **kwargs
        The arguments to the underlying function.

    <Exceptions>
      NamespaceViolationError
        If some aspect of the arguments or function call is not allowed.
      Anything else that the underlying function may raise.

    <Side Effects>
      Anything that the underyling function may do.

    <Returns>
      Anything that the underlying function may return.
    """
    # Copy first, then check. Copying prevents the caller from mutating the
    # arguments after they have passed the checks (TOCTOU).
    args = self._copy(args)
    kwargs = self._copy(kwargs)
    self._check_arguments(*args, **kwargs)

    if self.__arg_wrapping_func is not None:
      args, kwargs = self.__arg_wrapping_func(*args, **kwargs)

    if self.__arg_unwrapping_func is not None:
      args, kwargs = self.__arg_unwrapping_func(*args, **kwargs)

    try:
      # If it's a string rather than a function, then this is our convention
      # for indicating that we
infer in ["BayesInference","ASIRInference"]:
        InferenceObject.drawposteriorexamples ( ax=ax_plot )
    plotThres ( InferenceObject, ax=ax_plot )
    plotPMF ( InferenceObject, ax=ax_plot, showdesc=True )

    # Deviance panel: posterior-predictive scatter for Bayesian inference,
    # histogram for bootstrap inference.
    if infer in ["BayesInference","ASIRInference"]:
        distname = "posterior"
        observed = -2*N.log ( InferenceObject.nullevidence )
        good = plotppScatter ( InferenceObject.ppdeviance, InferenceObject.mcdeviance, "deviance", "D", ax_deviance )
    elif infer == "BootstrapInference":
        distname = "bootstrap"
        observed = InferenceObject.deviance
        good = plotHistogram ( InferenceObject.mcdeviance, observed, "bootstrap deviance", "D", ax_deviance )

    if warn and not good:
        ax_deviance.text ( N.array(ax_deviance.get_xlim()).mean(),
                N.array(ax_deviance.get_ylim()).mean(),
                "The fitted model is a bad\ndescription of the data!",
                horizontalalignment="center", verticalalignment="center",
                rotation=45, **(rc.warning+rc.alltext) )

    # The other two plots are in a loop: Rpd, Rkd
    ax = [ax_rpd,ax_rkd]
    axh = [ax_rpdh,ax_rkdh]
    index = ["p","k"]
    warningtext = ["Simulated Rpd differs from observed!\nTry other sigmoid?",
            "Simulated Rkd differs from observed!\nData are nonstationary!"]
    for k in xrange ( 2 ):
        plotRd ( InferenceObject, ax[k], index[k] )
        name = "R%sd" % (index[k],)
        # NOTE(review): eval() is used here to look up attributes by name;
        # getattr(InferenceObject, "pp"+name) would be the safer equivalent.
        if infer in ["BayesInference","ASIRInference"]:
            good = plotppScatter ( eval("InferenceObject.pp%s" % (name,)),
                    eval("InferenceObject.mc%s"%(name,)), name,name, axh[k] )
        else:
            good = plotHistogram ( eval("InferenceObject.mc%s" % (name,)),
                    eval("InferenceObject.%s"%(name,)), "bootstrap "+name, name, axh[k] )
        if warn and not good:
            axh[k].text ( p.array(axh[k].get_xlim()).mean(), p.array(axh[k].get_ylim()).mean() , warningtext[k], \
                    horizontalalignment="center", verticalalignment="center",
                    rotation=45, **(rc.warning+rc.alltext) )

def plotGeweke ( BayesInferenceObject, parameter=0, ax=None, warn=True ):
    """Geweke plot of moving average of samples

    :Parameters:
        *BayesInferenceObject* :
            a BayesInference object that contains all the information
            about the sampling process
        *parameter* :
            index of the model parameter of interest
        *ax* :
            the pylab.axes object where the plot should go
        *warn* :
            should a warning message be displayed if non stationarity
            of the samples is observed?
    """
    if BayesInferenceObject.mcestimates is None:
        raise ValueError, "Geweke MCMC convergence diagnostic requires monte carlo samples. Try to call the sample() method of your inference object."

    stationary,z,bad = BayesInferenceObject.geweke ( parameter )

    if ax is None:
        ax = prepare_axes (p.axes())

    # One z-score trace per chain; |z| > 2 indicates non-stationarity.
    x = N.arange (z[:,0].shape[0])+1
    for k in xrange ( z.shape[-1] ):
        ax.plot ( x, z[:,k], 'o-' )
    ax.plot ( ax.get_xlim(), [-2]*2, 'k:' )
    ax.plot ( ax.get_xlim(), [ 2]*2, 'k:' )

    if warn and not stationary:
        nsegments = z.shape[0]
        ax.text(0.5*nsegments,0,"chains did not converge", rotation=45,
                verticalalignment="center", horizontalalignment="center",
                **(rc.warning+rc.alltext) )

    ax.set_yticks ( N.array( (-3,-2,-1,0,1,2,3) ) )
    ax.set_xticks ( x )
    ax.set_ylim ( -3,3 )
    ax.set_xlim ( 0.5, z[:,k].shape[0]+.5 )
    ax.set_xlabel ( "chain segment", **(rc.label+rc.alltext) )
    ax.set_ylabel ( "z-score", **(rc.label+rc.alltext) )

    return ax

def plotChains ( BayesInferenceObject, parameter=0, ax=None, raw=False, warn=True ):
    """Simply plot all chains for a single parameter

    :Parameters:
        *parameter* :
            index of the model parameter to plot
        *raw* :
            plot raw samples instead of thinned samples after burnin
        *ax* :
            axes in which to print
        *warn* :
            if True, warnings are written into the plot
    """
    if BayesInferenceObject.mcestimates is None:
        raise ValueError, "Plotting MCMC chains requires monte carlo samples. Try to call the sample() method of your inference object."

    # Do we have an appropriate axis?
    if ax==None:
        ax = prepare_axes ( p.axes() )

    # Plot the chains
    for c in xrange(BayesInferenceObject.nchains):
        samples = BayesInferenceObject.getsamples ( c, raw=raw )
        ax.plot ( samples[:,parameter] )

    # Learn something about the axes
    xtics = N.array(ax.get_xticks())
    x0 = xtics.min()
    xr = xtics.max()-xtics.min()
    ytics = ax.get_yticks()
    y0 = ytics.min()
    yr = N.array(ytics.max()-ytics.min())

    if BayesInferenceObject.nchains>2:
        # Annotate the Gelman-Rubin statistic; Rhat > 1.1 is the usual
        # rule of thumb for non-convergence.
        ax.text(x0+0.6*xr,y0+0.95*yr,r"$\hat{R} = %.4f$" % (BayesInferenceObject.Rhat ( parameter ) ) )
        # NOTE(review): nesting of this warning under nchains>2 was inferred
        # from the (whitespace-mangled) source; Rhat requires multiple
        # chains, which supports this reading -- confirm against upstream.
        if warn and BayesInferenceObject.Rhat(parameter)>1.1:
            ax.text(x0+0.5*xr,y0+0.5*yr,"Chains do not seem to sample\nfrom the same distribution!",
                    horizontalalignment="center",verticalalignment="center",
                    rotation=45,**(rc.warning+rc.alltext))

    ax.set_xlabel ( "sample #", **(rc.label+rc.alltext) )
    parname = BayesInferenceObject.parnames[parameter]
    if parname in ["alpha","beta","gamma","lambda"]:
        parname = r"$\%s$" % (parname,)
    ax.set_ylabel ( parname, **(rc.label+rc.alltext) )

    return ax

def plotParameterDist ( InferenceObject, parameter=0, ax=None ):
    """Plot the distribution of parameters

    :Parameters:
        *InferenceObject* :
            either a BootstrapInference object or a BayesInference object
            containing the samples of the parameter distribution
        *parameter* :
            index of the model parameter of interest
        *ax* :
            pylab.axes object where the plot should go
    """
    if InferenceObject.mcestimates is None:
        raise ValueError, "Plotting distribution of parameters requires monte carlo samples. Try to call the sample() method of your inference object."

    if ax is None:
        ax = prepare_axes ( p.axes() )

    samples = InferenceObject.mcestimates[:,parameter]
    h,b,ptch = ax.hist ( samples, bins=20, normed=True, histtype="step", lw=2 )

    # For Bayesian inference, overlay the prior density parsed from the
    # prior specification string, e.g. "Gauss(0,1)".
    if InferenceObject.__repr__().split()[1] in ["BayesInference","ASIRInference"]:
        priorstr = InferenceObject.model["priors"]
        if not priorstr is None:
            priorstr = priorstr[parameter]
            m = re.search ( r"(\w+)\((-?\d*\.?\d*[eE]?-?\d*),(-?\d*\.?\d*[eE]?-?\d*)\)",
                    priorstr )
            if not m is None:
                dist,prm1,prm2 = m.groups()
                prm1,prm2 = float(prm1),float(prm2)
                x = N.mgrid[b.min():b.max():100j]
                if dist.lower () == "gauss":
                    ax.plot(x,stats.norm.pdf(x,prm1,prm2))
                elif dist.lower () == "beta":
                    ax.plot(x,stats.beta.pdf(x,prm1,prm2))
                elif dist.lower () == "gamma":
                    ax.plot(x,stats.gamma.pdf(x,prm1,scale=prm2))
                elif dist.lower () == "ngamma":
                    ax.plot(x,stats.gamma.pdf(-x,prm1,scale=prm2))
                elif dist.lower () == "uniform":
                    ax.plot(x,stats.uniform.pdf(x,prm1,prm2))
                elif dist.lower () == "invgamma":
                    ax.plot(x,stats.invgamma.pdf(x,prm1,scale=prm2))

    # Highlight estimate and credibility intervals
    prm = InferenceObject.estimate[parameter]
    c25,c975 = p.prctile ( samples, (2.5,97.5) )

    ym = ax.get_ylim()
    ax.plot ( [c25]*2,ym,'b:', [c975]*2,ym,'b:' )
    ax.plot ( [prm]*2,ym,'b' )

    prname = InferenceObject.parnames[parameter]
    if prname in ["alpha","beta","gamma","lambda"]:
        prname = r"\%s" % (prname,)
    message = r"$\hat{%s}"%(prname,)
    message += r"$=%.3f, CI(95)=(%.3f,%.3f)" % ( prm,c25,c975 )
    ax.set_title ( message, **(rc.text+rc.alltext) )

    ax.set_xlabel ( InferenceObject.parnames[parameter], **(rc.label+rc.alltext) )
    ax.set_ylabel ( "density estimate", **(rc.label+rc.alltext) )

def plotThresholdDist ( InferenceObject, cut=0, ax=None ):
    """Plot the distribution of thresholds

    :Parameters:
        *InferenceObject* :
            a BootstrapInference or BayesInference object containing the
            desired data
        *cut* :
            index (!) of the desired cut
        *ax* :
            axes object to place the plot in.
    """
    if InferenceObject.mcestimates is None:
        raise ValueError, "Plotting distributions of thresholds requires monte carlo samples. Try to call the sample() method of your inference object."

    if ax is None:
        ax = prepare_axes ( p.axes() )

    # Plot histogram
    mcthres = InferenceObject.mcthres[:,cut]
    h,b,ptch = ax.hist ( mcthres, bins=20, normed=True, histtype="step", lw=2 )

    # Highlight estimate and credibility intervals
    thres = InferenceObject.getThres ( InferenceObject.cuts[cut] )
    c25,c975 = InferenceObject.getCI ( cut=cut, conf=(0.025,0.975) )

    yl = ax.get_ylim ()
    ax.plot( [c25]*2,yl,'b:', [c975]*2,yl,'b:' )
    ax.plot ( [thres]*2, yl, 'b' )
    ax.set_title ( r"F$^{-1}$(%.2f)=%.3f, CI(95%%)=(%.3f,%.3f)" % (InferenceObject.cuts[cut], thres, c25, c975 ),
            horizontalalignment="center", verticalalignment="bottom",
            **(rc.text+rc.alltext) )
    ax.set_xlabel ( r"F$^{-1}$(%.2f)" % ( InferenceObject.cuts[cut], ),
            **(rc.label+rc.alltext) )
    ax.set_ylabel ( "density estimate", **(rc.label+rc.alltext) )
    ax.set_ylim ( yl )

    return ax

def ThresholdPlot ( InferenceObject ):
    """Show distributions and estimates for all thresholds

    :Parameters:
        *InferenceObject*
            a BootstrapInference or BayesInference object containing the
            desired data
    """
    if InferenceObject.mcestimates is None:
        raise ValueError, "Plotting distributions of thresholds requires monte carlo samples. Try to call the sample() method of your inference object."

    nthres = len(InferenceObject.cuts)
    axw = 1./nthres
    fig = p.figure ( figsize=(3*nthres,3) )
    allax = axes_array_h ( fig, nthres, (.8/nthres-.08/(nthres-1),.7), (.1,.2), dist=.1 )

    # One panel per threshold cut.
    for k,ax in enumerate ( allax ):
        # ax = p.subplot ( 1,nthres,k+1 )
        ax = plotThresholdDist ( InferenceObject, k, prepare_axes ( ax ) )

    return allax

def ParameterPlot ( InferenceObject ):
    """Show distributions and estimates for all parameters in the model

    :Parameters:
        *InferenceObject* :
            a BootstrapInference or BayesInference object containing the
            desired data
    """
    if InferenceObject.mcestimates is None:
        raise ValueError, "Plotting distributions of parameters requires monte carlo samples. Try to call the sample() method of your inference object."

    nparams = len(InferenceObject.parnames)
    axw = 1./nparams
    fig = p.figure (figsize=(3*nparams,3))
    allax = axes_array_h ( fig, nparams, (.8/nparams-.08/(nparams-1),.65), (.1,.2), dist=.1 )
    # One panel per model parameter.
    for k,ax in enumerate ( allax ):
        # ax = p.subplot ( 1, nparams, k+1 )
        plotParameterDist ( InferenceObject, k, ax )

    return allax

def ConvergenceMCMC ( BayesInferenceObject, parameter=0, warn=True ):
    """Diagram to check convergence of MCMC chains for a single parameter

    :Parameters:
        *BayesInferenceObject* :
            a BayesInference object containing all information about
            the model and the posterior distribution
        *parameter* :
            model parameter of interest. So far, no model derived
            parameters such as thresholds are supported
        *warn* :
            should warnings be displayed if the samples look suspicious?
    """
    if BayesInferenceObject.mcestimates is None:
        raise ValueError, "MCMC convergence diagnostics require monte carlo samples. Try to call the sample() method of your inference object."
fig = p.figure ( figsize=[9,3] ) ax_chains,ax_geweke,ax_prm = axes_array_h ( fig, 3, (.2,.65),(.1,.2), dist=.1 ) plotChains ( BayesInferenceObject, parameter, ax_chains, warn=warn ) plotGeweke ( BayesInferenceObject, parameter, ax_geweke, warn=warn ) plotParameterDist ( BayesInferenceObject, parameter, ax_prm ) def plotSensitivity ( BootstrapInferenceObject, ax=None ): """Visualize a sensitivity analysis to determine expanded bootstrap confidence intervals Sensitivity analysis is used for BootstrapInference objects
= median(onchip_energy_eff_ug_r_list_u6_wspm + onchip_energy_eff_ug_r_list_u7_wspm + onchip_energy_eff_ug_r_list_u8_wspm) onchip_energy_eff_ug_r_list_ux_wspm_max = max(onchip_energy_eff_ug_r_list_u6_wspm + onchip_energy_eff_ug_r_list_u7_wspm + onchip_energy_eff_ug_r_list_u8_wspm) print("unary 256c (baseline) :", onchip_energy_eff_list_ug_wspm) print("unary 32c :", onchip_energy_eff_ug_r_list_u6_wspm) print("unary 64c :", onchip_energy_eff_ug_r_list_u7_wspm) print("unary 128c :", onchip_energy_eff_ug_r_list_u8_wspm) print("min improve:", onchip_energy_eff_ug_r_list_ux_wspm_min*100, "%") print("mean improve:", onchip_energy_eff_ug_r_list_ux_wspm_mean*100, "%") print("median improve:", onchip_energy_eff_ug_r_list_ux_wspm_median*100, "%") print("max improve:", onchip_energy_eff_ug_r_list_ux_wspm_max*100, "%") print("Total energy efficiency improve: ") total_energy_eff_bp_r_list_ux_wspm_min = min(total_energy_eff_bp_r_list_u6_wspm + total_energy_eff_bp_r_list_u7_wspm + total_energy_eff_bp_r_list_u8_wspm) total_energy_eff_bp_r_list_ux_wspm_mean = mean(total_energy_eff_bp_r_list_u6_wspm + total_energy_eff_bp_r_list_u7_wspm + total_energy_eff_bp_r_list_u8_wspm) total_energy_eff_bp_r_list_ux_wspm_median = median(total_energy_eff_bp_r_list_u6_wspm + total_energy_eff_bp_r_list_u7_wspm + total_energy_eff_bp_r_list_u8_wspm) total_energy_eff_bp_r_list_ux_wspm_max = max(total_energy_eff_bp_r_list_u6_wspm + total_energy_eff_bp_r_list_u7_wspm + total_energy_eff_bp_r_list_u8_wspm) print("binary parallel (baseline):", total_energy_eff_list_bp_spm) print("binary serial :", total_energy_eff_bp_r_list_bs_spm) print("unary 32c :", total_energy_eff_bp_r_list_u6_wspm) print("unary 64c :", total_energy_eff_bp_r_list_u7_wspm) print("unary 128c :", total_energy_eff_bp_r_list_u8_wspm) print("ugemm 256c :", total_energy_eff_bp_r_list_ug_wspm) print("min improve:", total_energy_eff_bp_r_list_ux_wspm_min*100, "%") print("mean improve:", total_energy_eff_bp_r_list_ux_wspm_mean*100, "%") 
print("median improve:", total_energy_eff_bp_r_list_ux_wspm_median*100, "%") print("max improve:", total_energy_eff_bp_r_list_ux_wspm_max*100, "%") total_energy_eff_bs_r_list_ux_wspm_min = min(total_energy_eff_bs_r_list_u6_wspm + total_energy_eff_bs_r_list_u7_wspm + total_energy_eff_bs_r_list_u8_wspm) total_energy_eff_bs_r_list_ux_wspm_mean = mean(total_energy_eff_bs_r_list_u6_wspm + total_energy_eff_bs_r_list_u7_wspm + total_energy_eff_bs_r_list_u8_wspm) total_energy_eff_bs_r_list_ux_wspm_median = median(total_energy_eff_bs_r_list_u6_wspm + total_energy_eff_bs_r_list_u7_wspm + total_energy_eff_bs_r_list_u8_wspm) total_energy_eff_bs_r_list_ux_wspm_max = max(total_energy_eff_bs_r_list_u6_wspm + total_energy_eff_bs_r_list_u7_wspm + total_energy_eff_bs_r_list_u8_wspm) print("binary serial (baseline) :", total_energy_eff_list_bs_spm) print("unary 32c :", total_energy_eff_bs_r_list_u6_wspm) print("unary 64c :", total_energy_eff_bs_r_list_u7_wspm) print("unary 128c :", total_energy_eff_bs_r_list_u8_wspm) print("ugemm 256c :", total_energy_eff_bs_r_list_ug_wspm) print("min improve:", total_energy_eff_bs_r_list_ux_wspm_min*100, "%") print("mean improve:", total_energy_eff_bs_r_list_ux_wspm_mean*100, "%") print("median improve:", total_energy_eff_bs_r_list_ux_wspm_median*100, "%") print("max improve:", total_energy_eff_bs_r_list_ux_wspm_max*100, "%") total_energy_eff_ug_r_list_ux_wspm_min = min(total_energy_eff_ug_r_list_u6_wspm + total_energy_eff_ug_r_list_u7_wspm + total_energy_eff_ug_r_list_u8_wspm) total_energy_eff_ug_r_list_ux_wspm_mean = mean(total_energy_eff_ug_r_list_u6_wspm + total_energy_eff_ug_r_list_u7_wspm + total_energy_eff_ug_r_list_u8_wspm) total_energy_eff_ug_r_list_ux_wspm_median = median(total_energy_eff_ug_r_list_u6_wspm + total_energy_eff_ug_r_list_u7_wspm + total_energy_eff_ug_r_list_u8_wspm) total_energy_eff_ug_r_list_ux_wspm_max = max(total_energy_eff_ug_r_list_u6_wspm + total_energy_eff_ug_r_list_u7_wspm + total_energy_eff_ug_r_list_u8_wspm) 
print("ugemm 256c (baseline) :", total_energy_eff_list_ug_wspm) print("unary 32c :", total_energy_eff_ug_r_list_u6_wspm) print("unary 64c :", total_energy_eff_ug_r_list_u7_wspm) print("unary 128c :", total_energy_eff_ug_r_list_u8_wspm) print("min improve:", total_energy_eff_ug_r_list_ux_wspm_min*100, "%") print("mean improve:", total_energy_eff_ug_r_list_ux_wspm_mean*100, "%") print("median improve:", total_energy_eff_ug_r_list_ux_wspm_median*100, "%") print("max improve:", total_energy_eff_ug_r_list_ux_wspm_max*100, "%") print("__________________________________________________________________________________________________") print("binary parallel | on-chip | ", onchip_energy_eff_bp_r_list_ux_wspm_min, onchip_energy_eff_bp_r_list_ux_wspm_mean, onchip_energy_eff_bp_r_list_ux_wspm_max) print(" __________________________________________________________________________________") print(" | total | ", total_energy_eff_bp_r_list_ux_wspm_min, total_energy_eff_bp_r_list_ux_wspm_mean, total_energy_eff_bp_r_list_ux_wspm_max) print("__________________________________________________________________________________________________") print("binary serial | on-chip | ", onchip_energy_eff_bs_r_list_ux_wspm_min, onchip_energy_eff_bs_r_list_ux_wspm_mean, onchip_energy_eff_bs_r_list_ux_wspm_max) print(" __________________________________________________________________________________") print(" | total | ", total_energy_eff_bs_r_list_ux_wspm_min, total_energy_eff_bs_r_list_ux_wspm_mean, total_energy_eff_bs_r_list_ux_wspm_max) print("__________________________________________________________________________________________________") print("ugemm 256c | on-chip | ", onchip_energy_eff_ug_r_list_ux_wspm_min, onchip_energy_eff_ug_r_list_ux_wspm_mean, onchip_energy_eff_ug_r_list_ux_wspm_max) print(" __________________________________________________________________________________") print(" | total | ", total_energy_eff_ug_r_list_ux_wspm_min, total_energy_eff_ug_r_list_ux_wspm_mean, 
total_energy_eff_ug_r_list_ux_wspm_max) print("__________________________________________________________________________________________________") print("Energy total fig saved!\n") # total power with dram my_dpi = 300 if a == "eyeriss": fig_h = 1 else: fig_h = 1 fig_w = 3.3115 bp_color = "#7A81FF" bs_color = "#FF7F7F" u6_color = "#666666" u7_color = "#888888" u8_color = "#AAAAAA" ug_color = "#CCCCCC" bg_color = "#D783FF" x_axis = ["Average"] fig, ax = plt.subplots(figsize=(fig_w, fig_h)) idx_tot = 6 x_idx = np.arange(len(x_axis)) width = 1 / 2**(math.ceil(math.log2(idx_tot))) iterval = width l_alpha = 0.8 # 8b - spm - bp index = 0 dram_d_list = power_list[index * 5 + 0][-1:] sram_d_list = power_list[index * 5 + 1][-1:] sram_l_list = power_list[index * 5 + 2][-1:] sarr_d_list = power_list[index * 5 + 3][-1:] sarr_l_list = power_list[index * 5 + 4][-1:] total_power_list_bp_spm = [] onchip_power_list_bp_spm = [] for i in range(len(x_axis)): total_power_list_bp_spm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) onchip_power_list_bp_spm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) idx = 1.5 ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_power_list_bp_spm, width, hatch = None, alpha=0.99, color=bp_color, label='Binary Parallel') # 8b - spm - bs index = 1 dram_d_list = power_list[index * 5 + 0][-1:] sram_d_list = power_list[index * 5 + 1][-1:] sram_l_list = power_list[index * 5 + 2][-1:] sarr_d_list = power_list[index * 5 + 3][-1:] sarr_l_list = power_list[index * 5 + 4][-1:] total_power_list_bs_spm = [] onchip_power_list_bs_spm = [] for i in range(len(x_axis)): total_power_list_bs_spm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) onchip_power_list_bs_spm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) idx += 1 ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_power_list_bs_spm, width, hatch = None, alpha=0.99, 
color=bs_color, label='Binary Serial') onchip_power_r_list_bs_spm = [] for i in range(len(x_axis)): onchip_power_r_list_bs_spm.append(1-onchip_power_list_bs_spm[i]/onchip_power_list_bp_spm[i]) total_power_r_list_bs_spm = [] for i in range(len(x_axis)): total_power_r_list_bs_spm.append(1 - total_power_list_bs_spm[i]/total_power_list_bp_spm[i]) # 8b - wospm - ur - 32c index = 2 dram_d_list = power_list[index * 5 + 0][-1:] sram_d_list = power_list[index * 5 + 1][-1:] sram_l_list = power_list[index * 5 + 2][-1:] sarr_d_list = power_list[index * 5 + 3][-1:] sarr_l_list = power_list[index * 5 + 4][-1:] total_power_list_u6_wspm = [] onchip_power_list_u6_wspm = [] for i in range(len(x_axis)): total_power_list_u6_wspm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) onchip_power_list_u6_wspm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) idx += 1 ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_power_list_u6_wspm, width, hatch = None, alpha=0.99, color=u6_color, label='Unary-32c') onchip_power_r_list_u6_wspm = [] for i in range(len(x_axis)): onchip_power_r_list_u6_wspm.append(1-onchip_power_list_u6_wspm[i]/onchip_power_list_bp_spm[i]) total_power_r_list_u6_wspm = [] for i in range(len(x_axis)): total_power_r_list_u6_wspm.append(1 - total_power_list_u6_wspm[i]/total_power_list_bp_spm[i]) # 8b - wospm - ur - 64c index = 3 dram_d_list = power_list[index * 5 + 0][-1:] sram_d_list = power_list[index * 5 + 1][-1:] sram_l_list = power_list[index * 5 + 2][-1:] sarr_d_list = power_list[index * 5 + 3][-1:] sarr_l_list = power_list[index * 5 + 4][-1:] total_power_list_u7_wspm = [] onchip_power_list_u7_wspm = [] for i in range(len(x_axis)): total_power_list_u7_wspm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) onchip_power_list_u7_wspm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) idx += 1 ax.bar(x_idx + iterval * (idx - idx_tot / 2), 
total_power_list_u7_wspm, width, hatch = None, alpha=0.99, color=u7_color, label='Unary-64c') onchip_power_r_list_u7_wspm = [] for i in range(len(x_axis)): onchip_power_r_list_u7_wspm.append(1-onchip_power_list_u7_wspm[i]/onchip_power_list_bp_spm[i]) total_power_r_list_u7_wspm = [] for i in range(len(x_axis)): total_power_r_list_u7_wspm.append(1 - total_power_list_u7_wspm[i]/total_power_list_bp_spm[i]) # 8b - wospm - ur - 128c index = 4 dram_d_list = power_list[index * 5 + 0][-1:] sram_d_list = power_list[index * 5 + 1][-1:] sram_l_list = power_list[index * 5 + 2][-1:] sarr_d_list = power_list[index * 5 + 3][-1:] sarr_l_list = power_list[index * 5 + 4][-1:] total_power_list_u8_wspm = [] onchip_power_list_u8_wspm = [] for i in range(len(x_axis)): total_power_list_u8_wspm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) onchip_power_list_u8_wspm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) idx += 1 ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_power_list_u8_wspm, width, hatch = None, alpha=0.99, color=u8_color, label='Unary-128c') onchip_power_r_list_u8_wspm = [] for i in range(len(x_axis)): onchip_power_r_list_u8_wspm.append(1-onchip_power_list_u8_wspm[i]/onchip_power_list_bp_spm[i]) total_power_r_list_u8_wspm = [] for i in range(len(x_axis)): total_power_r_list_u8_wspm.append(1 - total_power_list_u8_wspm[i]/total_power_list_bp_spm[i]) # 8b - wospm - ug - 256c index = 5 dram_d_list = power_list[index * 5 + 0][-1:] sram_d_list = power_list[index * 5 + 1][-1:] sram_l_list = power_list[index * 5 + 2][-1:] sarr_d_list = power_list[index * 5 + 3][-1:] sarr_l_list = power_list[index * 5 + 4][-1:] total_power_list_ug_wspm = [] onchip_power_list_ug_wspm = [] for i in range(len(x_axis)): total_power_list_ug_wspm.append(dram_d_list[i] + sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) onchip_power_list_ug_wspm.append(sram_d_list[i] + sram_l_list[i] + sarr_d_list[i] + sarr_l_list[i]) 
idx += 1 ax.bar(x_idx + iterval * (idx - idx_tot / 2), total_power_list_ug_wspm, width, hatch = None, alpha=0.99, color=ug_color, label='uGEMM-H') onchip_power_r_list_ug_wspm = [] for i in range(len(x_axis)): onchip_power_r_list_ug_wspm.append(1-onchip_power_list_ug_wspm[i]/onchip_power_list_bp_spm[i]) total_power_r_list_ug_wspm = [] for i in range(len(x_axis)): total_power_r_list_ug_wspm.append(1 - total_power_list_ug_wspm[i]/total_power_list_bp_spm[i]) ax.set_ylabel('Total power\n(mW)') ax.set_xticks(x_idx) ax.set_xticklabels(x_axis) plt.xlim(x_idx[0]-0.5, x_idx[-1]+0.5) plt.yscale("linear") bottom, top = plt.ylim() if a == "eyeriss": ax.set_ylim(bottom, 2500) for x in x_idx: ax.fill_betweenx([bottom, top], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0) ax.text(0-1.5*width, 2600, "{:.2f}".format(total_power_list_bp_spm[0]), horizontalalignment="right") ax.text(0-0.5*width, 2600, "{:.2f}".format(total_power_list_bs_spm[0]), horizontalalignment="left") y_tick_list = [0, 1000, 2000] ax.set_yticks(y_tick_list) y_label_list = [] for y in y_tick_list: if y != 0: y_label_list.append("{:1.0E}".format(abs(y))) else: y_label_list.append("0") ax.set_yticklabels(y_label_list) else: ax.set_ylim(bottom, top) for x in x_idx: ax.fill_betweenx([bottom, top+2000], x1=x, x2=x+0.5, alpha=0.2, color=bg_color, linewidth=0) y_tick_list = [0, 5000, 10000] ax.set_yticks(y_tick_list) y_label_list = [] for y in y_tick_list: if y != 0: y_label_list.append("{:1.0E}".format(abs(y))) else: y_label_list.append("0") ax.set_yticklabels(y_label_list) ax.minorticks_off() fig.tight_layout() plt.savefig('./outputs_fig/' + technode + '/Power_total_' + a_cap + ".pdf", bbox_inches='tight', dpi=my_dpi, pad_inches=0.02) if print_power_onchip: print("On-chip power reduction: ") print("binary parallel (baseline):", onchip_power_list_bp_spm) print("binary serial :", onchip_power_r_list_bs_spm) print("unary 32c :", onchip_power_r_list_u6_wspm) print("unary 64c :", onchip_power_r_list_u7_wspm) 
print("unary 128c :", onchip_power_r_list_u8_wspm) print("ugemm 256c :", onchip_power_r_list_ug_wspm) print("min reduction:", min(onchip_power_r_list_u6_wspm + onchip_power_r_list_u7_wspm + onchip_power_r_list_u8_wspm)*100, "%") print("mean reduction:", mean(onchip_power_r_list_u6_wspm + onchip_power_r_list_u7_wspm + onchip_power_r_list_u8_wspm)*100, "%") print("median reduction:", median(onchip_power_r_list_u6_wspm + onchip_power_r_list_u7_wspm + onchip_power_r_list_u8_wspm)*100, "%") print("max reduction:", max(onchip_power_r_list_u6_wspm + onchip_power_r_list_u7_wspm + onchip_power_r_list_u8_wspm)*100, "%") onchip_power_bs_r_list_u6_wspm = [] onchip_power_bs_r_list_u7_wspm = [] onchip_power_bs_r_list_u8_wspm = [] onchip_power_bs_r_list_ug_wspm = [] for i in range(len(onchip_power_list_bs_spm)): onchip_power_bs_r_list_u6_wspm.append(1 - onchip_power_list_u6_wspm[i] / onchip_power_list_bs_spm[i]) onchip_power_bs_r_list_u7_wspm.append(1 - onchip_power_list_u7_wspm[i] / onchip_power_list_bs_spm[i]) onchip_power_bs_r_list_u8_wspm.append(1 - onchip_power_list_u8_wspm[i] / onchip_power_list_bs_spm[i]) onchip_power_bs_r_list_ug_wspm.append(1 - onchip_power_list_ug_wspm[i] / onchip_power_list_bs_spm[i]) print("binary serial (baseline) :", onchip_power_list_bs_spm) print("unary 32c :", onchip_power_bs_r_list_u6_wspm) print("unary 64c :", onchip_power_bs_r_list_u7_wspm) print("unary 128c :", onchip_power_bs_r_list_u8_wspm) print("ugemm 256c :", onchip_power_bs_r_list_ug_wspm) print("min reduction:", min(onchip_power_bs_r_list_u6_wspm + onchip_power_bs_r_list_u7_wspm + onchip_power_bs_r_list_u8_wspm)*100, "%") print("mean reduction:", mean(onchip_power_bs_r_list_u6_wspm + onchip_power_bs_r_list_u7_wspm + onchip_power_bs_r_list_u8_wspm)*100, "%") print("median reduction:", median(onchip_power_bs_r_list_u6_wspm + onchip_power_bs_r_list_u7_wspm + onchip_power_bs_r_list_u8_wspm)*100, "%") print("max reduction:", max(onchip_power_bs_r_list_u6_wspm + 
onchip_power_bs_r_list_u7_wspm + onchip_power_bs_r_list_u8_wspm)*100,
        # ---- tail of a level-up card drawing method; its `def` lies above this chunk ----
        draw = ImageDraw.Draw(process)

        # puts in background
        bg_image = bg_image.resize((85, 105), Image.ANTIALIAS)
        bg_image = bg_image.crop((0,0, 85, 105))
        result.paste(bg_image, (0,0))

        # draw transparent overlay over the lower part of the card
        draw.rectangle([(0, 40), (85, 105)], fill=(30, 30 ,30, 220))
        # white portion
        draw.rectangle([(15, 11), (68, 64)], fill=(255,255,255,160), outline=(100, 100, 100, 100)) # profile rectangle

        # put in profile picture
        profile_size = (50, 50)
        profile_image = profile_image.resize(profile_size, Image.ANTIALIAS)
        process.paste(profile_image, (17, 13))

        # fonts
        level_fnt2 = ImageFont.truetype(font_bold_file, 20)
        level_fnt = ImageFont.truetype(font_bold_file, 32)

        # write label text, horizontally centered on the 85px-wide card
        draw.text((self._center(0, 85, "Level Up!", level_fnt2), 65), "Level Up!", font=level_fnt2, fill=(240,240,240,255)) # label
        lvl_text = "LVL {}".format(userinfo["servers"][server.id]["level"])
        draw.text((self._center(0, 85, lvl_text, level_fnt), 80), lvl_text, font=level_fnt, fill=(240,240,240,255)) # level number

        # composite the overlay onto the background and write the result to disk
        result = Image.alpha_composite(result, process)
        result.save(GENPATH+'/level{}.png'.format(user.id),'PNG', quality=100)

    async def draw_rank(self, user, server):
        """Render the user's rank card as a 360x100 PNG.

        Composes the user's background, a circular XP-progress ring around the
        avatar, the user's name, reputation, server rank, server XP and credit
        balance, then saves the image to GENPATH/rank<user.id>.png.
        """
        # fonts
        # NOTE(review): sub_header_fnt, badge_fnt and general_info_u_fnt appear
        # unused in the visible body -- confirm before removing.
        name_fnt = ImageFont.truetype(font_bold_file, 22)
        header_u_fnt = ImageFont.truetype(font_unicode_file, 18)
        sub_header_fnt = ImageFont.truetype(font_bold_file, 14)
        badge_fnt = ImageFont.truetype(font_bold_file, 12)
        large_fnt = ImageFont.truetype(font_bold_file, 33)
        level_label_fnt = ImageFont.truetype(font_bold_file, 22)
        general_info_fnt = ImageFont.truetype(font_bold_file, 15)
        general_info_u_fnt = ImageFont.truetype(font_unicode_file, 11)
        credit_fnt = ImageFont.truetype(font_bold_file, 10)

        userinfo = self.users[user.id]
        # get urls
        bg_url = userinfo["rank_background"]
        profile_url = user.avatar_url

        # create image objects
        bg_image = Image
        profile_image = Image

        # download background and avatar into temp files, then load them
        await self._make_temp_image(bg_url, "rank", user)
        bg_image = Image.open(GENPATH+'/temp_rank_bg{}.png'.format(user.id)).convert('RGBA')
        profile_image = Image.open(GENPATH+'/temp_rank_profile{}.png'.format(user.id)).convert('RGBA')

        # set canvas
        bg_color = (255,255,255, 0)
        result = Image.new('RGBA', (360, 100), bg_color)
        process = Image.new('RGBA', (360, 100), bg_color)

        # puts in background
        bg_image = bg_image.resize((360, 100), Image.ANTIALIAS)
        bg_image = bg_image.crop((0,0, 360, 100))
        result.paste(bg_image, (0,0))

        # draw
        draw = ImageDraw.Draw(process)

        # draw transparent overlay
        vert_pos = 5
        left_pos = 70
        right_pos = 360 - vert_pos
        titre_height = 22
        gap = 3

        draw.rectangle([(left_pos - 20,vert_pos), (right_pos, vert_pos + titre_height)], fill=(230,230,230,230)) # title box
        content_top = vert_pos + titre_height + gap
        content_bottom = 100 - vert_pos
        draw.rectangle([(left_pos - 20, content_top), (right_pos, content_bottom)], fill=(30, 30 ,30, 220), outline=(230,230,230,230)) # content box

        # stick in credits if needed
        if bg_url in bg_credits.keys():
            credit_text = " ".join("{}".format(bg_credits[bg_url]))
            draw.text((2, 92), credit_text, font=credit_fnt, fill=(0,0,0,190))

        # draw level circle (rendered at 6x resolution, downscaled for antialiasing)
        multiplier = 6
        lvl_circle_dia = 94
        circle_left = 15
        circle_top = int((100 - lvl_circle_dia)/2)
        raw_length = lvl_circle_dia * multiplier

        # create mask
        mask = Image.new('L', (raw_length, raw_length), 0)
        draw_thumb = ImageDraw.Draw(mask)
        draw_thumb.ellipse((0, 0) + (raw_length, raw_length), fill = 255, outline = 0)

        # drawing level bar calculate angle
        start_angle = -90 # from top instead of 3oclock
        angle = int(360 * (userinfo["current_xp"]/self._get_next_level(userinfo["servers"][server.id]["level"]))) + start_angle

        lvl_circle = Image.new("RGBA", (raw_length, raw_length))
        draw_lvl_circle = ImageDraw.Draw(lvl_circle)
        draw_lvl_circle.ellipse([0, 0, raw_length, raw_length], fill=(180, 180, 180, 180), outline = (255, 255, 255, 220))

        # determines exp bar color (user override or default white)
        if "rank_exp_color" not in userinfo.keys() or not userinfo["rank_exp_color"]:
            exp_fill = (255, 255, 255, 230)
        else:
            exp_fill = tuple(userinfo["rank_exp_color"])
        draw_lvl_circle.pieslice([0, 0, raw_length, raw_length], start_angle, angle, fill=exp_fill, outline = (255, 255, 255, 230))

        # put on level bar circle
        lvl_circle = lvl_circle.resize((lvl_circle_dia, lvl_circle_dia), Image.ANTIALIAS)
        lvl_bar_mask = mask.resize((lvl_circle_dia, lvl_circle_dia), Image.ANTIALIAS)
        process.paste(lvl_circle, (circle_left, circle_top), lvl_bar_mask)

        # draws mask
        total_gap = 10
        border = int(total_gap/2)
        profile_size = lvl_circle_dia - total_gap
        raw_length = profile_size * multiplier

        # put in profile picture, centered inside the progress ring
        output = ImageOps.fit(profile_image, (raw_length, raw_length), centering=(0.5, 0.5))
        output = output.resize((profile_size, profile_size), Image.ANTIALIAS)
        mask = mask.resize((profile_size, profile_size), Image.ANTIALIAS)
        profile_image = profile_image.resize((profile_size, profile_size), Image.ANTIALIAS)
        process.paste(profile_image, (circle_left + border, circle_top + border), mask)

        # draw level box
        level_left = 277
        level_right = right_pos
        draw.rectangle([(level_left, vert_pos), (level_right, vert_pos + titre_height)], fill="#AAA") # box
        lvl_text = "NIVEAU {}".format(userinfo["servers"][server.id]["level"])
        draw.text((self._center(level_left, level_right, lvl_text, level_label_fnt), vert_pos + 2), lvl_text, font=level_label_fnt, fill=(110,110,110,255)) # level number

        # draw text
        grey_color = (110,110,110,255)
        white_color = (230,230,230,255)

        # reputation points
        left_text_align = 130
        rep_align = self._center(110, 190, "Réput.", level_label_fnt)

        # _name(self, user, max_length)
        text_name = self._name(user, 21)
        # print(text_name)
        # _truncate_text(self, text, max_length)
        truncated = self._truncate_text(text_name, 21)
        # print(truncated)
        # _write_unicode(text, init_x, y, font, unicode_font, fill)
        self._write_unicode(truncated, left_text_align - 20, vert_pos + 2, name_fnt, header_u_fnt, grey_color, draw) # name

        # NOTE(review): the .format() below has no placeholder, so the awaited
        # rank value is discarded -- confirm whether this call is intentional.
        draw.text((rep_align, 37), "Réput.".format(await self._find_server_rank(user, server)), font=level_label_fnt, fill=white_color) # rep label
        rep_label_width = level_label_fnt.getsize("Réput.")[0]
        rep_text = "+{}".format(userinfo["rep"])
        draw.text((self._center(rep_align, rep_align + rep_label_width, rep_text, large_fnt) , 63), rep_text, font=large_fnt, fill=white_color) # rep value

        # divider bar
        draw.rectangle([(190, 45), (191, 85)], fill=(160,160,160,240))

        # labels
        draw.text((label_align, 38), "Rang Serveur:", font=general_info_fnt, fill=white_color) # server rank
        draw.text((label_align, 58), "XP Serveur:", font=general_info_fnt, fill=white_color) # server exp
        draw.text((label_align, 78), "Crédits:", font=general_info_fnt, fill=white_color) # credits

        # info
        right_text_align = 290
        rank_txt = "#{}".format(await self._find_server_rank(user, server))
        draw.text((right_text_align, 38), self._truncate_text(rank_txt, 12) , font=general_info_fnt, fill=white_color) # rank
        exp_txt = "{}".format(userinfo["servers"][server.id]["total_xp"])
        draw.text((right_text_align, 58), self._truncate_text(exp_txt, 12), font=general_info_fnt, fill=white_color) # exp
        # balance comes from the Economy cog; default to 0 when unavailable
        try:
            credits = self._get_solde(user)
        except:
            credits = 0
        credit_txt = "{}".format(credits)
        draw.text((right_text_align, 78), self._truncate_text(credit_txt, 12), font=general_info_fnt, fill=white_color) # credits

        result = Image.alpha_composite(result, process)
        result.save(GENPATH+'/rank{}.png'.format(user.id),'PNG', quality=100)

    ##############################################################################
    #
    # INTERNAL (PRIVATE) CLASS FUNCTIONS
    #
    ##############################################################################
    # try to import the banking functions with
    # from economy import bank
    async def _process_purchase(self, ctx):
        # Charge the user the configured background price via the Economy cog.
        user = ctx.message.author
        server = ctx.message.server
        try:
            bank = self.bot.get_cog('Economy').bank
            if bank.account_exists(user):
                if not bank.can_spend(user, self.settings["bg_price"]):
                    await self.bot.say("**Fonds insuffisants.**\n"
                                       "Coûts du changement d'arrière plan: **{}**".format(self.settings["bg_price"]))
return False else: new_balance = bank.get_balance(user) - self.settings["bg_price"] bank.set_credits(user, new_balance) return True else: if self.settings["bg_price"] == 0: return True else: await self.bot.say("{} n'a pas de compte bancaire.\n" "Créer votre compte à la banque : **{}bank register**".format(prefix)) return False except: if self.settings["bg_price"] == 0: return True else: msg = "Il y a une **erreur** avec le module bancaire.\n" "Régler le problème pour permettre les achats ou\n" "définir le prix à 0. \n\n" "Actuellement il est de **{}**".format(prefix, self.settings["bg_price"]) await self.bot.say(msg) return False async def _make_temp_image(self, bg_url:str, whatfor:str, user ): async with aiohttp.get(bg_url) as r: image = await r.content.read() with open(GENPATH+'/temp_'+whatfor+'_bg{}.png'.format(user.id),'wb') as f: f.write(image) try: async with aiohttp.get(profile_url) as r: image = await r.content.read() except: async with aiohttp.get(default_avatar_url) as r: image = await r.content.read() with open(GENPATH+'/temp_'+whatfor+'_profile{}.png'.format(user.id),'wb') as f: f.write(image) async def _handle_on_message(self, message): text = message.content channel = message.channel server = message.server user = message.author curr_time = time.time() # Si l'utilisateur est un bot if user.bot: return # Première fois : création de l'entrée serveur # Donne les réglages par défaut # et création des fichiers USERS et BLOCK rattachés try: if server.id not in self.servers: await self._create_server(server) except: pass self.settings = self.servers[server.id]["settings"] self.backgrounds = self.servers[server.id]["bgs"] # Inscrire les joueurs automatiquement ? if not self.servers[server.id]["settings"]["processed"]: await self._register_users(server) # Si le serveur est désactivé if self.servers[server.id]["settings"]["disabled_server"]: return # Création de l'utilisateur s'il n'existe pas. 
await self._create_user(user, server) # timeout pour ne pas spam l'xp actual_interval = float(curr_time) - float(self.block[user.id]["chat"]) if actual_interval >= float(self.settings["freq_xp"]): await self._process_xp(message) self.block[user.id]["chat"] = curr_time fileIO(BLOCK, "save", self.block) else: remaining_time = int(float(self.settings["freq_xp"]) - actual_interval) async def _find_server_rank(self, user, server): targetid = user.id users = [] for userid in self.users.keys(): # Je passe en revue tous les userid du fichiers USERS if server.id in self.users[userid]["servers"]: # Je cherche uniquement les occurences de server.id temp_user = find(lambda m: m.id == userid, server.members) # je cherche l'utilisateur dans les membres du serveurs server_exp = self.users[userid]["servers"][server.id]["total_xp"] # récupère l'xp du user pour ce serveur if temp_user != None: users.append((userid, temp_user.name, server_exp)) # Récupère une liste : (user : totalXP) pour ce serveur sorted_list = sorted(users, key=lambda us: us[2], reverse =True) # tri la liste # récupère l'index de l'utilisateur concerné = rang rank = 1 for user in sorted_list: if user[0] == targetid: return rank rank+=1 async def _find_global_rank(self, user, server): users = [] for userid in self.users.keys(): temp_user = find(lambda m: m.id == userid, server.members) # je cherche l'utilisateur dans les membres du serveurs global_xp = self.users[userid]["global_xp"] # récupère l'xp du user pour ce serveur if temp_user != None: users.append((userid, temp_user.name, self.users[userid]["global_xp"])) sorted_list = sorted(users, key=lambda us: us[2], reverse=True) rank = 1 for stats in sorted_list: if stats[0] == user.id: return rank rank+=1 async def _process_xp(self, message): channel = message.channel server = channel.server user = message.author # tirage au sort d'un montant d'xp loto_xp = self._get_random_xp() # ajoute l'xp au montant total d'xp gagné # au global # au local 
self.users[user.id]["global_xp"] += loto_xp self.users[user.id]["servers"][server.id]["total_xp"] += loto_xp #
state_dict[conv_name + '.batch_norm.weight'] offset = state_dict[conv_name + '.batch_norm.bias'] m = state_dict[conv_name + '.batch_norm._mean'] v = state_dict[conv_name + '.batch_norm._variance'] if fpn.coord_conv: copy_conv_bn(yolo_block.tip.conv, w, scale, offset, m, v, use_gpu) else: copy_conv_bn(yolo_block.tip, w, scale, offset, m, v, use_gpu) # SPP 和 DropBlock if fpn.conv_block_num == 2: if i == 0: if fpn.spp: conv_name = 'neck.yolo_block.%d.conv_module.spp.conv' % (i,) w = state_dict[conv_name + '.conv.weight'] scale = state_dict[conv_name + '.batch_norm.weight'] offset = state_dict[conv_name + '.batch_norm.bias'] m = state_dict[conv_name + '.batch_norm._mean'] v = state_dict[conv_name + '.batch_norm._variance'] copy_conv_bn(yolo_block.conv_module[3].conv, w, scale, offset, m, v, use_gpu) elif fpn.conv_block_num == 0: if fpn.spp and i == 0: conv_name = 'neck.yolo_block.%d.conv_module.spp.conv' % (i,) w = state_dict[conv_name + '.conv.weight'] scale = state_dict[conv_name + '.batch_norm.weight'] offset = state_dict[conv_name + '.batch_norm.bias'] m = state_dict[conv_name + '.batch_norm._mean'] v = state_dict[conv_name + '.batch_norm._variance'] copy_conv_bn(yolo_block.conv_module[0].conv, w, scale, offset, m, v, use_gpu) # 上采样之前的yolo_transition if i < fpn.num_blocks - 1: conv_name = 'neck.yolo_transition.%d' % (i,) w = state_dict[conv_name + '.conv.weight'] scale = state_dict[conv_name + '.batch_norm.weight'] offset = state_dict[conv_name + '.batch_norm.bias'] m = state_dict[conv_name + '.batch_norm._mean'] v = state_dict[conv_name + '.batch_norm._variance'] copy_conv_bn(fpn.routes[i], w, scale, offset, m, v, use_gpu) if isinstance(head, YOLOv3Head): for i in range(len(head.anchors)): w = state_dict["yolo_head.yolo_output.{}.weight".format(i)] b = state_dict["yolo_head.yolo_output.{}.bias".format(i)] copy_conv(head.yolo_outputs[i].conv, w, b, use_gpu) elif model_class_name == 'FCOS': ss = args.ckpt.split('.') if ss[-1] == 'pth': state_dict = 
torch.load(args.ckpt, map_location=torch.device('cpu')) backbone_dic = {} fpn_dic = {} fcos_head_dic = {} others = {} for key, value in state_dict.items(): if 'tracked' in key: continue if 'bottom_up' in key: backbone_dic[key] = value.data.numpy() elif 'fpn' in key: fpn_dic[key] = value.data.numpy() elif 'fcos_head' in key: fcos_head_dic[key] = value.data.numpy() else: others[key] = value.data.numpy() backbone = model.backbone fpn = model.fpn head = model.head if isinstance(backbone, Resnet50Vb): resnet = backbone # AdelaiDet里输入图片使用了BGR格式。这里做一下手脚使输入图片默认是RGB格式。 w = backbone_dic['backbone.bottom_up.stem.conv1.weight'] cpw = np.copy(w) w[:, 2, :, :] = cpw[:, 0, :, :] w[:, 0, :, :] = cpw[:, 2, :, :] scale = backbone_dic['backbone.bottom_up.stem.conv1.norm.weight'] offset = backbone_dic['backbone.bottom_up.stem.conv1.norm.bias'] m = backbone_dic['backbone.bottom_up.stem.conv1.norm.running_mean'] v = backbone_dic['backbone.bottom_up.stem.conv1.norm.running_var'] copy_conv_bn(resnet.stage1_conv1_1, w, scale, offset, m, v, use_gpu) nums = [3, 4, 6, 3] for nid, num in enumerate(nums): stage_name = 'res' + str(nid + 2) for kk in range(num): conv_name1 = 'backbone.bottom_up.%s.%d.conv1' % (stage_name, kk) w = backbone_dic[conv_name1 + '.weight'] scale = backbone_dic[conv_name1 + '.norm.weight'] offset = backbone_dic[conv_name1 + '.norm.bias'] m = backbone_dic[conv_name1 + '.norm.running_mean'] v = backbone_dic[conv_name1 + '.norm.running_var'] copy_conv_bn(resnet.get_block('stage%d_%d' % (2 + nid, kk)).conv1, w, scale, offset, m, v, use_gpu) conv_name2 = 'backbone.bottom_up.%s.%d.conv2' % (stage_name, kk) w = backbone_dic[conv_name2 + '.weight'] scale = backbone_dic[conv_name2 + '.norm.weight'] offset = backbone_dic[conv_name2 + '.norm.bias'] m = backbone_dic[conv_name2 + '.norm.running_mean'] v = backbone_dic[conv_name2 + '.norm.running_var'] copy_conv_bn(resnet.get_block('stage%d_%d' % (2 + nid, kk)).conv2, w, scale, offset, m, v, use_gpu) conv_name3 = 
'backbone.bottom_up.%s.%d.conv3' % (stage_name, kk) w = backbone_dic[conv_name3 + '.weight'] scale = backbone_dic[conv_name3 + '.norm.weight'] offset = backbone_dic[conv_name3 + '.norm.bias'] m = backbone_dic[conv_name3 + '.norm.running_mean'] v = backbone_dic[conv_name3 + '.norm.running_var'] copy_conv_bn(resnet.get_block('stage%d_%d' % (2 + nid, kk)).conv3, w, scale, offset, m, v, use_gpu) # 每个stage的第一个卷积块才有4个卷积层 if kk == 0: shortcut_name = 'backbone.bottom_up.%s.%d.shortcut' % (stage_name, kk) w = backbone_dic[shortcut_name + '.weight'] scale = backbone_dic[shortcut_name + '.norm.weight'] offset = backbone_dic[shortcut_name + '.norm.bias'] m = backbone_dic[shortcut_name + '.norm.running_mean'] v = backbone_dic[shortcut_name + '.norm.running_var'] copy_conv_bn(resnet.get_block('stage%d_%d' % (2 + nid, kk)).conv4, w, scale, offset, m, v, use_gpu) # fpn, 6个卷积层 w = fpn_dic['backbone.fpn_lateral5.weight'] b = fpn_dic['backbone.fpn_lateral5.bias'] copy_conv(fpn.fpn_inner_convs[0].conv, w, b, use_gpu) w = fpn_dic['backbone.fpn_lateral4.weight'] b = fpn_dic['backbone.fpn_lateral4.bias'] copy_conv(fpn.fpn_inner_convs[1].conv, w, b, use_gpu) w = fpn_dic['backbone.fpn_lateral3.weight'] b = fpn_dic['backbone.fpn_lateral3.bias'] copy_conv(fpn.fpn_inner_convs[2].conv, w, b, use_gpu) w = fpn_dic['backbone.fpn_output5.weight'] b = fpn_dic['backbone.fpn_output5.bias'] copy_conv(fpn.fpn_convs[0].conv, w, b, use_gpu) w = fpn_dic['backbone.fpn_output4.weight'] b = fpn_dic['backbone.fpn_output4.bias'] copy_conv(fpn.fpn_convs[1].conv, w, b, use_gpu) w = fpn_dic['backbone.fpn_output3.weight'] b = fpn_dic['backbone.fpn_output3.bias'] copy_conv(fpn.fpn_convs[2].conv, w, b, use_gpu) # head num_convs = 4 ids = [[0, 1], [3, 4], [6, 7], [9, 10]] for lvl in range(0, num_convs): # conv + gn w = fcos_head_dic['proposal_generator.fcos_head.cls_tower.%d.weight' % ids[lvl][0]] b = fcos_head_dic['proposal_generator.fcos_head.cls_tower.%d.bias' % ids[lvl][0]] scale = 
fcos_head_dic['proposal_generator.fcos_head.cls_tower.%d.weight' % ids[lvl][1]] offset = fcos_head_dic['proposal_generator.fcos_head.cls_tower.%d.bias' % ids[lvl][1]] copy_conv_gn(head.cls_convs[lvl], w, b, scale, offset, use_gpu) # conv + gn w = fcos_head_dic['proposal_generator.fcos_head.bbox_tower.%d.weight' % ids[lvl][0]] b = fcos_head_dic['proposal_generator.fcos_head.bbox_tower.%d.bias' % ids[lvl][0]] scale = fcos_head_dic['proposal_generator.fcos_head.bbox_tower.%d.weight' % ids[lvl][1]] offset = fcos_head_dic['proposal_generator.fcos_head.bbox_tower.%d.bias' % ids[lvl][1]] copy_conv_gn(head.reg_convs[lvl], w, b, scale, offset, use_gpu) # 类别分支最后的conv w = fcos_head_dic['proposal_generator.fcos_head.cls_logits.weight'] b = fcos_head_dic['proposal_generator.fcos_head.cls_logits.bias'] copy_conv(head.cls_pred.conv, w, b, use_gpu) # 坐标分支最后的conv w = fcos_head_dic['proposal_generator.fcos_head.bbox_pred.weight'] b = fcos_head_dic['proposal_generator.fcos_head.bbox_pred.bias'] copy_conv(head.reg_pred.conv, w, b, use_gpu) # centerness分支最后的conv w = fcos_head_dic['proposal_generator.fcos_head.ctrness.weight'] b = fcos_head_dic['proposal_generator.fcos_head.ctrness.bias'] copy_conv(head.ctn_pred.conv, w, b, use_gpu) # 3个scale。请注意,AdelaiDet在head部分是从小感受野到大感受野遍历,而PaddleDetection是从大感受野到小感受野遍历。所以这里scale顺序反过来。 scale_0 = fcos_head_dic['proposal_generator.fcos_head.scales.0.scale'] scale_1 = fcos_head_dic['proposal_generator.fcos_head.scales.1.scale'] scale_2 = fcos_head_dic['proposal_generator.fcos_head.scales.2.scale'] if use_gpu: head.scales_on_reg[2].data = torch.Tensor(scale_0).cuda() head.scales_on_reg[1].data = torch.Tensor(scale_1).cuda() head.scales_on_reg[0].data = torch.Tensor(scale_2).cuda() else: head.scales_on_reg[2].data = torch.Tensor(scale_0) head.scales_on_reg[1].data = torch.Tensor(scale_1) head.scales_on_reg[0].data = torch.Tensor(scale_2) elif ss[-1] == 'pdparams': state_dict = fluid.io.load_program_state(args.ckpt) backbone_dic = {} scale_on_reg_dic = {} 
fpn_dic = {} head_dic = {} others = {} for key, value in state_dict.items(): # if 'tracked' in key: # continue if 'branch' in key: backbone_dic[key] = value elif 'scale_on_reg' in key: scale_on_reg_dic[key] = value elif 'fpn' in key: fpn_dic[key] = value elif 'fcos_head' in key: head_dic[key] = value else: others[key] = value backbone = model.backbone fpn = model.fpn head = model.head if isinstance(backbone, Resnet50Vb): resnet = backbone # AdelaiDet里输入图片使用了BGR格式。这里做一下手脚使输入图片默认是RGB格式。 w = state_dict['conv1_weights'] scale = state_dict['bn_conv1_scale'] offset = state_dict['bn_conv1_offset'] copy_conv_af(backbone.stage1_conv1_1, w, scale, offset, use_gpu) nums = [3, 4, 6, 3] for nid, num in enumerate(nums): stage_name = 'res' + str(nid + 2) for kk in range(num): block_name = stage_name + chr(ord("a") + kk) conv_name1 = block_name + "_branch2a" bn_name1 = 'bn' + conv_name1[3:] w = backbone_dic[conv_name1 + '_weights'] scale = backbone_dic[bn_name1 + '_scale'] offset = backbone_dic[bn_name1 + '_offset'] copy_conv_af(resnet.get_block('stage%d_%d' % (2 + nid, kk)).conv1, w, scale, offset, use_gpu) conv_name2 = block_name + "_branch2b" bn_name2 = 'bn' + conv_name2[3:] w = state_dict[conv_name2 + '_weights'] scale = state_dict[bn_name2 + '_scale'] offset = state_dict[bn_name2 + '_offset'] copy_conv_af(resnet.get_block('stage%d_%d' % (2 + nid, kk)).conv2, w, scale, offset, use_gpu) conv_name3 = block_name + "_branch2c" bn_name3 = 'bn' + conv_name3[3:] w = backbone_dic[conv_name3 + '_weights'] scale = backbone_dic[bn_name3 + '_scale'] offset = backbone_dic[bn_name3 + '_offset'] copy_conv_af(resnet.get_block('stage%d_%d' % (2 + nid, kk)).conv3, w, scale, offset, use_gpu) # 每个stage的第一个卷积块才有4个卷积层 if kk == 0: shortcut_name = block_name + "_branch1" shortcut_bn_name = 'bn' + shortcut_name[3:] w = backbone_dic[shortcut_name + '_weights'] scale = backbone_dic[shortcut_bn_name + '_scale'] offset = backbone_dic[shortcut_bn_name + '_offset'] copy_conv_af(resnet.get_block('stage%d_%d' 
% (2 + nid, kk)).conv4, w, scale, offset, use_gpu) # fpn w = fpn_dic['fpn_inner_res5_sum_w'] b = fpn_dic['fpn_inner_res5_sum_b'] copy_conv(fpn.fpn_inner_convs[0].conv, w, b, use_gpu) w = fpn_dic['fpn_inner_res4_sum_lateral_w'] b = fpn_dic['fpn_inner_res4_sum_lateral_b'] copy_conv(fpn.fpn_inner_convs[1].conv, w, b, use_gpu) w = fpn_dic['fpn_inner_res3_sum_lateral_w'] b = fpn_dic['fpn_inner_res3_sum_lateral_b'] copy_conv(fpn.fpn_inner_convs[2].conv, w, b, use_gpu) w = fpn_dic['fpn_res5_sum_w'] b = fpn_dic['fpn_res5_sum_b'] copy_conv(fpn.fpn_convs[0].conv, w, b, use_gpu) w = fpn_dic['fpn_res4_sum_w'] b = fpn_dic['fpn_res4_sum_b'] copy_conv(fpn.fpn_convs[1].conv, w, b, use_gpu) w = fpn_dic['fpn_res3_sum_w'] b = fpn_dic['fpn_res3_sum_b'] copy_conv(fpn.fpn_convs[2].conv, w, b, use_gpu) w = fpn_dic['fpn_6_w'] b = fpn_dic['fpn_6_b'] copy_conv(fpn.extra_convs[0].conv, w, b, use_gpu) w = fpn_dic['fpn_7_w'] b = fpn_dic['fpn_7_b'] copy_conv(fpn.extra_convs[1].conv, w, b, use_gpu) # head num_convs = 4 for lvl in range(0, num_convs): # conv + gn conv_cls_name = 'fcos_head_cls_tower_conv_{}'.format(lvl) norm_name = conv_cls_name + "_norm" w = head_dic[conv_cls_name + "_weights"] b = head_dic[conv_cls_name + "_bias"] scale = head_dic[norm_name + "_scale"] offset = head_dic[norm_name + "_offset"] copy_conv_gn(head.cls_convs[lvl], w, b, scale, offset, use_gpu) # conv + gn conv_reg_name = 'fcos_head_reg_tower_conv_{}'.format(lvl) norm_name = conv_reg_name + "_norm" w = head_dic[conv_reg_name + "_weights"] b = head_dic[conv_reg_name + "_bias"] scale = head_dic[norm_name + "_scale"] offset = head_dic[norm_name + "_offset"] copy_conv_gn(head.reg_convs[lvl], w, b, scale, offset, use_gpu) # 类别分支最后的conv conv_cls_name = "fcos_head_cls" w = head_dic[conv_cls_name + "_weights"] b = head_dic[conv_cls_name + "_bias"] copy_conv(head.cls_pred.conv, w, b, use_gpu) # 坐标分支最后的conv conv_reg_name = "fcos_head_reg" w = head_dic[conv_reg_name + "_weights"] b = head_dic[conv_reg_name + "_bias"] 
copy_conv(head.reg_pred.conv, w, b, use_gpu) # centerness分支最后的conv conv_centerness_name = "fcos_head_centerness" w = head_dic[conv_centerness_name + "_weights"] b = head_dic[conv_centerness_name + "_bias"] copy_conv(head.ctn_pred.conv, w, b, use_gpu) # 5个scale fpn_names = ['fpn_7', 'fpn_6', 'fpn_res5_sum', 'fpn_res4_sum', 'fpn_res3_sum'] i = 0 for fpn_name in fpn_names: scale_i = scale_on_reg_dic["%s_scale_on_reg" % fpn_name] if use_gpu: head.scales_on_reg[i].data = torch.Tensor(scale_i).cuda() else: head.scales_on_reg[i].data = torch.Tensor(scale_i) i += 1 else: raise NotImplementedError("Architectures \'{}\' is not implemented.".format(model_class_name)) # save checkpoint. ckpt_state = { "start_epoch": 0, "model": model.state_dict(), "optimizer": None, } torch.save(ckpt_state, args.output_ckpt)
@deprecated('This will be removed in v0.4.0. Please use '
            'scikitplot.metrics.plot_silhouette instead.')
def plot_silhouette(clf, X, title='Silhouette Analysis', metric='euclidean',
                    copy=True, ax=None, figsize=None, cmap='nipy_spectral',
                    title_fontsize="large", text_fontsize="medium"):
    """Plot a silhouette analysis of the clusters found by ``clf.fit_predict``.

    Args:
        clf: Clusterer instance implementing ``fit`` and ``fit_predict``.

        X (array-like, shape (n_samples, n_features)): Data to cluster.

        title (string, optional): Title of the generated plot. Defaults to
            "Silhouette Analysis".

        metric (string or callable, optional): Distance metric passed to
            ``silhouette_score``/``silhouette_samples``. Use "precomputed"
            when ``X`` is itself a distance array.

        copy (boolean, optional): When True, ``fit_predict`` is run on a
            clone of **clf** so the caller's instance is not mutated.

        ax (:class:`matplotlib.axes.Axes`, optional): Axes to draw on. A new
            figure is created when None.

        figsize (2-tuple, optional): Figure size when a new figure is made.

        cmap (string or :class:`matplotlib.colors.Colormap`, optional):
            Colormap used to shade each cluster.

        title_fontsize (string or int, optional): Matplotlib-style fontsize
            for the title. Defaults to "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsize
            for labels and legend. Defaults to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes the plot was drawn on.
    """
    if copy:
        clf = clone(clf)

    labels = clf.fit_predict(X)
    n_clusters = len(set(labels))

    avg_score = silhouette_score(X, labels, metric=metric)
    per_sample = silhouette_samples(X, labels, metric=metric)

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)
    ax.set_xlim([-0.1, 1])
    # Leave a 10-unit gap between cluster bands plus padding top and bottom.
    ax.set_ylim([0, len(X) + (n_clusters + 1) * 10 + 10])
    ax.set_xlabel('Silhouette coefficient values', fontsize=text_fontsize)
    ax.set_ylabel('Cluster label', fontsize=text_fontsize)

    lower = 10
    for idx in range(n_clusters):
        values = per_sample[labels == idx]
        values.sort()

        band = values.shape[0]
        upper = lower + band

        shade = plt.cm.get_cmap(cmap)(float(idx) / n_clusters)
        ax.fill_betweenx(np.arange(lower, upper), 0, values,
                         facecolor=shade, edgecolor=shade, alpha=0.7)
        ax.text(-0.05, lower + 0.5 * band, str(idx),
                fontsize=text_fontsize)

        lower = upper + 10

    ax.axvline(x=avg_score, color="red", linestyle="--",
               label='Silhouette score: {0:0.3f}'.format(avg_score))

    ax.set_yticks([])  # Clear the y-axis labels / ticks
    ax.set_xticks(np.arange(-0.1, 1.0, 0.2))

    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc='best', fontsize=text_fontsize)

    return ax
@deprecated('This will be removed in v0.4.0. Please use '
            'scikitplot.cluster.plot_elbow_curve instead.')
def plot_elbow_curve(clf, X, title='Elbow Plot', cluster_ranges=None, ax=None,
                     figsize=None, title_fontsize="large",
                     text_fontsize="medium"):
    """Plots elbow curve of different values of K for KMeans clustering.

    Args:
        clf: Clusterer instance that implements ``fit``, ``fit_predict`` and
            ``score`` and exposes an ``n_clusters`` attribute
            (e.g. ``sklearn.cluster.KMeans``).

        X (array-like, shape (n_samples, n_features)): Data to cluster.

        title (string, optional): Title of the generated plot. Defaults to
            "Elbow Plot".

        cluster_ranges (None or :obj:`list` of int, optional): List of
            n_clusters for which to plot the explained variances. Defaults
            to ``range(1, 12, 2)``.

        ax (:class:`matplotlib.axes.Axes`, optional): Axes to draw on. A new
            figure is created when None.

        figsize (2-tuple, optional): Figure size when a new figure is made.

        title_fontsize (string or int, optional): Matplotlib-style fontsize
            for the title. Defaults to "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsize
            for labels. Defaults to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes the plot was drawn on.

    Raises:
        TypeError: when ``clf`` has no ``n_clusters`` attribute.
    """
    # Doc fix: the original docstring documented a `copy` parameter that is
    # not part of this function's signature; it has been removed here.
    if cluster_ranges is None:
        cluster_ranges = range(1, 12, 2)
    else:
        cluster_ranges = sorted(cluster_ranges)

    if not hasattr(clf, 'n_clusters'):
        raise TypeError('"n_clusters" attribute not in classifier. '
                        'Cannot plot elbow method.')

    # Fit one clone per candidate K and record its score. (The original
    # stored these scores in a variable misleadingly named `clfs`.)
    scores = []
    for n in cluster_ranges:
        candidate = clone(clf)
        setattr(candidate, "n_clusters", n)
        scores.append(candidate.fit(X).score(X))

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)
    ax.plot(cluster_ranges, np.absolute(scores), 'b*-')
    ax.grid(True)
    ax.set_xlabel('Number of clusters', fontsize=text_fontsize)
    ax.set_ylabel('Sum of Squared Errors', fontsize=text_fontsize)
    ax.tick_params(labelsize=text_fontsize)

    return ax
@deprecated('This will be removed in v0.4.0. Please use '
            'scikitplot.decomposition.plot_pca_component_variance instead.')
def plot_pca_component_variance(clf, title='PCA Component Explained Variances',
                                target_explained_variance=0.75, ax=None,
                                figsize=None, title_fontsize="large",
                                text_fontsize="medium"):
    """Plot the cumulative explained-variance ratios of a fitted PCA.

    Args:
        clf: PCA instance with the ``explained_variance_ratio_`` attribute.

        title (string, optional): Title of the generated plot. Defaults to
            "PCA Component Explained Variances".

        target_explained_variance (float, optional): The minimum number of
            principal components reaching this cumulative ratio is
            emphasized on the plot. Defaults to 0.75.

        ax (:class:`matplotlib.axes.Axes`, optional): Axes to draw on. A new
            figure is created when None.

        figsize (2-tuple, optional): Figure size when a new figure is made.

        title_fontsize (string or int, optional): Matplotlib-style fontsize
            for the title. Defaults to "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsize
            for labels and legend. Defaults to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes the plot was drawn on.

    Raises:
        TypeError: when ``clf`` has not been fitted (no
            ``explained_variance_ratio_``).
    """
    if not hasattr(clf, 'explained_variance_ratio_'):
        raise TypeError('"clf" does not have explained_variance_ratio_ '
                        'attribute. Has the PCA been fitted?')

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)

    cumulative = np.cumsum(clf.explained_variance_ratio_)

    # First component count whose cumulative ratio reaches the target.
    idx = np.searchsorted(cumulative, target_explained_variance)

    ax.plot(range(len(clf.explained_variance_ratio_) + 1),
            np.concatenate(([0], cumulative)), '*-')
    ax.grid(True)
    ax.set_xlabel('First n principal components', fontsize=text_fontsize)
    ax.set_ylabel('Explained variance ratio of first n components',
                  fontsize=text_fontsize)
    ax.set_ylim([-0.02, 1.02])

    if idx < len(cumulative):
        ax.plot(idx + 1, cumulative[idx], 'ro',
                label='{0:0.3f} Explained variance ratio for '
                'first {1} components'.format(cumulative[idx], idx + 1),
                markersize=4, markeredgewidth=4)
        ax.axhline(cumulative[idx], linestyle=':', lw=3, color='black')

    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc="best", fontsize=text_fontsize)

    return ax
X (array-like, shape (n_samples, n_features)): Feature set to project, where n_samples is the number of samples and n_features is the number of features. y (array-like, shape (n_samples) or (n_samples, n_features)): Target relative to X for labeling. title (string, optional): Title of the generated plot. Defaults to "PCA 2-D Projection" ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. cmap (string or :class:`matplotlib.colors.Colormap` instance, optional): Colormap used for plotting the projection. View Matplotlib Colormap documentation for available options. https://matplotlib.org/users/colormaps.html title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`):
def select_col(self, key, where=None):
    """Return the values of column ``key`` for rows that satisfy ``where``.

    args:
        key: column label of the data to return

    kwds:
        where: constraints applied to the table before the column is read

    returns:
        a list of the surviving values

    raises:
        KeyError: when ``key`` is not a column of the table
        Exception: when the data columns have unequal lengths
    """
    if where is None:
        where = []

    # 1. data columns must have equal lengths
    if not self._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')

    # 2. check the supplied arguments
    # BUG FIX: the original raised KeyError(val) where `val` was never
    # defined, turning a bad key into a NameError instead of a KeyError.
    if key not in list(self.keys()):
        raise KeyError(key)

    if where == []:
        # No constraints: return a shallow copy of the whole column.
        return copy(self[key])
    else:
        # Build a filtered sqlite table holding just this column and
        # read the surviving values back from the cursor.
        self._build_sqlite3_tbl([key], where)
        self._execute('select * from TBL')
        return [r[0] for r in self.cur]
def sort(self, order=None):
    """Sort the table in-place.

    kwds:
        order: list of column names to sort by; append " desc" (or
            " asc") to a name to control the sort direction

    returns:
        None

    raises:
        Exception: when the table is empty or columns have unequal lengths
        TypeError: when ``order`` is not iterable
        KeyError: when an ``order`` entry names an unknown column
    """
    if order is None:
        order = []

    # Check arguments
    if self == {}:
        raise Exception('Table must have data to sort data')

    if not self._are_col_lengths_equal():
        raise Exception('columns have unequal lengths')

    if not hasattr(order, '__iter__'):
        raise TypeError("'%s' object is not iterable"
                        % type(order).__name__)

    # Default: sort by every column, in key order.
    if order == []:
        order = list(self.keys())

    # Translate each "name [desc|asc]" spec into the hashed sqlite
    # column name. Splitting into tokens avoids false matches when one
    # column name is a substring of another.
    for i, spec in enumerate(order):
        tokens = spec.split()

        if tokens[0] not in list(self.keys()):
            raise KeyError(spec)

        if len(tokens) == 1:
            order[i] = _sha1(tokens[0])
        elif len(tokens) == 2:
            if tokens[1].lower() not in ['desc', 'asc']:
                raise Exception("'order arg must be 'DESC' or 'ASC'")
            order[i] = '%s %s' % (_sha1(tokens[0]), tokens[1])
        else:
            raise Exception('too many parameters specified')

    # Materialize the table, query it sorted, and read the rows back.
    self._build_sqlite3_tbl(list(self.keys()))
    self._execute('select * from TBL order by ' + ', '.join(order))

    rows = [list(r) for r in self.cur]
    columns = list(zip(*rows))  # transpose rows -> columns
    for i, name in enumerate(self.keys()):
        self[name] = list(columns[i])
def where(self, where):
    """Return a new DataFrame holding the rows that satisfy ``where``.

    The receiving DataFrame is left untouched.

    args:
        where: criterion to apply to the new table

    returns:
        a new :class:`DataFrame`
    """
    filtered = DataFrame()

    # Materialize a filtered sqlite table and read it back column-wise.
    self._build_sqlite3_tbl(list(self.keys()), where)
    self._execute('select * from TBL')

    rows = list(self.cur)
    for name, column in zip(list(self.keys()), list(zip(*rows))):
        filtered[name] = list(column)

    return filtered

def where_update(self, where):
    """Apply the ``where`` filter to this DataFrame in-place.

    args:
        where: criterion to apply to the table

    returns:
        None
    """
    self._build_sqlite3_tbl(list(self.keys()), where)
    self._execute('select * from TBL')

    rows = list(self.cur)
    for name, column in zip(list(self.keys()), list(zip(*rows))):
        # Re-insert each surviving column under its original key.
        del self[name]
        self[name] = list(column)
def validate(self, criteria, verbose=False, report=False):
    """Validate the data in the table.

    args:
        criteria: a dict whose keys should correspond to columns in the
            table; each value is a function taking a single parameter
            and returning a boolean.

    kwds:
        verbose: True prints real-time pass ('.') / fail ('X') feedback
        report: True prints a summary report upon completion

    returns:
        True when every tested value satisfied its predicate and every
        criteria key named an existing column; False otherwise.

    raises:
        Exception: when the table is empty
        TypeError: when ``criteria`` is not a mapping
    """
    # do some checking
    if self == {}:
        raise Exception('table must have data to validate data')

    try:
        c = set(criteria.keys())
        s = set(self.keys())
    except AttributeError:
        # FIX: was a bare `except:` which also swallowed
        # KeyboardInterrupt / SystemExit.
        raise TypeError('criteria must be mappable type')

    # check if the criteria dict has keys that aren't in self
    all_keys_found = bool((c ^ (c & s)) == set())

    # Fast path: no feedback requested, so skip the bookkeeping.
    if not verbose and not report:
        if all_keys_found:
            return all(all(map(criteria[k], self[k])) for k in criteria)
        else:
            return False

    # Loop through the specified columns and apply the validation
    # function to every value in each column.
    valCounter = Counter()
    reportDict = {}
    for k in (c & s):
        reportDict[k] = []
        if verbose:
            print('\nValidating %s:' % k)

        for i, v in enumerate(self[k]):
            try:
                func = criteria[k]
                result = func(v)
            except Exception:
                # FIX: was a bare `except:`; a predicate that raises is
                # counted as a code failure and treated as not-passing.
                result = False
                valCounter['code_failures'] += 1

            valCounter[result] += 1
            valCounter['n'] += 1

            if result:
                if verbose:
                    print('.', end='')
            else:
                reportDict[k].append(
                    "Error: on index %i value "
                    "'%s' failed validation" % (i, str(v)))
                if verbose:
                    print('X', end='')
        if verbose:
            print()

    # do some book keeping
    pass_or_fail = (valCounter['n'] == valCounter[True]) & all_keys_found

    # print a report if the user has requested one
    if report:
        print('\nReport:')
        for k in (c & s):
            if len(reportDict[k]) > 0:
                print('While validating %s:' % k)
            for line in reportDict[k]:
                print(' ', line)

        print(' Values tested:', valCounter['n'],
              '\n Values passed:', valCounter[True],
              '\n Values failed:', valCounter[False])

        if valCounter['code_failures'] != 0:
            print('\n (%i values failed because '
                  'func(x) did not properly execute)'
                  % valCounter['code_failures'])

        if not all_keys_found:
            print('\n Error: criteria dict contained '
                  'keys not found in table:'
                  '\n ', ', '.join(c ^ (c & s)))

        if pass_or_fail:
            print('\n***Validation PASSED***')
        else:
            print('\n***Validation FAILED***')

    # return the test result
    return pass_or_fail
def attach(self, other):
    """Append the rows of another :class:`DataFrame` onto self.

    args:
        other: a :class:`DataFrame` object whose key set matches self

    returns:
        None

    raises:
        TypeError: when ``other`` is not a DataFrame
        Exception: when columns are ragged, key sets differ, or the
            sqlite column types of self and other do not match
    """
    # Sanity checks before mutating anything.
    if not isinstance(other, DataFrame):
        raise TypeError('second argument must be a DataFrame')

    if not self._are_col_lengths_equal():
        raise Exception('columns in self have unequal lengths')

    if not other._are_col_lengths_equal():
        raise Exception('columns in other have unequal lengths')

    if not set(self.keys()) == set(other.keys()):
        raise Exception('self and other must have the same columns')

    if not all(self._get_sqltype(n) == other._get_sqltype(n)
               for n in self):
        raise Exception('types of self and other must match')

    # Perform the attachment column-by-column.
    for name in list(self.keys()):
        self[name] = np.concatenate((self[name], other[name]))

    # Rebuild the conditions state to reflect the appended rows.
    self.conditions = DictSet([(n, list(self[n])) for n in self])
#!/usr/bin/env python import pyami.quietscipy #builtin import os import re import sys import time import subprocess import glob from optparse import OptionParser #appion from appionlib import basicScript from appionlib import apParam from appionlib import apDisplay from appionlib import apProject from appionlib import apDatabase from appionlib import appiondata from appionlib import apWebScript from appionlib import apThread #leginon import leginon.leginonconfig import sinedon from pyami import mem from pyami import version from pyami import fileutil #===================== #===================== class AppionScript(basicScript.BasicScript): #===================== def __init__(self,optargs=sys.argv[1:],quiet=False,useglobalparams=True,maxnproc=None): """ Starts a new function and gets all the parameters """ ### setup some expected values self.successful_run = False self.clusterjobdata = None self.params = {} sys.stdout.write("\n\n") self.quiet = quiet self.maxnproc = maxnproc self.startmem = mem.active() self.t0 = time.time() self.createDefaultStats() self.timestamp = apParam.makeTimestamp() self.argdict = {} self.optdict = {} apDisplay.printMsg("Time stamp: "+self.timestamp) self.functionname = apParam.getFunctionName(sys.argv[0]) apDisplay.printMsg("Function name: "+self.functionname) self.appiondir = apParam.getAppionDirectory() apDisplay.printMsg("Appion directory: "+self.appiondir) hostname = apParam.getHostname() apDisplay.printMsg("Processing hostname: "+hostname) self.parsePythonPath() # loadavg = os.getloadavg()[0] # if loadavg > 2.0: # apDisplay.printMsg("Load average is high "+str(round(loadavg,2))) # loadsquared = loadavg*loadavg # time.sleep(loadavg) # apDisplay.printMsg("New load average "+str(round(os.getloadavg()[0],2))) self.setLockname('lock') ### setup default parser: run directory, etc. 
self.setParams(optargs,useglobalparams) #if 'outdir' in self.params and self.params['outdir'] is not None: # self.params['rundir'] = self.params['outdir'] ### setup correct database after we have read the project id if 'projectid' in self.params and self.params['projectid'] is not None: apDisplay.printMsg("Using split database") # use a project database newdbname = apProject.getAppionDBFromProjectId(self.params['projectid']) sinedon.setConfig('appiondata', db=newdbname) apDisplay.printColor("Connected to database: '"+newdbname+"'", "green") ### check if user wants to print help message if 'commit' in self.params and self.params['commit'] is True: apDisplay.printMsg("Committing data to database") else: apDisplay.printWarning("Not committing data to database") self.checkConflicts() if useglobalparams is True: self.checkGlobalConflicts() ### setup run directory self.setProcessingDirName() self.setupRunDirectory() ### Start pool of threads to run subprocesses. ### Later you will use self.process_launcher.launch(...) to ### put commands into the queue. ### There is currently a timeout built into it that will cause ### the threads to die if they have no tasks after 10 seconds. self.process_launcher = apThread.ProcessLauncher(2, self.params['rundir']) ### write function log self.logfile = apParam.writeFunctionLog(sys.argv, msg=(not self.quiet)) ### upload command line parameters to database self.uploadScriptData() ### any custom init functions go here self.onInit() #===================== def argumentFromParamDest(self, dest): """ For a given optparse destination (dest, e.g., 'runname') this will determine the command line argument (e.g., -n) """ if len(self.argdict) == 0: for opt in self.parser.option_list: arg = str(opt.get_opt_string.im_self) if '/' in arg: args = arg.split('/') arg = args[-1:][0] self.argdict[opt.dest] = arg self.optdict[opt.dest] = opt if dest in self.argdict: return self.argdict[dest] return "????" 
#===================== def usageFromParamDest(self, dest, value): """ For a given optparse destination (dest, e.g., 'commit') and value (e.g., 'False') this will generate the command line usage (e.g., '--no-commit') """ usage = None if value is None: return None argument = self.argumentFromParamDest(dest) if not dest in self.optdict: return None optaction = self.optdict[dest].action if optaction == 'store': #opttype = self.optdict[dest].type value = str(value) if not ' ' in value: usage = argument+"="+value else: usage = argument+"='"+value+"'" elif optaction == 'store_true' or optaction == 'store_false': storage = 'store_'+str(value).lower() for opt in self.parser.option_list: if opt.dest == dest and opt.action == storage: arg = str(opt.get_opt_string.im_self) if '/' in arg: args = arg.split('/') arg = args[-1:][0] usage = arg return usage #===================== def getSessionData(self): sessiondata = None if 'sessionname' in self.params and self.params['sessionname'] is not None: sessiondata = apDatabase.getSessionDataFromSessionName(self.params['sessionname']) if not sessiondata and 'stackid' in self.params: from appionlib import apStack sessiondata = apStack.getSessionDataFromStackId(self.params['stackid']) if not sessiondata: ### works with only canonical session names s = re.search('/([0-9][0-9][a-z][a-z][a-z][0-9][0-9][^/]*)/', self.params['rundir']) if s: self.params['sessionname'] = s.groups()[0] sessiondata = apDatabase.getSessionDataFromSessionName(self.params['sessionname']) return sessiondata #===================== def getClusterJobData(self): if self.clusterjobdata is not None: return self.clusterjobdata if not 'commit' in self.params or self.params['commit'] is False: return None pathq = appiondata.ApPathData(path=os.path.abspath(self.params['rundir'])) clustq = appiondata.ApAppionJobData() clustq['path'] = pathq clustq['jobtype'] = self.functionname.lower() clustdatas = clustq.query() if not clustdatas: ### insert a cluster job clustq['name'] = 
self.params['runname']+".appionsub.job" clustq['clusterpath'] = pathq clustq['user'] = apParam.getUsername() clustq['cluster'] = apParam.getHostname() clustq['status'] = "R" clustq['session'] = self.getSessionData() ### need a proper way to create a jobtype clustq['jobtype']=self.params['jobtype'] if not clustq['jobtype']: clustq['jobtype'] = self.functionname.lower() clustq.insert() self.clusterjobdata = clustq return clustq elif len(clustdatas) == 1: ### we have an entry ### we need to say that we are running apWebScript.setJobToRun(clustdatas[0].dbid) self.clusterjobdata = clustdatas[0] return clustdatas[0] else: ### special case: more than one job with given path apDisplay.printWarning("More than one cluster job has this path") self.clusterjobdata = clustdatas[0] return clustdatas[0] #===================== def uploadScriptData(self): """ Using tables to track program run parameters in a generic fashion inspired by <NAME> and <NAME> from the Xmipp team/Carazo lab """ apDisplay.printMsg("Uploading ScriptData....") prognameq = appiondata.ScriptProgramName() prognameq['name'] = self.functionname userq = appiondata.ScriptUserName() userdict = apParam.getUserDict() if userdict: userq['name'] = userdict['username'] userq['uid'] = userdict['uid'] userq['gid'] = userdict['gid'] userq['fullname'] = userdict['fullname'] unixshell = userdict['unixshell'] else: userq['name'] = "unknown" unixshell = None hostq = appiondata.ScriptHostName() hostq['name'] = apParam.getHostname() hostq['ip'] = apParam.getHostIP() hostq['system'] = apParam.getSystemName() hostq['distro'] = apParam.getLinuxDistro() hostq['nproc'] = apParam.getNumProcessors() hostq['memory'] = apParam.getTotalMemory() hostq['cpu_vendor'] = apParam.getCPUVendor() hostq['gpu_vendor'] = apParam.getGPUVendor() hostq['arch'] = apParam.getMachineArch() progrunq = appiondata.ScriptProgramRun() progrunq['runname'] = self.params['runname'] progrunq['progname'] = prognameq progrunq['username'] = userq progrunq['hostname'] = 
hostq progrunq['unixshell'] = unixshell progrunq['rundir'] = appiondata.ApPathData(path=os.path.abspath(self.params['rundir'])) progrunq['job'] = self.getClusterJobData() appiondir = apParam.getAppionDirectory() ### get appion version/subversion revision progrunq['revision'] = None versionfile = os.path.join(appiondir, "appionlib/version.txt") if os.path.isfile(versionfile): f = open(versionfile, 'r') line = f.readline() f.close() sline = line.strip() progrunq['revision'] = sline if os.path.isdir(os.path.join(appiondir, ".svn")): if progrunq['revision'] is None: progrunq['revision'] = version.getSubversionRevision(appiondir) else: progrunq['revision'] += "-"+version.getSubversionRevision(appiondir) if not progrunq['revision']: progrunq['revision'] = 'unknown' apDisplay.printMsg("Running Appion version '%s'"%(progrunq['revision'])) progrunq['appion_path'] = appiondata.ApPathData(path=os.path.abspath(appiondir)) for paramname in self.params.keys(): paramnameq = appiondata.ScriptParamName() paramnameq['name'] = paramname paramnameq['progname'] = prognameq paramvalueq = appiondata.ScriptParamValue() paramvalueq['value'] = str(self.params[paramname]) usage = self.usageFromParamDest(paramname, self.params[paramname]) #print "usage: ", usage paramvalueq['usage'] = usage paramvalueq['paramname'] = paramnameq paramvalueq['progrun'] = progrunq if usage is not None: paramvalueq.insert() #===================== def setupRunDirectory(self): """ Set the run directory """ if self.params['rundir'] is None: apDisplay.printWarning("run directory not defined, automatically setting it") self.setProcessingDirName() self.setRunDir() if self.params['rundir'] is None: apDisplay.printError("No run directory was set") if self.quiet is False: apDisplay.printMsg("Run directory: "+self.params['rundir']) #if apDatabase.queryDirectory(self.params['rundir']): # self.preExistingDirectoryError() #create the run directory, if needed apParam.createDirectory(self.params['rundir'], warning=(not 
self.quiet)) os.chdir(self.params['rundir']) #===================== def __del__(self): """ This functions runs whenever the program stops, even if it crashes """ if self.successful_run is False: clustdata = self.getClusterJobData() if clustdata is None: return from appionlib import apWebScript apWebScript.setJobToError(clustdata.dbid) #===================== def close(self): ### run basic script closing functions basicScript.BasicScript.close(self) apDisplay.printMsg("Run directory:\n "+self.params['rundir']) ### additionally set to done is database if self.params['commit'] is True: clustdata = self.getClusterJobData() apWebScript.setJobToDone(clustdata.dbid) self.successful_run = True def setParams(self,optargs,useglobalparams=True): self.parser = OptionParser() if useglobalparams is True: self.setupGlobalParserOptions() self.setupParserOptions() self.params = apParam.convertParserToParams(self.parser) self.checkForDuplicateCommandLineInputs(optargs) #===================== def setupGlobalParserOptions(self): """ set the input parameters """ self.parser.add_option("-n", "--runname", dest="runname", default=self.timestamp, help="Name for processing run, e.g. --runname=run1", metavar="NAME") self.parser.add_option("-d", "--description", dest="description", help="Description of the processing run (must be in quotes)", metavar="TEXT") self.parser.add_option("-p", "--projectid", dest="projectid", type="int", help="Project id associated with processing run, e.g. --projectid=159", metavar="#") self.parser.add_option("-R", "--rundir", "--outdir", dest="rundir", help="Run path for storing output, e.g. 
--rundir=/data/appion/runs/run1", metavar="PATH") self.parser.add_option("-C", "--commit", dest="commit", default=True, action="store_true", help="Commit processing run to database") self.parser.add_option("--no-commit", dest="commit", default=True, action="store_false", help="Do not commit processing run to database") self.parser.add_option("--expid", "--expId", dest="expid", type="int", help="Session id associated with processing run, e.g. --expId=7159", metavar="#") self.parser.add_option("--nproc", dest="nproc", type="int", help="Number of processor to use", metavar="#") # jobtype is a dummy option for now so that it is possible to use the same command line that # is fed to runJob.py to direct command line running. Do not use the resulting param. self.parser.add_option("--jobtype", dest="jobtype", help="Job Type of processing run, e.g., partalign", metavar="X") #===================== def checkGlobalConflicts(self): """ make sure the necessary parameters are set correctly """ if self.params['runname'] is None: apDisplay.printError("enter a runname, e.g. --runname=run1") if self.params['projectid'] is None: apDisplay.printError("enter a project id, e.g. --projectid=159") if self.maxnproc is not None and self.params['nproc'] is not None: if self.params['nproc'] > self.maxnproc: apDisplay.printWarning('You have specify --nproc=%d.\n However,we know from experience larger than %d processors in this script can cause problem.\n We have therefore changed --nproc to %d for you.' 
% (self.params['nproc'],self.maxnproc,self.maxnproc)) self.params['nproc'] = self.maxnproc ####################################################### #### ITEMS BELOW CAN BE SPECIFIED IN A NEW PROGRAM #### ####################################################### #===================== def preExistingDirectoryError(self): apDisplay.printWarning("Run directory already exists in the database") #===================== def setupParserOptions(self): """ set the input parameters this function should be rewritten in each program """ apDisplay.printError("you did not create a 'setupParserOptions' function in your script") self.parser.set_usage("Usage: %prog --commit --description='<text>' [options]") self.parser.add_option("--stackid", dest="stackid", type="int", help="ID for particle stack (optional)", metavar="INT") #===================== def checkConflicts(self): """ make sure the necessary parameters are set correctly """ apDisplay.printError("you did not create a 'checkConflicts' function in your script") if self.params['runname'] is None: apDisplay.printError("enter an unique run name, e.g. --runname=run1") if self.params['description'] is None: apDisplay.printError("enter a description, e.g. --description='awesome data'") #===================== def setProcessingDirName(self): self.processdirname = self.functionname def getDefaultBaseAppionDir(self,sessiondata,subdirs=[]): ''' This function sets default base appiondir using leginon.cfg image path settings when rundir is not specified in the script. Such case will only occur if user construct the script his/herself, not from web. 
''' path = leginon.leginonconfig.IMAGE_PATH if path: path = os.path.join(path,sessiondata['name']) else: path = os.path.abspath(sessiondata['image path']) path = re.sub("/rawdata","",path) pieces = path.split('leginon') path = 'leginon'.join(pieces[:-1]) + 'appion' + pieces[-1] for subdir in subdirs: path = os.path.join(path, subdir) return path #===================== def setRunDir(self): """ this function only runs if no rundir is defined at the command line """ if self.params['rundir'] is None: if ('sessionname' in self.params and self.params['sessionname'] is not None ): # command line users may use sessionname rather than expId sessiondata = apDatabase.getSessionDataFromSessionName(self.params['sessionname']) self.params['rundir'] = self.getDefaultBaseAppionDir(sessiondata,[self.processdirname,self.params['runname']]) else: if ('expId' in self.params and self.params['expId']): # expId should always be included from appionwrapper derived appionscript sessiondata = apDatabase.getSessionDataFromSessionId(self.params['expId']) self.params['rundir'] = self.getDefaultBaseAppionDir(sessiondata,[self.processdirname,self.params['runname']]) # The rest should not be needed with appionwrapper format from appionlib import apStack if ( self.params['rundir'] is None and 'reconid' in self.params and self.params['reconid'] is not None ): self.params['stackid'] = apStack.getStackIdFromRecon(self.params['reconid'], msg=False) if ( self.params['rundir'] is None and 'stackid' in self.params and self.params['stackid'] is not None ): #auto set the run directory stackdata = apStack.getOnlyStackData(self.params['stackid'], msg=False) path = os.path.abspath(stackdata['path']['path']) path = os.path.dirname(path) path = os.path.dirname(path) self.params['rundir'] = os.path.join(path, self.processdirname, self.params['runname']) self.params['outdir'] = self.params['rundir'] #===================== def start(self): """ this is the main component of the script where all the processing is 
done """ raise NotImplementedError() #===================== def onInit(self): return #===================== def onClose(self): return def runAppionScriptInIndependentThread(self,cmd): self.process_launcher.launch(cmd, shell=True) def runAppionScriptInSubprocess(self,cmd,logfilepath): # Running another AppionScript as a subprocess apDisplay.printMsg('running AppionScript:') apDisplay.printMsg('------------------------------------------------') apDisplay.printMsg(cmd) # stderr=subprocess.PIPE only works with shell=True with python 2.4. # works on python 2.6. Use shell=True now but shell=True does not # work with path changed by appionwrapper. It behaves as if the wrapper # is not used proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout_value = proc.communicate()[0] while proc.returncode is None: time.wait(60) stdout_value = proc.communicate()[0] try: logdir = os.path.dirname(logfilepath) apParam.createDirectory(logdir) file = open(logfilepath,'w') except: apDisplay.printError('Log file can not be created, process did not run.') file.write(stdout_value) file.close() if proc.returncode > 0: pieces = cmd.split(' ') apDisplay.printWarning('AppionScript %s had an error.
#!/usr/bin/python # -*- coding: UTF-8 -*- from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.models import Model from tensorflow.keras.preprocessing import image from tensorflow.keras.preprocessing import sequence from deprecated import deprecated import os import numpy as np from tqdm import tqdm from PIL import Image import pickle import pandas as pd import re import string from collections import Counter from lib.utils_xrh import * class BatchDataGenerator: """ 数据批量生成器 当数据量过大时, 受限于内存空间, 不能每次都将全部数据喂给模型, 而是分批输入 Author: xrh Date: 2021-9-25 """ def __init__(self, dataset_dir='cache_data/train_dataset.json'): self.dataset_dir = dataset_dir def read_all(self, n_a, n_vocab, m, batch_size=32, dataset=None): """ 从磁盘中读取整个数据集(json)到内存, 每次随机采样一批数据, 喂入模型进行训练 :param n_a: :param n_vocab: :param m: 数据集的样本总数 :param batch_size: :param dataset: 以 DataFrame 存储的数据集 :return: """ # 只执行一次 if dataset is None: dataset = pd.read_json(self.dataset_dir) image_feature = np.array(dataset['image_feature'].tolist()) caption_encoding = np.array(dataset['caption_encoding'].tolist()) while True: # 每次调用 next() 执行下面的语句 mask = np.random.choice(m, batch_size) # 从 range(m) 中随机采样batch_size 组成list, N - 样本总数 batch_image_feature = image_feature[mask] batch_caption_encoding = caption_encoding[mask] m_batch = np.shape(batch_caption_encoding)[0] # 一个批次的样本的数量 c0 = np.zeros((m_batch, n_a)) # 语言模型的输入 和 输出要错开一个时刻, # eg. 
# output: 今天 /是 /个/好日子/<end> # input: <start>/今天/是/个 /好日子/ caption_out = batch_caption_encoding[:, 1:] # shape(N,39) caption_in = batch_caption_encoding[:, :-1] # shape(N,39) outputs = ArrayUtils.one_hot_array(caption_out, n_vocab) yield ((caption_in, batch_image_feature, c0), outputs) # 必须是 tuple 否则 ValueError: No gradients provided for any variable (Keras 2.4, Tensorflow 2.3.0) @deprecated() def read_by_chunk(self, image_feature_dir,caption_encoding_dir,n_a, n_vocab, m, batch_size=32): """ 读取预处理后的数据集(csv)时, 使用分批次的方式读入内存 :param n_a: :param n_vocab: :param m: 数据集的样本总数 :param batch_size: :return: """ # 只执行一次 image_feature = pd.read_csv(image_feature_dir, header=None, iterator=True) # csv 是如此之大, 无法一次读入内存 caption_encoding = pd.read_csv(caption_encoding_dir, header=None, iterator=True) steps_per_epoch = m // batch_size # 每一个 epoch 要生成的多少批数据 # N - 样本总数 count = 0 while True: # 每次调用 next() 执行下面的语句 batch_image_feature = image_feature.get_chunk(batch_size).iloc[:, 1:] # 排除第一列(索引列) batch_caption_encoding = caption_encoding.get_chunk(batch_size).iloc[:, 1:] batch_image_feature = batch_image_feature.to_numpy() batch_caption_encoding = batch_caption_encoding.to_numpy() N_batch = np.shape(batch_caption_encoding)[0] # 一个批次的样本的数量 c0 = np.zeros((N_batch, n_a)) # 语言模型的输入 和 输出要错开一个时刻, # eg. 
# output: 今天 /是 /个/好日子/<end> # input: <start>/今天/是/个 /好日子/ caption_out = batch_caption_encoding[:, 1:] # shape(N,39) caption_in = batch_caption_encoding[:, :-1] # shape(N,39) outputs = ArrayUtils.one_hot_array(caption_out, n_vocab) yield ((caption_in, batch_image_feature, c0), outputs) # 必须是 tuple 否则 ValueError: No gradients provided for any variable (Keras 2.4, Tensorflow 2.3.0) count += 1 if count > steps_per_epoch: # 所有批次已经走了一遍 image_feature = pd.read_csv(image_feature_dir, header=None, iterator=True) caption_encoding = pd.read_csv(caption_encoding_dir, header=None, iterator=True) count = 0 class DataPreprocess: """ 数据集预处理 主流程见 do_main() Author: xrh Date: 2021-9-25 """ def __init__(self, caption_file_dir='dataset/Flicker8k/Flickr8k.token.txt', image_folder_dir='dataset/Flicker8k/Flicker8k_Dataset/', dataset_dir='cache_data/train_dataset.json', image_caption_dict_dir='cache_data/image_caption_dict.bin', _null_str='<NULL>', _start_str='<START>', _end_str='<END>', _unk_str='<UNK>', ): """ :param caption_file_dir: 图片描述文本的路径 :param image_folder_dir: 图片文件夹的路径 image_path = image_folder_dir + image_name :param image_feature_dir: :param caption_encoding_dir: :param _null_str: 空字符 :param _start_str: 句子的开始字符 :param _end_str: 句子的结束字符 :param _unk_str: 未登录字符 """ self.caption_file_dir = caption_file_dir self.image_folder_dir = image_folder_dir self.dataset_dir = dataset_dir self.image_caption_dict_dir = image_caption_dict_dir self._null_str = _null_str self._start_str = _start_str self._end_str = _end_str self._unk_str = _unk_str # 需要删除的标点符号 remove_chars = string.punctuation # !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ # remove_chars = remove_chars.replace(".", "") # 不删除 句号. 
和 逗号, # remove_chars = remove_chars.replace(",", "") self.remove_chars_re = re.compile('[%s]' % re.escape(remove_chars)) def load_captions_data(self, clean_punctuation=True): """ 读取 图片描述文本, 并将它们和对应的图片进行映射 1.图片描述文本 可以选择是否清除其中的标点符号 :param clean_punctuation: 是否清除文本中的标点符号 :return: caption_mapping: 字典, key 为图片的路径, value 为图片描述的文本列表 text_data: 所有图片描述的文本 """ with open(self.caption_file_dir) as caption_file: caption_data = caption_file.readlines() caption_mapping = {} text_data = [] for line in caption_data: line = line.rstrip("\n") # Image name and captions are separated using a tab img_name, caption = line.split("\t") # Each image is repeated five times for the five different captions. Each # image name has a prefix `#(caption_number)` img_name = img_name.split("#")[0] img_name = os.path.join(self.image_folder_dir, img_name.strip()) if img_name.endswith("jpg"): # 清除句子前后的空格 caption = caption.strip() if clean_punctuation: # 清除句子中的标点符号 caption = self.remove_chars_re.sub(' ', caption) # We will add a start and an end token to each caption caption = self._start_str + " " + caption + " " + self._end_str text_data.append(caption) if img_name in caption_mapping: caption_mapping[img_name].append(caption) else: caption_mapping[img_name] = [caption] return caption_mapping, text_data def train_val_split(self, caption_dict, train_size=0.8, shuffle=True): """ 将数据集划分为 训练数据集 和 验证数据集(测试数据) :param caption_dict: 字典, key 为图片的名字, value 为图片描述的文本列表 :param train_size: 训练数据的比例 :param shuffle: 是否混洗 :return: train_caption_dict : 字典, key 为图片的路径, value 为描述图片的文本列表 validation_caption_dict : 字典, key 为图片的路径 value 为描述图片的文本列表 """ # 1. Get the list of all image names all_images = list(caption_dict.keys()) # 2. Shuffle if necessary if shuffle: np.random.shuffle(all_images) # 3. 
Split into training and validation sets train_size = int(len(caption_dict) * train_size) train_caption_dict = { img_name: caption_dict[img_name] for img_name in all_images[:train_size] } validation_caption_dict = { img_name: caption_dict[img_name] for img_name in all_images[train_size:] } # 4. Return the splits return train_caption_dict, validation_caption_dict def zip_image_encoding_and_caption(self, caption_dict, image_encoding_dict, vocab_obj, max_sentence_length=40, do_persist=True): """ 1.一张图片对应多段描述, 因此需要组合 编码后的图片 和 图片的描述, 作为训练数据集 2.对图片描述的末尾 做 <NULL> 元素的填充, 直到该句子满足目标长度 :param caption_dict: 字典, key 为图片的路径, value 为图片描述的文本列表 :param image_encoding_dict: 字典, key 为图片的名字, value 为 编码后的图片向量 :param vocab_obj: 词典对象 :param max_sentence_length: 图片描述句子的目标长度 :param do_persist: 是否将结果持久化到磁盘 :return: image_feature_list shape:(m, n_image_feature) caption_encoding_list shape:(m, max_sentence_length) """ image_dir_list = [] image_feature_list = [] caption_list = [] caption_encoding_list = [] for k, v_list in caption_dict.items(): image_dir = k image_name = image_dir[len(self.image_folder_dir):] image_feature = image_encoding_dict[image_name] for caption in v_list: caption_encoding = [vocab_obj.map_word_to_id(ele) for ele in caption.split()] image_dir_list.append(image_dir) image_feature_list.append(image_feature) caption_list.append(caption) caption_encoding_list.append(caption_encoding) # 对不够长的序列进行填充 caption_encoding_list = list( sequence.pad_sequences(caption_encoding_list, maxlen=max_sentence_length, padding='post', value=vocab_obj.map_word_to_id(self._null_str))) dataset = pd.DataFrame({'image_dir': image_dir_list, 'image_feature': image_feature_list, 'caption': caption_list, 'caption_encoding': caption_encoding_list}) if do_persist: # 以 json 持久化到磁盘 dataset.to_json(self.dataset_dir) return dataset def load_dataset(self): """ 读取 训练数据集 :return: """ dataset = pd.read_json(self.dataset_dir) return dataset def build_image_caption_dict(self, caption_dict, image_encoding_dict, 
do_persist=True): """ 1.一张图片对应多段描述, 因此需要组合 图片路径, 图片向量 和 图片的描述, 返回组合后的字典 :param caption_dict: 字典, key 为图片的路径, value 为图片描述的文本列表 :param image_encoding_dict: 字典, key 为图片的名字, value 为 编码后的图片向量 :param do_persist: 将结果持久化到磁盘 :return: image_caption_dict = { '.../.../XXX.jpg' : { 'feature': 编码后的图片向量 'caption': 图片描述的文本列表 } } """ image_caption_dict = {} for k, v_list in caption_dict.items(): image_dir = k image_name = k[len(self.image_folder_dir):] image_feature = image_encoding_dict[image_name] image_caption_dict[image_dir] = {'feature': image_feature, 'caption': v_list} if do_persist: save_dict = {} save_dict['image_caption_dict'] = image_caption_dict with open(self.image_caption_dict_dir, 'wb') as f: pickle.dump(save_dict, f) return image_caption_dict def load_image_caption_dict(self): """ 读取 image_caption_dict :return: image_caption_dict = { '.../.../XXX.jpg' : { 'feature': 编码后的图片向量 'caption': 图片描述的文本列表 } } """ with open(self.image_caption_dict_dir, 'rb') as f: save_dict = pickle.load(f) image_caption_dict = save_dict['image_caption_dict'] return image_caption_dict def do_mian(self, max_caption_length, freq_threshold): """ 数据集预处理的主流程 :return: """ np.random.seed(1) # 设置随机数种子 print("max_caption_length:{}, freq_threshold:{}".format(max_caption_length, freq_threshold)) caption_mapping, text_data = self.load_captions_data() train_caption_dict, valid_caption_dict = self.train_val_split(caption_mapping, shuffle=True) print('build the vocab...') vocab_obj = BuildVocab(load_vocab_dict=False, freq_threshold=freq_threshold, text_data=text_data) max_sentence_length = vocab_obj.get_max_sentence_length(text_data) print('max_sentence_length: {}'.format(max_sentence_length)) print('embedding the picture...') # image_emb_obj = EmbeddingImage(use_pretrain=True) # train_image_path_list = list(train_caption_dict.keys()) # train_encoding_dict = image_emb_obj.process_encode_image(train_image_path_list, # 'cache_data/encoded_images_train_inceptionV3.p') # # valid_image_path_list = 
list(valid_caption_dict.keys()) # valid_encoding_dict = image_emb_obj.process_encode_image(valid_image_path_list, # 'cache_data/encoded_images_valid_inceptionV3.p') # TODO: 考虑前面对数据集进行了 shuffle image_emb_obj = EmbeddingImage(use_pretrain=False) train_encoding_dict = image_emb_obj.load_encode_image_vector('cache_data/encoded_images_train_inceptionV3.p') valid_encoding_dict = image_emb_obj.load_encode_image_vector('cache_data/encoded_images_valid_inceptionV3.p') print('building the train dataset...') self.zip_image_encoding_and_caption(train_caption_dict, train_encoding_dict, vocab_obj, max_sentence_length=max_caption_length) print('building the valid(test) dict...') self.build_image_caption_dict(valid_caption_dict, valid_encoding_dict) # print('building the train dict...') # self.build_image_caption_dict(train_caption_dict, train_encoding_dict) class BuildVocab: """ 根据数据集建立词典 1. 控制词的标号 '<NULL>' 的标号为 0, '<START>' 的标号为 1, '<END>' 的标号为 2, '<UNK>' 的标号为 3, '<UNK>' 必须与 填充的'<NULL>'做区分 2.标点符号不记录中字典 3.在语料库中出现次数大于 freq_threshold 次的词才计入词典中 Author: xrh Date: 2021-9-25 """ def __init__(self, _null_str='<NULL>', _start_str='<START>', _end_str='<END>', _unk_str='<UNK>', vocab_path='cache_data/vocab.bin', load_vocab_dict=True, freq_threshold=0, text_data=None): """ :param _null_str: 空字符 :param _start_str: 句子的开始字符 :param _end_str: 句子的结束字符 :param _unk_str: 未登录字符 :param vocab_path: 词典路径 :param load_vocab_dict: 是否读取现有的词典 :param freq_threshold : 单词出现次数的下限, 若单词出现的次数小于此值, 则不计入字典中 :param text_data: 数据集中的所有句子的列表 """ self._null_str = _null_str self._start_str = _start_str self._end_str = _end_str self._unk_str = _unk_str self.vocab_path = vocab_path self.freq_threshold = freq_threshold if load_vocab_dict: # 读取现有的词典 self.word_to_id, self.id_to_word = self.__load_vocab() else: # 生成新的词典 # 需要删除的标点符号 remove_chars = string.punctuation # !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ remove_chars = remove_chars.replace("<", "") # 不能删除 '<' , 因为'<START>'中也有'<' remove_chars = remove_chars.replace(">", "") # 
remove_chars = remove_chars.replace(".", "") # 不删除 句号. 和 逗号, # remove_chars = remove_chars.replace(",", "") self.remove_chars_re = re.compile('[%s]' % re.escape(remove_chars)) # 需要删除的控制词 self.remove_word_re = re.compile( r'{}|{}|{}'.format(self._null_str, self._start_str, self._end_str, self._unk_str)) self.word_to_id, self.id_to_word = self.__build_vocab(text_data) def map_id_to_word(self, id): """ 输入单词标号, 返回单词 1.若单词标号未在 逆词典中, 返回 '<UNK>' :param id: :return: """ if id not in self.id_to_word: return self._unk_str else: return self.id_to_word[id] def map_word_to_id(self, word): """ 输入单词, 返回单词标号 考虑未登录词: 1.若输入的单词不在词典中, 返回 '<UNK>' 的标号 :param word: 单词 :return: """ if word not in self.word_to_id: return self.word_to_id[self._unk_str] else: return self.word_to_id[word] def get_max_sentence_length(self, text_data): """ 数据集中最长序列的长度 :param text_data: 数据集中的所有句子的列表 :return: """ max_caption_length = 0 for caption in text_data: capation_length = len(caption.split()) if capation_length > max_caption_length: max_caption_length = capation_length return max_caption_length def __build_vocab(self, text_data): """ 制作词典 1.配置 '<NULL>' 的标号为 0, '<START>' 的标号为 1, '<END>' 的标号为 2 2.标点符号不记录字典 3.在语料库中出现次数大于 5次的词才计入词典中 :param text_data: 数据集中的所有句子的列表
<filename>xdwlib/basedocument.py #!/usr/bin/env python3 # vim: set fileencoding=utf-8 fileformat=unix expandtab : """basedocument.py -- BaseDocument, base class for Document/DocumentInBinder Copyright (C) 2010 <NAME> <<EMAIL>> All rights reserved. This software is subject to the provisions of the Zope Public License, Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. """ import sys import os from io import StringIO from .xdwapi import * from .common import * from .xdwtemp import XDWTemp from .observer import * from .struct import Point from .xdwfile import xdwopen from .page import Page, PageCollection __all__ = ("BaseDocument",) class BaseDocument(Subject): """DocuWorks document base class. This class is a base class, which is expected to be inherited by Document or DocumentInBinder class. Each BaseDocument instance has an observer dict. This dict holds (page_number, Page_object) pairs, and is used to notify page insertion or deletion. Receiving this notification, every Page object should adjust its memorized page number. 
""" def _pos(self, pos, append=False): append = 1 if append else 0 if not (-self.pages <= pos < self.pages + append): raise IndexError( "Page number must be in [{0}, {1}), {2} given".format( -self.pages, self.pages + append, pos)) if pos < 0: pos += self.pages return pos def _slice(self, pos): if pos.step == 0 and pos.start != pos.stop: raise ValueError("slice.step must not be 0") return slice( self._pos(pos.start or 0), self.pages if pos.stop is None else pos.stop, 1 if pos.step is None else pos.step) def __init__(self): Subject.__init__(self) def __repr__(self): # abstract raise NotImplementedError() def __str__(self): # abstract raise NotImplementedError() def __len__(self): return self.pages def __getitem__(self, pos): if isinstance(pos, slice): pos = self._slice(pos) return PageCollection(self.page(p) for p in range(pos.start, pos.stop, pos.step)) return self.page(pos) def __setitem__(self, pos, val): raise NotImplementedError() def __delitem__(self, pos): if isinstance(pos, slice): pos = self._slice(pos) deleted = 0 for p in range(pos.start, pos.stop, pos.step): self.delete(p - deleted) deleted += 1 else: self.delete(pos) def __iter__(self): for pos in range(self.pages): yield self.page(pos) def absolute_page(self, pos, append=False): """Abstract method to get absolute page number in binder/document.""" raise NotImplementedError() def update_pages(self): """Abstract method to update number of pages.""" raise NotImplementedError() def page(self, pos): """Get a Page. pos (int) page number; starts with 0 Returns a Page object. """ pos = self._pos(pos) if pos not in self.observers: self.observers[pos] = Page(self, pos) return self.observers[pos] def append(self, obj): """Append a Page/PageCollection/Document at the end of document. obj (Page, PageCollection or Document) """ self.insert(self.pages, obj) def insert(self, pos, obj): """Insert a Page/PageCollection/Document. 
pos (int) position to insert; starts with 0 obj (Page, PageCollection, BaseDocument or str) """ pos = self._pos(pos, append=True) if isinstance(obj, Page): temp = XDWTemp() obj.export(temp.path) elif isinstance(obj, PageCollection): temp = XDWTemp() obj.export(temp.path, flat=True) elif isinstance(obj, BaseDocument): temp = XDWTemp() pc = PageCollection(obj) pc.export(temp.path, flat=True) elif isinstance(obj, str): # XDW path temp = obj if not temp.lower().endswith(".xdw"): raise TypeError("binder is not acceptable") else: raise ValueError(f"can't insert {obj.__class__} object") if XDWVER < 8: XDW_InsertDocument( self.handle, self.absolute_page(pos, append=True) + 1, cp(temp if isinstance(temp, str) else temp.path)) else: XDW_InsertDocumentW( self.handle, self.absolute_page(pos, append=True) + 1, temp if isinstance(temp, str) else temp.path) inslen = XDW_GetDocumentInformation(self.handle).nPages - self.pages self.pages += inslen if not isinstance(obj, str): temp.close() # Check inserted pages in order to attach them to this document and # shift observer entries appropriately. for p in range(pos, pos + inslen): Page(self, p) def append_image(self, *args, **kw): """Append a page created from image file(s). See insert_image() for description on arguments. """ self.insert_image(self.pages, *args, **kw) def insert_image(self, pos, input_path, fitimage="FITDEF", compress="NORMAL", zoom=0, # %; 0=100% size=Point(0, 0), # Point(width, height); 0=A4R align=("CENTER", "CENTER"), # LEFT/CENTER/RIGHT, TOP/CENTER/BOTTOM maxpapersize="DEFAULT", ): """Insert a page created from image file(s). fitimage 'FITDEF' | 'FIT' | 'FITDEF_DIVIDEBMP' | 'USERDEF' | 'USERDEF_FIT' compress 'NORMAL' | 'LOSSLESS' | 'HIGHQUALITY' | 'HIGHCOMPRESS' | 'MRC_NORMAL' | 'MRC_HIGHQUALITY' | 'MRC_HIGHCOMPRESS' zoom (float) in percent; 0 means 100%. < 1/1000 is ignored. 
size (Point) in mm; for fitimange 'userdef' or 'userdef_fit' (int) 1=A3R, 2=A3, 3=A4R, 4=A4, 5=A5R, 6=A5, 7=B4R, 8=B4, 9=B5R, 10=B5 (str) 'A3R' | 'A3' | 'A4R' | 'A4' | 'A5R' | 'A5' | 'B4R' | 'B4' | 'B5R' | 'B5' align (horiz, vert) where: horiz 'CENTER' | 'LEFT' | 'RIGHT' vert 'CENTER' | 'TOP' | 'BOTTOM' maxpapersize 'DEFAULT' | 'A3' | '2A0' """ prev_pages = self.pages pos = self._pos(pos, append=True) opt = XDW_CREATE_OPTION_EX2() opt.nFitImage = XDW_CREATE_FITIMAGE.normalize(fitimage) opt.nCompress = XDW_COMPRESS.normalize(compress) if opt.nCompress in ( XDW_COMPRESS_NOCOMPRESS, XDW_COMPRESS_JPEG, XDW_COMPRESS_PACKBITS, XDW_COMPRESS_G4, XDW_COMPRESS_MRC, XDW_COMPRESS_JPEG_TTN2, ): raise ValueError("invalid compression method `{0}'".format( XDW_COMPRESS[opt.nCompress])) #opt.nZoom = 0 opt.nZoomDetail = int(zoom * 1000) # .3f # NB. Width and height are valid only for XDW_CREATE_USERDEF(_FIT). if isinstance(size, (int, float, str)): size = Point(*XDW_SIZE_MM[XDW_SIZE.normalize(size)]) opt.nWidth, opt.nHeight = list(map(int, size * 100)) # .2f; opt.nHorPos = XDW_CREATE_HPOS.normalize(align[0]) opt.nVerPos = XDW_CREATE_VPOS.normalize(align[1]) opt.nMaxPaperSize = XDW_CREATE_MAXPAPERSIZE.normalize(maxpapersize) if XDWVER < 8: XDW_CreateXdwFromImageFileAndInsertDocument( self.handle, self.absolute_page(pos, append=True) + 1, cp(input_path), opt) else: XDW_CreateXdwFromImageFileAndInsertDocumentW( self.handle, self.absolute_page(pos, append=True) + 1, input_path, opt) self.update_pages() # Check inserted pages in order to attach them to this document and # shift observer entries appropriately. for p in range(pos, pos + (self.pages - prev_pages)): Page(self, p) def export(self, pos, path=None): """Export page to another document. pos (int) page number; starts with 0 path (str) export to {path}; with no dir, export to {document/binder dir}/{path} (None) export to {document/binder dir}/{document name}_P{num}.xdw Returns the exported pathname which may differ from path. 
""" path = newpath(path or f"{self.name}_P{pos + 1}.xdw", dir=self.dirname()) if XDWVER < 8: XDW_GetPage(self.handle, self.absolute_page(pos) + 1, cp(path)) else: XDW_GetPageW(self.handle, self.absolute_page(pos) + 1, path) return path def export_image(self, pos, path=None, pages=1, dpi=600, color="COLOR", format=None, compress="NORMAL", direct=False): """Export page(s) to image file. pos (int or tuple (start stop) in half-open style like slice) path (str) export to {path}; with no dir, export to {document/binder dir}/{path} (None) export to {document/binder dir}/{document name}_P{num}.bmp pages (int) dpi (int) 10..600 color 'COLOR' | 'MONO' | 'MONO_HIGHQUALITY' format 'BMP' | 'TIFF' | 'JPEG' | 'PDF' compress for BMP, not available for TIFF, 'NOCOMPRESS' | 'PACKBITS' | 'JPEG | 'JPEG_TTN2' | 'G4' for JPEG, 'NORMAL' | 'HIGHQUALITY' | 'HIGHCOMPRESS' for PDF, 'NORMAL' | 'HIGHQUALITY' | 'HIGHCOMPRESS' | 'MRC_NORMAL' | 'MRC_HIGHQUALITY' | 'MRC_HIGHCOMPRESS' direct (bool) export internal compressed image data directly. If True: - pos must be int; pages, dpi, color, format and compress are ignored. - Exported image format is recognized with the extension of returned pathname, which is either 'tiff', 'jpeg' or 'pdf'. - Annotations and page forms are not included in the exported image. Image orientation depends on the internal state, so check 'degree' attribute of the page if needed. Returns the exported pathname which may differ from path. 
""" if direct: return self._export_direct_image(pos, path) if isinstance(pos, (list, tuple)): pos, pages = pos pages -= pos pos = self._pos(pos) if not format: ext = os.path.splitext(path or "_.bmp")[1].lstrip(".").lower() format = {"dib": "bmp", "tif": "tiff", "jpg": "jpeg"}.get(ext, ext) if format.lower() not in ("bmp", "tiff", "jpeg", "pdf"): raise TypeError("image type must be BMP, TIFF, JPEG or PDF.") path = newpath(path or ( f"{self.name}_P{pos + 1}.{format}" if pages == 1 else f"{self.name}_P{pos + 1}-{pos + pages}.{format}"), dir=self.dirname()) if not (10 <= dpi <= 600): raise ValueError("specify resolution between 10 and 600") opt = XDW_IMAGE_OPTION_EX() opt.nDpi = int(dpi) opt.nColor = XDW_IMAGE_COLORSCHEME.normalize(color) opt.nImageType = XDW_IMAGE_FORMAT.normalize(format) if opt.nImageType == XDW_IMAGE_DIB: opt.pDetailOption = NULL elif opt.nImageType == XDW_IMAGE_TIFF: dopt = XDW_IMAGE_OPTION_TIFF() dopt.nCompress = XDW_COMPRESS.normalize(compress) if dopt.nCompress not in ( XDW_COMPRESS_NOCOMPRESS, XDW_COMPRESS_PACKBITS, XDW_COMPRESS_JPEG, XDW_COMPRESS_JPEG_TTN2, XDW_COMPRESS_G4, ): dopt.nCompress = XDW_COMPRESS_NOCOMPRESS dopt.nEndOfMultiPages = (pos + pages - 1) + 1 opt.pDetailOption = cast(pointer(dopt), c_void_p) elif opt.nImageType == XDW_IMAGE_JPEG: dopt = XDW_IMAGE_OPTION_JPEG() dopt.nCompress = XDW_COMPRESS.normalize(compress) if dopt.nCompress not in ( XDW_COMPRESS_NORMAL, XDW_COMPRESS_HIGHQUALITY, XDW_COMPRESS_HIGHCOMPRESS, ): dopt.nCompress = XDW_COMPRESS_NORMAL opt.pDetailOption = cast(pointer(dopt), c_void_p) elif opt.nImageType == XDW_IMAGE_PDF: dopt = XDW_IMAGE_OPTION_PDF() dopt.nCompress = XDW_COMPRESS.normalize(compress) if dopt.nCompress not in ( XDW_COMPRESS_NORMAL, XDW_COMPRESS_HIGHQUALITY, XDW_COMPRESS_HIGHCOMPRESS, XDW_COMPRESS_MRC_NORMAL, XDW_COMPRESS_MRC_HIGHQUALITY, XDW_COMPRESS_MRC_HIGHCOMPRESS, ): dopt.nCompress = XDW_COMPRESS_MRC_NORMAL dopt.nEndOfMultiPages = (pos + pages - 1) + 1 # Compression method option is 
deprecated. dopt.nConvertMethod = XDW_CONVERT_MRC_OS opt.pDetailOption
column if json_file is not None: # load file and with open(json_file,'r+') as f: json_map = json.load(f) # if no JSON mapping file was specified then create a default one for variable-term mappings # create a json_file filename from the output file filename if output_file is None: output_file = os.path.join(directory, "nidm_pde_terms.json") # remove ".ttl" extension # else: # output_file = os.path.join(os.path.dirname(output_file), os.path.splitext(os.path.basename(output_file))[0] # + ".json") # initialize InterLex connection try: ilx_obj = InitializeInterlexRemote() except Exception as e: print("ERROR: initializing InterLex connection...") print("You will not be able to add new personal data elements.") ilx_obj=None # load NIDM OWL files if user requested it if owl_file=='nidm': try: nidm_owl_graph = load_nidm_owl_files() except Exception as e: print() print("ERROR: initializing internet connection to NIDM OWL files...") print("You will not be able to select terms from NIDM OWL files.") nidm_owl_graph = None # else load user-supplied owl file elif owl_file is not None: nidm_owl_graph = Graph() nidm_owl_graph.parse(location=owl_file) # iterate over columns for column in df.columns: # search term for elastic search search_term=str(column) # loop variable for terms markup go_loop=True # set up a dictionary entry for this column current_tuple = str(DD(source=assessment_name, variable=column)) column_to_terms[current_tuple] = {} # if we loaded a json file with existing mappings try: json_map # check for column in json file json_key = [key for key in json_map if column in key] if (json_map is not None) and (len(json_key)>0): column_to_terms[current_tuple]['label'] = json_map[json_key[0]]['label'] column_to_terms[current_tuple]['definition'] = json_map[json_key[0]]['definition'] column_to_terms[current_tuple]['url'] = json_map[json_key[0]]['url'] # column_to_terms[current_tuple]['variable'] = json_map[json_key[0]]['variable'] print("Column %s already mapped to terms in 
user supplied JSON mapping file" %column) print("Label: %s" %column_to_terms[current_tuple]['label']) print("Definition: %s" %column_to_terms[current_tuple]['definition']) print("Url: %s" %column_to_terms[current_tuple]['url']) # print("Variable: %s" %column_to_terms[current_tuple]['variable']) if 'description' in json_map[json_key[0]]: column_to_terms[current_tuple]['description'] = json_map[json_key[0]]['description'] print("Description: %s" %column_to_terms[current_tuple]['description']) if 'levels' in json_map[json_key[0]]: column_to_terms[current_tuple]['levels'] = json_map[json_key[0]]['levels'] print("Levels: %s" %column_to_terms[current_tuple]['levels']) print("---------------------------------------------------------------------------------------") continue except NameError: print("json mapping file not supplied") # flag for whether to use ancestors in Interlex query or not ancestor = True #Before we run anything here if both InterLex and NIDM OWL file access is down we should just alert #the user and return cause we're not going to be able to do really anything if (nidm_owl_graph is None) and (ilx_obj is None): print("Both InterLex and NIDM OWL file access is not possible") print("Check your internet connection and try again or supply a JSON mapping file with all the variables " "mapped to terms") return column_to_terms #added for an automatic mapping of participant_id, subject_id, and variants if ( ("participant_id" in search_term.lower()) or ("subject_id" in search_term.lower()) or (("participant" in search_term.lower()) and ("id" in search_term.lower())) or (("subject" in search_term.lower()) and ("id" in search_term.lower())) ): # map this term to Constants.NIDM_SUBJECTID # since our subject ids are statically mapped to the Constants.NIDM_SUBJECTID we're creating a new # named tuple for this json map entry as it's not the same source as the rest of the data frame which # comes from the 'assessment_name' function parameter. 
subjid_tuple = str(DD(source='ndar', variable=search_term)) column_to_terms[subjid_tuple] = {} column_to_terms[subjid_tuple]['label'] = search_term column_to_terms[subjid_tuple]['definition'] = "subject/participant identifier" column_to_terms[subjid_tuple]['url'] = Constants.NIDM_SUBJECTID.uri # column_to_terms[subjid_tuple]['variable'] = str(column) # delete temporary current_tuple key for this variable as it has been statically mapped to NIDM_SUBJECT del column_to_terms[current_tuple] print("Variable %s automatically mapped to participant/subject idenfier" %search_term) print("Label: %s" %column_to_terms[subjid_tuple]['label']) print("Definition: %s" %column_to_terms[subjid_tuple]['definition']) print("Url: %s" %column_to_terms[subjid_tuple]['url']) print("---------------------------------------------------------------------------------------") # don't need to continue while loop because we've defined a term for this CSV column go_loop=False continue # loop to find a term definition by iteratively searching InterLex...or defining your own while go_loop: # variable for numbering options returned from elastic search option = 1 print() print("Query String: %s " %search_term) if ilx_obj is not None: # for each column name, query Interlex for possible matches search_result = GetNIDMTermsFromSciCrunch(search_term, type='fde', ancestor=ancestor) temp = search_result.copy() #print("Search Term: %s" %search_term) if len(temp)!=0: print("InterLex Terms (FDEs):") #print("Search Results: ") for key, value in temp.items(): print("%d: Label: %s \t Definition: %s \t Preferred URL: %s " %(option,search_result[key]['label'],search_result[key]['definition'],search_result[key]['preferred_url'] )) search_result[str(option)] = key option = option+1 # for each column name, query Interlex for possible matches cde_result = GetNIDMTermsFromSciCrunch( search_term, type='cde', ancestor=ancestor) if len(cde_result) != 0: #only update search_result with new terms. 
This handles what I consider a bug in InterLex queries #where FDE and CDE queries return the same terms. search_result.update(cde_result) #temp = search_result.copy() temp = cde_result.copy() if len(temp)!=0: print() print("InterLex Terms (CDEs):") #print("Search Results: ") for key, value in temp.items(): print("%d: Label: %s \t Definition: %s \t Preferred URL: %s " %(option,search_result[key]['label'],search_result[key]['definition'],search_result[key]['preferred_url'] )) search_result[str(option)] = key option = option+1 # for each column name, query Interlex for possible matches pde_result = GetNIDMTermsFromSciCrunch(search_term, type='pde', ancestor=ancestor) if len(pde_result) != 0: search_result.update(pde_result) #temp = search_result.copy() temp = pde_result.copy() if len(temp)!=0: print() print("InterLex Terms (PDEs):") #print("Search Results: ") for key, value in temp.items(): print("%d: Label: %s \t Definition: %s \t Preferred URL: %s " %(option,search_result[key]['label'],search_result[key]['definition'],search_result[key]['preferred_url'] )) search_result[str(option)] = key option = option+1 # if user supplied an OWL file to search in for terms #if owl_file: if nidm_owl_graph is not None: # Add existing NIDM Terms as possible selections which fuzzy match the search_term nidm_constants_query = fuzzy_match_terms_from_graph(nidm_owl_graph, search_term) first_nidm_term=True for key, subdict in nidm_constants_query.items(): if nidm_constants_query[key]['score'] > min_match_score: if first_nidm_term: print() print("NIDM Terms:") first_nidm_term=False print("%d: Label(NIDM Term): %s \t Definition: %s \t URL: %s" %(option, nidm_constants_query[key]['label'], nidm_constants_query[key]['definition'], nidm_constants_query[key]['url'])) search_result[key] = {} search_result[key]['label']=nidm_constants_query[key]['label'] search_result[key]['definition']=nidm_constants_query[key]['definition'] search_result[key]['preferred_url']=nidm_constants_query[key]['url'] 
search_result[str(option)] = key option=option+1 if ancestor: # Broaden Interlex search print("%d: Broaden Interlex query " %option) else: # Narrow Interlex search print("%d: Narrow Interlex query " %option) option = option+1 # Add option to change query string print("%d: Change Interlex query string from: \"%s\"" % (option, search_term)) # Add option to define your own term option = option + 1 print("%d: Define my own term for this variable" % option) print("---------------------------------------------------------------------------------------") # Wait for user input selection=input("Please select an option (1:%d) from above: \t" % option) # Make sure user selected one of the options. If not present user with selection input again while (not selection.isdigit()) or (int(selection) > int(option)): # Wait for user input selection = input("Please select an option (1:%d) from above: \t" % option) # toggle use of ancestors in interlex query or not if int(selection) == (option-2): ancestor=not ancestor # check if selection is to re-run query with new search term elif int(selection) == (option-1): # ask user for new search string search_term = input("Please input new search term for CSV column: %s \t:" % column) print("---------------------------------------------------------------------------------------") elif int(selection) == option: # user wants to define their own term. Ask for term label and definition print("\nYou selected to enter a new term for CSV column: %s" % column) # collect term information from user term_label = input("Please enter a term label for this column [%s]:\t" % column) if term_label == '': term_label = column # WIP do a quick query of Interlex to see if term already exists with that label. 
If so show user # If user says it's the correct term then use it and stop dialog with user about new term term_definition = input("Please enter a definition:\t") #get datatype while True: term_datatype = input("Please enter the datatype (string,integer,real,categorical):\t") # check datatypes if not in [integer,real,categorical] repeat until it is if (term_datatype == "string") or (term_datatype == "integer") or (term_datatype == "real") or (term_datatype == "categorical"): break # now check if term_datatype is categorical and if so let's get the label <-> value mappings if term_datatype == "categorical": term_category = {} # ask user for the number of categories while True: num_categories = input("Please enter the number of categories/labels for this term:\t") #check if user supplied a number else repeat question try: val = int(num_categories) break except ValueError: print("That's not a number, please try again!") # loop over number of categories and collect information for category in range(1, int(num_categories)+1): # term category dictionary has labels as keys and value associated with label as value cat_label = input("Please enter the text
<reponame>perlmutter/tomopyui import numpy as np import copy import pathlib from ipywidgets import * from tomopyui._sharedvars import * from abc import ABC, abstractmethod from tomopyui.widgets.view import ( BqImViewer_Projections_Parent, BqImViewer_Projections_Child, BqImViewer_Projections_Child, ) from tomopyui.backend.runanalysis import RunAlign, RunRecon from tomopyui.backend.io import ( Projections_Child, Metadata_Align, Metadata_Recon, ) class AnalysisBase(ABC): def init_attributes(self, Import, Center): self.Import = Import self.Center = Center self.projections = Import.projections self.imported_viewer = BqImViewer_Projections_Parent() self.imported_viewer.create_app() self.altered_viewer = BqImViewer_Projections_Child(self.imported_viewer) self.altered_viewer.create_app() self.result_before_viewer = self.altered_viewer self.result_after_viewer = BqImViewer_Projections_Child( self.result_before_viewer ) self.wd = None self.log_handler, self.log = Import.log_handler, Import.log self.downsample = False self.ds_factor = 4 self.copy_hists = True self.shift_full_dataset_after = False self.pyramid_level = 1 self.num_iter = 10 self.center = Center.current_center self.upsample_factor = 50 self.extra_options = {} self.num_batches = 20 self.px_range_x = (0, 10) self.px_range_y = (0, 10) self.padding_x = 10 self.padding_y = 10 self.use_subset_correlation = False self.pre_alignment_iters = 1 self.tomopy_methods_list = [key for key in tomopy_recon_algorithm_kwargs] self.tomopy_methods_list.remove("gridrec") self.tomopy_methods_list.remove("fbp") self.astra_cuda_methods_list = [ key for key in astra_cuda_recon_algorithm_kwargs ] self.run_list = [] self.header_font_style = { "font_size": "22px", "font_weight": "bold", "font_variant": "small-caps", # "text_color": "#0F52BA", } self.accordions_open = False self.plot_output1 = Output() def init_widgets(self): """ Initializes many of the widgets in the Alignment and Recon tabs. 
""" self.button_font = {"font_size": "22px"} self.button_layout = Layout(width="45px", height="40px") # -- Button to turn on tab --------------------------------------------- self.open_accordions_button = Button( icon="lock-open", layout=self.button_layout, style=self.button_font, ) # -- Headers for plotting ------------------------------------- self.import_plot_header = "Imported Projections" self.import_plot_header = Label( self.import_plot_header, style=self.header_font_style ) self.altered_plot_header = "Altered Projections" self.altered_plot_header = Label( self.altered_plot_header, style=self.header_font_style ) # -- Headers for results ------------------------------------- self.before_analysis_plot_header = "Analysis Projections" self.before_analysis_plot_header = Label( self.before_analysis_plot_header, style=self.header_font_style ) self.after_analysis_plot_header = "Result" self.after_analysis_plot_header = Label( self.after_analysis_plot_header, style=self.header_font_style ) # -- Button to load metadata ---------------------------------------------- self.load_metadata_button = Button( description="Click to load metadata.", icon="upload", disabled=True, button_style="info", # 'success', 'info', 'warning', 'danger' or '' tooltip="First choose a metadata file in the Import tab, then click here", layout=Layout(width="auto", justify_content="center"), ) self.viewer_hbox = HBox( [ VBox( [ self.import_plot_header, self.imported_viewer.app, ], layout=Layout(align_items="center"), ), VBox( [ self.altered_plot_header, self.altered_viewer.app, ], layout=Layout(align_items="center"), ), ], layout=Layout(justify_content="center"), ) self.viewer_accordion = Accordion( children=[self.viewer_hbox], selected_index=None, titles=("Plot Projection Images",), ) # -- Saving Options ------------------------------------------------------- self.save_opts = {key: False for key in self.save_opts_list} self.save_opts_checkboxes = self.create_checkboxes_from_opt_list( 
self.save_opts_list, self.save_opts ) # Copy parent histograms? self.copy_parent_hists_checkbox = Checkbox( description="Copy parent histograms", value=True ) self.save_opts_checkboxes.append(self.copy_parent_hists_checkbox) self.shift_data_after_checkbox = Checkbox( description="Shift full dataset after:", value=True ) self.save_opts_checkboxes.append(self.shift_data_after_checkbox) # -- Method Options ------------------------------------------------------- self.methods_opts = { key: False for key in self.tomopy_methods_list + self.astra_cuda_methods_list } self.tomopy_methods_checkboxes = self.create_checkboxes_from_opt_list( self.tomopy_methods_list, self.methods_opts ) self.astra_cuda_methods_checkboxes = self.create_checkboxes_from_opt_list( self.astra_cuda_methods_list, self.methods_opts ) # -- Options ---------------------------------------------------------- # Number of iterations self.num_iterations_textbox = IntText( description="Number of Iterations: ", style=extend_description_style, value=self.num_iter, ) # Center self.center_textbox = FloatText( description="Center of Rotation: ", style=extend_description_style, value=self.center, ) center_link = link( (self.center_textbox, "value"), (self.Center.center_textbox, "value") ) # Downsampling self.downsample_checkbox = Checkbox(description="Downsample?", value=False) self.ds_factor_dropdown = Dropdown( options=[("Original", -1), (2, 0), (4, 1), (8, 2)], description="Downsample factor: ", disabled=True, style=extend_description_style, ) # Phase cross correlation subset (from altered projections) self.use_subset_correlation_checkbox = Checkbox( description="Phase Corr. 
Subset?", value=False ) # Batch size self.num_batches_textbox = IntText( description="Number of batches (for GPU): ", style=extend_description_style, value=self.num_batches, ) # X Padding self.padding_x_textbox = IntText( description="Padding X (px): ", style=extend_description_style, value=self.padding_x, ) # Y Padding self.padding_y_textbox = IntText( description="Padding Y (px): ", style=extend_description_style, value=self.padding_y, ) # Pre-alignment iterations self.pre_alignment_iters_textbox = IntText( description="Pre-alignment iterations: ", style=extend_description_style, value=self.pre_alignment_iters, ) # Extra options self.extra_options_textbox = Text( description="Extra options: ", placeholder='{"MinConstraint": 0}', style=extend_description_style, ) def refresh_plots(self): self.imported_viewer.plot(self.projections) self.altered_projections = Projections_Child(self.projections) self.altered_viewer.projections = self.altered_projections self.altered_viewer.copy_parent_projections(None) def set_observes(self): # -- Radio to turn on tab --------------------------------------------- self.open_accordions_button.on_click(self.activate_tab) # -- Load metadata button --------------------------------------------- self.load_metadata_button.on_click(self._load_metadata_all_on_click) # -- Options ---------------------------------------------------------- # Center self.center_textbox.observe(self.update_center_textbox, names="value") # Copy parent histograms self.copy_parent_hists_checkbox.observe(self.update_copy_hist, names="value") # Shift dataset after self.copy_parent_hists_checkbox.observe(self.update_shift_data, names="value") # Downsampling self.downsample_checkbox.observe(self._downsample_turn_on) self.altered_viewer.ds_viewer_dropdown.observe( self.update_ds_factor_from_viewer, names="value" ) self.ds_factor_dropdown.observe(self.update_ds_factor, names="value") # Phase cross correlation subset (from altered projections) 
self.use_subset_correlation_checkbox.observe(self._use_subset_correlation) # X Padding self.padding_x_textbox.observe(self.update_x_padding, names="value") # Y Padding self.padding_y_textbox.observe(self.update_y_padding, names="value") # Pre-alignment iterations self.pre_alignment_iters_textbox.observe( self.update_pre_alignment_iters, names="value" ) # Extra options self.extra_options_textbox.observe(self.update_extra_options, names="value") # Start button self.start_button.on_click(self.set_options_and_run) # -- Radio to turn on tab --------------------------------------------- def activate_tab(self, *args): if self.accordions_open is False: self.open_accordions_button.icon = "fa-lock" self.open_accordions_button.button_style = "success" self.projections = self.Import.projections self.center = self.Center.current_center self.center_textbox.value = self.Center.current_center self.metadata.set_metadata(self) self.load_metadata_button.disabled = False self.start_button.disabled = False self.save_options_accordion.selected_index = 0 self.options_accordion.selected_index = 0 self.methods_accordion.selected_index = 0 self.viewer_accordion.selected_index = 0 self.accordions_open = True else: self.open_accordions_button.icon = "fa-lock-open" self.open_accordions_button.button_style = "info" self.accordions_open = False self.load_metadata_button.disabled = True self.start_button.disabled = True self.save_options_accordion.selected_index = None self.options_accordion.selected_index = None self.methods_accordion.selected_index = None self.viewer_accordion.selected_index = None self.log.info("Deactivated alignment.") # # -- Button for using imported dataset --------------------------------- # def use_imported(self, *args): # self.use_altered_button.icon = "" # self.use_altered_button.button_style = "" # self.use_imported_button.button_style = "info" # self.use_imported_button.description = "Creating analysis projections" # self.use_imported_button.icon = "fas fa-cog fa-spin 
fa-lg" # self.projections.data = copy.deepcopy(self.Import.projections.data) # self.projections.angles_rad = copy.deepcopy(self.Import.projections.angles_rad) # self.projections.angles_deg = copy.deepcopy(self.Import.projections.angles_deg) # self.px_range_x = self.projections.px_range_x # self.px_range_y = self.projections.px_range_y # self.result_before_viewer = self.imported_viewer # self.result_after_viewer = BqImViewer_Projections_Child( # self.result_before_viewer # ) # self.use_imported_button.button_style = "success" # self.use_imported_button.description = ( # "You can now align/reconstruct your data." # ) # self.use_imported_button.icon = "fa-check-square" # -- Button for using edited dataset --------------------------------- # def use_altered(self, *args): # self.use_imported_button.icon = "" # self.use_imported_button.button_style = "" # self.use_altered_button.button_style = "info" # self.use_altered_button.description = "Creating analysis projections" # self.use_altered_button.icon = "fas fa-cog fa-spin fa-lg" # self.projections._data = self.altered_viewer.original_images # self.projections.data = self.altered_viewer.original_images # self.projections.angles_rad = copy.deepcopy(self.Import.projections.angles_rad) # self.projections.angles_deg = copy.deepcopy(self.Import.projections.angles_deg) # self.px_range_x = self.altered_viewer.px_range_x # self.px_range_y = self.altered_viewer.px_range_y # self.result_before_viewer = self.altered_viewer # self.result_after_viewer = BqImViewer_Projections_Child( # self.result_before_viewer # ) # self.use_altered_button.button_style = "success" # self.use_altered_button.description = "You can now align/reconstruct your data." 
# self.use_altered_button.icon = "fa-check-square" # -- Load metadata button --------------------------------------------- def _load_metadata_all_on_click(self, change): self.load_metadata_button.button_style = "info" self.load_metadata_button.icon = "fas fa-cog fa-spin fa-lg" self.load_metadata_button.description = "Importing metadata." self.load_metadata_align() self.metadata.set_attributes_from_metadata() self.set_observes() self.load_metadata_button.button_style = "success" self.load_metadata_button.icon = "fa-check-square" self.load_metadata_button.description = "Finished importing metadata." # -- Button to start alignment ---------------------------------------- def set_options_and_run(self, change): change.button_style = "info" change.icon = "fas fa-cog fa-spin fa-lg" change.description = ( "Setting options and loading data into alignment algorithm." ) self.run() change.button_style = "success" change.icon = "fa-check-square" change.description = "Finished alignment." # -- Options ---------------------------------------------------------- # Copy histogram from parent def update_copy_hist(self, change): self.copy_hists = change.new self.metadata.set_metadata(self) def update_shift_data(self, change): self.shift_full_dataset_after = change.new self.metadata.set_metadata(self) # Number of iterations def update_num_iter(self, change): self.num_iter = int(change.new) self.progress_total.max = change.new self.metadata.set_metadata(self) # Center of rotation def update_center_textbox(self, change): self.center = change.new self.metadata.set_metadata(self) # Downsampling def _downsample_turn_on(self, change): if change.new is True: self.downsample = True self.pyramid_level = self.altered_viewer.ds_viewer_dropdown.value self.ds_factor_dropdown.disabled = False self.metadata.set_metadata(self) if change.new is False: self.downsample = False self.ds_factor = 1 self.ds_factor_dropdown.disabled = True self.metadata.set_metadata(self) # Phase cross correlation subset 
(from altered projections) def _use_subset_correlation(self, change): self.use_subset_correlation = change.new self.metadata.set_metadata(self) def update_ds_factor_from_viewer(self, *args): self.ds_factor_dropdown.value = self.altered_viewer.ds_viewer_dropdown.value def update_ds_factor(self, *args): self.pyramid_level = self.ds_factor_dropdown.value self.ds_factor = np.power(2, int(self.pyramid_level + 1)) self.metadata.set_metadata(self) # Batch size def update_num_batches(self, change): self.num_batches = change.new self.progress_phase_cross_corr.max = change.new self.progress_shifting.max = change.new self.progress_reprj.max = change.new self.metadata.set_metadata(self) # X Padding def update_x_padding(self, change): self.padding_x = change.new self.metadata.set_metadata(self) # Y Padding def update_y_padding(self, change): self.padding_y = change.new self.metadata.set_metadata(self) # Pre-alignment iterations def update_pre_alignment_iters(self, *args): self.pre_alignment_iters = self.pre_alignment_iters_textbox.value # Extra options def update_extra_options(self, change): self.extra_options = change.new self.metadata.set_metadata(self) # def set_widgets_from_load_metadata(self): # # -- Saving Options ------------------------------------------------------- # self.save_opts_checkboxes = self.set_checkbox_bool( # self.save_opts_checkboxes, self.metadata["save_opts"] # ) # # -- Method Options ------------------------------------------------------- # # for key in self.metadata["methods"]: # # if self.metadata["methods"][key]: # # for checkbox in self.methods_checkboxes: # # if checkbox.description == str(key): # # checkbox.value = True # # elif not self.metadata["methods"][key]: # # for checkbox in self.methods_checkboxes: # # if checkbox.description == str(key): # # checkbox.value = False # self.tomopy_methods_checkboxes = self.set_checkbox_bool( # self.tomopy_methods_checkboxes, self.metadata["methods"] # ) # self.astra_cuda_methods_checkboxes = 
self.set_checkbox_bool( # self.astra_cuda_methods_checkboxes, self.metadata["methods"] # ) # # -- Projection Range Sliders --------------------------------------------- # # Not implemented in load metadata. # # -- Options ---------------------------------------------------------- # # Number of iterations # self.num_iterations_textbox.value = self.num_iter # # Center # self.center_textbox.value = self.center # # Downsampling # self.downsample_checkbox.value = self.downsample # self.ds_factor_textbox.value = self.ds_factor # if self.downsample_checkbox.value: # self.ds_factor_textbox.disabled
print(best_reward) # Back-propagate while node: node.visits += 1 node.value += sum_reward node = node.parent sum_reward = 0 for action in best_actions: if self.render: env.render() _, reward, terminal, _ = env.step(action) sum_reward += reward if terminal: break env.monitor.close() toret.append([best_actions,sum_reward]) best_rewards.append(sum_reward) score = max(moving_average(best_rewards, 100)) return toret class MCTS_Runner_Timed: def __init__(self, max_d=500, seconds=60, game_desc=skeleton_game_4, level_desc=dummy_maze, observer=None, render=True): self.max_depth = max_d self.seconds = seconds self.game = game_desc self.level = level_desc self.render = render self._save_game_files() self.discount_factor = 0.9 self.nodes = {} def _save_game_files(self): game_fh = open(gamefile,'w') game_fh.write(self.game) game_fh.close() level_fh = open(levelfile,'w') level_fh.write(self.level) level_fh.close() def _new_MCTS_Node(self, node, a): return MCTS_Node(node, a) def run(self): finish_at = time.time() + self.seconds toret = [] best_rewards = [] env = cim.VGDLEnv(game_file = gamefile, level_file = levelfile, obs_type = 'features', block_size=24) while True: env.reset() root = MCTS_Node() best_actions = [] best_reward = float(-inf) while True: if time.time() > finish_at: break state = copy.deepcopy(env) state.observer.game = env.observer.game #state.reset() sum_reward = 0 node = root terminal = False actions = [] # Selection while node.children: if node.explored_children < len(node.children): child = node.children[node.explored_children] node.explored_children += 1 node = child else: node = max(node.children, key = ucb) _, reward, terminal, _ = state.step(node.action) sum_reward += reward actions.append(node.action) # Expansion if not terminal: node.children = [self._new_MCTS_Node(node, a) for a in combinations(state.action_space)] random.shuffle(node.children) # Playout while not terminal: action = state.action_space.sample() if self.render: state.render() _, reward, 
terminal, _ = state.step(action) sum_reward += reward actions.append(action) if len(actions) > self.max_depth: sum_reward -= 1000 break # Remember the best if best_reward < sum_reward and terminal: print(sum_reward) print("asd") best_reward = sum_reward best_actions = actions # Back-propagate while node: node.visits += 1 node.value += sum_reward node = node.parent sum_reward = sum_reward*self.discount_factor del state sum_reward = 0 for action in best_actions: if self.render: env.render() _, reward, terminal, _ = env.step(action) sum_reward += reward if terminal: break del env toret.append([best_actions,sum_reward]) best_rewards.append(sum_reward) #score = max(moving_average(best_rewards, 100)) return toret class MCTS_Runner_Reward_Timeout: def __init__(self, max_d=500, seconds=60, reward_goal=-2000, game_desc=skeleton_game_4, level_desc=dummy_maze, observer=None, render=True): self.max_depth = max_d self.seconds = seconds self.game = game_desc self.level = level_desc self.render = render self.aim = reward_goal self._save_game_files() print("Got to finish in: " + str(self.seconds) + " seconds and need to get: " + str(self.aim) + " points.") def _save_game_files(self): game_fh = open(gamefile,'w') game_fh.write(self.game) game_fh.close() level_fh = open(levelfile,'w') level_fh.write(self.level) level_fh.close() def run(self): finish_at = time.time() + self.seconds toret = [] best_rewards = [] env = cim.VGDLEnv(game_file = gamefile, level_file = levelfile, obs_type = 'features', block_size=24) while True: env.reset() root = MCTS_Node() best_actions = [] best_reward = float(-inf) while True: if time.time() > finish_at: break if best_reward > self.aim: break state = copy.deepcopy(env) state.observer.game = env.observer.game sum_reward = 0 node = root terminal = False actions = [] # Selection while node.children: if node.explored_children < len(node.children): child = node.children[node.explored_children] node.explored_children += 1 node = child else: node = 
max(node.children, key = ucb) _, reward, terminal, _ = state.step(node.action) sum_reward += reward actions.append(node.action) # Expansion if not terminal: node.children = [MCTS_Node(node, a) for a in combinations(state.action_space)] random.shuffle(node.children) # Playout while not terminal: action = state.action_space.sample() if self.render: state.render() _, reward, terminal, _ = state.step(action) sum_reward += reward actions.append(action) if len(actions) > self.max_depth: #sum_reward -= 100 break # Remember the best if best_reward < sum_reward: best_reward = sum_reward best_actions = actions print(best_reward) # Back-propagate while node: node.visits += 1 node.value += sum_reward node = node.parent del(state._monitor) sum_reward = 0 for action in best_actions: if self.render: env.render() _, reward, terminal, _ = env.step(action) sum_reward += reward if terminal: break toret.append([best_actions,sum_reward]) best_rewards.append(sum_reward) score = max(moving_average(best_rewards, 100)) return toret class MCTS_Runner_Regular_Old: def __init__(self,nloops=1,max_d=40,n_playouts=1024, game_desc=skeleton_game_4_backup, level_desc=dummy_maze, observer=None, render=True): self.loops = nloops self.max_depth = max_d self.playouts = n_playouts self.render = render #from gym.envs.registration import register, registry self.game =game_desc self.level =level_desc self._save_game_files() #level_name = '.'.join(os.path.basename(levelfile).split('.')[:-1]) #self.env_name = 'vgdl_{}-{}-v0'.format(random.random(),level_name) #register(id = self.env_name, entry_point = 'vgdl.interfaces.gym:VGDLEnv', kwargs = {'game_file':gamefile, 'level_file':levelfile, 'block_size':24, 'obs_type':'features',},nondeterministic=True) def _save_game_files(self): game_fh = open(gamefile,'w') game_fh.write(self.game) game_fh.close() level_fh = open(levelfile,'w') level_fh.write(self.level) level_fh.close() def run(self): toret = [] best_rewards = [] env = cim.VGDLEnv(game_file = gamefile, 
level_file = levelfile, obs_type='features', block_size=24)#gym.make(self.env_name) for loop in range(self.loops): env.reset() root = MCTS_Node() best_actions = [] best_reward = float(-inf) for num_playout in range(self.playouts): state = copy.deepcopy(env) state.observer.game = env.observer.game sum_reward = 0 node = root terminal = False actions = [] # Selection while node.children: if node.explored_children < len(node.children): child = node.children[node.explored_children] node.explored_children += 1 node = child else: node = max(node.children, key=ucb) _, reward, terminal, _ = state.step(node.action) sum_reward += reward actions.append(node.action) # Expansion if not terminal: node.children = [MCTS_Node(parent=node, action=a) for a in combinations(state.action_space)] random.shuffle(node.children) # Playout while not terminal: action = state.action_space.sample() if self.render: state.render() _, reward, terminal, _ = state.step(action) sum_reward += reward actions.append(action) if len(actions) > self.max_depth: sum_reward -= 100 break # Remember the best if best_reward < sum_reward: best_reward = sum_reward best_actions = actions # Back-propagate while node: node.visits += 1 node.value += sum_reward node = node.parent sum_reward = 0 for action in best_actions: if self.render: env.render() _, reward, terminal, _ = env.step(action) sum_reward += reward if terminal: break toret.append([best_actions,sum_reward]) return toret class MCTS_Runner_Regular_with_Time_Limit: def __init__(self,time_limit,nloops=1,max_d=40,n_playouts=500, rollout_depth=40, game_desc=skeleton_game_4_backup, level_desc=dummy_maze, observer=None, render=True, discount_factor=0.99): self.deadline = time_limit self.loops = nloops self.max_depth = max_d self.rollout_depth = rollout_depth self.playouts = n_playouts self.render = render self.game =game_desc self.level =level_desc self._save_game_files() self.df = discount_factor self.width = len(level_desc.split('\n')[0]) self.height = self.width 
def init_my_second_level(self): self.second_level = [] for i in range(0,self.height): temp = [] for i in range(0, self.width): temp.append(0) self.second_level.append(copy.deepcopy(temp)) def _save_game_files(self): game_fh = open(gamefile,'w') game_fh.write(self.game) game_fh.close() level_fh = open(levelfile,'w') level_fh.write(self.level) level_fh.close() def run(self): start_time = time.time() terminal = False to_ret = [] max_reward = float(-inf) env = cim.VGDLEnv(game_file=gamefile, level_file=levelfile, obs_type='features', block_size=24) total_search_count = 0 self.init_my_second_level() for loop in range(self.loops): if time.time()-start_time > self.deadline: break moves_to_play = [] while len(moves_to_play) != self.rollout_depth: sum_reward = 0 env.reset() for move in moves_to_play: _, rew, terminal, _ = env.step(move) if rew > 0: print('+') sum_reward += rew if terminal: #I guess? If a search returns a terminal state, either we cannot win the game at this position, or it is possible to win and this route is returned. break total_search_count += 1 move = self.search(env) #search function to be filled. moves_to_play.append(move) if sum_reward > max_reward: max_reward = sum_reward to_ret = moves_to_play return [[to_ret, max_reward]] def search(self, env): #Get your second level started here. best_actions = [] temp = self.level.split('\n') my_pos_left = env.game.sprite_registry.get_avatar().rect.left my_pos_top = env.game.sprite_registry.get_avatar().rect.top for a in range(0,self.height): temp[a] = list(temp[a]) self.second_level[my_pos_top][my_pos_left] += 1 best_reward = float(-inf) root = MCTS_Node(my_pos_top, my_pos_left)#Find your avatar's position on game map? 
for num_playout in range(self.playouts): actions = [] state = copy.deepcopy(env) state.observer.game = env.observer.game sum_reward = 0 sum_reward2= 0 terminal = False node = root while node.children: if node.explored_children < len(node.children): child = node.children[node.explored_children] node.explored_children += 1 node = child else: node = max(node.children, key=ucb) _, reward, terminal, _ = state.step(node.action) #This is where self.second_level[node.first][node.second] += 1 sum_reward += reward * self.second_level[node.first][node.second] sum_reward2+= reward actions.append(node.action) if not terminal: node.children = [] if node.first != 0 and temp[node.first-1][node.second] == '1': node.children.append(MCTS_Node(node.first,node.second,node,0))#UP else: node.children.append(MCTS_Node(node.first-1,node.second,node,0))#UP if node.first != (self.width-1) and temp[node.first+1][node.second] == '1': node.children.append(MCTS_Node(node.first,node.second,node,2))#DOWN else: node.children.append(MCTS_Node(node.first+1,node.second,node,2))#DOWN if node.second != 0 and temp[node.first][node.second-1] == '1': node.children.append(MCTS_Node(node.first,node.second,node,1))#LEFT else: node.children.append(MCTS_Node(node.first,node.second-1,node,1))#LEFT if node.first != (self.width-1) and temp[node.first][node.second+1] == '1': node.children.append(MCTS_Node(node.first,node.second,node,3))#RIGHT else: node.children.append(MCTS_Node(node.first,node.second+1,node,3))#RIGHT random.shuffle(node.children) while not terminal: action = state.action_space.sample() if self.render: state.render() _, reward, terminal, _ = state.step(action) #And where sum_reward += reward sum_reward2+= reward actions.append(action) if len(actions) > self.max_depth: break # Remember the best if best_reward < sum_reward: #XXX: Remember next action only, not all actions. 
best_reward = sum_reward best_actions = actions while node: node.visits += 1 node.value += sum_reward node = node.parent sum_reward = sum_reward * self.df sum_reward = 0 del state return best_actions[0] class MCTS_Runner_Regular: def __init__(self,nloops=1,max_d=40,n_playouts=500, rollout_depth=40, game_desc=skeleton_game_4_backup, level_desc=dummy_maze, observer=None, render=True, discount_factor=0.95): self.loops = nloops self.max_depth = max_d self.rollout_depth = rollout_depth self.playouts
GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1531 glTexSubImage3D = _link_function('glTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1538 glCopyTexSubImage3D = _link_function('glCopyTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei], None) PFNGLDRAWRANGEELEMENTSPROC = CFUNCTYPE(None, GLenum, GLuint, GLuint, GLsizei, GLenum, POINTER(GLvoid)) # /usr/include/GL/gl.h:1544 PFNGLTEXIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLsizei, GLsizei, GLsizei, GLint, GLenum, GLenum, POINTER(GLvoid)) # /usr/include/GL/gl.h:1545 PFNGLTEXSUBIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)) # /usr/include/GL/gl.h:1546 PFNGLCOPYTEXSUBIMAGE3DPROC = CFUNCTYPE(None, GLenum, GLint, GLint, GLint, GLint, GLint, GLint, GLsizei, GLsizei) # /usr/include/GL/gl.h:1547 GL_CONSTANT_COLOR = 32769 # /usr/include/GL/gl.h:1554 GL_ONE_MINUS_CONSTANT_COLOR = 32770 # /usr/include/GL/gl.h:1555 GL_CONSTANT_ALPHA = 32771 # /usr/include/GL/gl.h:1556 GL_ONE_MINUS_CONSTANT_ALPHA = 32772 # /usr/include/GL/gl.h:1557 GL_COLOR_TABLE = 32976 # /usr/include/GL/gl.h:1558 GL_POST_CONVOLUTION_COLOR_TABLE = 32977 # /usr/include/GL/gl.h:1559 GL_POST_COLOR_MATRIX_COLOR_TABLE = 32978 # /usr/include/GL/gl.h:1560 GL_PROXY_COLOR_TABLE = 32979 # /usr/include/GL/gl.h:1561 GL_PROXY_POST_CONVOLUTION_COLOR_TABLE = 32980 # /usr/include/GL/gl.h:1562 GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE = 32981 # /usr/include/GL/gl.h:1563 GL_COLOR_TABLE_SCALE = 32982 # /usr/include/GL/gl.h:1564 GL_COLOR_TABLE_BIAS = 32983 # /usr/include/GL/gl.h:1565 GL_COLOR_TABLE_FORMAT = 32984 # /usr/include/GL/gl.h:1566 GL_COLOR_TABLE_WIDTH = 32985 # /usr/include/GL/gl.h:1567 GL_COLOR_TABLE_RED_SIZE = 32986 # /usr/include/GL/gl.h:1568 GL_COLOR_TABLE_GREEN_SIZE = 32987 # 
/usr/include/GL/gl.h:1569 GL_COLOR_TABLE_BLUE_SIZE = 32988 # /usr/include/GL/gl.h:1570 GL_COLOR_TABLE_ALPHA_SIZE = 32989 # /usr/include/GL/gl.h:1571 GL_COLOR_TABLE_LUMINANCE_SIZE = 32990 # /usr/include/GL/gl.h:1572 GL_COLOR_TABLE_INTENSITY_SIZE = 32991 # /usr/include/GL/gl.h:1573 GL_CONVOLUTION_1D = 32784 # /usr/include/GL/gl.h:1574 GL_CONVOLUTION_2D = 32785 # /usr/include/GL/gl.h:1575 GL_SEPARABLE_2D = 32786 # /usr/include/GL/gl.h:1576 GL_CONVOLUTION_BORDER_MODE = 32787 # /usr/include/GL/gl.h:1577 GL_CONVOLUTION_FILTER_SCALE = 32788 # /usr/include/GL/gl.h:1578 GL_CONVOLUTION_FILTER_BIAS = 32789 # /usr/include/GL/gl.h:1579 GL_REDUCE = 32790 # /usr/include/GL/gl.h:1580 GL_CONVOLUTION_FORMAT = 32791 # /usr/include/GL/gl.h:1581 GL_CONVOLUTION_WIDTH = 32792 # /usr/include/GL/gl.h:1582 GL_CONVOLUTION_HEIGHT = 32793 # /usr/include/GL/gl.h:1583 GL_MAX_CONVOLUTION_WIDTH = 32794 # /usr/include/GL/gl.h:1584 GL_MAX_CONVOLUTION_HEIGHT = 32795 # /usr/include/GL/gl.h:1585 GL_POST_CONVOLUTION_RED_SCALE = 32796 # /usr/include/GL/gl.h:1586 GL_POST_CONVOLUTION_GREEN_SCALE = 32797 # /usr/include/GL/gl.h:1587 GL_POST_CONVOLUTION_BLUE_SCALE = 32798 # /usr/include/GL/gl.h:1588 GL_POST_CONVOLUTION_ALPHA_SCALE = 32799 # /usr/include/GL/gl.h:1589 GL_POST_CONVOLUTION_RED_BIAS = 32800 # /usr/include/GL/gl.h:1590 GL_POST_CONVOLUTION_GREEN_BIAS = 32801 # /usr/include/GL/gl.h:1591 GL_POST_CONVOLUTION_BLUE_BIAS = 32802 # /usr/include/GL/gl.h:1592 GL_POST_CONVOLUTION_ALPHA_BIAS = 32803 # /usr/include/GL/gl.h:1593 GL_CONSTANT_BORDER = 33105 # /usr/include/GL/gl.h:1594 GL_REPLICATE_BORDER = 33107 # /usr/include/GL/gl.h:1595 GL_CONVOLUTION_BORDER_COLOR = 33108 # /usr/include/GL/gl.h:1596 GL_COLOR_MATRIX = 32945 # /usr/include/GL/gl.h:1597 GL_COLOR_MATRIX_STACK_DEPTH = 32946 # /usr/include/GL/gl.h:1598 GL_MAX_COLOR_MATRIX_STACK_DEPTH = 32947 # /usr/include/GL/gl.h:1599 GL_POST_COLOR_MATRIX_RED_SCALE = 32948 # /usr/include/GL/gl.h:1600 GL_POST_COLOR_MATRIX_GREEN_SCALE = 32949 # 
/usr/include/GL/gl.h:1601 GL_POST_COLOR_MATRIX_BLUE_SCALE = 32950 # /usr/include/GL/gl.h:1602 GL_POST_COLOR_MATRIX_ALPHA_SCALE = 32951 # /usr/include/GL/gl.h:1603 GL_POST_COLOR_MATRIX_RED_BIAS = 32952 # /usr/include/GL/gl.h:1604 GL_POST_COLOR_MATRIX_GREEN_BIAS = 32953 # /usr/include/GL/gl.h:1605 GL_POST_COLOR_MATRIX_BLUE_BIAS = 32954 # /usr/include/GL/gl.h:1606 GL_POST_COLOR_MATRIX_ALPHA_BIAS = 32955 # /usr/include/GL/gl.h:1607 GL_HISTOGRAM = 32804 # /usr/include/GL/gl.h:1608 GL_PROXY_HISTOGRAM = 32805 # /usr/include/GL/gl.h:1609 GL_HISTOGRAM_WIDTH = 32806 # /usr/include/GL/gl.h:1610 GL_HISTOGRAM_FORMAT = 32807 # /usr/include/GL/gl.h:1611 GL_HISTOGRAM_RED_SIZE = 32808 # /usr/include/GL/gl.h:1612 GL_HISTOGRAM_GREEN_SIZE = 32809 # /usr/include/GL/gl.h:1613 GL_HISTOGRAM_BLUE_SIZE = 32810 # /usr/include/GL/gl.h:1614 GL_HISTOGRAM_ALPHA_SIZE = 32811 # /usr/include/GL/gl.h:1615 GL_HISTOGRAM_LUMINANCE_SIZE = 32812 # /usr/include/GL/gl.h:1616 GL_HISTOGRAM_SINK = 32813 # /usr/include/GL/gl.h:1617 GL_MINMAX = 32814 # /usr/include/GL/gl.h:1618 GL_MINMAX_FORMAT = 32815 # /usr/include/GL/gl.h:1619 GL_MINMAX_SINK = 32816 # /usr/include/GL/gl.h:1620 GL_TABLE_TOO_LARGE = 32817 # /usr/include/GL/gl.h:1621 GL_BLEND_EQUATION = 32777 # /usr/include/GL/gl.h:1622 GL_MIN = 32775 # /usr/include/GL/gl.h:1623 GL_MAX = 32776 # /usr/include/GL/gl.h:1624 GL_FUNC_ADD = 32774 # /usr/include/GL/gl.h:1625 GL_FUNC_SUBTRACT = 32778 # /usr/include/GL/gl.h:1626 GL_FUNC_REVERSE_SUBTRACT = 32779 # /usr/include/GL/gl.h:1627 GL_BLEND_COLOR = 32773 # /usr/include/GL/gl.h:1628 # /usr/include/GL/gl.h:1631 glColorTable = _link_function('glColorTable', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1635 glColorSubTable = _link_function('glColorSubTable', None, [GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1640 glColorTableParameteriv = _link_function('glColorTableParameteriv', None, [GLenum, GLenum, POINTER(GLint)], 
None) # /usr/include/GL/gl.h:1643 glColorTableParameterfv = _link_function('glColorTableParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], None) # /usr/include/GL/gl.h:1646 glCopyColorSubTable = _link_function('glCopyColorSubTable', None, [GLenum, GLsizei, GLint, GLint, GLsizei], None) # /usr/include/GL/gl.h:1649 glCopyColorTable = _link_function('glCopyColorTable', None, [GLenum, GLenum, GLint, GLint, GLsizei], None) # /usr/include/GL/gl.h:1652 glGetColorTable = _link_function('glGetColorTable', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1655 glGetColorTableParameterfv = _link_function('glGetColorTableParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], None) # /usr/include/GL/gl.h:1658 glGetColorTableParameteriv = _link_function('glGetColorTableParameteriv', None, [GLenum, GLenum, POINTER(GLint)], None) # /usr/include/GL/gl.h:1661 glBlendEquation = _link_function('glBlendEquation', None, [GLenum], None) # /usr/include/GL/gl.h:1663 glBlendColor = _link_function('glBlendColor', None, [GLclampf, GLclampf, GLclampf, GLclampf], None) # /usr/include/GL/gl.h:1666 glHistogram = _link_function('glHistogram', None, [GLenum, GLsizei, GLenum, GLboolean], None) # /usr/include/GL/gl.h:1669 glResetHistogram = _link_function('glResetHistogram', None, [GLenum], None) # /usr/include/GL/gl.h:1671 glGetHistogram = _link_function('glGetHistogram', None, [GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1675 glGetHistogramParameterfv = _link_function('glGetHistogramParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], None) # /usr/include/GL/gl.h:1678 glGetHistogramParameteriv = _link_function('glGetHistogramParameteriv', None, [GLenum, GLenum, POINTER(GLint)], None) # /usr/include/GL/gl.h:1681 glMinmax = _link_function('glMinmax', None, [GLenum, GLenum, GLboolean], None) # /usr/include/GL/gl.h:1684 glResetMinmax = _link_function('glResetMinmax', None, [GLenum], None) # /usr/include/GL/gl.h:1686 glGetMinmax 
= _link_function('glGetMinmax', None, [GLenum, GLboolean, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1690 glGetMinmaxParameterfv = _link_function('glGetMinmaxParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], None) # /usr/include/GL/gl.h:1693 glGetMinmaxParameteriv = _link_function('glGetMinmaxParameteriv', None, [GLenum, GLenum, POINTER(GLint)], None) # /usr/include/GL/gl.h:1696 glConvolutionFilter1D = _link_function('glConvolutionFilter1D', None, [GLenum, GLenum, GLsizei, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1700 glConvolutionFilter2D = _link_function('glConvolutionFilter2D', None, [GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1704 glConvolutionParameterf = _link_function('glConvolutionParameterf', None, [GLenum, GLenum, GLfloat], None) # /usr/include/GL/gl.h:1707 glConvolutionParameterfv = _link_function('glConvolutionParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], None) # /usr/include/GL/gl.h:1710 glConvolutionParameteri = _link_function('glConvolutionParameteri', None, [GLenum, GLenum, GLint], None) # /usr/include/GL/gl.h:1713 glConvolutionParameteriv = _link_function('glConvolutionParameteriv', None, [GLenum, GLenum, POINTER(GLint)], None) # /usr/include/GL/gl.h:1716 glCopyConvolutionFilter1D = _link_function('glCopyConvolutionFilter1D', None, [GLenum, GLenum, GLint, GLint, GLsizei], None) # /usr/include/GL/gl.h:1719 glCopyConvolutionFilter2D = _link_function('glCopyConvolutionFilter2D', None, [GLenum, GLenum, GLint, GLint, GLsizei, GLsizei], None) # /usr/include/GL/gl.h:1723 glGetConvolutionFilter = _link_function('glGetConvolutionFilter', None, [GLenum, GLenum, GLenum, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1726 glGetConvolutionParameterfv = _link_function('glGetConvolutionParameterfv', None, [GLenum, GLenum, POINTER(GLfloat)], None) # /usr/include/GL/gl.h:1729 glGetConvolutionParameteriv = _link_function('glGetConvolutionParameteriv', 
None, [GLenum, GLenum, POINTER(GLint)], None) # /usr/include/GL/gl.h:1732 glSeparableFilter2D = _link_function('glSeparableFilter2D', None, [GLenum, GLenum, GLsizei, GLsizei, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1736 glGetSeparableFilter = _link_function('glGetSeparableFilter', None, [GLenum, GLenum, GLenum, POINTER(GLvoid), POINTER(GLvoid), POINTER(GLvoid)], None) PFNGLBLENDCOLORPROC = CFUNCTYPE(None, GLclampf, GLclampf, GLclampf, GLclampf) # /usr/include/GL/gl.h:1739 PFNGLBLENDEQUATIONPROC = CFUNCTYPE(None, GLenum) # /usr/include/GL/gl.h:1740 GL_TEXTURE0 = 33984 # /usr/include/GL/gl.h:1749 GL_TEXTURE1 = 33985 # /usr/include/GL/gl.h:1750 GL_TEXTURE2 = 33986 # /usr/include/GL/gl.h:1751 GL_TEXTURE3 = 33987 # /usr/include/GL/gl.h:1752 GL_TEXTURE4 = 33988 # /usr/include/GL/gl.h:1753 GL_TEXTURE5 = 33989 # /usr/include/GL/gl.h:1754 GL_TEXTURE6 = 33990 # /usr/include/GL/gl.h:1755 GL_TEXTURE7 = 33991 # /usr/include/GL/gl.h:1756 GL_TEXTURE8 = 33992 # /usr/include/GL/gl.h:1757 GL_TEXTURE9 = 33993 # /usr/include/GL/gl.h:1758 GL_TEXTURE10 = 33994 # /usr/include/GL/gl.h:1759 GL_TEXTURE11 = 33995 # /usr/include/GL/gl.h:1760 GL_TEXTURE12 = 33996 # /usr/include/GL/gl.h:1761 GL_TEXTURE13 = 33997 # /usr/include/GL/gl.h:1762 GL_TEXTURE14 = 33998 # /usr/include/GL/gl.h:1763 GL_TEXTURE15 = 33999 # /usr/include/GL/gl.h:1764 GL_TEXTURE16 = 34000 # /usr/include/GL/gl.h:1765 GL_TEXTURE17 = 34001 # /usr/include/GL/gl.h:1766 GL_TEXTURE18 = 34002 # /usr/include/GL/gl.h:1767 GL_TEXTURE19 = 34003 # /usr/include/GL/gl.h:1768 GL_TEXTURE20 = 34004 # /usr/include/GL/gl.h:1769 GL_TEXTURE21 = 34005 # /usr/include/GL/gl.h:1770 GL_TEXTURE22 = 34006 # /usr/include/GL/gl.h:1771 GL_TEXTURE23 = 34007 # /usr/include/GL/gl.h:1772 GL_TEXTURE24 = 34008 # /usr/include/GL/gl.h:1773 GL_TEXTURE25 = 34009 # /usr/include/GL/gl.h:1774 GL_TEXTURE26 = 34010 # /usr/include/GL/gl.h:1775 GL_TEXTURE27 = 34011 # /usr/include/GL/gl.h:1776 GL_TEXTURE28 = 34012 # 
/usr/include/GL/gl.h:1777 GL_TEXTURE29 = 34013 # /usr/include/GL/gl.h:1778 GL_TEXTURE30 = 34014 # /usr/include/GL/gl.h:1779 GL_TEXTURE31 = 34015 # /usr/include/GL/gl.h:1780 GL_ACTIVE_TEXTURE = 34016 # /usr/include/GL/gl.h:1781 GL_CLIENT_ACTIVE_TEXTURE = 34017 # /usr/include/GL/gl.h:1782 GL_MAX_TEXTURE_UNITS = 34018 # /usr/include/GL/gl.h:1783 GL_NORMAL_MAP = 34065 # /usr/include/GL/gl.h:1785 GL_REFLECTION_MAP = 34066 # /usr/include/GL/gl.h:1786 GL_TEXTURE_CUBE_MAP = 34067 # /usr/include/GL/gl.h:1787 GL_TEXTURE_BINDING_CUBE_MAP = 34068 # /usr/include/GL/gl.h:1788 GL_TEXTURE_CUBE_MAP_POSITIVE_X = 34069 # /usr/include/GL/gl.h:1789 GL_TEXTURE_CUBE_MAP_NEGATIVE_X = 34070 # /usr/include/GL/gl.h:1790 GL_TEXTURE_CUBE_MAP_POSITIVE_Y = 34071 # /usr/include/GL/gl.h:1791 GL_TEXTURE_CUBE_MAP_NEGATIVE_Y = 34072 # /usr/include/GL/gl.h:1792 GL_TEXTURE_CUBE_MAP_POSITIVE_Z = 34073 # /usr/include/GL/gl.h:1793 GL_TEXTURE_CUBE_MAP_NEGATIVE_Z = 34074 # /usr/include/GL/gl.h:1794 GL_PROXY_TEXTURE_CUBE_MAP = 34075 # /usr/include/GL/gl.h:1795 GL_MAX_CUBE_MAP_TEXTURE_SIZE = 34076 # /usr/include/GL/gl.h:1796 GL_COMPRESSED_ALPHA = 34025 # /usr/include/GL/gl.h:1798 GL_COMPRESSED_LUMINANCE = 34026 # /usr/include/GL/gl.h:1799 GL_COMPRESSED_LUMINANCE_ALPHA = 34027 # /usr/include/GL/gl.h:1800 GL_COMPRESSED_INTENSITY = 34028 # /usr/include/GL/gl.h:1801 GL_COMPRESSED_RGB = 34029 # /usr/include/GL/gl.h:1802 GL_COMPRESSED_RGBA = 34030 # /usr/include/GL/gl.h:1803 GL_TEXTURE_COMPRESSION_HINT = 34031 # /usr/include/GL/gl.h:1804 GL_TEXTURE_COMPRESSED_IMAGE_SIZE = 34464 # /usr/include/GL/gl.h:1805 GL_TEXTURE_COMPRESSED = 34465 # /usr/include/GL/gl.h:1806 GL_NUM_COMPRESSED_TEXTURE_FORMATS = 34466 # /usr/include/GL/gl.h:1807 GL_COMPRESSED_TEXTURE_FORMATS = 34467 # /usr/include/GL/gl.h:1808 GL_MULTISAMPLE = 32925 # /usr/include/GL/gl.h:1810 GL_SAMPLE_ALPHA_TO_COVERAGE = 32926 # /usr/include/GL/gl.h:1811 GL_SAMPLE_ALPHA_TO_ONE = 32927 # /usr/include/GL/gl.h:1812 GL_SAMPLE_COVERAGE = 32928 # 
/usr/include/GL/gl.h:1813 GL_SAMPLE_BUFFERS = 32936 # /usr/include/GL/gl.h:1814 GL_SAMPLES = 32937 # /usr/include/GL/gl.h:1815 GL_SAMPLE_COVERAGE_VALUE = 32938 # /usr/include/GL/gl.h:1816 GL_SAMPLE_COVERAGE_INVERT = 32939 # /usr/include/GL/gl.h:1817 GL_MULTISAMPLE_BIT = 536870912 # /usr/include/GL/gl.h:1818 GL_TRANSPOSE_MODELVIEW_MATRIX = 34019 # /usr/include/GL/gl.h:1820 GL_TRANSPOSE_PROJECTION_MATRIX = 34020 # /usr/include/GL/gl.h:1821 GL_TRANSPOSE_TEXTURE_MATRIX = 34021 # /usr/include/GL/gl.h:1822 GL_TRANSPOSE_COLOR_MATRIX = 34022 # /usr/include/GL/gl.h:1823 GL_COMBINE = 34160 # /usr/include/GL/gl.h:1825 GL_COMBINE_RGB = 34161 # /usr/include/GL/gl.h:1826 GL_COMBINE_ALPHA = 34162 # /usr/include/GL/gl.h:1827 GL_SOURCE0_RGB = 34176 # /usr/include/GL/gl.h:1828 GL_SOURCE1_RGB = 34177 # /usr/include/GL/gl.h:1829 GL_SOURCE2_RGB = 34178 # /usr/include/GL/gl.h:1830 GL_SOURCE0_ALPHA = 34184 # /usr/include/GL/gl.h:1831 GL_SOURCE1_ALPHA = 34185 # /usr/include/GL/gl.h:1832 GL_SOURCE2_ALPHA = 34186 # /usr/include/GL/gl.h:1833 GL_OPERAND0_RGB = 34192 # /usr/include/GL/gl.h:1834 GL_OPERAND1_RGB = 34193 # /usr/include/GL/gl.h:1835 GL_OPERAND2_RGB = 34194 # /usr/include/GL/gl.h:1836 GL_OPERAND0_ALPHA = 34200 # /usr/include/GL/gl.h:1837 GL_OPERAND1_ALPHA = 34201 # /usr/include/GL/gl.h:1838 GL_OPERAND2_ALPHA = 34202 # /usr/include/GL/gl.h:1839 GL_RGB_SCALE = 34163 # /usr/include/GL/gl.h:1840 GL_ADD_SIGNED = 34164 # /usr/include/GL/gl.h:1841 GL_INTERPOLATE = 34165 # /usr/include/GL/gl.h:1842 GL_SUBTRACT = 34023 # /usr/include/GL/gl.h:1843 GL_CONSTANT = 34166 # /usr/include/GL/gl.h:1844 GL_PRIMARY_COLOR = 34167 # /usr/include/GL/gl.h:1845 GL_PREVIOUS = 34168 # /usr/include/GL/gl.h:1846 GL_DOT3_RGB = 34478 # /usr/include/GL/gl.h:1848 GL_DOT3_RGBA = 34479 # /usr/include/GL/gl.h:1849 GL_CLAMP_TO_BORDER = 33069 # /usr/include/GL/gl.h:1851 # /usr/include/GL/gl.h:1853 glActiveTexture = _link_function('glActiveTexture', None, [GLenum], None) # /usr/include/GL/gl.h:1855 glClientActiveTexture 
= _link_function('glClientActiveTexture', None, [GLenum], None) # /usr/include/GL/gl.h:1857 glCompressedTexImage1D = _link_function('glCompressedTexImage1D', None, [GLenum, GLint, GLenum, GLsizei, GLint, GLsizei, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1859 glCompressedTexImage2D = _link_function('glCompressedTexImage2D', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1861 glCompressedTexImage3D = _link_function('glCompressedTexImage3D', None, [GLenum, GLint, GLenum, GLsizei, GLsizei, GLsizei, GLint, GLsizei, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1863 glCompressedTexSubImage1D = _link_function('glCompressedTexSubImage1D', None, [GLenum, GLint, GLint, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1865 glCompressedTexSubImage2D = _link_function('glCompressedTexSubImage2D', None, [GLenum, GLint, GLint, GLint, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1867 glCompressedTexSubImage3D = _link_function('glCompressedTexSubImage3D', None, [GLenum, GLint, GLint, GLint, GLint, GLsizei, GLsizei, GLsizei, GLenum, GLsizei, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1869 glGetCompressedTexImage = _link_function('glGetCompressedTexImage', None, [GLenum, GLint, POINTER(GLvoid)], None) # /usr/include/GL/gl.h:1871 glMultiTexCoord1d = _link_function('glMultiTexCoord1d', None, [GLenum, GLdouble], None) # /usr/include/GL/gl.h:1873 glMultiTexCoord1dv = _link_function('glMultiTexCoord1dv', None, [GLenum, POINTER(GLdouble)], None) # /usr/include/GL/gl.h:1875 glMultiTexCoord1f = _link_function('glMultiTexCoord1f', None, [GLenum, GLfloat], None) # /usr/include/GL/gl.h:1877 glMultiTexCoord1fv = _link_function('glMultiTexCoord1fv', None, [GLenum, POINTER(GLfloat)], None) # /usr/include/GL/gl.h:1879 glMultiTexCoord1i = _link_function('glMultiTexCoord1i', None, [GLenum, GLint], None) # /usr/include/GL/gl.h:1881 glMultiTexCoord1iv = 
_link_function('glMultiTexCoord1iv', None, [GLenum, POINTER(GLint)], None) # /usr/include/GL/gl.h:1883 glMultiTexCoord1s = _link_function('glMultiTexCoord1s', None, [GLenum, GLshort], None) # /usr/include/GL/gl.h:1885 glMultiTexCoord1sv = _link_function('glMultiTexCoord1sv', None, [GLenum, POINTER(GLshort)], None) # /usr/include/GL/gl.h:1887 glMultiTexCoord2d = _link_function('glMultiTexCoord2d', None, [GLenum, GLdouble, GLdouble], None) # /usr/include/GL/gl.h:1889 glMultiTexCoord2dv = _link_function('glMultiTexCoord2dv', None, [GLenum, POINTER(GLdouble)], None) # /usr/include/GL/gl.h:1891 glMultiTexCoord2f = _link_function('glMultiTexCoord2f', None, [GLenum, GLfloat, GLfloat], None) # /usr/include/GL/gl.h:1893 glMultiTexCoord2fv = _link_function('glMultiTexCoord2fv', None, [GLenum, POINTER(GLfloat)], None) # /usr/include/GL/gl.h:1895 glMultiTexCoord2i =
<filename>jnpr/openclos/underlayRestRoutes.py
'''
Created on Sep 2, 2014

@author: moloyc
'''
import os
import bottle
from sqlalchemy.orm import exc
import StringIO
import zipfile
import traceback
import json
import util
import logging
from bottle import error, request, response, PluginError, ServerAdapter
from exception import InvalidRequest, PodNotFound, CablingPlanNotFound, DeviceConfigurationNotFound, DeviceNotFound, ImageNotFound, CreatePodFailed, UpdatePodFailed
from model import Pod, Device, InterfaceLogical
from dao import Dao
from report import ResourceAllocationReport, L2Report, L3Report
from l3Clos import L3ClosMediation
from ztp import ZtpServer
from loader import OpenClosProperty, DeviceSku, loadLoggingConfig

#moduleName = 'underlayRestRoutes'
#loadLoggingConfig(appName=moduleName)
#logger = logging.getLogger(moduleName)
# Module-level logger; injected by install() from the caller's context.
logger = None

# Default output/image roots; webServerRoot may be overridden below by
# conf['outputDir'].
webServerRoot = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'out')
junosImageRoot = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'conf', 'ztp')

def install(context):
    """Entry point called by the REST server to mount the underlay routes."""
    global logger
    logger = context['logger']
    UnderlayRestRoutes().install(context)

class UnderlayRestRoutes():
    def install(self, context):
        """Register all underlay REST endpoints on the bottle app in `context`.

        `context` supplies: baseUrl, conf, dao, daoClass, app, restServer.
        """
        self.baseUrl = context['baseUrl'] + '/underlay'
        self._conf = context['conf']
        self.__dao = context['dao']
        self.__daoClass = context['daoClass']
        self.app = context['app']
        if 'outputDir' in self._conf:
            global webServerRoot
            webServerRoot = self._conf['outputDir']
        self.report = ResourceAllocationReport(self._conf, self.__daoClass)
        # Create a single instance of l2Report as it holds thread-pool
        # for device connection. Don't create l2Report multiple times
        self.l2Report = L2Report(self._conf, self.__daoClass)
        # Create a single instance of l3Report as it holds thread-pool
        # for device connection. Don't create l3Report multiple times
        self.l3Report = L3Report(self._conf, self.__daoClass)
        self.deviceSku = DeviceSku()
        # install index links
        context['restServer'].addIndexLink(self.baseUrl + '/pods')
        context['restServer'].addIndexLink(self.baseUrl + '/conf')
        # GET APIs
        self.app.route(self.baseUrl + '/conf', 'GET', self.getOpenClosConfigParams)
        self.app.route(self.baseUrl + '/pods', 'GET', self.getPods)
        self.app.route(self.baseUrl + '/images/<junosImageName>', 'GET', self.getJunosImage)
        self.app.route(self.baseUrl + '/pods/<podId>', 'GET', self.getPod)
        self.app.route(self.baseUrl + '/pods/<podId>/cabling-plan', 'GET', self.getCablingPlan)
        self.app.route(self.baseUrl + '/pods/<podId>/ztp-configuration', 'GET', self.getZtpConfig)
        self.app.route(self.baseUrl + '/pods/<podId>/device-configuration', 'GET', self.getDeviceConfigsInZip)
        self.app.route(self.baseUrl + '/pods/<podId>/leaf-generic-configurations/<deviceModel>', 'GET', self.getLeafGenericConfiguration)
        self.app.route(self.baseUrl + '/pods/<podId>/l2-report', 'GET', self.getL2Report)
        self.app.route(self.baseUrl + '/pods/<podId>/l3-report', 'GET', self.getL3Report)
        self.app.route(self.baseUrl + '/pods/<podId>/devices', 'GET', self.getDevices)
        self.app.route(self.baseUrl + '/pods/<podId>/devices/<deviceId>', 'GET', self.getDevice)
        self.app.route(self.baseUrl + '/pods/<podId>/devices/<deviceId>/config', 'GET', self.getDeviceConfig)
        # POST/PUT APIs
        self.app.route(self.baseUrl + '/pods', 'POST', self.createPod)
        self.app.route(self.baseUrl + '/pods/<podId>/cabling-plan', 'PUT', self.createCablingPlan)
        self.app.route(self.baseUrl + '/pods/<podId>/device-configuration', 'PUT', self.createDeviceConfiguration)
        self.app.route(self.baseUrl + '/pods/<podId>/ztp-configuration', 'PUT', self.createZtpConfiguration)
        self.app.route(self.baseUrl + '/pods/<podId>', 'PUT', self.reconfigPod)
        self.app.route(self.baseUrl + '/conf/', 'PUT', self.setOpenClosConfigParams)
        # DELETE APIs
        self.app.route(self.baseUrl + '/pods/<podId>',
'DELETE', self.deletePod) def getPods(self, dbSession): url = str(bottle.request.url).translate(None, ',') podsData = {} listOfIpFbarics = [] pods = self.report.getPods(dbSession) logger.debug("count of pods: %d", len(pods)) if not pods: logger.debug("There are no pods in the system ") for i in range(len(pods)): pod = {} pod['uri'] = url +'/'+ pods[i]['id'] pod['id'] = pods[i]['id'] pod['name'] = pods[i]['name'] pod['spineDeviceType'] = pods[i]['spineDeviceType'] pod['spineCount'] = pods[i]['spineCount'] pod['leafSettings'] = pods[i]['leafSettings'] pod['leafCount'] = pods[i]['leafCount'] pod['devicePassword'] = pods[i]['devicePassword'] listOfIpFbarics.append(pod) podsData['pod'] = listOfIpFbarics podsData['total'] = len(listOfIpFbarics) podsData['uri'] = url return {'pods': podsData} @staticmethod def getPodFieldListToCopy(): return ['id', 'name', 'description', 'spineSettings', 'spineCount', 'spineAS', 'leafSettings', 'leafCount', 'leafAS', 'devicePassword', 'leafUplinkcountMustBeUp', 'loopbackPrefix', 'vlanPrefix', 'interConnectPrefix', 'managementPrefix', 'outOfBandAddressList', 'outOfBandGateway', 'topologyType', 'hostOrVmCountPerLeaf'] def getPod(self, dbSession, podId, requestUrl=None): if requestUrl is None: requestUrl = str(bottle.request.url).translate(None, ',') pod = self.report.getPod(dbSession, podId) if pod is not None: outputDict = {} devices = pod.devices for field in self.getPodFieldListToCopy(): outputDict[field] = pod.__dict__.get(field) outputDict['spineSettings'] = [] outputDict['spineSettings'].append({'deviceType': pod.spineDeviceType, 'uplinkPorts': pod.spineUplinkRegex, 'downlinkPorts': pod.spineDownlinkRegex, 'junosImage': pod.spineJunosImage}) outputDict['leafSettings'] = [] for leafSetting in pod.leafSettings: outputDict['leafSettings'].append({'deviceType': leafSetting.deviceFamily, 'uplinkPorts': leafSetting.uplinkRegex, 'downlinkPorts': leafSetting.downlinkRegex, 'junosImage': leafSetting.junosImage}) outputDict['devicePassword'] = 
pod.getCleartextPassword() outputDict['uri'] = requestUrl outputDict['devices'] = {'uri': requestUrl + '/devices', 'total':len(devices)} outputDict['cablingPlan'] = {'uri': requestUrl + '/cabling-plan'} outputDict['deviceConfiguration'] = {'uri': requestUrl + '/device-configuration'} outputDict['ztpConfiguration'] = {'uri': requestUrl + '/ztp-configuration'} outputDict['l2Report'] = {'uri': requestUrl + '/l2-report'} outputDict['l3Report'] = {'uri': requestUrl + '/l3-report'} logger.debug('getPod: %s', podId) return {'pod': outputDict} else: raise bottle.HTTPError(404, exception=PodNotFound(podId)) def getCablingPlan(self, dbSession, podId): header = bottle.request.get_header('Accept') logger.debug('Accept header before processing: %s', header) # hack to remove comma character, must be a bug on Bottle header = header.translate(None, ',') logger.debug('Accept header after processing: %s', header) pod = self.report.getPod(dbSession, podId) if pod is not None: logger.debug('Pod name: %s', pod.name) if header == 'application/json': cablingPlan = pod.cablingPlan if cablingPlan is not None and cablingPlan.json is not None: logger.debug('CablingPlan found in DB') return cablingPlan.json else: raise bottle.HTTPError(404, exception=CablingPlanNotFound(pod.id)) else: podFolder = pod.id + '-' + pod.name fileName = os.path.join(podFolder, 'cablingPlan.dot') logger.debug('webServerRoot: %s, fileName: %s, exists: %s', webServerRoot, fileName, os.path.exists(os.path.join(webServerRoot, fileName))) logger.debug('Cabling file name: %s', fileName) cablingPlan = bottle.static_file(fileName, root=webServerRoot) if isinstance(cablingPlan, bottle.HTTPError): raise bottle.HTTPError(404, exception=CablingPlanNotFound(podFolder)) return cablingPlan else: raise bottle.HTTPError(404, exception=PodNotFound(podId)) def getLeafGenericConfiguration(self, dbSession, podId, deviceModel): pod = self.report.getPod(dbSession, podId) if pod is None: raise bottle.HTTPError(404, 
exception=PodNotFound(podId)) logger.debug('Pod name: %s, id: %s', pod.name, podId) leafSetting = self.__dao.getLeafSetting(dbSession, podId, deviceModel) if leafSetting is None or leafSetting.config is None: raise bottle.HTTPError(404, exception=DeviceConfigurationNotFound("Pod exists but no leaf generic config found, probably configuration \ was not created. deviceModel: %s, pod name: '%s', id: '%s'" % (deviceModel, pod.name, podId))) bottle.response.headers['Content-Type'] = 'application/json' return leafSetting.config def getDeviceConfigsInZip(self, dbSession, podId): pod = self.report.getPod(dbSession, podId) if pod is None: raise bottle.HTTPError(404, exception=PodNotFound(podId)) logger.debug('Pod name: %s', pod.name) zippedConfigFiles = UnderlayRestRoutes.createZipArchive(pod) if zippedConfigFiles is not None: bottle.response.headers['Content-Type'] = 'application/zip' return zippedConfigFiles else: raise bottle.HTTPError(404, exception=DeviceConfigurationNotFound("Pod exists but no configs for devices.'%s " % (pod.name))) @staticmethod def createZipArchive(pod): buff = StringIO.StringIO() zipArchive = zipfile.ZipFile(buff, mode='w') for device in pod.devices: fileName = device.id + '__' + device.name + '.conf' if device.config is not None: zipArchive.writestr(fileName, device.config.config) if pod.leafSettings is not None: for leafSetting in pod.leafSettings: if leafSetting.config is not None: zipArchive.writestr(leafSetting.deviceFamily + '.conf', leafSetting.config) zipArchive.close() logger.debug('zip file content:\n' + str(zipArchive.namelist())) return buff.getvalue() @staticmethod def getDeviceLoopbackIp(dbSession, deviceId): try: loopbackIfl = dbSession.query(InterfaceLogical).join(Device).filter(Device.id == deviceId).filter(InterfaceLogical.name == 'lo0.0').one() except exc.NoResultFound: logger.debug("Loopback interface not found for deviceId: '%s'", deviceId) return None return util.stripNetmaskFromIpString(loopbackIfl.ipaddress) def 
copyAdditionalDeviceFields(self, dict, device): ''' Hook to enhance Device object ''' def getDevices(self, dbSession, podId): devices = {} listOfDevices = [] pod = self.report.getPod(dbSession, podId) if pod is not None: for device in pod.devices: outputDict = {} outputDict['id'] = device.id outputDict['name'] = device.name outputDict['role'] = device.role outputDict['family'] = device.family outputDict['macAddress'] = device.macAddress outputDict['managementIp'] = device.managementIp outputDict['serialNumber'] = device.serialNumber outputDict['deployStatus'] = device.deployStatus outputDict['configStatus'] = device.configStatus outputDict['l2Status'] = device.l2Status outputDict['l3Status'] = device.l3Status outputDict['uri'] = str(bottle.request.url).translate(None, ',') + '/' +device.id outputDict['loopbackIp'] = UnderlayRestRoutes.getDeviceLoopbackIp(dbSession, device.id) self.copyAdditionalDeviceFields(outputDict, device) listOfDevices.append(outputDict) devices['device'] = listOfDevices devices['uri'] = str(bottle.request.url).translate(None, ',') devices['total'] = len(pod.devices) return {'devices' : devices} else: raise bottle.HTTPError(404, exception=PodNotFound(podId)) def getDevice(self, dbSession, podId, deviceId): device = UnderlayRestRoutes.isDeviceExists(dbSession, podId, deviceId) #podUri is constructed from url url = str(bottle.request.url).translate(None, ',') uri = url.split("/") uri.pop() uri.pop() ipFbaricUri = "/".join(uri) if device is not None: outputDict = {} outputDict['id'] = device.id outputDict['name'] = device.name outputDict['role'] = device.role outputDict['family'] = device.family outputDict['username'] = device.username outputDict['password'] = device.getCleartextPassword() outputDict['macAddress'] = device.macAddress outputDict['managementIp'] = device.managementIp outputDict['asn'] = device.asn outputDict['configStatus'] = device.configStatus outputDict['configStatusReason'] = device.configStatusReason outputDict['l2Status'] = 
device.l2Status outputDict['l2StatusReason'] = device.l2StatusReason outputDict['l3Status'] = device.l3Status outputDict['l3StatusReason'] = device.l3StatusReason outputDict['serialNumber'] = device.serialNumber outputDict['deployStatus'] = device.deployStatus outputDict['uri'] = str(bottle.request.url).translate(None, ',') outputDict['pod'] = {'uri': ipFbaricUri} outputDict['config'] = {'uri': str(bottle.request.url).translate(None, ',') + '/config'} outputDict['loopbackIp'] = UnderlayRestRoutes.getDeviceLoopbackIp(dbSession, device.id) self.copyAdditionalDeviceFields(outputDict, device) return {'device': outputDict} else: raise bottle.HTTPError(404, exception=DeviceNotFound("No device found with podId: '%s', deviceId: '%s'" % (podId, deviceId))) def getDeviceConfig(self, dbSession, podId, deviceId): device = UnderlayRestRoutes.isDeviceExists(dbSession, podId, deviceId) if device is None: raise bottle.HTTPError(404, exception=DeviceNotFound("No device found with podId: '%s', deviceId: '%s'" % (podId, deviceId))) config = device.config if config is None: raise bottle.HTTPError(404, exception=DeviceConfigurationNotFound("Device exists but no config found, probably fabric script is not ran. podId: '%s', deviceId: '%s'" % (podId, deviceId))) bottle.response.headers['Content-Type'] = 'application/json' return config.config def getZtpConfig(self, dbSession, podId): pod = self.report.getPod(dbSession, podId) if pod is not None: logger.debug('pod name: %s', pod.name) podFolder = pod.id + '-' + pod.name fileName = os.path.join(podFolder, "dhcpd.conf") logger.debug('webServerRoot: %s, fileName: %s, exists: %s', webServerRoot, fileName, os.path.exists(os.path.join(webServerRoot, fileName))) ztpConf = bottle.static_file(fileName, root=webServerRoot) if isinstance(ztpConf, bottle.HTTPError): raise bottle.HTTPError(404, exception=DeviceConfigurationNotFound("Pod exists but no ztp Config found. 
Pod name: '%s " % (pod.name))) return ztpConf else: raise bottle.HTTPError(404, exception=PodNotFound(podId)) @staticmethod def isDeviceExists(dbSession, podId, deviceId): try: device = dbSession.query(Device).join(Pod).filter(Device.id == deviceId).filter(Pod.id == podId).one() return device except exc.NoResultFound: raise bottle.HTTPError(404, exception=DeviceNotFound("No device found with podId: '%s', deviceId: '%s'" % (podId, deviceId))) def getJunosImage(self, dbSession, junosImageName): fileName = os.path.join(junosImageRoot, junosImageName) logger.debug('junosImageRoot: %s, image: %s, exists: %s', junosImageRoot, junosImageName, os.path.exists(fileName)) config = bottle.static_file(junosImageName, root=junosImageRoot) if isinstance(config, bottle.HTTPError): raise bottle.HTTPError(404, exception=ImageNotFound("Junos image file not found. name: '%s'" % (junosImageName))) return config
torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
            z = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
            for dim in range(4):
                res = torch.stack((x, y, z), dim)
                res_neg = torch.stack((x, y, z), dim - 4)
                expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
                # stacking along dim and dim - 4 must agree (negative-dim wrap)
                self.assertEqual(res, res_neg)
                self.assertEqual(res.size(), expected_size)
                self.assertEqual(res.select(dim, 0), x, 0)
                self.assertEqual(res.select(dim, 1), y, 0)
                self.assertEqual(res.select(dim, 2), z, 0)

    def test_stack_out(self):
        """torch.stack with out=: result lands in the preallocated buffer."""
        for dtype in (torch.half, torch.double, torch.int):
            x = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
            y = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
            z = torch.randint(low=-100, high=100, size=(2, 3, 4)).to(dtype)
            for dim in range(4):
                expected_size = x.size()[:dim] + (3,) + x.size()[dim:]
                res_out = x.new(expected_size)
                res_neg_out = x.new(expected_size)
                res_out_dp = res_out.data_ptr()
                res_out_neg_dp = res_neg_out.data_ptr()
                torch.stack((x, y, z), dim, out=res_out)
                torch.stack((x, y, z), dim - 4, out=res_neg_out)
                self.assertEqual(res_out, res_neg_out)
                self.assertEqual(res_out.size(), expected_size)
                # unchanged data_ptr proves out= wrote in place (no realloc)
                self.assertEqual(res_out_dp, res_out.data_ptr())
                self.assertEqual(res_out_neg_dp, res_neg_out.data_ptr())
                self.assertEqual(res_out.select(dim, 0), x, 0)
                self.assertEqual(res_out.select(dim, 1), y, 0)
                self.assertEqual(res_out.select(dim, 2), z, 0)

    def test_unbind(self):
        """unbind along each dim returns size(dim) slices matching select()."""
        x = torch.rand(2, 3, 4, 5)
        for dim in range(4):
            res = torch.unbind(x, dim)
            res2 = x.unbind(dim)
            self.assertEqual(x.size(dim), len(res))
            self.assertEqual(x.size(dim), len(res2))
            # NOTE(review): range(dim) only compares the first `dim` slices;
            # range(x.size(dim)) was likely intended -- confirm upstream.
            for i in range(dim):
                self.assertEqual(x.select(dim, i), res[i])
                self.assertEqual(x.select(dim, i), res2[i])

    def test_linspace(self):
        """linspace: out= agrees with return value; edge cases per device."""
        for device in torch.testing.get_all_device_types():
            _from = random.random()
            to = _from + random.random()
            res1 = torch.linspace(_from, to, 137, device=device)
            res2 = torch.tensor((), device=device)
            torch.linspace(_from, to, 137, out=res2)
            self.assertEqual(res1, res2, 0)
            # negative step count must raise
            self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, -1, device=device))
            self.assertEqual(torch.linspace(0, 1, 1, device=device), torch.zeros(1, device=device), 0)

            # Check linspace for generating with start > end.
            self.assertEqual(torch.linspace(2, 0, 3, device=device), torch.tensor((2, 1, 0), device=device), 0)

            # Check linspace for non-contiguous tensors.
            x = torch.zeros(2, 3, device=device)
            y = torch.linspace(0, 3, 4, out=x.narrow(1, 1, 2))
            self.assertEqual(x, torch.tensor(((0, 0, 1), (0, 2, 3)), device=device), 0)

    def test_logspace(self):
        """logspace: out= agrees with return value; base, reverse, strided."""
        _from = random.random()
        to = _from + random.random()
        res1 = torch.logspace(_from, to, 137)
        res2 = torch.Tensor()
        torch.logspace(_from, to, 137, out=res2)
        self.assertEqual(res1, res2, 0)
        self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, -1))
        self.assertEqual(torch.logspace(0, 1, 1), torch.ones(1), 0)

        # Check non-default base=2
        self.assertEqual(torch.logspace(1, 1, 1, 2), torch.ones(1) * 2)
        self.assertEqual(torch.logspace(0, 2, 3, 2), torch.Tensor((1, 2, 4)))

        # Check logspace_ for generating with start > end.
        self.assertEqual(torch.logspace(1, 0, 2), torch.Tensor((10, 1)), 0)

        # Check logspace_ for non-contiguous tensors.
        x = torch.zeros(2, 3)
        y = torch.logspace(0, 3, 4, out=x.narrow(1, 1, 2))
        self.assertEqual(x, torch.Tensor(((0, 1, 10), (0, 100, 1000))), 0)

    def test_rand(self):
        """Same manual seed must reproduce the same rand values via out=."""
        torch.manual_seed(123456)
        res1 = torch.rand(SIZE, SIZE)
        res2 = torch.Tensor()
        torch.manual_seed(123456)
        torch.rand(SIZE, SIZE, out=res2)
        self.assertEqual(res1, res2)

    def test_randint(self):
        """randint: (low, high) and high-only forms match under a fixed seed;
        all values fall in [0, 6)."""
        torch.manual_seed(123456)
        res1 = torch.randint(0, 6, (SIZE, SIZE))
        res2 = torch.Tensor()
        torch.manual_seed(123456)
        torch.randint(0, 6, (SIZE, SIZE), out=res2)
        torch.manual_seed(123456)
        res3 = torch.randint(6, (SIZE, SIZE))
        res4 = torch.Tensor()
        torch.manual_seed(123456)
        torch.randint(6, (SIZE, SIZE), out=res4)
        self.assertEqual(res1, res2)
        self.assertEqual(res1, res3)
        self.assertEqual(res1, res4)
        self.assertEqual(res2, res3)
        self.assertEqual(res2, res4)
        self.assertEqual(res3, res4)
        res1 = res1.view(-1)
        high = (res1 < 6).type(torch.LongTensor)
        low = (res1 >= 0).type(torch.LongTensor)
        tensorSize = res1.size()[0]
        # every element satisfies 0 <= v < 6
        assert(tensorSize == high.sum())
        assert(tensorSize == low.sum())

    def test_randn(self):
        """Same manual seed must reproduce the same randn values via out=."""
        torch.manual_seed(123456)
        res1 = torch.randn(SIZE, SIZE)
        res2 = torch.Tensor()
        torch.manual_seed(123456)
        torch.randn(SIZE, SIZE, out=res2)
        self.assertEqual(res1, res2)

    def test_slice(self):
        """Basic slicing semantics: clamping, empty results, strides."""
        empty = torch.empty(0, 4)
        x = torch.arange(0., 16).view(4, 4)
        self.assertEqual(x[:], x)
        self.assertEqual(x[:4], x)
        # start and stop are clamped to the size of dim
        self.assertEqual(x[:5], x)
        # if start >= stop then the result is empty
        self.assertEqual(x[2:1], empty)
        self.assertEqual(x[2:2], empty)
        # out of bounds is also empty
        self.assertEqual(x[10:12], empty)
        # additional correctness checks
        self.assertEqual(x[:1].data.tolist(), [[0, 1, 2, 3]])
        self.assertEqual(x[:-3].data.tolist(), [[0, 1, 2, 3]])
        self.assertEqual(x[:, -2:3].data.tolist(), [[2], [6], [10], [14]])
        self.assertEqual(x[0:-1:2].data.tolist(), [[0, 1, 2, 3], [8, 9, 10, 11]])

    def test_is_signed(self):
        """is_signed() per CPU tensor type; only ByteTensor is unsigned."""
        self.assertEqual(torch.IntTensor(5).is_signed(), True)
        self.assertEqual(torch.ByteTensor(5).is_signed(), False)
        self.assertEqual(torch.CharTensor(5).is_signed(), True)
        self.assertEqual(torch.FloatTensor(5).is_signed(), True)
        self.assertEqual(torch.HalfTensor(10).is_signed(), True)

    @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
    def test_is_signed_cuda(self):
        """is_signed() per CUDA tensor type; mirrors the CPU expectations."""
        self.assertEqual(torch.cuda.IntTensor(5).is_signed(), True)
        self.assertEqual(torch.cuda.ByteTensor(5).is_signed(), False)
        self.assertEqual(torch.cuda.CharTensor(5).is_signed(), True)
        self.assertEqual(torch.cuda.FloatTensor(5).is_signed(), True)
        self.assertEqual(torch.cuda.HalfTensor(10).is_signed(), True)

    @staticmethod
    def _test_solve(self, cast):
        # staticmethod taking the TestCase explicitly so CPU/CUDA suites can
        # share it via a `cast` that moves tensors to the target device.
        a = cast(torch.Tensor(((6.80, -2.11, 5.66, 5.97, 8.23),
                               (-6.05, -3.30, 5.36, -4.44, 1.08),
                               (-0.45, 2.58, -2.70, 0.27, 9.04),
                               (8.32, 2.71, 4.35, -7.17, 2.14),
                               (-9.67, -5.14, -7.26, 6.08, -6.87)))).t()
        b = cast(torch.Tensor(((4.02, 6.19, -8.22, -7.57, -3.03),
                               (-1.56, 4.00, -8.67, 1.75, 2.86),
                               (9.81, -4.09, -4.57, -8.61, 8.99)))).t()

        res1 = torch.solve(b, a)[0]
        # residual of A @ X vs b must be tiny
        self.assertLessEqual(b.dist(torch.mm(a, res1)), 1e-12)
        ta = cast(torch.Tensor())
        tb = cast(torch.Tensor())
        res2 = torch.solve(b, a, out=(tb, ta))[0]
        res3 = torch.solve(b, a, out=(b, a))[0]
        self.assertEqual(res1, tb)
        self.assertEqual(res1, b)
        self.assertEqual(res1, res2)
        self.assertEqual(res1, res3)

        # test reuse
        res1 = torch.solve(b, a)[0]
        ta = cast(torch.Tensor())
        tb = cast(torch.Tensor())
        torch.solve(b, a, out=(tb, ta))[0]
        self.assertEqual(res1, tb)
        torch.solve(b, a, out=(tb, ta))[0]
        self.assertEqual(res1, tb)

    @skipIfNoLapack
    def test_solve(self):
        self._test_solve(self, lambda t: t)

    @staticmethod
    def _test_solve_batched(self, cast):
        from common_utils import random_fullrank_matrix_distinct_singular_value
        # test against solve: one batch
        A = cast(random_fullrank_matrix_distinct_singular_value(5, 1))
        b = cast(torch.randn(1, 5, 10))
        x_exp, LU_exp = torch.solve(b.squeeze(0), A.squeeze(0))
        x, LU = torch.solve(b, A)
        self.assertEqual(x, x_exp.unsqueeze(0))
        self.assertEqual(LU, LU_exp.unsqueeze(0))

        # test against solve in a loop: four batches
        A = cast(random_fullrank_matrix_distinct_singular_value(5, 4))
        b = cast(torch.randn(4, 5, 10))
        x_exp_list = []
        LU_exp_list = []
        for i in range(4):
            x_exp, LU_exp = torch.solve(b[i], A[i])
            x_exp_list.append(x_exp)
            LU_exp_list.append(LU_exp)
        x_exp = torch.stack(x_exp_list)
        LU_exp = torch.stack(LU_exp_list)

        x, LU = torch.solve(b, A)
        self.assertEqual(x, x_exp)
        self.assertEqual(LU, LU_exp)

        # basic correctness test
        A = cast(random_fullrank_matrix_distinct_singular_value(5, 3))
        b = cast(torch.randn(3, 5, 10))
        x, LU = torch.solve(b, A)
        self.assertEqual(torch.matmul(A, x), b)

        # Test non-contiguous inputs.
        if not TEST_NUMPY:
            return
        from numpy.linalg import solve
        A = cast(random_fullrank_matrix_distinct_singular_value(2, 2)).permute(1, 0, 2)
        b = cast(torch.randn(2, 2, 2)).permute(2, 1, 0)
        x, _ = torch.solve(b, A)
        x_exp = torch.Tensor(solve(A.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(x.data, cast(x_exp))

    @skipIfNoLapack
    def test_solve_batched(self):
        self._test_solve_batched(self, lambda t: t)

    @staticmethod
    def _test_solve_batched_dims(self, cast):
        # broadcasting / layout variants validated against numpy.linalg.solve
        if not TEST_NUMPY:
            return
        from numpy.linalg import solve
        from common_utils import random_fullrank_matrix_distinct_singular_value
        # test against numpy.linalg.solve
        A = cast(random_fullrank_matrix_distinct_singular_value(4, 2, 1, 3))
        b = cast(torch.randn(2, 1, 3, 4, 6))
        x, _ = torch.solve(b, A)
        x_exp = torch.Tensor(solve(A.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(x.data, cast(x_exp))
        # test column major format
        A = cast(random_fullrank_matrix_distinct_singular_value(4, 2, 1, 3)).transpose(-2, -1)
        b = cast(torch.randn(2, 1, 3, 6, 4)).transpose(-2, -1)
        assert not A.is_contiguous()
        assert not b.is_contiguous()
        x, _ = torch.solve(b, A)
        x_exp = torch.Tensor(solve(A.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(x.data, cast(x_exp))
        # broadcasting b
        A = cast(random_fullrank_matrix_distinct_singular_value(4, 2, 1, 3))
        b = cast(torch.randn(4, 6))
        x, _ = torch.solve(b, A)
        x_exp = torch.Tensor(solve(A.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(x.data, cast(x_exp))
        # broadcasting A
        A = cast(random_fullrank_matrix_distinct_singular_value(4))
        b = cast(torch.randn(2, 1, 3, 4, 2))
        x, _ = torch.solve(b, A)
        x_exp = torch.Tensor(solve(A.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(x.data, cast(x_exp))
        # broadcasting both A & b
        A = cast(random_fullrank_matrix_distinct_singular_value(4, 1, 3, 1))
        b = cast(torch.randn(2, 1, 3, 4, 5))
        x, _ = torch.solve(b, A)
        x_exp = torch.Tensor(solve(A.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(x.data, cast(x_exp))

    @skipIfNoLapack
    def test_solve_batched_dims(self):
        self._test_solve_batched_dims(self, lambda t: t)

    def test_solve_methods_arg_device(self):
        """Mixed-device b/A must raise a clear RuntimeError for all solvers."""
        if not torch.cuda.is_available():
            return

        for b_device, A_device in product(['cpu', 'cuda'], repeat=2):
            if b_device == A_device:
                continue

            b = torch.randn(3, 1, device=b_device)
            A = torch.randn(3, 3, device=A_device)
            err_str = "Expected b and A to be on the same device"
            with self.assertRaisesRegex(RuntimeError, err_str):
                torch.solve(b, A)

            with self.assertRaisesRegex(RuntimeError, err_str):
                torch.cholesky_solve(b, A)

            with self.assertRaisesRegex(RuntimeError, err_str):
                torch.triangular_solve(b, A)

    @skipIfNoLapack
    def test_qr(self):
        # Since the QR decomposition is unique only up to the signs of the rows of
        # R, we must ensure these are positive before doing the comparison.
        def canonicalize(q, r):
            d = r.diag().sign().diag()
            return torch.mm(q, d), torch.mm(d, r)

        def canon_and_check(q, r, expected_q, expected_r):
            q_canon, r_canon = canonicalize(q, r)
            expected_q_canon, expected_r_canon = canonicalize(expected_q, expected_r)
            self.assertEqual(q_canon, expected_q_canon)
            self.assertEqual(r_canon, expected_r_canon)

        def check_qr(a, expected_q, expected_r):
            # standard invocation
            q, r = torch.qr(a)
            canon_and_check(q, r, expected_q, expected_r)

            # in-place
            q, r = torch.Tensor(), torch.Tensor()
            torch.qr(a, out=(q, r))
            canon_and_check(q, r, expected_q, expected_r)

            # manually calculate qr using geqrf and orgqr
            m = a.size(0)
            n = a.size(1)
            k = min(m, n)
            result, tau = torch.geqrf(a)
            self.assertEqual(result.size(0), m)
            self.assertEqual(result.size(1), n)
            self.assertEqual(tau.size(0), k)
            r = torch.triu(result.narrow(0, 0, k))
            q = torch.orgqr(result, tau)
            q, r = q.narrow(1, 0, k), r
            canon_and_check(q, r, expected_q, expected_r)

        # check square case
        a = torch.Tensor(((1, 2, 3), (4, 5, 6), (7, 8, 10)))

        expected_q = torch.Tensor((
            (-1.230914909793328e-01, 9.045340337332914e-01, 4.082482904638621e-01),
            (-4.923659639173310e-01, 3.015113445777629e-01, -8.164965809277264e-01),
            (-8.616404368553292e-01, -3.015113445777631e-01, 4.082482904638634e-01)))
        expected_r = torch.Tensor((
            (-8.124038404635959e+00, -9.601136296387955e+00, -1.193987e+01),
            (0.000000000000000e+00, 9.045340337332926e-01, 1.507557e+00),
            (0.000000000000000e+00, 0.000000000000000e+00, 4.082483e-01)))

        check_qr(a, expected_q, expected_r)

        # check rectangular thin
        a = torch.Tensor((
            (1, 2, 3),
            (4, 5, 6),
            (7, 8, 9),
            (10, 11, 13),
        ))
        expected_q = torch.Tensor((
            (-0.0776150525706334, -0.833052161400748, 0.3651483716701106),
            (-0.3104602102825332, -0.4512365874254053, -0.1825741858350556),
            (-0.5433053679944331, -0.0694210134500621, -0.7302967433402217),
            (-0.7761505257063329, 0.3123945605252804, 0.5477225575051663)
        ))
        expected_r = torch.Tensor((
            (-12.8840987267251261, -14.5916298832790581, -17.0753115655393231),
            (0, -1.0413152017509357, -1.770235842976589),
            (0, 0, 0.5477225575051664)
        ))

        check_qr(a, expected_q, expected_r)

        # check rectangular
""" File contains the groovy scripts used by Nexus 3 script api as objects that may be used in the nexus3 state module. I put these here as it made it easy to sync the groovy with the module itself """ create_blobstore = """ import groovy.json.JsonSlurper parsed_args = new JsonSlurper().parseText(args) existingBlobStore = blobStore.getBlobStoreManager().get(parsed_args.name) if (existingBlobStore == null) { if (parsed_args.type == "S3") { blobStore.createS3BlobStore(parsed_args.name, parsed_args.config) msg = "S3 blobstore {} created" } else { blobStore.createFileBlobStore(parsed_args.name, parsed_args.path) msg = "Created blobstore {} created" } log.info(msg, parsed_args.name) } else { msg = "Blobstore {} already exists. Left untouched" } log.info(msg, parsed_args.name) """ create_content_selector = """ import groovy.json.JsonSlurper import org.sonatype.nexus.selector.SelectorManager import org.sonatype.nexus.selector.SelectorConfiguration parsed_args = new JsonSlurper().parseText(args) selectorManager = container.lookup(SelectorManager.class.name) def selectorConfig boolean update = true selectorConfig = selectorManager.browse().find { it -> it.name == parsed_args.name } if (selectorConfig == null) { update = false selectorConfig = new SelectorConfiguration( 'name': parsed_args.name ) } selectorConfig.setDescription(parsed_args.description) selectorConfig.setType('csel') selectorConfig.setAttributes([ 'expression': parsed_args.search_expression ] as Map<String, Object>) if (update) { selectorManager.update(selectorConfig) } else { selectorManager.create(selectorConfig) } """ create_repo_group = """ import groovy.json.JsonSlurper import org.sonatype.nexus.repository.config.Configuration parsed_args = new JsonSlurper().parseText(args) repositoryManager = repository.repositoryManager existingRepository = repositoryManager.get(parsed_args.name) if (existingRepository != null) { newConfig = existingRepository.configuration.copy() // We only update values we are 
allowed to change (cf. greyed out options in gui) if (parsed_args.recipe_name == 'docker-group') { newConfig.attributes['docker']['forceBasicAuth'] = parsed_args.docker_force_basic_auth newConfig.attributes['docker']['v1Enabled'] = parsed_args.docker_v1_enabled newConfig.attributes['docker']['httpPort'] = parsed_args.http_port } newConfig.attributes['group']['memberNames'] = parsed_args.member_repos newConfig.attributes['storage']['strictContentTypeValidation'] = Boolean.valueOf(parsed_args.strict_content_validation) repositoryManager.update(newConfig) } else { if (parsed_args.recipe_name == 'docker-group') { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ docker: [ forceBasicAuth: parsed_args.docker_force_basic_auth, v1Enabled : parsed_args.docker_v1_enabled, httpPort: parsed_args.docker_http_port ], group: [ memberNames: parsed_args.member_repos ], storage: [ blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ] ] ) } else { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ group : [ memberNames: parsed_args.member_repos ], storage: [ blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ] ] ) } repositoryManager.create(configuration) } """ create_repo_hosted = """ import groovy.json.JsonSlurper import org.sonatype.nexus.repository.config.Configuration parsed_args = new JsonSlurper().parseText(args) repositoryManager = repository.repositoryManager existingRepository = repositoryManager.get(parsed_args.name) msg = "Args: {}" log.debug(msg, args) if (existingRepository != null) { msg = "Repo {} already exists. Updating..." log.debug(msg, parsed_args.name) newConfig = existingRepository.configuration.copy() // We only update values we are allowed to change (cf. 
greyed out options in gui) if (parsed_args.recipe_name == 'docker-hosted') { newConfig.attributes['docker']['forceBasicAuth'] = parsed_args.docker_force_basic_auth newConfig.attributes['docker']['v1Enabled'] = parsed_args.docker_v1_enabled newConfig.attributes['docker']['httpPort'] = parsed_args.docker_http_port } else if (parsed_args.recipe_name == 'maven2-hosted') { newConfig.attributes['maven']['versionPolicy'] = parsed_args.maven_version_policy.toUpperCase() newConfig.attributes['maven']['layoutPolicy'] = parsed_args.maven_layout_policy.toUpperCase() } else if (parsed_args.recipe_name == 'yum-hosted') { newConfig.attributes['yum']['repodataDepth'] = parsed_args.yum_repodata_depth as Integer newConfig.attributes['yum']['deployPolicy'] = parsed_args.yum_deploy_policy.toUpperCase() } newConfig.attributes['storage']['writePolicy'] = parsed_args.write_policy.toUpperCase() newConfig.attributes['storage']['strictContentTypeValidation'] = Boolean.valueOf(parsed_args.strict_content_validation) repositoryManager.update(newConfig) } else { if (parsed_args.recipe_name == 'maven2-hosted') { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ maven: [ versionPolicy: parsed_args.maven_version_policy.toUpperCase(), layoutPolicy : parsed_args.maven_layout_policy.toUpperCase() ], storage: [ writePolicy: parsed_args.write_policy.toUpperCase(), blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ] ] ) } else if (parsed_args.recipe_name == 'docker-hosted') { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ docker: [ forceBasicAuth: parsed_args.docker_force_basic_auth, v1Enabled : parsed_args.docker_v1_enabled, httpPort: parsed_args.docker_http_port ], storage: [ writePolicy: parsed_args.write_policy.toUpperCase(), blobStoreName: 
parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ] ] ) } else if (parsed_args.recipe_name == 'yum-hosted') { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ yum : [ repodataDepth : parsed_args.yum_repodata_depth.toInteger(), deployPolicy : parsed_args.yum_deploy_policy.toUpperCase() ], storage: [ writePolicy: parsed_args.write_policy.toUpperCase(), blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ] ] ) } else { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ storage: [ writePolicy: parsed_args.write_policy.toUpperCase(), blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ] ] ) } msg = "Configuration: {}" log.debug(msg, configuration) repositoryManager.create(configuration) } """ create_repo_proxy = """ import groovy.json.JsonSlurper import org.sonatype.nexus.repository.config.Configuration parsed_args = new JsonSlurper().parseText(args) repositoryManager = repository.repositoryManager authentication = parsed_args.remote_username == null ? null : [ type: 'username', username: parsed_args.remote_username, password: parsed_args.remote_password ] existingRepository = repositoryManager.get(parsed_args.name) msg = "Args: {}" log.debug(msg, args) if (existingRepository != null) { msg = "Repo {} already exists. Updating..." log.debug(msg, parsed_args.name) newConfig = existingRepository.configuration.copy() // We only update values we are allowed to change (cf. 
greyed out options in gui) if (parsed_args.recipe_name == 'docker-proxy') { newConfig.attributes['docker']['forceBasicAuth'] = parsed_args.docker_force_basic_auth newConfig.attributes['docker']['v1Enabled'] = parsed_args.docker_v1_enabled newConfig.attributes['dockerProxy']['indexType'] = parsed_args.docker_index_type newConfig.attributes['dockerProxy']['useTrustStoreForIndexAccess'] = parsed_args.docker_use_nexus_certificates_to_access_index newConfig.attributes['docker']['httpPort'] = parsed_args.docker_http_port } else if (parsed_args.recipe_name == 'maven2-proxy') { newConfig.attributes['maven']['versionPolicy'] = parsed_args.maven_version_policy.toUpperCase() newConfig.attributes['maven']['layoutPolicy'] = parsed_args.maven_layout_policy.toUpperCase() } newConfig.attributes['proxy']['remoteUrl'] = parsed_args.remote_url newConfig.attributes['proxy']['contentMaxAge'] = parsed_args.get('content_max_age', 1440.0) newConfig.attributes['proxy']['metadataMaxAge'] = parsed_args.get('metadata_max_age', 1440.0) newConfig.attributes['storage']['strictContentTypeValidation'] = Boolean.valueOf(parsed_args.strict_content_validation) newConfig.attributes['httpclient']['authentication'] = authentication repositoryManager.update(newConfig) } else { if (parsed_args.recipe_name == 'bower-proxy') { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ bower: [ rewritePackageUrls: true ], proxy: [ remoteUrl: parsed_args.remote_url, contentMaxAge: parsed_args.get('content_max_age', 1440.0), metadataMaxAge: parsed_args.get('metadata_max_age', 1440.0) ], httpclient: [ blocked: false, autoBlock: true, authentication: authentication ], storage: [ blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ], negativeCache: [ enabled: parsed_args.get("negative_cache_enabled", true), timeToLive: parsed_args.get("negative_cache_ttl", 1440.0) ] ] ) 
} else if (parsed_args.recipe_name == 'maven2-proxy') { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ maven : [ versionPolicy: parsed_args.maven_version_policy.toUpperCase(), layoutPolicy : parsed_args.maven_layout_policy.toUpperCase() ], proxy: [ remoteUrl: parsed_args.remote_url, contentMaxAge: parsed_args.get('content_max_age', 1440.0), metadataMaxAge: parsed_args.get('metadata_max_age', 1440.0) ], httpclient: [ blocked: false, autoBlock: true, authentication: authentication ], storage: [ blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ], negativeCache: [ enabled: parsed_args.get("negative_cache_enabled", true), timeToLive: parsed_args.get("negative_cache_ttl", 1440.0) ] ] ) } else if (parsed_args.recipe_name == 'docker-proxy') { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: true, attributes: [ docker: [ forceBasicAuth: parsed_args.docker_force_basic_auth, v1Enabled : parsed_args.docker_v1_enabled, httpPort: parsed_args.docker_http_port ], proxy: [ remoteUrl: parsed_args.remote_url, contentMaxAge: parsed_args.get('content_max_age', 1440.0), metadataMaxAge: parsed_args.get('metadata_max_age', 1440.0) ], dockerProxy: [ indexType: parsed_args.docker_index_type.toUpperCase(), useTrustStoreForIndexAccess: parsed_args.docker_use_nexus_certificates_to_access_index ], httpclient: [ blocked: false, autoBlock: true, authentication: authentication ], storage: [ blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ], negativeCache: [ enabled: parsed_args.get("negative_cache_enabled", true), timeToLive: parsed_args.get("negative_cache_ttl", 1440.0) ] ] ) } else { configuration = new Configuration( repositoryName: parsed_args.name, recipeName: parsed_args.recipe_name, online: 
true, attributes: [ proxy: [ remoteUrl: parsed_args.remote_url, contentMaxAge: parsed_args.get('content_max_age', 1440.0), metadataMaxAge: parsed_args.get('metadata_max_age', 1440.0) ], httpclient: [ blocked: false, autoBlock: true, authentication: authentication ], storage: [ blobStoreName: parsed_args.blob_store, strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) ], negativeCache: [ enabled: parsed_args.get("negative_cache_enabled", true), timeToLive: parsed_args.get("negative_cache_ttl", 1440.0) ] ] ) } msg = "Configuration: {}" log.debug(msg, configuration) repositoryManager.create(configuration) } """ create_task = """ import groovy.json.JsonSlurper import org.sonatype.nexus.scheduling.TaskConfiguration import org.sonatype.nexus.scheduling.TaskInfo import org.sonatype.nexus.scheduling.TaskScheduler import org.sonatype.nexus.scheduling.schedule.Schedule parsed_args = new JsonSlurper().parseText(args) TaskScheduler taskScheduler = container.lookup(TaskScheduler.class.getName()) TaskInfo existingTask = taskScheduler.listsTasks().find { TaskInfo taskInfo -> taskInfo.name == parsed_args.name } if (existingTask && existingTask.getCurrentState().getRunState() != null) { log.info("Could not update currently running task : " + parsed_args.name) return } TaskConfiguration taskConfiguration = taskScheduler.createTaskConfigurationInstance(parsed_args.typeId) if (existingTask) { taskConfiguration.setId(existingTask.getId()) } taskConfiguration.setName(parsed_args.name) parsed_args.taskProperties.each { key, value -> taskConfiguration.setString(key, value) } if (parsed_args.task_alert_email) { taskConfiguration.setAlertEmail(parsed_args.task_alert_email) } parsed_args.booleanTaskProperties.each { key, value -> taskConfiguration.setBoolean(key, Boolean.valueOf(value)) } Schedule schedule = taskScheduler.scheduleFactory.cron(new Date(), parsed_args.cron) taskScheduler.scheduleTask(taskConfiguration, schedule) """ delete_blobstore = """ import 
groovy.json.JsonSlurper parsed_args = new JsonSlurper().parseText(args) existingBlobStore = blobStore.getBlobStoreManager().get(parsed_args.name) if (existingBlobStore != null) { blobStore.getBlobStoreManager().delete(parsed_args.name) } """ delete_repo = """ import groovy.json.JsonSlurper parsed_args = new JsonSlurper().parseText(args) repository.getRepositoryManager().delete(parsed_args.name) """ setup_anonymous_access = """ import groovy.json.JsonSlurper parsed_args = new JsonSlurper().parseText(args) security.setAnonymousAccess(Boolean.valueOf(parsed_args.anonymous_access)) """ setup_base_url = """ import groovy.json.JsonSlurper parsed_args = new JsonSlurper().parseText(args) core.baseUrl(parsed_args.base_url) """ setup_capability = """ import groovy.json.JsonSlurper import org.sonatype.nexus.capability.CapabilityReference import org.sonatype.nexus.capability.CapabilityType import org.sonatype.nexus.internal.capability.DefaultCapabilityReference import org.sonatype.nexus.internal.capability.DefaultCapabilityRegistry parsed_args = new JsonSlurper().parseText(args) parsed_args.capability_properties['headerEnabled'] = parsed_args.capability_properties['headerEnabled'].toString() parsed_args.capability_properties['footerEnabled'] = parsed_args.capability_properties['footerEnabled'].toString() def capabilityRegistry = container.lookup(DefaultCapabilityRegistry.class.getName()) def capabilityType = CapabilityType.capabilityType(parsed_args.capability_typeId) DefaultCapabilityReference existing = capabilityRegistry.all.find { CapabilityReference capabilityReference -> capabilityReference.context().descriptor().type() == capabilityType } if (existing) { log.info(parsed_args.typeId + ' capability updated to: {}', capabilityRegistry.update(existing.id(), Boolean.valueOf(parsed_args.get('capability_enabled', true)), existing.notes(), parsed_args.capability_properties).toString() ) } else { log.info(parsed_args.typeId + ' capability created as: {}', capabilityRegistry. 
add(capabilityType, Boolean.valueOf(parsed_args.get('capability_enabled', true)), 'configured through api', parsed_args.capability_properties).toString() ) } """ setup_email = """ import groovy.json.JsonSlurper import org.sonatype.nexus.email.EmailConfiguration import org.sonatype.nexus.email.EmailManager parsed_args = new JsonSlurper().parseText(args) def emailMgr = container.lookup(EmailManager.class.getName()); emailConfig = new EmailConfiguration( enabled: parsed_args.email_server_enabled, host: parsed_args.email_server_host, port: Integer.valueOf(parsed_args.email_server_port), username: parsed_args.email_server_username, password: parsed_args.email_server_password, fromAddress: parsed_args.email_from_address, subjectPrefix: parsed_args.email_subject_prefix, startTlsEnabled: parsed_args.email_tls_enabled, startTlsRequired: parsed_args.email_tls_required, sslOnConnectEnabled: parsed_args.email_ssl_on_connect_enabled, sslCheckServerIdentityEnabled: parsed_args.email_ssl_check_server_identity_enabled, nexusTrustStoreEnabled: parsed_args.email_trust_store_enabled ) emailMgr.configuration = emailConfig """ setup_http_proxy = """ import groovy.json.JsonSlurper parsed_args = new JsonSlurper().parseText(args) core.removeHTTPProxy() if (parsed_args.with_http_proxy) { if (parsed_args.http_proxy_username) { core.httpProxyWithBasicAuth(parsed_args.http_proxy_host, parsed_args.http_proxy_port as int, parsed_args.http_proxy_username, parsed_args.http_proxy_password) } else { core.httpProxy(parsed_args.http_proxy_host, parsed_args.http_proxy_port as int) } } core.removeHTTPSProxy() if (parsed_args.with_https_proxy) { if (parsed_args.https_proxy_username) { core.httpsProxyWithBasicAuth(parsed_args.https_proxy_host, parsed_args.https_proxy_port as int, parsed_args.https_proxy_username, parsed_args.https_proxy_password) } else { core.httpsProxy(parsed_args.https_proxy_host, parsed_args.https_proxy_port as int) } } if (parsed_args.with_http_proxy || parsed_args.with_https_proxy) 
{ core.nonProxyHosts() core.nonProxyHosts(parsed_args.proxy_exclude_hosts as String[]) } """ setup_ldap = """ import org.sonatype.nexus.ldap.persist.LdapConfigurationManager import org.sonatype.nexus.ldap.persist.entity.LdapConfiguration import org.sonatype.nexus.ldap.persist.entity.Connection import org.sonatype.nexus.ldap.persist.entity.Mapping import groovy.json.JsonSlurper parsed_args = new JsonSlurper().parseText(args) def ldapConfigMgr = container.lookup(LdapConfigurationManager.class.getName()); def ldapConfig = new LdapConfiguration() boolean update = false; // Look for existing config to update ldapConfigMgr.listLdapServerConfigurations().each { if (it.name == parsed_args.name) { ldapConfig = it update = true } } ldapConfig.setName(parsed_args.name) // Connection connection = new Connection() connection.setHost(new Connection.Host(Connection.Protocol.valueOf(parsed_args.protocol), parsed_args.hostname, Integer.valueOf(parsed_args.port))) if (parsed_args.auth == "simple") { connection.setAuthScheme("simple") connection.setSystemUsername(parsed_args.username) connection.setSystemPassword(parsed_args.password) } else { connection.setAuthScheme("none") } connection.setSearchBase(parsed_args.search_base) connection.setConnectionTimeout(30) connection.setConnectionRetryDelay(300) connection.setMaxIncidentsCount(3) ldapConfig.setConnection(connection) // Mapping mapping = new Mapping() mapping.setUserBaseDn(parsed_args.user_base_dn) mapping.setLdapFilter(parsed_args.user_ldap_filter) mapping.setUserObjectClass(parsed_args.user_object_class) mapping.setUserIdAttribute(parsed_args.user_id_attribute) mapping.setUserRealNameAttribute(parsed_args.user_real_name_attribute) mapping.setEmailAddressAttribute(parsed_args.user_email_attribute) if (parsed_args.map_groups_as_roles) { if(parsed_args.map_groups_as_roles_type == "static"){ mapping.setLdapGroupsAsRoles(true) mapping.setGroupBaseDn(parsed_args.group_base_dn) 
mapping.setGroupObjectClass(parsed_args.group_object_class) mapping.setGroupIdAttribute(parsed_args.group_id_attribute) mapping.setGroupMemberAttribute(parsed_args.group_member_attribute) mapping.setGroupMemberFormat(parsed_args.group_member_format) } else if (parsed_args.map_groups_as_roles_type == "dynamic") { mapping.setLdapGroupsAsRoles(true) mapping.setUserMemberOfAttribute(parsed_args.user_memberof_attribute) } } mapping.setUserSubtree(parsed_args.user_subtree) mapping.setGroupSubtree(parsed_args.group_subtree) ldapConfig.setMapping(mapping) if (update) { ldapConfigMgr.updateLdapServerConfiguration(ldapConfig) } else { ldapConfigMgr.addLdapServerConfiguration(ldapConfig) } """ setup_privilege = """ import groovy.json.JsonSlurper import org.sonatype.nexus.security.privilege.NoSuchPrivilegeException import org.sonatype.nexus.security.user.UserManager import org.sonatype.nexus.security.privilege.Privilege parsed_args = new JsonSlurper().parseText(args) authManager = security.getSecuritySystem().getAuthorizationManager(UserManager.DEFAULT_SOURCE) def privilege boolean update = true try { privilege = authManager.getPrivilege(parsed_args.name) } catch (NoSuchPrivilegeException ignored) { // could not find any existing privilege update = false privilege = new Privilege( 'id': parsed_args.name, 'name': parsed_args.name ) } privilege.setDescription(parsed_args.description) privilege.setType(parsed_args.type) privilege.setProperties([ 'format': parsed_args.format,
# edgex_access: access layer for S3-compatible object stores and local FS.

# stdlib
import json
import os
import time
import re
import asyncio
import sys
import logging
import hashlib  # FIX: used by edgex_hasher (hashlib.md5/sha256) but was never imported
import base64   # FIX: used by edgex_hasher.base64digest but was never imported
from time import mktime, strptime
from datetime import datetime, timedelta
from xml.etree.ElementTree import fromstring as parse_xml, ParseError

# third-party async I/O stack
import async_timeout
import aiohttp
import aiofiles
import aiodns
import aiobotocore

# Logger name used by edgex_logger below.
ACCESS_LOG_NAME = "edgex_access"
# Largest object accepted in a single (non-multipart) S3 PUT: 5 GiB.
MAX_SINGLE_OBJ = 5 * 1024 * 1024 * 1024  # 5Gb

# Error objects, Exceptions etc
# ============================================================================

class edgex_s3exception(Exception):
    """Base for exceptions returned by S3 servers."""

    @staticmethod
    def from_bytes(status, body):
        """Turn an S3 XML error response into the matching exception instance.

        :param status: HTTP status code of the failed request (used only
                       when the body is empty).
        :param body:   raw XML error document returned by the server.
        :returns: an instance of the subclass named by the ``<Code>``
                  element, carrying the ``<Message>`` text (or the code
                  itself when no message is present).
        :raises RuntimeError: when the body is empty, is not parseable XML,
                  has no usable ``<Code>``, or names an error code this
                  module does not define.
        """
        if not body:
            raise RuntimeError("HTTP Error {}".format(status))
        try:
            xml = parse_xml(body)
        except ParseError:
            raise RuntimeError(body)
        code_el = xml.find("Code")
        if code_el is None or not code_el.text:
            raise RuntimeError(body)
        class_name = code_el.text
        try:
            # Error codes map 1:1 onto the subclass names defined below.
            cls = globals()[class_name]
        except KeyError:
            raise RuntimeError("Error {} is unknown".format(class_name))
        msg = xml.find("Message")
        return cls(class_name if msg is None else msg.text)

# One subclass per S3 error <Code>, looked up by name in from_bytes().
class AccessDenied(edgex_s3exception): pass
class AccountProblem(edgex_s3exception): pass
class AmbiguousGrantByEmailAddress(edgex_s3exception): pass
class BadDigest(edgex_s3exception): pass
class BucketAlreadyExists(edgex_s3exception): pass
class BucketAlreadyOwnedByYou(edgex_s3exception): pass
class BucketNotEmpty(edgex_s3exception): pass
class CredentialsNotSupported(edgex_s3exception): pass
class CrossLocationLoggingProhibited(edgex_s3exception): pass
class EntityTooSmall(edgex_s3exception): pass
class EntityTooLarge(edgex_s3exception): pass
class ExpiredToken(edgex_s3exception): pass
class IllegalVersioningConfigurationException(edgex_s3exception): pass
class IncompleteBody(edgex_s3exception): pass
class IncorrectNumberOfFilesInPostRequest(edgex_s3exception): pass
class InlineDataTooLarge(edgex_s3exception): pass
class InternalError(edgex_s3exception): pass
class InvalidAccessKeyId(edgex_s3exception): pass
class InvalidAddressingHeader(edgex_s3exception): pass
class InvalidArgument(edgex_s3exception): pass
class InvalidBucketName(edgex_s3exception): pass
class InvalidBucketState(edgex_s3exception): pass
class InvalidDigest(edgex_s3exception): pass
class InvalidEncryptionAlgorithmError(edgex_s3exception): pass
class InvalidLocationConstraint(edgex_s3exception): pass
class InvalidObjectState(edgex_s3exception): pass
class InvalidPart(edgex_s3exception): pass
class InvalidPartOrder(edgex_s3exception): pass
class InvalidPayer(edgex_s3exception): pass
class InvalidPolicyDocument(edgex_s3exception): pass
class InvalidRange(edgex_s3exception): pass
class InvalidRequest(edgex_s3exception): pass
class InvalidSecurity(edgex_s3exception): pass
class InvalidSOAPRequest(edgex_s3exception): pass
class InvalidStorageClass(edgex_s3exception): pass
class InvalidTargetBucketForLogging(edgex_s3exception): pass
class InvalidToken(edgex_s3exception): pass
class InvalidURI(edgex_s3exception): pass
class InvalidCommand(edgex_s3exception): pass
class InvalidStore(edgex_s3exception): pass
class KeyTooLong(edgex_s3exception): pass
class MalformedACLError(edgex_s3exception): pass
class MalformedPOSTRequest(edgex_s3exception): pass
class MalformedXML(edgex_s3exception): pass
class MaxMessageLengthExceeded(edgex_s3exception): pass
class MaxPostPreDataLengthExceededError(edgex_s3exception): pass
class MetadataTooLarge(edgex_s3exception): pass
class MethodNotAllowed(edgex_s3exception): pass
class MissingAttachment(edgex_s3exception): pass
class MissingContentLength(edgex_s3exception): pass
class MissingRequestBodyError(edgex_s3exception): pass
class MissingSecurityElement(edgex_s3exception): pass
class MissingSecurityHeader(edgex_s3exception): pass
class NoLoggingStatusForKey(edgex_s3exception): pass
class NoSuchBucket(edgex_s3exception): pass
class NoSuchKey(edgex_s3exception): pass
class NoSuchLifecycleConfiguration(edgex_s3exception): pass
class NoSuchUpload(edgex_s3exception): pass
class NoSuchVersion(edgex_s3exception): pass
# NOTE(review): NotImplemented shadows the Python builtin of the same name
# inside this module; renaming would break from_bytes()' name lookup.
class NotImplemented(edgex_s3exception): pass
class NotSignedUp(edgex_s3exception): pass
class NotSuchBucketPolicy(edgex_s3exception): pass
class OperationAborted(edgex_s3exception): pass
class PermanentRedirect(edgex_s3exception): pass
class PreconditionFailed(edgex_s3exception): pass
class Redirect(edgex_s3exception): pass
class RestoreAlreadyInProgress(edgex_s3exception): pass
class RequestIsNotMultiPartContent(edgex_s3exception): pass
class RequestTimeout(edgex_s3exception): pass
class RequestTimeTooSkewed(edgex_s3exception): pass
class RequestTorrentOfBucketError(edgex_s3exception): pass
class SignatureDoesNotMatch(edgex_s3exception): pass
class ServiceUnavailable(edgex_s3exception): pass
class SlowDown(edgex_s3exception): pass
class TemporaryRedirect(edgex_s3exception): pass
class TokenRefreshRequired(edgex_s3exception): pass
class TooManyBuckets(edgex_s3exception): pass
class UnexpectedContent(edgex_s3exception): pass
class UnresolvableGrantByEmailAddress(edgex_s3exception): pass
class UserKeyMustBeSpecified(edgex_s3exception): pass

# ============================================================================
# Error End

# buffer hash computation

class edgex_hasher(object):
    """Helper wrapper around hashlib digests.

    Adaptation of hashlib-based hash functions that return unicode-encoded
    hex- and base64-digest strings regardless of the underlying return type.

    NOTE(review): requires the ``hashlib`` and ``base64`` modules, which are
    not imported in the visible module header — confirm they are imported at
    file top, otherwise md5()/sha256()/base64digest() raise NameError.
    """

    def __init__(self, data, h):
        # data: bytes or str seed (None is treated as empty); h: a hashlib
        # constructor such as hashlib.md5.
        if data is None:
            data = b''
        if isinstance(data, str):
            data = data.encode('utf-8')
        self.h = h(data)

    @classmethod
    def md5(cls, data=''):
        # Alternate constructor: MD5 hasher seeded with data.
        return cls(data, hashlib.md5)

    @classmethod
    def sha256(cls, data=''):
        # Alternate constructor: SHA-256 hasher seeded with data.
        return cls(data, hashlib.sha256)

    def update(self, data):
        # Feed more data into the running digest; str is UTF-8 encoded first.
        if isinstance(data, str):
            data = data.encode('utf-8')
        self.h.update(data)

    def hexdigest(self):
        # Hex digest as str (the isinstance guard is defensive; CPython's
        # hexdigest() already returns str).
        r = self.h.hexdigest()
        return r.decode('utf-8') if isinstance(r, bytes) else r

    def base64digest(self):
        # Base64 digest as str (b64encode returns bytes, hence the decode).
        r = base64.b64encode(self.h.digest())
        return r.decode('utf-8') if isinstance(r, bytes) else r

# ============================================================================
# logger

# Maps the numeric debug_level used throughout this module to stdlib
# logging levels (0 = most verbose).
logger_level = { 0 : logging.DEBUG, 1 : logging.INFO, 2 : logging.WARNING, 3 : logging.ERROR, 4 : logging.CRITICAL }

class edgex_logger:
    """Thin wrapper configuring file + stdout logging for edgex_access."""

    def __init__(self, debug_level, logFile):
        # NOTE(review): levels >= 3 silently disable all configuration, so
        # log_error/log_critical then use the root logger's defaults.
        if debug_level >= 3:
            return
        #file_format='[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s'
        #file_format='%(levelname)s:%(message)s'
        file_format='%(levelname)s:{%(filename)s:%(lineno)d}: %(message)s'
        log_level = logger_level[debug_level]
        logging.basicConfig(format=file_format,
                            level=log_level,
                            filename=logFile,
                            filemode='a')
        # Mirror ERROR-and-above to stdout in addition to the log file.
        h = logging.StreamHandler(sys.stdout)
        h.flush = sys.stdout.flush
        h.setLevel(logging.ERROR)
        self.logger = logging.getLogger(ACCESS_LOG_NAME)
        self.logger.addHandler(h)

    def log_print(self, logData):
        # Plain print, bypassing the logging framework (used for CLI output).
        print(logData)

    def log_info(self, logData):
        logging.info(logData)

    def log_debug(self, logData):
        logging.debug(logData)

    def log_error(self, logData):
        logging.error(logData)

    def log_critical(self, logData):
        logging.critical(logData)

    def log_warning(self, logData):
        logging.warning(logData)

class edgex_store:
    """A single configured backing store: either an S3 endpoint or local FS."""

    def __init__(self):
        self.islocal_store = False

    def fromjson(self, cfg):
        """Populate this store from one JSON config entry.

        Expects keys NAME, STORE_TYPE, BUCKET, TOKEN, TAG and, for S3,
        ACCESS/SECRET/ENDPOINT/REGION/SSL.  Raises InvalidStore for any
        STORE_TYPE other than "S3" or "FS".
        """
        self.name = cfg['NAME']
        self.type = cfg['STORE_TYPE']
        if self.type == "FS":
            self.cwd = os.getcwd()
        self.bucket = cfg['BUCKET']
        self.token = cfg['TOKEN']
        self.tag = cfg['TAG']
        if self.type == "S3":
            self.access = cfg['ACCESS']
            self.secret = cfg['SECRET']
            self.endpoint = cfg['ENDPOINT']
            self.region = cfg['REGION']
            self.use_ssl = cfg['SSL']
        elif self.type == "FS":
            self.islocal_store = True
        else:
            raise InvalidStore(self.type)

    def islocal(self):
        # True only when fromjson() saw STORE_TYPE == "FS"; create() does
        # not set islocal_store -- NOTE(review): possible inconsistency.
        return self.islocal_store

    def create(self, name, store_type, bucket, access=None, secret=None, endpoint=None, region=None, token=None, tag=None):
        """Populate this store programmatically (used for the implicit local
        "file" store); all credential fields default to None."""
        self.name = name
        self.type = store_type
        if self.type == "FS":
            self.cwd = os.getcwd()
        self.access = access
        self.secret = secret
        self.endpoint = endpoint
        self.region = region
        self.bucket = bucket
        self.token = token
        self.tag = tag

    def get_name(self):
        return self.name

    def get_type(self):
        return self.type

    def default_bucket(self):
        return self.bucket

    def list_buckets(self):
        # TODO
        pass

class edgex_config:
    """Parsed edgex_access configuration: a dict of named edgex_store objects
    plus top-level PRIMARY/DEBUG/SYNCIO settings."""

    def __init__(self, cfg_filedata, elog):
        # cfg_filedata: JSON text; elog: an edgex_logger instance.
        # NOTE(review): the `if self.cfg_data[...]` checks raise KeyError
        # (not skip) when PRIMARY/DEBUG/SYNCIO are absent from the JSON.
        self.cfg_data = json.loads(cfg_filedata)
        self.store_dict = {}
        stores = self.cfg_data['stores']
        for x in stores:
            self.store_dict[ x['NAME'] ] = edgex_store()
            self.store_dict[ x['NAME'] ].fromjson(x)
        if self.cfg_data['PRIMARY']:
            self.primary = self.cfg_data['PRIMARY']
        if self.cfg_data['DEBUG']:
            self.debug_level = self.cfg_data['DEBUG']
        if self.cfg_data['SYNCIO']:
            self.syncio = self.cfg_data['SYNCIO']
        self.elog = elog

    # NOTE(review): dead code -- this definition is shadowed by the second
    # get_primary_store() below, and its bare `raise` (no active exception)
    # would itself be a RuntimeError if ever called.  Candidate for removal.
    def get_primary_store(self):
        if self.cfg_data['PRIMARY'] is None:
            raise
        return self.cfg_data['PRIMARY']

    def show_stores(self):
        # Print "name<TAB>type<TAB>bucket" for every configured store.
        for k in self.store_dict:
            store = self.store_dict[k]
            self.elog.log_print(store.name + "\t" + store.get_type() + "\t" + store.default_bucket())

    def get_stores(self):
        # Return the list of configured store names.
        ret = []
        for k in self.store_dict:
            store = self.store_dict[k]
            ret.append(store.name)
        return ret

    def get_store(self, store_name):
        # Return the named store, or None if unknown.
        # NOTE(review): bare except also hides unrelated errors; KeyError
        # would be the precise exception to catch here.
        try:
            store = self.store_dict[store_name]
            return store
        except:
            return None

    def get_primary_store(self):
        # Effective definition (shadows the earlier one): returns the
        # edgex_store object named by PRIMARY, or None.
        if self.primary:
            return self.store_dict[self.primary]
        else:
            return None

    # TODO: revise
    def get_local_pwd(self):
        # Build (and register) an FS store rooted at the current directory.
        store = edgex_store()
        store.create("file", "FS", os.getcwd())
        self.store_dict["file"] = store
        return store

    def show_all(self):
        # (body continues on the following original lines)
        self.elog.log_print("primary:" + "\t" + self.primary)
        self.elog.log_print("debug_level: " + "\t" + str(self.debug_level))
        self.elog.log_print("syncio :" + "\t" + self.syncio)
        self.elog.log_print("stores:")
        self.show_stores()

class edgex_object:
    """One named object (or folder) addressed as "store://bucket/key".

    Parses `name` into a store, bucket and object key, or maps it onto the
    implicit local FS store when `as_is` is True.
    """

    def __init__(self, cfg, elog, name, store=None, as_is=False):
        # cfg: edgex_config; elog: edgex_logger; name: "store://bucket/key"
        # style path; store: explicit edgex_store override; as_is: treat
        # `name` as a plain local path relative to the current directory.
        self.oname = name
        self.as_is = as_is
        self.cfg = cfg
        # time for the creation of this in-memory object
        t = datetime.utcnow()
        self.amzdate = t.strftime('%Y%m%dT%H%M%SZ')
        self.datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope
        self.elog = elog
        # contains the databuffer on one task only. .. not the entire content-length.
        self.databuf = None
        self.obj_name = ""
        self.bucket_name = ""
        # used only to pass around in callbacks etc
        self.arg = None
        self.ctx = None
        # as_is objects are fully initialized by localpath(); nothing to parse.
        if self.localpath() is True:
            return
        if store is None:
            is_store = False
        else:
            is_store = True
        # first we figure out the stores, parse the names etc
        sname = self.oname.split(":")
        if len(sname) == 2:
            # "store:...path" form -- resolve the named store.
            if is_store is False:
                store = self.cfg.get_store(sname[0])
                if store is None:
                    raise InvalidStore("Store not defined: " + sname[0])
                else:
                    is_store=True
            if sname[1].startswith('//'):
                bpath = re.sub('//', '/',sname[1])
            else:
                bpath = self.oname
        elif len(sname) == 1:
            # bare path with no "store:" prefix.
            if is_store is False:
                store = self.cfg.get_store(sname[0])
                if store is None:
                    raise InvalidStore("Store not defined: " + str(sname[0]))
                else:
                    is_store=True
            if sname[0].startswith('//'):
                bpath = re.sub('//', '/',sname[0])
            else:
                bpath = self.oname
        else:
            raise InvalidStore("Store not defined: " + sname[0])
        #
        if is_store is False:
            raise InvalidStore("No store defined: " + name)
        self.store = store
        self.isfolder = False
        if self.store.get_type() == "S3":
            # S3 has no real directories; a trailing slash marks a folder.
            if self.oname.endswith("/"):
                self.isfolder = True
        if self.store.get_type() == "FS":
            if os.path.isdir(self.oname):
                self.isfolder = True
                if not self.oname.endswith("/"):
                    self.oname += "/"
            if os.path.isfile(self.oname):
                # NOTE(review): this assignment shadows the islocal() method
                # defined below on this instance -- likely a bug.
                self.islocal = True
            # sometimes isdir does not good enough
            # so added below
            if self.oname.endswith("/"):
                self.isfolder = True
        # now initialize the bucket, name itself
        bname = bpath.split("/")
        if len(bname) == 2:
            self.bucket_name = bname[1]
            self.obj_name = ""
        elif len(bname) > 2:
            self.bucket_name = bname[1]
            self.obj_name = "/".join(bname[2:])
        else:
            raise InvalidStore(name + ": Path not specified")
        if len(self.bucket_name) == 0 and (self.store.get_type() != "FS"):
            self.elog.log_debug("No Bucket name")
        self.elog.log_debug("OBJECT : " + self.pathname())
        # NOTE(review): unreachable fallback -- is_store False already raised
        # above, so self.store cannot be None here.
        if (self.store is None) and (self.as_is is False):
            self.store = self.cfg.get_primary_store()
            self.bucket_name = self.store.default_bucket()

    def localpath(self):
        """When as_is is set, bind this object to the implicit local FS store
        (bucket = cwd) and return True; otherwise return False."""
        if self.as_is is True:
            self.obj_name = self.oname
            self.bucket_name = os.getcwd()
            self.store = self.cfg.get_local_pwd()
            self.isfolder = True if os.path.isdir(self.oname) else False
            return True
        else:
            return False

    # Properties of the object
    def get_store(self):
        return self.store

    def store_type(self):
        return self.store.type

    def islocal(self):
        # True for FS-backed objects (but see the shadowing note in __init__).
        if self.store.type == "FS":
            return True
        else:
            return False

    def bucketname(self):
        if self.bucket_name is None:
            return None
        else:
            return self.bucket_name

    def objname(self):
        if self.obj_name is None:
            return None
        else:
            return self.obj_name

    # return only the storename://bucketname of this object
    def basename(self):
        if self.store.get_name() != "local":
            fpath = self.store.get_name() + "://" + self.bucket_name + "/"
        else:
            fpath = self.store.get_name() + ":/" + self.bucket_name + "/"
        return fpath

    def stat(self, create=False):
        """FS only: report whether the path exists; optionally mkdir folders.

        NOTE(review): when the mkdir branch is taken this method falls
        through and returns None (not True/False); and the error path below
        misspells `self` as `sef`, so it raises NameError instead of
        InvalidStore -- both look like genuine bugs.
        """
        if self.store_type() == "FS":
            file_found = os.path.exists(self.pathname())
            if (file_found is False) and (create is True) and self.obj_name.endswith("/"):
                self.elog.log_info("mkdir " + self.pathname())
                os.makedirs(self.pathname())
            else:
                return file_found
        else:
            self.elog.log_error("Error: No stat on store_type: " + self.store_type())
            raise InvalidStore(str(sef.store_type()))

    def pathname(self):
        # Full addressable path: "<bucket>/<key>" for FS,
        # "<endpoint>/<bucket>/<key>" for S3.
        if self.store_type() == "FS":
            fpath = self.bucket_name + "/" + self.obj_name
        elif self.store_type() == "S3":
            fpath = self.store.endpoint + "/" + self.bucket_name + "/" + self.obj_name
        else:
            self.elog.log_error("Error: store_type: " + self.store_type())
            raise InvalidStore(str(self.store_type()))
        return fpath

    def auth(self):
        # NOTE(review): AWS4Auth is not imported in the visible module header
        # (presumably requests_aws4auth.AWS4Auth) -- confirm the import exists,
        # otherwise this raises NameError.
        auth = AWS4Auth(self.store.access, self.store.secret, self.store.region, 's3')
        return auth

    # return only the name
    def addchild(self, child):
        # (method body continues past the end of this view)
        if self.store_type() == "FS":
            objname = "//" + str(self.pathname()) + child
<filename>reddit_user.py
# -*- coding: utf-8 -*-
# NOTE: Python 2 module (urlparse import, integer division in humanize_days).
import csv
import datetime
import re
import json
import time
import sys
import calendar

from collections import Counter
from itertools import groupby
from urlparse import urlparse

import requests
import pytz

from subreddits import subreddits_dict, ignore_text_subs, default_subs
from text_parser import TextParser

# Shared NLP helper used by the processing methods below.
parser = TextParser()

class UserNotFoundError(Exception):
    pass

class NoDataError(Exception):
    pass

class Util:
    """
    Contains a collection of common utility methods.
    """

    @staticmethod
    def sanitize_text(text):
        """
        Returns text after removing unnecessary parts:
        quoted lines, Markdown links, quoted/parenthesized spans,
        ellipses, URLs, and words longer than 1024 characters.
        """
        MAX_WORD_LENGTH = 1024

        # Drop quoted lines (reddit quotes start with "&gt;").
        _text = " ".join([
            l for l in text.strip().split("\n") if (
                not l.strip().startswith("&gt;")
            )
        ])
        substitutions = [
            (r"\[(.*?)\]\((.*?)\)", r""),    # Remove links from Markdown
            (r"[\"](.*?)[\"]", r""),         # Remove text within quotes
            # NOTE(review): the "\ '" escape below looks suspicious -- it
            # matches a literal space before the closing quote; confirm the
            # intended single-quote pattern.
            (r" \'(.*?)\ '", r""),           # Remove text within quotes
            (r"\.+", r". "),                 # Remove ellipses
            (r"\(.*?\)", r""),               # Remove text within round brackets
            (r"&amp;", r"&"),                # Decode HTML entities
            (r"http.?:\S+\b", r" ")          # Remove URLs
        ]
        for pattern, replacement in substitutions:
            _text = re.sub(pattern, replacement, _text, flags=re.I)

        # Remove very long words
        _text = " ".join(
            [word for word in _text.split(" ") if len(word) <= MAX_WORD_LENGTH]
        )
        return _text

    @staticmethod
    def coalesce(l):
        """
        Given a list, returns the last element that is not equal to "generic".
        Returns "" when no such element exists.
        """
        l = [x for x in l if x.lower() != "generic"]
        return next(iter(l[::-1]), "")

    @staticmethod
    def humanize_days(days):
        """
        Return text with years, months and days given number of days.
        NOTE: relies on Python 2 integer division (days/365, .../31).
        """
        y = days/365 if days > 365 else 0
        m = (days - y*365)/31 if days > 30 else 0
        d = (days - m*31 - y*365)
        yy = str(y) + " year" if y else ""
        if y > 1:
            yy += "s"
        mm = str(m) + " month" if m else ""
        if m > 1:
            mm += "s"
        dd = str(d) + " day"
        if d>1 or d==0:
            dd += "s"
        return (yy + " " + mm + " " + dd).strip()

    @staticmethod
    def scale(val, src, dst):
        """
        Scale the given value from the scale of src to the scale of dst.
        src and dst are (min, max) pairs.
        """
        return ((val - src[0])/(src[1] - src[0])) * (dst[1]-dst[0]) + dst[0]

# Base class for comments and submissions
class Post(object):
    """
    A class for "posts" - a post can either be a submission or a comment.
    """

    def __init__(
        self, id, subreddit, text, created_utc, score, permalink, gilded
    ):
        # Post id
        self.id = id
        # Subreddit in which this comment or submission was posted
        self.subreddit = subreddit
        # For comments, the comment body and for submissions, the self-text
        self.text = text
        # UTC timestamp when post was created
        self.created_utc = created_utc
        # Post score
        self.score = score
        # Permalink to post
        self.permalink = permalink
        # Gilded
        self.gilded = gilded

class Comment(Post):
    """
    A class for comments derived from Post.
    """

    def __init__(
        self, id, subreddit, text, created_utc, score, permalink,
        submission_id, edited, top_level, gilded
    ):
        super(Comment, self).__init__(
            id, subreddit, text, created_utc, score, permalink, gilded
        )
        # Link ID where comment was posted
        self.submission_id = submission_id
        # Edited flag
        self.edited = edited
        # Top-level flag
        self.top_level = top_level

class Submission(Post):
    """
    A class for submissions derived from Post.
    """

    def __init__(
        self, id, subreddit, text, created_utc, score, permalink, url,
        title, is_self, gilded, domain
    ):
        super(Submission, self).__init__(
            id, subreddit, text, created_utc, score, permalink, gilded
        )
        # Submission link URL
        self.url = url
        # Submission title
        self.title = title
        # Self post?
        self.is_self = is_self
        # Domain
        self.domain = domain

class RedditUser:
    """
    Models a redditor object.
    Contains methods for processing comments and submissions.
    """

    # If user has posted in a sub 3 times or more, they are
    # probably interested in the topic.
    MIN_THRESHOLD = 3
    MIN_THRESHOLD_FOR_DEFAULT = 10

    # User-Agent sent with every reddit API request.
    HEADERS = {
        'User-Agent': 'Sherlock v0.1 by /u/orionmelt'
    }

    # Domain/extension lists used to classify submissions by media type.
    IMAGE_DOMAINS = ["imgur.com", "flickr.com"]
    VIDEO_DOMAINS = ["youtube.com", "youtu.be", "vimeo.com", "liveleak.com"]
    IMAGE_EXTENSIONS = ["jpg", "png", "gif", "bmp"]

    def __init__(self, username, json_data=None):
        # username: redditor name; json_data: optional pre-fetched JSON dump
        # (about + comments + submissions).  Without json_data, the data is
        # fetched live from reddit (network I/O) and a missing user raises
        # UserNotFoundError.
        # Populate username and about data
        self.username = username
        self.comments = []
        self.submissions = []

        if not json_data:
            # Retrieve about
            self.about = self.get_about()
            if not self.about:
                raise UserNotFoundError
            # Retrieve comments and submissions
            self.comments = self.get_comments()
            self.submissions = self.get_submissions()
        else:
            # Rebuild the user entirely from the supplied JSON dump.
            data = json.loads(json_data)
            self.about = {
                "created_utc" : datetime.datetime.fromtimestamp(
                    data["about"]["created_utc"], tz=pytz.utc
                ),
                "link_karma" : data["about"]["link_karma"],
                "comment_karma" : data["about"]["comment_karma"],
                "name" : data["about"]["name"],
                "reddit_id" : data["about"]["id"],
                "is_mod" : data["about"]["is_mod"]
            }
            for c in data["comments"]:
                self.comments.append(
                    Comment(
                        id=c["id"],
                        subreddit=c["subreddit"],
                        text=c["text"],
                        created_utc=c["created_utc"],
                        score=c["score"],
                        permalink=c["permalink"],
                        submission_id=c["submission_id"],
                        edited=c["edited"],
                        top_level=c["top_level"],
                        gilded=c["gilded"]
                    )
                )
            for s in data["submissions"]:
                self.submissions.append(
                    Submission(
                        id=s["id"],
                        subreddit=s["subreddit"],
                        text=s["text"],
                        created_utc=s["created_utc"],
                        score=s["score"],
                        permalink=s["permalink"],
                        url=s["url"],
                        title=s["title"],
                        is_self=s["is_self"],
                        gilded=s["gilded"],
                        domain=s["domain"]
                    )
                )

        # Flatten the "about" dict into attributes used everywhere else.
        self.username = self.about["name"]
        self.signup_date = self.about["created_utc"]
        self.link_karma = self.about["link_karma"]
        self.comment_karma = self.about["comment_karma"]
        self.reddit_id = self.about["reddit_id"]
        self.is_mod = self.about["is_mod"]

        # Initialize other properties
        self.today = datetime.datetime.now(tz=pytz.utc).date()
        start = self.signup_date.date()
        self.age_in_days = (self.today - start).days
        self.first_post_date = None
        self.earliest_comment = None
        self.latest_comment = None
        self.best_comment = None
        self.worst_comment = None
        self.earliest_submission = None
        self.latest_submission = None
        self.best_submission = None
        self.worst_submission = None

        # Aggregated activity metrics, filled in by process().
        self.metrics = {
            "date" : [],
            "weekday" : [],
            "hour" : [],
            "subreddit" : [],
            "heatmap" : [],
            "recent_karma" : [],
            "recent_posts" : []
        }
        # Tree of submissions grouped by media type (for visualization).
        self.submissions_by_type = {
            "name" : "All",
            "children" : [
                {
                    "name" : "Self",
                    "children" : []
                },
                {
                    "name" : "Image",
                    "children" : []
                },
                {
                    "name" : "Video",
                    "children" : []
                },
                {
                    "name" : "Other",
                    "children" : []
                }
            ]
        }
        # One bucket per (year, month) of account lifetime, oldest first.
        self.metrics["date"] = [
            {
                "date" : (year, month),
                "comments" : 0,
                "submissions": 0,
                "comment_karma": 0,
                "submission_karma": 0
            } for (year, month) in sorted(
                list(
                    set([
                        (
                            (self.today - datetime.timedelta(days=x)).year,
                            (self.today - datetime.timedelta(days=x)).month
                        ) for x in range(0, (self.today - start).days)
                    ])
                )
            )
        ]
        # 61-day x 24-hour activity heatmap and recent-activity counters.
        self.metrics["heatmap"] = [0] * 24 * 61
        self.metrics["recent_karma"] = [0] * 61
        self.metrics["recent_posts"] = [0] * 61
        self.metrics["hour"] = [
            {
                "hour": hour,
                "comments": 0,
                "submissions": 0,
                "comment_karma": 0,
                "submission_karma": 0
            } for hour in range(0, 24)
        ]
        self.metrics["weekday"] = [
            {
                "weekday": weekday,
                "comments": 0,
                "submissions": 0,
                "comment_karma": 0,
                "submission_karma": 0
            } for weekday in range(0, 7)
        ]

        # Extracted profile facts, filled in by process().
        self.genders = []
        self.orientations = []
        self.relationship_partners = []
        # Data that we are reasonably sure that *are* names of places.
        self.places_lived = []
        # Data that looks like it could be a place, but we're not sure.
        self.places_lived_extra = []
        # Data that we are reasonably sure that *are* names of places.
        self.places_grew_up = []
        # Data that looks like it could be a place, but we're not sure.
        self.places_grew_up_extra = []
        self.family_members = []
        self.pets = []
        self.attributes = []
        self.attributes_extra = []
        self.possessions = []
        self.possessions_extra = []
        self.actions = []
        self.actions_extra = []
        self.favorites = []
        self.sentiments = []
        self.derived_attributes = {
            "family_members" : [],
            "gadget" : [],
            "gender" : [],
            "locations" : [],
            "orientation" : [],
            "physical_characteristics" : [],
            "political_view" : [],
            "possessions" : [],
            "religion and spirituality" : []
        }
        self.corpus = ""
        self.commented_dates = []
        self.submitted_dates = []
        self.lurk_period = None
        self.comments_gilded = 0
        self.submissions_gilded = 0

        # Run the full analysis pipeline immediately on construction.
        self.process()

    def __str__(self):
        return str(self.results())

    def get_about(self):
        """
        Returns basic data about redditor (or None when reddit reports 404).
        Performs a blocking HTTP GET against the user's about.json endpoint.
        """
        url = r"http://www.reddit.com/user/%s/about.json" % self.username
        response = requests.get(url, headers=self.HEADERS)
        response_json = response.json()
        if "error" in response_json and response_json["error"] == 404:
            return None
        about = {
            "created_utc" : datetime.datetime.fromtimestamp(
                response_json["data"]["created_utc"], tz=pytz.utc
            ),
            "link_karma" : response_json["data"]["link_karma"],
            "comment_karma" : response_json["data"]["comment_karma"],
            "name" : response_json["data"]["name"],
            "reddit_id" : response_json["data"]["id"],
            "is_mod" : response_json["data"]["is_mod"]
        }
        return about

    def get_comments(self, limit=None):
        """
        Returns a list of redditor's comments.
""" comments = [] more_comments = True after = None base_url = r"http://www.reddit.com/user/%s/comments/.json?limit=100" \ % self.username url = base_url while more_comments: response = requests.get(url, headers=self.HEADERS) response_json = response.json() # TODO - Error handling for user not found (404) and # rate limiting (429) errors for child in response_json["data"]["children"]: id = child["data"]["id"].encode("ascii", "ignore") subreddit = child["data"]["subreddit"].\ encode("ascii", "ignore") text = child["data"]["body"].encode("ascii", "ignore") created_utc = child["data"]["created_utc"] score = child["data"]["score"] submission_id = child["data"]["link_id"].\ encode("ascii", "ignore").lower()[3:] edited =
item in self.celldefs if item["name"] == cellname ) except Exception: celldef = None else: newpad["iolib"] = celldef["iolib"] newpad["width"] = celldef["width"] newpad["height"] = celldef["height"] newpad["class"] = celldef["class"] newpad["subclass"] = celldef["subclass"] newpad["x"] = float(placex) / float(units) newpad["y"] = float(placey) / float(units) newpad["o"] = placeo # Adjust bounds if celldef: if newpad["x"] < self.llx: self.llx = newpad["x"] if newpad["y"] < self.lly: self.lly = newpad["y"] if newpad["o"] == "N" or newpad["o"] == "S": padurx = newpad["x"] + celldef["width"] padury = newpad["y"] + celldef["height"] else: padurx = newpad["x"] + celldef["height"] padury = newpad["y"] + celldef["width"] if padurx > self.urx: self.urx = padurx if padury > self.ury: self.ury = padury # First four entries in the DEF file are corners # padring puts the lower left corner at zero, so # use the zero coordinates to determine which pads # are which. Note that padring assumes the corner # pad is drawn in the SW corner position! if corners < 4: if newpad["x"] == 0 and newpad["y"] == 0: SWpad.append(newpad) elif newpad["x"] == 0: NWpad.append(newpad) elif newpad["y"] == 0: SEpad.append(newpad) else: NEpad.append(newpad) corners += 1 else: # Place according to orientation. If orientation # is not standard, be sure to make it standard! placeo = self.rotate_orientation(placeo) if placeo == "N": Spadlist.append(newpad) elif placeo == "E": Wpadlist.append(newpad) elif placeo == "S": Npadlist.append(newpad) else: Epadlist.append(newpad) elif "END COMPONENTS" in line: in_components = False elif "COMPONENTS" in line: in_components = True self.Npads = Npadlist self.Wpads = Wpadlist self.Spads = Spadlist self.Epads = Epadlist self.NWpad = NWpad self.NEpad = NEpad self.SWpad = SWpad self.SEpad = SEpad # The padframe has its own DEF file from the padring app, but the core # does not. The core needs to be floorplanned in a very similar manner. 
# This will be done by searching for a DEF file of the project top-level # layout. If none exists, it is created by generating it from the layout. # If the top-level layout does not exist, then all core cells are placed # at the origin, and the origin placed at the padframe inside corner. mag_path = self.projectpath + "/mag" if not os.path.isfile(mag_path + "/" + self.project + ".def"): if os.path.isfile(mag_path + "/" + self.project + ".mag"): # Create a DEF file from the layout with open(mag_path + "/pfg_write_def.tcl", "w") as ofile: print("drc off", file=ofile) print("box 0 0 0 0", file=ofile) print("load " + self.project, file=ofile) print("def write", file=ofile) print("quit", file=ofile) magicexec = self.magic_path if self.magic_path else "magic" mproc = subprocess.Popen( [magicexec, "-dnull", "-noconsole", "pfg_write_def.tcl"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=mag_path, universal_newlines=True, ) self.watch(mproc) os.remove(mag_path + "/pfg_write_def.tcl") elif not os.path.isfile(mag_path + "/core.def"): # With no other information available, copy the corecells # (from the verilog file) into the coregroup list. # Position all core cells starting at the padframe top left # inside corner, and arranging in rows without overlapping. # Note that no attempt is made to organize the cells or # otherwise produce an efficient layout. Any dimension larger # than the current padframe overruns to the right or bottom. 
if self.SWpad != []: corellx = SWpad[0]["x"] + SWpad[0]["width"] + self.margin corelly = SWpad[0]["y"] + SWpad[0]["height"] + self.margin else: corellx = Wpadlist[0]["x"] + Wpadlist[0]["height"] + self.margin corelly = Spadlist[0]["x"] + Spadlist[0]["height"] + self.margin if self.NEpad != []: coreurx = NEpad[0]["x"] - self.margin coreury = NEpad[0]["y"] - self.margin else: coreurx = Epadlist[0]["x"] - self.margin coreury = Npadlist[0]["x"] - self.margin locllx = corellx testllx = corellx loclly = corelly testlly = corelly nextlly = corelly for cell in self.corecells: testllx = locllx + cell["width"] if testllx > coreurx: locllx = corellx corelly = nextlly loclly = nextlly newcore = cell newcore["x"] = locllx newcore["y"] = loclly newcore["o"] = "N" locllx += cell["width"] + self.margin testlly = corelly + cell["height"] + self.margin if testlly > nextlly: nextlly = testlly coregroup.append(newcore) self.coregroup = coregroup if os.path.isfile(mag_path + "/" + self.project + ".def"): # Read the top-level DEF, and use it to position the core cells. self.print("Reading the top-level cell DEF for core cell placement.") units = 1000 in_components = False with open(mag_path + "/" + self.project + ".def", "r") as ifile: deflines = ifile.read().splitlines() for line in deflines: if "UNITS DISTANCE MICRONS" in line: units = line.split()[3] elif in_components: lparse = line.split() if lparse[0] == "-": instname = lparse[1] # NOTE: Magic should not drop the entire path to the # cell for the cellname; this needs to be fixed! To # work around it, remove any path components. 
cellpath = lparse[2] cellname = os.path.split(cellpath)[1] elif lparse[0] == "+": if lparse[1] == "PLACED": placex = lparse[3] placey = lparse[4] placeo = lparse[6] newcore = {} newcore["name"] = instname newcore["cell"] = cellname try: celldef = next( item for item in self.coredefs if item["name"] == cellname ) except Exception: celldef = None else: newcore["celllib"] = celldef["celllib"] newcore["width"] = celldef["width"] newcore["height"] = celldef["height"] newcore["class"] = celldef["class"] newcore["subclass"] = celldef["subclass"] newcore["x"] = float(placex) / float(units) newcore["y"] = float(placey) / float(units) newcore["o"] = placeo coregroup.append(newcore) elif "END COMPONENTS" in line: in_components = False elif "COMPONENTS" in line: in_components = True self.coregroup = coregroup elif os.path.isfile(mag_path + "/core.def"): # No DEF or .mag file, so fallback position is the last core.def # file generated by this script. self.read_core_def(precheck=precheck) return True # Read placement information from the "padframe.def" file and rotate # all cells according to self.pad_rotation. This accounts for the # problem that the default orientation of pads is arbitrarily defined # by the foundry, while padring assumes that the corner pad is drawn # in the lower-left position and other pads are drawn with the pad at # the bottom and the buses at the top. 
    def rotate_pads_in_def(self):
        """Rewrite mag/padframe.def, marking every PLACED component FIXED.

        Intended to compensate for the foundry's arbitrary default pad
        orientation by rotating each COMPONENTS entry by self.pad_rotation;
        a rotation of 0 means the drawn orientation already matches what
        padring expects, so nothing needs to be done.
        """
        if self.pad_rotation == 0:
            return

        self.print("Rotating pads in padframe DEF file.")
        mag_path = self.projectpath + "/mag"
        if not os.path.isfile(mag_path + "/padframe.def"):
            self.print("No file padframe.def: Cannot modify pad rotations.")
            return

        deflines = []
        with open(mag_path + "/padframe.def", "r") as ifile:
            deflines = ifile.read().splitlines()

        outlines = []
        in_components = False
        for line in deflines:
            if in_components:
                lparse = line.split()
                if lparse[0] == "+":
                    if lparse[1] == "PLACED":
                        # Pin the placement so downstream tools keep it.
                        lparse[1] = "FIXED"
                        # NOTE(review): the next two statements write the
                        # orientation back unchanged -- self.pad_rotation is
                        # never applied to lparse[6], so no rotation actually
                        # happens here despite the function's name.  The
                        # rotation mapping appears to be missing; TODO confirm
                        # intended behavior before relying on this.
                        neworient = lparse[6]
                        lparse[6] = neworient
                        line = " ".join(lparse)
            elif "END COMPONENTS" in line:
                in_components = False
            elif "COMPONENTS" in line:
                in_components = True
            # Every input line (possibly rewritten) is copied to the output.
            outlines.append(line)

        # Write the modified DEF back in place.
        with open(mag_path + "/padframe.def", "w") as ofile:
            for line in outlines:
                print(line, file=ofile)

    # Read placement information from the DEF file for the core (created by
    # a previous run of this script)
    def read_core_def(self, precheck=False):
        """Load core-cell placement from mag/core.def into self.coregroup.

        Args:
            precheck: when True, suppress the missing-file message.

        Returns:
            False if mag/core.def does not exist; True after parsing.
        """
        self.print("Reading placement information from core DEF file.")
        mag_path = self.projectpath + "/mag"
        if not os.path.isfile(mag_path + "/core.def"):
            if not precheck:
                self.print("No file core.def: core placement was not generated.")
            return False

        # Very simple DEF file parsing, similar to the padframe.def reading
        # routine above.
units = 1000 in_components = False coregroup = [] with open(mag_path + "/core.def", "r") as ifile: deflines = ifile.read().splitlines() for line in deflines: if "UNITS DISTANCE MICRONS" in line: units = line.split()[3] elif in_components: lparse = line.split() if lparse[0] == "-": instname = lparse[1] cellname = lparse[2] elif lparse[0] == "+": if lparse[1] == "PLACED": placex = lparse[3] placey = lparse[4] placeo = lparse[6] newcore = {} newcore["name"] = instname newcore["cell"] = cellname try: celldef = next( item for item in self.coredefs if item["name"] == cellname ) except Exception: celldef = None else: newcore["celllib"] = celldef["celllib"] newcore["width"] = celldef["width"] newcore["height"] = celldef["height"] newcore["class"] = celldef["class"] newcore["subclass"] = celldef["subclass"] newcore["x"] = float(placex) / float(units) newcore["y"] = float(placey) / float(units) newcore["o"] = placeo coregroup.append(newcore) elif "END COMPONENTS" in line: in_components = False elif "COMPONENTS" in line: in_components = True self.coregroup = coregroup return True # Save the layout to a Magic database file (to be completed) def save(self): self.print("Saving results in a magic layout database.") # Generate a list of (unique) LEF libraries for all padframe and core cells leflist = [] for
x1 x2 print ('OP_2DUP') val1 = stack[-2] # x1 val2 = stack[-1] # x2 stack.append(val1) # x1 x2 x1 stack.append(val2) # x1 x2 x1 x2 elif code == OP_3DUP: # x1 x2 x3 print ('OP_3DUP') val1 = stack[-3] # x1 val2 = stack[-2] # x2 val2 = stack[-1] # x3 stack.append(val1) # x1 x2 x3 x1 stack.append(val2) # x1 x2 x3 x1 x2 stack.append(val3) # x1 x2 x3 x1 x2 x3 elif code == OP_2OVER: # x1 x2 x3 x4 print ('OP_2OVER') val1 = stack[-4] # x1 val2 = stack[-3] # x2 stack.append(val1) # x1 x2 x3 x4 x1 stack.append(val2) # x1 x2 x3 x4 x1 x2 elif code == OP_2ROT: # x1 x2 x3 x4 x5 x6 print ('OP_2ROT') val1 = stack.pop(-6) # x1 val2 = stack.pop(-5) # x2 stack.append(val1) # x3 x4 x5 x6 x1 stack.append(val2) # x3 x4 x5 x6 x1 x2 elif code == OP_2SWAP: # x1 x2 x3 x4 print ('OP_2SWAP') val1 = stack.pop(-4) # x1 val2 = stack.pop(-3) # x2 stack.append(val1) # x3 x4 x1 stack.append(val2) # x3 x4 x1 x2 elif code == OP_SIZE: print ('OP_SIZE') byte_string = stack[-1] stack.append(len(byte_string)) elif code == OP_EQUAL: # x1 x2 print ('OP_EQUAL') val1 = stack.pop(-2) # x1 val2 = stack.pop(-1) # x2 if val1 == val2: stack.append(1) else: stack.append(0) elif code == OP_EQUALVERIFY: # x1 x2 print ('OP_EQUALVERIFY') val1 = stack.pop(-2) # x1 val2 = stack.pop(-1) # x2 if val1 != val2: stack.append(0) return stack elif code == OP_1ADD: print ('OP_1ADD') val = stack.pop() stack.append(val + 1) elif code == OP_1SUB: print ('OP_1SUB') val = stack.pop() stack.append(val - 1) elif code == OP_NEGATE: print ('OP_NEGATE') val = stack.pop() stack.append(val * -1) elif code == OP_ABS: print ('OP_ABS') val = stack.pop() stack.append(abs(val)) elif code == OP_NOT: print ('OP_NOT') val = stack.pop() stack.append(int(not val)) elif code == OP_0NOTEQUAL: print ('OP_0NOTEQUAL') val = stack.pop() stack.append(int(bool(val))) elif code == OP_ADD: print ('OP_ADD') val1 = stack.pop() val2 = stack.pop() stack.append(val1 + val2) elif code == OP_SUB: # a b print ('OP_SUB') val2 = stack.pop() # b val1 = stack.pop() # a 
stack.append(val1 - val2) # a - b elif code == OP_BOOLAND: print ('OP_BOOLAND') val2 = stack.pop() val1 = stack.pop() stack.append(bool(val1 and val2)) elif code == OP_BOOLOR: print ('OP_BOOLOR') val2 = stack.pop() val1 = stack.pop() stack.append(bool(val1 or val2)) elif code == OP_NUMEQUAL: print ('OP_NUMEQUAL') val2 = stack.pop() val1 = stack.pop() if val1 == val2: val = 0 else: val = 1 stack.append(val) elif code == OP_NUMEQUALVERIFY: print ('OP_NUMEQUALVERIFY') val2 = stack.pop() val1 = stack.pop() if val1 != val2: stack.append(0) return stack elif code == OP_NUMNOTEQUAL: print ('OP_NUMNOTEQUAL') val2 = stack.pop() val1 = stack.pop() stack.append(val1 != val2) elif code == OP_LESSTHAN: print ('OP_LESSTHAN') val2 = stack.pop() val1 = stack.pop() stack.append(val1 < val2) elif code == OP_GREATERTHAN: print ('OP_GREATERTHAN') val2 = stack.pop() val1 = stack.pop() stack.append(val1 > val2) elif code == OP_LESSTHANOREQUAL: print ('OP_LESSTHANOREQUAL') val2 = stack.pop() val1 = stack.pop() stack.append(val1 <= val2) elif code == OP_GREATERTHANOREQUAL: print ('OP_GREATERTHANOREQUAL') val2 = stack.pop() val1 = stack.pop() stack.append(val1 >= val2) elif code == OP_MIN: print ('OP_MIN') val2 = stack.pop() val1 = stack.pop() stack.append(min(val1, val2)) elif code == OP_MAX: print ('OP_MAX') val2 = stack.pop() val1 = stack.pop() stack.append(max(val1, val2)) elif code == OP_WITHIN: # x min max print ('OP_WITHIN') maximum = stack.pop() minimum = stack.pop() val = stack.pop() stack.append((val >= minimum) and (val < maximum)) elif code == OP_RIPEMD160: print ('OP_RIPEMD160') pubkeyhash = stack.pop() h = hashlib.new('ripemd160') h.update(pubkeyhash) ripemd160_hash = h.digest() stack.append(ripemd160_hash) elif code == OP_SHA1: print ('OP_SHA1') bstr = stack.pop() stack.append(hashlib.sha1(bstr).digest()) elif code == OP_SHA256: print ('OP_SHA256') bstr = stack.pop() stack.append(hashlib.sha256(bstr).digest()) elif code == OP_HASH160: print ('OP_HASH160') pubkey = 
stack.pop() print('OP_HASH160: pubkey = %s' % bytes.decode(binascii.hexlify(pubkey))) pubkeyhash = hashlib.sha256(pubkey).digest() h = hashlib.new('ripemd160') h.update(pubkeyhash) pubkey_hash160 = h.digest() print('Hash 160 = %s' % bytes.decode(binascii.hexlify(pubkey_hash160))) stack.append(pubkey_hash160) elif code == OP_HASH256: print ('OP_HASH256') pubkey = stack.pop() pubkey_hash256 = hash256(pubkey) stack.append(pubkey_hash256) elif code == OP_CODESEPARATOR: print ('OP_CODESEPARATOR') stack.append(0) return stack # we won't process this as this was widthrawn early in bitcoin elif code == OP_CHECKSIG: # sig pubkey print ('OP_CHECKSIG') pubkey_b = stack.pop() complete_sig_b = stack.pop() r, s, sighash_type = splitSig(complete_sig_b) txn_signed_b = getTxnSigned(txn, sighash_type, input_index, script, is_witness) sig_b = r + s is_valid = sigcheck(sig_b, pubkey_b, txn_signed_b) print('input_index = %d, is_valid = %d' % (input_index, is_valid)) stack.append(is_valid) elif code == OP_CHECKSIGVERIFY: print ('OP_CHECKSIGVERIFY') pubkey_b = stack.pop() complete_sig_b = stack.pop() # this is R, S and sig_type r, s, sighash_type = splitSig(complete_sig_b) txn_signed_b = getTxnSigned(txn, sighash_type, input_index, script, is_witness) sig_b = r + s is_valid = sigcheck(sig_b, pubkey_b, txn_signed_b) if is_valid == 0: stack.append(0) return stack elif code == OP_CHECKMULTISIG: # <OP_0> <sig A> <sig B> <OP_2> <A pubkey> <B pubkey> <C pubkey> <OP_3> <OP_CHECKMULTISIG> print ('OP_CHECKMULTISIG') g_multisig_counter += 1 print ('Multisig counter = %d' % g_multisig_counter) pubkey_count = stack[-1] min_valid_sig = stack[-1-pubkey_count-1] sig_count = len(stack) - pubkey_count - 3 # 0,sig1,sig2,sig3,2,key1,key2,key3,3 ; # len = 9, pubkey_count = 3; len - pubkey_count = 6; sig_count = len - pubkey_count - 3 = 3 sig_index = 1 pubkey_index = 1 + min_valid_sig + 1 print('pubkey_count = %d, min_valid_sig = %d' % (pubkey_count, min_valid_sig)) remaining_valid_sig = min_valid_sig 
remaining_pubkey = pubkey_count remaining_sig = sig_count while remaining_valid_sig > 0: if remaining_sig > remaining_pubkey: stack.append(0) return stack complete_sig_b = stack[sig_index] pubkey_b = stack[pubkey_index] if type(complete_sig_b) != bytes or type(pubkey_b) != bytes: print('type of sig = %s, type of pubkey = %s' % (type(complete_sig_b), type(pubkey_b))) stack.append(0) return stack r, s, sighash_type = splitSig(complete_sig_b) txn_signed_b = getTxnSigned(txn, sighash_type, input_index, script, is_witness) sig_b = r + s is_valid_sig = sigcheck(sig_b, pubkey_b, txn_signed_b) if is_valid_sig == 1: remaining_valid_sig -= 1 if remaining_valid_sig == 0: is_valid = 1 break sig_index += 1 remaining_sig -= 1 continue pubkey_index += 1 remaining_pubkey -= 1 print("Multisig Valid: %s" % is_valid) stack.clear() stack.append(is_valid) elif code == OP_CHECKMULTISIGVERIFY: # <OP_0> <sig A> <sig B> <OP_2> <A pubkey> <B pubkey> <C pubkey> <OP_3> <OP_CHECKMULTISIGVERIFY> print ('OP_CHECKMULTISIGVERIFY') pubkey_count = stack[-1] min_valid_sig = stack[-1-pubkey_count-1] sig_count = len(stack) - pubkey_count - 3 # 0,sig1,sig2,sig3,2,key1,key2,key3,3 ; # len = 9, pubkey_count = 3; len - pubkey_count = 6; sig_count = len - pubkey_count - 3 = 3 sig_index = 1 pubkey_index = 1 + min_valid_sig + 1 print('pubkey_count = %d, min_valid_sig = %d' % (pubkey_count, min_valid_sig)) remaining_valid_sig = min_valid_sig remaining_pubkey = pubkey_count remaining_sig = sig_count while remaining_valid_sig > 0: if remaining_sig > remaining_pubkey: stack.append(0) return stack complete_sig_b = stack[sig_index] pubkey_b = stack[pubkey_index] if type(complete_sig_b) != bytes or type(pubkey_b) != bytes: print('type of sig = %s, type of pubkey = %s' % (type(complete_sig_b), type(pubkey_b))) stack.append(0) return stack r, s, sighash_type = splitSig(complete_sig_b) txn_signed_b = getTxnSigned(txn, sighash_type, input_index, script, is_witness) sig_b = r + s is_valid_sig = sigcheck(sig_b, pubkey_b, 
txn_signed_b) if is_valid_sig == 1: remaining_valid_sig -= 1 if remaining_valid_sig == 0: is_valid = 1 break sig_index += 1 remaining_sig -= 1 continue pubkey_index += 1 remaining_pubkey -= 1 print("Multisig Valid: %s" % is_valid) if is_valid == 0: stack.append(0) return stack stack.clear() elif code == OP_CHECKLOCKTIMEVERIFY: # TODO print ('OP_CHECKLOCKTIMEVERIFY') if len(stack) == 0: stack.append(0) return stack val = int(binascii.hexlify(stack.pop()[::-1]), 16) if val > n_lock_time or val < 0 or n_sequence == 0xffffffff: stack.append(0) return stack elif code == OP_CHECKSEQUENCEVERIFY: # TODO print ('OP_CHECKSEQUENCEVERIFY') val = int(binascii.hexlify(stack.pop()[::-1]), 16) if val < n_lock_time: stack.append(0) return stack else: # Any non assigned opcode stack.append(0) return stack # print('stack = %s' % stack) # stack.pop() return stack def isP2SH(script: bytes): if len(script) == 23 and script[0] == OP_HASH160 and script[1] == 0x14 and script[22] == OP_EQUAL: print('script is P2SH') return True return False def isWitnessProgram(script: bytes): script_size = len(script) if script_size < 4 or script_size > 42: return False if script[0] != OP_0 and (script[0] < OP_1 or script[0] > OP_16): return False if (script[1] + 2) != script_size: return False return True # version = decodeOpN(script[0]) # program = script[2:] # return version, program WITNESS_V0_P2SH_SIZE = 32 WITNESS_V0_P2PKH_SIZE = 20 def
#!/usr/bin/env python3 # -*- coding:utf-8 -*- # ======================================================================================================================== # # Project : Explainable Recommendation (XRec) # # Version : 0.1.0 # # File : \criteo.py # # Language : Python 3.8 # # ------------------------------------------------------------------------------------------------------------------------ # # Author : <NAME> # # Email : <EMAIL> # # URL : https://github.com/john-james-ai/xrec # # ------------------------------------------------------------------------------------------------------------------------ # # Created : Sunday, December 26th 2021, 3:56:00 pm # # Modified : Friday, January 14th 2022, 6:46:32 pm # # Modifier : <NAME> (<EMAIL>) # # ------------------------------------------------------------------------------------------------------------------------ # # License : BSD 3-clause "New" or "Revised" License # # Copyright: (c) 2021 Bryant St. Labs # # ======================================================================================================================== # from abc import ABC, abstractmethod import os import pandas as pd import numpy as np import logging import math from scipy import stats from sklearn import svm from sklearn.covariance import EllipticEnvelope from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor from cvr.utils.printing import Printer # ------------------------------------------------------------------------------------------------------------------------ # logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) # ------------------------------------------------------------------------------------------------------------------------ # DEFAULT_N_JOBS = 18 # ======================================================================================================================== # # OUTLIER DETECTION # # 
======================================================================================================================== # class OutlierDetector: """Outlier detection with selected outlier detection algorithms. Args: criterion (str): Indicates criterion for final determination of an observation, given results from various outlier detection algorithms. Values include 'any', 'all', 'vote' for majority vote. numeric_algorithms(dict): Dictionary of instantiated numeric outlier detection algorithms categorical_algorithms(dict): Dictionary of instantiated categorical outlier detection algorithms random_state (int): Pseudo random generator seed for Isolation Forest Attributes: results_: Contains a nested dictionary with three numeric, categorical and combined outlier labels summary_: Returns: Numpy array containing the labels labels """ def __init__( self, criterion="vote", numeric_algorithms: dict = None, categorical_algorithms: dict = None, random_state=None, ) -> None: self._criterion = criterion self._random_state = random_state self.results_ = {} self._n = None # Numeric Outlier Detection Algorithms self._numeric_algorithms = ( numeric_algorithms if numeric_algorithms else { "Z-Score": OutlierZScore(), "IQR": OutlierIQR(), "Robust Covariance": OutlierEllipticEnvelope(random_state=random_state), "Isolation Forest": OutlierIsolationForest(random_state=random_state), "Local Outlier Factor": OutlierLocalOutlierFactor(), } ) # Categorical Outlier Detection Algorithms self._categorical_algorithms = ( categorical_algorithms if categorical_algorithms else { "Attribute Value Frequency": OutlierAVF(), "Square of Complement Frequency": OutlierSCF(), "Weighted Attribute Value Frequency": OutlierWAVF(), } ) # Algorithms for numeric and categorical (object) data outlier detection self._detectors = { "number": self._numeric_algorithms, "object": self._categorical_algorithms, } def fit(self, X, y=None): """Fits several outlier detection algorithms. 
Args: X (pd.DataFrame): Input """ self._n = len(X) labels_ensemble = {} for datatype, algorithms in self._detectors.items(): labels_datatype = {} X_datatype = X.select_dtypes(include=datatype) for name, algorithm in algorithms.items(): name_datatype = name + " (" + datatype + ")" print( "Currently fitting outlier detector {}.".format(name_datatype), end=" ", ) algorithm.fit(X_datatype) labels = algorithm.predict(X_datatype) o = labels.sum() p = round(o / self._n * 100, 2) print("Detected {} outliers, {}% of the data.".format(str(o), str(p))) labels_datatype[name] = labels labels_ensemble[name_datatype] = labels self.results_[datatype] = self._compute_results(labels_datatype, datatype) # Combine results for numeric and categorical outlier labels self.results_["ensemble"] = self._compute_results(labels_ensemble, "combined") def predict(self, X) -> pd.DataFrame: o = self.results_["ensemble"]["labels"].sum() p = round(o / self._n * 100, 2) print( "\nThe ensemble detected {} outliers constituting {}% of the data using the {} criterion.".format( str(o), str(p), str(self._criterion) ) ) return self.results_["ensemble"]["labels"].to_frame().reset_index() def _compute_results(self, labels: dict, datatype: str) -> dict: """Aggregates results for several outlier detection algorithms.""" d = {} # Store labels by algorithm d["labels_by_algorithm"] = pd.DataFrame.from_dict(labels, orient="columns") # Store aggregated labels based upon the criteria d["labels_any"] = d["labels_by_algorithm"].any(axis=1) d["labels_all"] = d["labels_by_algorithm"].all(axis=1) d["labels_vote"] = d["labels_by_algorithm"].mean(axis=1) > 0.5 # Store the labels according to the selected criterion if self._criterion == "any": d["labels"] = d["labels_any"] elif self._criterion == "all": d["labels"] = d["labels_all"] else: d["labels"] = d["labels_vote"] # Update labels by algorithms to include the labels aggregated by the three criteria all_desc = self._get_label_description(datatype, " (All)") any_desc = 
self._get_label_description(datatype, " (Any)") vote_desc = self._get_label_description(datatype, " (Majority Vote)") ensemble_desc = self._get_label_description(datatype, "") d["labels_by_algorithm"][all_desc] = d["labels_all"] d["labels_by_algorithm"][any_desc] = d["labels_any"] d["labels_by_algorithm"][vote_desc] = d["labels_vote"] d["labels_by_algorithm"][ensemble_desc] = d["labels"] # Aggregate the total counts for all algorithms for selected and criteria d["summary"] = d["labels_by_algorithm"].sum() return d def _get_label_description(self, datatype: str, criterion: str) -> str: if datatype == "number": return "Numeric Ensemble" + criterion elif datatype == "object": return "Categorical Ensemble" + criterion else: return "Combined Ensemble" + criterion # ------------------------------------------------------------------------------------------------------------------------ # # OUTLIER ANALYSIS Z-SCORE # # ------------------------------------------------------------------------------------------------------------------------ # class OutlierZScore: def __init__(self, threshold: int = 3) -> None: self._threshold = threshold self._labels = None def fit(self, X, y=None) -> None: """Computes the zscores for a 2 dimensional array. Args: X (pd.DataFrame): Input """ # Convert dataframe to numpy array. 
        X = X.select_dtypes(include="number").values
        z = stats.zscore(X)
        # Flag every cell whose |z| exceeds the threshold, then collapse to
        # one label per row: a row is an outlier if any of its columns is.
        labels = np.where(np.abs(z) > self._threshold, 1, 0)
        self._labels = np.any(labels, axis=1)

    def predict(self, X):
        """Returns the row-level outlier labels computed by fit.

        Args:
            X (np.array): Input (unused; labels come from the last fit call)
        """
        return self._labels


# ------------------------------------------------------------------------------------------------------------------------ #
#                                                  OUTLIER ANALYSIS IQR                                                    #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierIQR:
    """Tukey-fence outlier detector.

    Flags values outside [Q1 - t*IQR, Q3 + t*IQR] per numeric column,
    then labels a row as an outlier if any of its columns is flagged.
    """

    def __init__(self, threshold: float = 1.5) -> None:
        # Fence multiplier t (1.5 is the conventional Tukey value).
        self._threshold = threshold
        self._labels = None

    def fit(self, X, y=None) -> None:
        """Computes per-column IQR fences and labels out-of-fence rows.

        X (pd.DataFrame): Input
        """
        # Convert dataframe to numpy array.
        X = X.select_dtypes(include="number").values
        q1, q3 = np.percentile(a=X, q=[25, 75], axis=0)
        iqr = q3 - q1
        lower = q1 - (iqr * self._threshold)
        upper = q3 + (iqr * self._threshold)
        labels = np.where(np.greater(X, upper) | np.less(X, lower), 1, 0)
        self._labels = np.any(labels, axis=1)

    def predict(self, X) -> np.ndarray:
        """Returns the row-level outlier labels computed by fit."""
        return self._labels


# ======================================================================================================================== #
#                                           SKLEARN OUTLIER DETECTOR WRAPPERS                                              #
# ======================================================================================================================== #
class OutliersSKLearn(ABC):
    """Abstract base class for sklearn outlier detectors wrappers.

    The SKLearn classifiers cannot handle NaNs. Hence, NaNs were
    replaced as follows:

    - Numeric variables replace NaNs with the mean.
- Categorical variables replace NaNs with -1 """ def __init__( self, contamination: float = None, n_jobs: int = DEFAULT_N_JOBS, random_state: int = None, **kwargs ) -> None: self._contamination = contamination self._n_jobs = n_jobs self._random_state = random_state self._clf = self.get_clf() @abstractmethod def get_clf(self) -> None: pass def fit(self, X: pd.DataFrame, y: np.ndarray = None) -> None: X = X.select_dtypes(include="number") X = self._impute(X).values self._clf.fit(X) def predict(self, X: pd.DataFrame) -> np.ndarray: X = X.select_dtypes(include="number") X = self._impute(X).values labels = self._clf.predict(X) return np.where(labels == -1, 1, 0) def _impute(self, X) -> pd.DataFrame: """Imputes missing numerics with their means and missing categoricals with '-1'""" imputer = { "sale": 0, "sales_amount": X["sales_amount"].mean(), "conversion_time_delay": X["conversion_time_delay"].mean(), "click_ts": X["click_ts"].mean(), "n_clicks_1week": X["n_clicks_1week"].mean(), "product_price": X["product_price"].mean(), "product_age_group": "-1", "device_type": "-1", "audience_id": "-1", "product_gender": "-1", "product_brand": "-1", "product_category_1": "-1", "product_category_2": "-1", "product_category_3": "-1", "product_category_4": "-1", "product_category_5": "-1", "product_category_6": "-1", "product_category_7": "-1", "product_country": "-1", "product_id": "-1", "product_title": "-1", "partner_id": "-1", "user_id": "-1", } X.fillna(value=imputer, inplace=True) return X # ------------------------------------------------------------------------------------------------------------------------ # # OUTLIER ANALYSIS ELLIPTIC ENVELOPE # # ------------------------------------------------------------------------------------------------------------------------ # class OutlierEllipticEnvelope(OutliersSKLearn): """Wrapper for sklearn's Elliptic Envelope class which accepts dataframes as input. 
Args: support_fraction (float): The proportion of points to be included in the support of the raw MCD estimate. If None, the minimum value of support_fraction will be used within the algorithm: [n_sample + n_features + 1] / 2. Range is (0, 1). Default is None. contamination (float): The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Range is (0, 0.5]. Default is 0.1 random_state (int): Pseudo random generator seed. Default is None. """ def __init__( self, support_fraction: float = 0.6, contamination: float = 0.1, random_state: int = None, ) -> None: self._support_fraction = support_fraction super(OutlierEllipticEnvelope, self).__init__( contamination=contamination, random_state=random_state ) def get_clf(self): return EllipticEnvelope( support_fraction=self._support_fraction, contamination=self._contamination, random_state=self._random_state, ) # ------------------------------------------------------------------------------------------------------------------------ # # OUTLIER ANALYSIS ISOLATION FOREST # # ------------------------------------------------------------------------------------------------------------------------ # class OutlierIsolationForest(OutliersSKLearn): """Wrapper for sklearn's Isolation Forest class which accepts dataframes as input. Args: contamination (float): The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Range is (0, 0.5]. Default is 0.1 n_jobs (int). The number of jobs to run in parallel. random_state (int): Pseudo random generator seed. Default is None. 
""" def __init__( self, contamination="auto", n_jobs: int = DEFAULT_N_JOBS, random_state: int = None, ) -> None: super(OutlierIsolationForest, self).__init__( contamination=contamination, n_jobs=n_jobs, random_state=random_state ) def get_clf(self): return IsolationForest( contamination=self._contamination, n_jobs=self._n_jobs, random_state=self._random_state, ) # ------------------------------------------------------------------------------------------------------------------------ # # OUTLIER ANALYSIS ISOLATION FOREST # # ------------------------------------------------------------------------------------------------------------------------ # class OutlierLocalOutlierFactor(OutliersSKLearn): """Wrapper for sklearn's Local Outlier Factor class which accepts dataframes as input. Args: contamination (float): The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Range is (0, 0.5]. Default is 0.1 n_jobs (int). The number of jobs to run in
}, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanDPKP', fields=[ ], options={ 'verbose_name': '40 SKPD Asal Gedung DPKP', 'proxy': True, 'verbose_name_plural': '40 SKPD Asal Gedung DPKP', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanDPMD', fields=[ ], options={ 'verbose_name': '10 SKPD Asal Gedung DPMD', 'proxy': True, 'verbose_name_plural': '10 SKPD Asal Gedung DPMD', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanDPMPTSP', fields=[ ], options={ 'verbose_name': '18 SKPD Asal Gedung DPMPTSP', 'proxy': True, 'verbose_name_plural': '18 SKPD Asal Gedung DPMPTSP', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanDPPKB', fields=[ ], options={ 'verbose_name': '42 SKPD Asal Gedung DPPKB', 'proxy': True, 'verbose_name_plural': '42 SKPD Asal Gedung DPPKB', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanDPPPA', fields=[ ], options={ 'verbose_name': '11 SKPD Asal Gedung DPPPA', 'proxy': True, 'verbose_name_plural': '11 SKPD Asal Gedung DPPPA', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanDPUPR', fields=[ ], options={ 'verbose_name': '03 SKPD Asal Gedung DPUPR', 'proxy': True, 'verbose_name_plural': '03 SKPD Asal Gedung DPUPR', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanDukCatPil', fields=[ ], options={ 'verbose_name': '12 SKPD Asal Gedung DukCatPil', 'proxy': True, 'verbose_name_plural': '12 SKPD Asal Gedung DukCatPil', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanHalong', fields=[ ], options={ 'verbose_name': '35 SKPD Asal Gedung Halong', 'proxy': True, 'verbose_name_plural': '35 SKPD Asal 
Gedung Halong', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanInspektorat', fields=[ ], options={ 'verbose_name': '20 SKPD Asal Gedung Inspektorat', 'proxy': True, 'verbose_name_plural': '20 SKPD Asal Gedung Inspektorat', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanJuai', fields=[ ], options={ 'verbose_name': '33 SKPD Asal Gedung Juai', 'proxy': True, 'verbose_name_plural': '33 SKPD Asal Gedung Juai', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanKearsipan', fields=[ ], options={ 'verbose_name': '44 SKPD Asal Gedung Kearsipan', 'proxy': True, 'verbose_name_plural': '44 SKPD Asal Gedung Kearsipan', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanKehutanan', fields=[ ], options={ 'verbose_name': '14 SKPD Asal Gedung Kehutanan', 'proxy': True, 'verbose_name_plural': '14 SKPD Asal Gedung Kehutanan', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanKESBANGPOL', fields=[ ], options={ 'verbose_name': '24 SKPD Asal Gedung KESBANGPOL', 'proxy': True, 'verbose_name_plural': '24 SKPD Asal Gedung KESBANGPOL', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanKominfo', fields=[ ], options={ 'verbose_name': '43 SKPD Asal Gedung Kominfo', 'proxy': True, 'verbose_name_plural': '43 SKPD Asal Gedung Kominfo', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanLampihong', fields=[ ], options={ 'verbose_name': '31 SKPD Asal Gedung Lampihong', 'proxy': True, 'verbose_name_plural': '31 SKPD Asal Gedung Lampihong', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanParingin', fields=[ ], options={ 'verbose_name': '28 
SKPD Asal Gedung Paringin', 'proxy': True, 'verbose_name_plural': '28 SKPD Asal Gedung Paringin', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanParinginKota', fields=[ ], options={ 'verbose_name': '29 SKPD Asal Gedung Paringin Kota', 'proxy': True, 'verbose_name_plural': '29 SKPD Asal Gedung Paringin Kota', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanParinginSelatan', fields=[ ], options={ 'verbose_name': '36 SKPD Asal Gedung Paringin Selatan', 'proxy': True, 'verbose_name_plural': '36 SKPD Asal Gedung Paringin Selatan', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanParinginTimur', fields=[ ], options={ 'verbose_name': '30 SKPD Asal Gedung Paringin Timur', 'proxy': True, 'verbose_name_plural': '30 SKPD Asal Gedung Paringin Timur', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanPariwisata', fields=[ ], options={ 'verbose_name': '46 SKPD Asal Gedung Pariwisata', 'proxy': True, 'verbose_name_plural': '46 SKPD Asal Gedung Pariwisata', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanPerdagangan', fields=[ ], options={ 'verbose_name': '47 SKPD Asal Gedung Perdagangan', 'proxy': True, 'verbose_name_plural': '47 SKPD Asal Gedung Perdagangan', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanPerikanan', fields=[ ], options={ 'verbose_name': '45 SKPD Asal Gedung Perikanan', 'proxy': True, 'verbose_name_plural': '45 SKPD Asal Gedung Perikanan', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanPerpustakaan', fields=[ ], options={ 'verbose_name': '08 SKPD Asal Gedung Perpustakaan', 'proxy': True, 'verbose_name_plural': '08 SKPD Asal Gedung Perpustakaan', }, 
bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanPertanian', fields=[ ], options={ 'verbose_name': '13 SKPD Asal Gedung Pertanian', 'proxy': True, 'verbose_name_plural': '13 SKPD Asal Gedung Pertanian', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanRSUD', fields=[ ], options={ 'verbose_name': '06 SKPD Asal Gedung RSUD', 'proxy': True, 'verbose_name_plural': '06 SKPD Asal Gedung RSUD', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanSATPOLPP', fields=[ ], options={ 'verbose_name': '25 SKPD Asal Gedung SATPOLPP', 'proxy': True, 'verbose_name_plural': '25 SKPD Asal Gedung SATPOLPP', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanSekretariatKorpri', fields=[ ], options={ 'verbose_name': '27 SKPD Asal Gedung Sekretariat Korpri', 'proxy': True, 'verbose_name_plural': '27 SKPD Asal Gedung Sekretariat Korpri', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanSetda', fields=[ ], options={ 'verbose_name': '02 SKPD Asal Gedung Setda', 'proxy': True, 'verbose_name_plural': '02 SKPD Asal Gedung Setda', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanSetwan', fields=[ ], options={ 'verbose_name': '01 SKPD Asal Gedung Setwan', 'proxy': True, 'verbose_name_plural': '01 SKPD Asal Gedung Setwan', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanSosial', fields=[ ], options={ 'verbose_name': '09 SKPD Asal Gedung Sosial', 'proxy': True, 'verbose_name_plural': '09 SKPD Asal Gedung Sosial', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDAsalGedungBangunanTebingTinggi', fields=[ ], options={ 'verbose_name': '38 SKPD Asal Gedung Tebing 
Tinggi', 'proxy': True, 'verbose_name_plural': '38 SKPD Asal Gedung Tebing Tinggi', }, bases=('gedungbangunan.skpdasalgedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanAwayan', fields=[ ], options={ 'verbose_name': '34 SKPD Tujuan Gedung Awayan', 'proxy': True, 'verbose_name_plural': '34 SKPD Tujuan Gedung Awayan', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanBAPPEDA', fields=[ ], options={ 'verbose_name': '21 SKPD Tujuan Gedung BAPPEDA', 'proxy': True, 'verbose_name_plural': '21 SKPD Tujuan Gedung BAPPEDA', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanBatumandi', fields=[ ], options={ 'verbose_name': '32 SKPD Tujuan Gedung Batumandi', 'proxy': True, 'verbose_name_plural': '32 SKPD Tujuan Gedung Batumandi', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanBatuPiring', fields=[ ], options={ 'verbose_name': '37 SKPD Tujuan Gedung Batu Piring', 'proxy': True, 'verbose_name_plural': '37 SKPD Tujuan Gedung Batu Piring', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanBKD', fields=[ ], options={ 'verbose_name': '19 SKPD Tujuan Gedung BKD', 'proxy': True, 'verbose_name_plural': '19 SKPD Tujuan Gedung BKD', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanBKPPD', fields=[ ], options={ 'verbose_name': '26 SKPD Tujuan Gedung BKPPD', 'proxy': True, 'verbose_name_plural': '26 SKPD Tujuan Gedung BKPPD', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanBPBD', fields=[ ], options={ 'verbose_name': '39 SKPD Tujuan Gedung BPBD', 'proxy': True, 'verbose_name_plural': '39 SKPD Tujuan Gedung BPBD', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( 
name='SKPDTujuanGedungBangunanBPPD', fields=[ ], options={ 'verbose_name': '48 SKPD Tujuan Gedung BPPD', 'proxy': True, 'verbose_name_plural': '48 SKPD Tujuan Gedung BPPD', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDinkes', fields=[ ], options={ 'verbose_name': '05 SKPD Tujuan Gedung Dinkes', 'proxy': True, 'verbose_name_plural': '05 SKPD Tujuan Gedung Dinkes', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDisdik', fields=[ ], options={ 'verbose_name': '07 SKPD Tujuan Gedung Disdik', 'proxy': True, 'verbose_name_plural': '07 SKPD Tujuan Gedung Disdik', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDishub', fields=[ ], options={ 'verbose_name': '04 SKPD Tujuan Gedung Dishub', 'proxy': True, 'verbose_name_plural': '04 SKPD Tujuan Gedung Dishub', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDisnakertrans', fields=[ ], options={ 'verbose_name': '41 SKPD Tujuan Gedung Disnakertrans', 'proxy': True, 'verbose_name_plural': '41 SKPD Tujuan Gedung Disnakertrans', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDistamben', fields=[ ], options={ 'verbose_name': '17 SKPD Tujuan Gedung Distamben', 'proxy': True, 'verbose_name_plural': '17 SKPD Tujuan Gedung Distamben', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDKO', fields=[ ], options={ 'verbose_name': '23 SKPD Tujuan Gedung DKO', 'proxy': True, 'verbose_name_plural': '23 SKPD Tujuan Gedung DKO', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDKP', fields=[ ], options={ 'verbose_name': '15 SKPD Tujuan Gedung DKP', 'proxy': True, 'verbose_name_plural': '15 SKPD Tujuan 
Gedung DKP', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDKUKMP', fields=[ ], options={ 'verbose_name': '16 SKPD Tujuan Gedung DKUKMP', 'proxy': True, 'verbose_name_plural': '16 SKPD Tujuan Gedung DKUKMP', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDLH', fields=[ ], options={ 'verbose_name': '22 SKPD Tujuan Gedung DLH', 'proxy': True, 'verbose_name_plural': '22 SKPD Tujuan Gedung DLH', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDPKP', fields=[ ], options={ 'verbose_name': '40 SKPD Tujuan Gedung DPKP', 'proxy': True, 'verbose_name_plural': '40 SKPD Tujuan Gedung DPKP', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDPMD', fields=[ ], options={ 'verbose_name': '10 SKPD Tujuan Gedung DPMD', 'proxy': True, 'verbose_name_plural': '10 SKPD Tujuan Gedung DPMD', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDPMPTSP', fields=[ ], options={ 'verbose_name': '18 SKPD Tujuan Gedung DPMPTSP', 'proxy': True, 'verbose_name_plural': '18 SKPD Tujuan Gedung DPMPTSP', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDPPKB', fields=[ ], options={ 'verbose_name': '42 SKPD Tujuan Gedung DPPKB', 'proxy': True, 'verbose_name_plural': '42 SKPD Tujuan Gedung DPPKB', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDPPPA', fields=[ ], options={ 'verbose_name': '11 SKPD Tujuan Gedung DPPPA', 'proxy': True, 'verbose_name_plural': '11 SKPD Tujuan Gedung DPPPA', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDPUPR', fields=[ ], options={ 'verbose_name': '03 SKPD Tujuan Gedung 
DPUPR', 'proxy': True, 'verbose_name_plural': '03 SKPD Tujuan Gedung DPUPR', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanDukCatPil', fields=[ ], options={ 'verbose_name': '12 SKPD Tujuan Gedung DukCatPil', 'proxy': True, 'verbose_name_plural': '12 SKPD Tujuan Gedung DukCatPil', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanHalong', fields=[ ], options={ 'verbose_name': '35 SKPD Tujuan Gedung Halong', 'proxy': True, 'verbose_name_plural': '35 SKPD Tujuan Gedung Halong', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanInspektorat', fields=[ ], options={ 'verbose_name': '20 SKPD Tujuan Gedung Inspektorat', 'proxy': True, 'verbose_name_plural': '20 SKPD Tujuan Gedung Inspektorat', }, bases=('gedungbangunan.skpdtujuangedungbangunan',), ), migrations.CreateModel( name='SKPDTujuanGedungBangunanJuai', fields=[ ], options={ 'verbose_name': '33 SKPD Tujuan Gedung Juai',
<gh_stars>1-10 # -*- coding: utf-8 -*- # Gameboard Discord Bot # - Grant "Gmanicus" Scrits at Geek Overdrive Studios # Thanks for checking out my code. # Pinterest library from py3pin.Pinterest import Pinterest # Libraries for parsing import json from parse import * import play_scraper import requests from requests_html import AsyncHTMLSession # Scheduling and memory cleanup libraries from apscheduler.schedulers.asyncio import AsyncIOScheduler import gc # Bot import discord from discord.ext import commands from discord.ext.commands import Bot # General libraries import asyncio import random import time import datetime import math import os import re import string # Server ID : Entry Channel ID, Promo Channel ID, Last Poke Msg, Admin User IDs, Do Not Disturb Users server_data = {} # User queue for saving what stage of the entry process that users are on and what answers they've given. Used to allow multiple users to make entries at one time user_queue = {} # A queue of jobs, as Pinterest rate limits board, pin, section creations and successful logins after about 10 times # This allows us to keep a list of all the things that failed to go through and put them through when the rate limit is lifted. # On the other hand, this also gives us a fail-safe method of recovering all the entries if, say, the API changes and our system fails every time job_queue = [] class server_info: entry = 0 promo = 1 last_poke = 2 admins = 3 DNDs = 4 # Entry channel ID # Promo channel ID # ID of the last poke message sent. Used for cleanup. # Admin user IDs # IDs of Users who requested "Do Not Disturb" # Class for storing message info # -- Is this a direct message? # -- Server this msg came from # -- Channel this msg came from # -- Msg this msg came from... wait wut? 
# -- ID of the author of this msg Actual text of the msg class message_info: direct = False server_id = "" channel_id = "" message_obj = None author_id = "" value = "" # Class for storing user queue data # -- Stage the user is on in entry process # -- Table of the answers user has given # -- String with a list of sections given at the end of the stage questions class user_in_queue: def __init__(self): self.stage = 0 self.answers = [] self.sections_given = None # Class for storing server data class server_cache: def __init__(self): self.setup = False self.board = None self.sections = {} self.entry = None self.promo = None self.admin_role = None # Add ourselves so that we don't have a recursion issue self.dnd_users = ["658509820957032458"] # A list of currency symbols for string checks currency_symbols = ["$", "€", "¥", "₤", "£", "¢", "¤", "฿", "৳", "₠", "Free"] # The command prefix of the bot. This is what you preface with to make commands go through to the bot. E.g: "!help", "!entry" callsign = "gb>" # A list of commands for help messages command_list = ( "`{0}what`: Information about Gameboard.\n" "`{0}help`: This message.\n" "`{0}entry`: Add a game to the Gameboard\n" "`{0}dnd`: (Do Not Disturb) Opt-out of reminders when posting in the promotion channel.\n" "`{0}board`: Get the link to this community's board.\n" ).format(callsign) what_am_i = ( "Hi, I'm Gameboard. I was created by Gmanicus#5137 (@GeekOverdriveUS).\n\n" "I was created to compile a list of games and their details in hopes of keeping a history of the communities' games, and also help promote them more. " "It was actually a bit successful when initially made in Oct. 2018, displaying info via a Google Docs page. I do not know how successful it was at promotion, but it was utilized quite a bit. " "\n\nThis overhaul hopes to improve on promotion even more. The code is much nicer now, available on Github under 'Gmanicus', and, most importantly, I now use a Pinterest board instead of Google Docs. 
" "This greatly improves the visability of games posted to Gameboard and gives them a pretty outlet to be shown from.\n\n" "If you would like to support the developer behind this, feel free to stop by on his Patreon. <https://www.patreon.com/user?u=11363490>" ) # A list of questions to ask in order to make a Gameboard entry stage_questions = [ ":mag: <@{0}> Please input the **Link** to the project.", ":id: <@{0}> {1}Please input the **Project Title**.", ":speech_balloon: <@{0}> {1}Please input the **Description** of the project.", ":bow: <@{0}> {1}Please input the **Studio Name**.", ":dollar: <@{0}> {1}Please input the **Price** of the project.", ":moneybag: <@{0}> {1}Please input the **Currency Symbol** for the price.", ":bookmark: <@{0}> {1}Please input the link to the **Store Image** of the project.", ":bookmark_tabs: <@{0}> Lastly, where do you want this put? Please select a section to place this in via the corresponding number.\n\n{1}", ":heart: <@{0}> Thank you for submitting your project!" # ":pencil: Use `{callsign}editentry <entrynum>` to update information on your entry." ] # Found and Not Found formats to say when going through the stage questions found_format = "I found `{0}`. If this is correct, say `yes`. Otherwise, " special_found_format = "I found {0} . If this is correct, say `yes`. Otherwise, " not_found_format = "I couldn't find this. " step1_setup_msg = (":100: Your server's gameboard has been created and named **{0}**.\n\n" "Please add what channels I'm allowed to operate in. You can set these via `{1}setentry <channel_id>` and `{1}setpromo <channel_id>`.\n\n" "`setentry` sets the channel that Gameboard looks to for board entries. That way, entries are only made in one place and it doesn't look as messy.\n\n" "`setpromo` sets the channel that Gameboard looks to for potential board entries. " "If Gameboard recognizes that there was a game post made there, it will let the user know that they can add it to the community gameboard. 
" "The user can use the `{1}dnd` command to keep the bot from pinging them for this if they like.") step2_setup_msg = "\n:white_square_button: Now please set the {0} channel via `{1}set{0} <channel_id>`" step3_setup_msg = ("\n:white_square_button: Lastly, please create a section or two via `{0}addsection <section name>` for your community to post entries to.\n\n" "Users will only be able to add entries to these sections, so create them wisely and as needed.\n" "Get creative. Sections can be used to group things as you like.\n" "They can be used to group games to events, like **New Year Jam 2020** or **LD Jam 69**, or you can simply create a single section, like **Our Community's Games**.") end_setup_msg = ("\n:white_check_mark: Great! The **{0}** gameboard can now be used, although please feel free to continue adding sections and or change the `entry` and `promo` channel IDs. " "Please enjoy using Gameboard. Contact Gmanicus#5137 or @GeekOverdriveUS if you have any issues or suggestions.") board_base_desc = "This board was created for the {0} game development community. Check out their games here!" py3pin_link = "https://www.pinterest.com/pin/{0}/" py3board_link = "https://www.pinterest.com/gameboardbot/{0}/" # This is checked periodically. If there is over an hour since the time in this var, # the hour start time is reset to the current time. 
This is used to calculate avg stuff over the last hour hour_start_time = 0 new_entries = 0 new_boards = 0 new_sections = 0 command_calls = 0 servers = [] bot = commands.Bot(command_prefix=callsign) bot.remove_command("help") """ |/ TO DO \| /// POLISH: Allow admins to add a role to give control over the gameboard Allow users to search for random pin, or random pins from specific genres Allow users to edit their own pins Allow users to submit with no link """ def main(): gc.disable() gc.set_debug(gc.DEBUG_STATS) global hour_start_time hour_start_time = time.time() email = "" password = "" username = "" with open("credentials.txt", "r") as creds_file: creds = eval(creds_file.read()) global BOT_TOKEN BOT_TOKEN = creds["token"] email = creds["email"] password = creds["password"] username = creds["username"] # Load stored data backups load_backup() # Begin Pinterest authentication global pinterest pinterest = Pinterest(email=email, password=password, username=username) print("\nIf you're seeing this, we logged in SUCCESSFULLY\n") # Login to Pinterest. Allows us to make changes #pinterest.login() @bot.event async def on_ready(): global servers servers = bot.servers await bot.change_presence(game=discord.Game(name="{0}help".format(callsign))) @bot.event async def on_server_join(server): # Get the admin member so we can send him a message admin = get_owner(server) print("I was invited to the {0} server.".format(server.name)) if not server.id in server_data: # Create a new server_cache object in the
group. :param str opc_retry_token: (optional) A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again. Retry tokens expire after 24 hours, but can be invalidated before then due to conflicting operations (e.g., if a resource has been deleted and purged from the system, then a retry of the original creation request may be rejected). :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. 
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.Group` :rtype: :class:`~oci.response.Response` """ resource_path = "/groups" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_group got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_group_details, response_type="Group") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_group_details, response_type="Group") def create_identity_provider(self, create_identity_provider_details, **kwargs): """ Creates a new identity provider in your tenancy. For more information, see `Identity Providers and Federation`__. You must specify your tenancy's OCID as the compartment ID in the request object. Remember that the tenancy is simply the root compartment. For information about OCIDs, see `Resource Identifiers`__. You must also specify a *name* for the `IdentityProvider`, which must be unique across all `IdentityProvider` objects in your tenancy and cannot be changed. You must also specify a *description* for the `IdentityProvider` (although it can be an empty string). 
It does not have to be unique, and you can change it anytime with :func:`update_identity_provider`. After you send your request, the new object's `lifecycleState` will temporarily be CREATING. Before using the object, first make sure its `lifecycleState` has changed to ACTIVE. __ https://docs.cloud.oracle.com/Content/Identity/Concepts/federation.htm __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param CreateIdentityProviderDetails create_identity_provider_details: (required) Request object for creating a new SAML2 identity provider. :param str opc_retry_token: (optional) A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again. Retry tokens expire after 24 hours, but can be invalidated before then due to conflicting operations (e.g., if a resource has been deleted and purged from the system, then a retry of the original creation request may be rejected). :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. 
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.IdentityProvider` :rtype: :class:`~oci.response.Response` """ resource_path = "/identityProviders" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_identity_provider got unknown kwargs: {!r}".format(extra_kwargs)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.retry_strategy if kwargs.get('retry_strategy'): retry_strategy = kwargs.get('retry_strategy') if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, header_params=header_params, body=create_identity_provider_details, response_type="IdentityProvider") else: return self.base_client.call_api( resource_path=resource_path, method=method, header_params=header_params, body=create_identity_provider_details, response_type="IdentityProvider") def create_idp_group_mapping(self, create_idp_group_mapping_details, identity_provider_id, **kwargs): """ Creates a single mapping between an IdP group and an IAM Service :class:`Group`. :param CreateIdpGroupMappingDetails create_idp_group_mapping_details: (required) Add a mapping from an SAML2.0 identity provider group to a BMC group. :param str identity_provider_id: (required) The OCID of the identity provider. 
:param str opc_retry_token: (optional) A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again. Retry tokens expire after 24 hours, but can be invalidated before then due to conflicting operations (e.g., if a resource has been deleted and purged from the system, then a retry of the original creation request may be rejected). :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. 
        :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.IdpGroupMapping`
        :rtype: :class:`~oci.response.Response`
        """
        resource_path = "/identityProviders/{identityProviderId}/groupMappings"
        method = "POST"

        # Don't accept unknown kwargs
        expected_kwargs = [
            "retry_strategy",
            "opc_retry_token"
        ]
        extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]
        if extra_kwargs:
            raise ValueError(
                "create_idp_group_mapping got unknown kwargs: {!r}".format(extra_kwargs))

        path_params = {
            "identityProviderId": identity_provider_id
        }

        # Drop path parameters left at the `missing` sentinel (i.e. never supplied).
        path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}

        # A None or blank path segment would produce a malformed request URL,
        # so reject it up front with a clear error.
        for (k, v) in six.iteritems(path_params):
            if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
                raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

        header_params = {
            "accept": "application/json",
            "content-type": "application/json",
            "opc-retry-token": kwargs.get("opc_retry_token", missing)
        }
        # Strip headers the caller did not set (`missing`) or explicitly nulled.
        header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

        # A per-call retry strategy overrides the client-level default.
        retry_strategy = self.retry_strategy
        if kwargs.get('retry_strategy'):
            retry_strategy = kwargs.get('retry_strategy')

        if retry_strategy:
            if not isinstance(retry_strategy, retry.NoneRetryStrategy):
                # Fill in an opc-retry-token when absent so retried POSTs stay
                # idempotent server-side (see the opc_retry_token doc above).
                self.base_client.add_opc_retry_token_if_needed(header_params)
            return retry_strategy.make_retrying_call(
                self.base_client.call_api,
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=create_idp_group_mapping_details,
                response_type="IdpGroupMapping")
        else:
            return self.base_client.call_api(
                resource_path=resource_path,
                method=method,
                path_params=path_params,
                header_params=header_params,
                body=create_idp_group_mapping_details,
                response_type="IdpGroupMapping")

    def create_mfa_totp_device(self, user_id, **kwargs):
        """
        Creates a new MFA TOTP device for the user. A user can have one MFA TOTP device.
:param str user_id: (required) The OCID of the user. :param str opc_retry_token: (optional) A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again. Retry tokens expire after 24 hours, but can be invalidated before then due to conflicting operations (e.g., if a resource has been deleted and purged from the system, then a retry of the original creation request may be rejected). :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. A convenience :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` is also available. The specifics of the default retry strategy are described `here <https://oracle-cloud-infrastructure-python-sdk.readthedocs.io/en/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. 
:return: A :class:`~oci.response.Response` object with data of type :class:`~oci.identity.models.MfaTotpDevice` :rtype: :class:`~oci.response.Response` """ resource_path = "/users/{userId}/mfaTotpDevices" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_mfa_totp_device got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "userId": user_id } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is
np.sum(samples1 == 1) seen_pixels[1] += np.sum(samples2 == 1) assert seen_components[0] < seen_components[1] assert seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1] # different sizes in percent, given as StochasticParameter param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=iap.Deterministic(0.01)) param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=iap.Choice([0.4, 0.8])) seen_components = [0, 0] seen_pixels = [0, 0] for _ in sm.xrange(100): samples1 = param1.draw_samples((16, 16, 1)) samples2 = param2.draw_samples((16, 16, 1)) _, num1 = skimage.morphology.label(samples1, neighbors=4, background=0, return_num=True) _, num2 = skimage.morphology.label(samples2, neighbors=4, background=0, return_num=True) seen_components[0] += num1 seen_components[1] += num2 seen_pixels[0] += np.sum(samples1 == 1) seen_pixels[1] += np.sum(samples2 == 1) assert seen_components[0] < seen_components[1] assert seen_pixels[0] / seen_components[0] > seen_pixels[1] / seen_components[1] # bad datatype for size_percent got_exception = False try: param = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=False) except Exception as exc: assert "Expected " in str(exc) got_exception = True assert got_exception # method given as StochasticParameter param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4, method=iap.Choice(["nearest", "linear"])) seen = [0, 0] for _ in sm.xrange(200): samples = param.draw_samples((16, 16, 1)) nb_in_between = np.sum(np.logical_and(samples < 0.95, samples > 0.05)) if nb_in_between == 0: seen[0] += 1 else: seen[1] += 1 assert 100 - 50 < seen[0] < 100 + 50 assert 100 - 50 < seen[1] < 100 + 50 # bad datatype for method got_exception = False try: param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4, method=False) except Exception as exc: assert "Expected " in str(exc) got_exception = True assert got_exception # multiple calls with same random_state param = iap.FromLowerResolution(iap.Binomial(0.5), 
size_px=2) samples1 = param.draw_samples((10, 5, 1), random_state=np.random.RandomState(1234)) samples2 = param.draw_samples((10, 5, 1), random_state=np.random.RandomState(1234)) assert np.allclose(samples1, samples2) # str / repr param = iap.FromLowerResolution(other_param=iap.Deterministic(0), size_percent=1, method="nearest") assert param.__str__() == param.__repr__() == "FromLowerResolution(size_percent=Deterministic(int 1), method=Deterministic(nearest), other_param=Deterministic(int 0))" param = iap.FromLowerResolution(other_param=iap.Deterministic(0), size_px=1, method="nearest") assert param.__str__() == param.__repr__() == "FromLowerResolution(size_px=Deterministic(int 1), method=Deterministic(nearest), other_param=Deterministic(int 0))" def test_parameters_Clip(): reseed() eps = np.finfo(np.float32).eps param = iap.Clip(iap.Deterministic(0), -1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == 0 assert np.all(samples == 0) assert param.__str__() == param.__repr__() == "Clip(Deterministic(int 0), -1.000000, 1.000000)" param = iap.Clip(iap.Deterministic(1), -1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == 1 assert np.all(samples == 1) param = iap.Clip(iap.Deterministic(-1), -1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == -1 assert np.all(samples == -1) param = iap.Clip(iap.Deterministic(0.5), -1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert 0.5 - eps < sample < 0.5 + eps assert np.all(np.logical_and(0.5 - eps < samples, samples < 0.5 + eps)) param = iap.Clip(iap.Deterministic(2), -1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) 
assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == 1 assert np.all(samples == 1) param = iap.Clip(iap.Deterministic(-2), -1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == -1 assert np.all(samples == -1) param = iap.Clip(iap.Choice([0, 2]), -1, 1) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample in [0, 1] assert np.all(np.logical_or(samples == 0, samples == 1)) samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234)) samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234)) assert np.array_equal(samples1, samples2) param = iap.Clip(iap.Deterministic(0), None, 1) sample = param.draw_sample() assert sample == 0 assert param.__str__() == param.__repr__() == "Clip(Deterministic(int 0), None, 1.000000)" param = iap.Clip(iap.Deterministic(0), 0, None) sample = param.draw_sample() assert sample == 0 assert param.__str__() == param.__repr__() == "Clip(Deterministic(int 0), 0.000000, None)" param = iap.Clip(iap.Deterministic(0), None, None) sample = param.draw_sample() assert sample == 0 assert param.__str__() == param.__repr__() == "Clip(Deterministic(int 0), None, None)" def test_parameters_Discretize(): reseed() eps = np.finfo(np.float32).eps values = [-100.2, -54.3, -1.0, -1, -0.7, -0.00043, 0, 0.00043, 0.7, 1.0, 1, 54.3, 100.2] for value in values: value_expected = np.round(np.float64([value])).astype(np.int32)[0] param = iap.Discretize(iap.Deterministic(value)) sample = param.draw_sample() samples = param.draw_samples((10, 5)) assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample == value_expected assert np.all(samples == value_expected) param_orig = iap.DiscreteUniform(0, 1) param = iap.Discretize(param_orig) sample = param.draw_sample() samples = param.draw_samples((10, 5)) 
assert sample.shape == tuple() assert samples.shape == (10, 5) assert sample in [0, 1] assert np.all(np.logical_or(samples == 0, samples == 1)) param_orig = iap.DiscreteUniform(0, 2) param = iap.Discretize(param_orig) samples1 = param_orig.draw_samples((10000,)) samples2 = param.draw_samples((10000,)) assert np.all(np.abs(samples1 - samples2) < 0.2*(10000/3)) param_orig = iap.DiscreteUniform(0, 2) param = iap.Discretize(param_orig) samples1 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234)) samples2 = param.draw_samples((10, 5), random_state=np.random.RandomState(1234)) assert np.array_equal(samples1, samples2) param = iap.Discretize(iap.Deterministic(0)) assert param.__str__() == param.__repr__() == "Discretize(Deterministic(int 0))" def test_parameters_Multiply(): reseed() eps = np.finfo(np.float32).eps values_int = [-100, -54, -1, 0, 1, 54, 100] values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1 in values_int: for v2 in values_int: p = iap.Multiply(iap.Deterministic(v1), v2) assert p.draw_sample() == v1 * v2 samples = p.draw_samples((2, 3)) assert samples.dtype == np.int64 assert np.array_equal(samples, np.zeros((2, 3), dtype=np.int64) + v1 * v2) p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2)) assert p.draw_sample() == v1 * v2 samples = p.draw_samples((2, 3)) assert samples.dtype == np.int64 assert np.array_equal(samples, np.zeros((2, 3), dtype=np.int64) + v1 * v2) for v1 in values_float: for v2 in values_float: p = iap.Multiply(iap.Deterministic(v1), v2) assert v1 * v2 - eps < p.draw_sample() < v1 * v2 + eps samples = p.draw_samples((2, 3)) assert samples.dtype == np.float64 assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 * v2) p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2)) assert v1 * v2 - eps < p.draw_sample() < v1 * v2 + eps samples = p.draw_samples((2, 3)) assert samples.dtype == np.float64 assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 * 
v2) param = iap.Multiply(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False) samples = param.draw_samples((10, 20)) assert samples.shape == (10, 20) assert np.all(samples > 1.0 * 1.0 - eps) assert np.all(samples < 1.0 * 2.0 + eps) samples_sorted = np.sort(samples.flatten()) assert samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps param = iap.Multiply(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True) samples = param.draw_samples((10, 20)) assert samples.shape == (10, 20) assert np.all(samples > 1.0 * 1.0 - eps) assert np.all(samples < 1.0 * 2.0 + eps) samples_sorted = np.sort(samples.flatten()) assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps) param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=False) samples = param.draw_samples((10, 20)) assert samples.shape == (10, 20) assert np.all(samples > 1.0 * 1.0 - eps) assert np.all(samples < 2.0 * 1.0 + eps) samples_sorted = np.sort(samples.flatten()) assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps) param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=True) samples = param.draw_samples((10, 20)) assert samples.shape == (10, 20) assert np.all(samples > 1.0 * 1.0 - eps) assert np.all(samples < 2.0 * 1.0 + eps) samples_sorted = np.sort(samples.flatten()) assert not (samples_sorted[0] - eps < samples_sorted[-1] < samples_sorted[0] + eps) param = iap.Multiply(iap.Deterministic(0), 1, elementwise=False) assert param.__str__() == param.__repr__() == "Multiply(Deterministic(int 0), Deterministic(int 1), False)" def test_parameters_Divide(): reseed() eps = np.finfo(np.float32).eps values_int = [-100, -54, -1, 0, 1, 54, 100] values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0] for v1 in values_int: for v2 in values_int: if v2 == 0: v2 = 1 p = iap.Divide(iap.Deterministic(v1), v2) assert p.draw_sample() == v1 / v2 samples = p.draw_samples((2, 3)) assert samples.dtype == np.float64 assert 
np.array_equal(samples, np.zeros((2, 3), dtype=np.float64) + v1 / v2) p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2)) assert p.draw_sample() == v1 / v2 samples = p.draw_samples((2, 3)) assert samples.dtype == np.float64 assert np.array_equal(samples, np.zeros((2, 3), dtype=np.float64) + v1 / v2) for v1 in values_float: for v2 in values_float: if v2 == 0: v2 = 1 p = iap.Divide(iap.Deterministic(v1), v2) assert v1 / v2 - eps < p.draw_sample() < v1 / v2 + eps samples = p.draw_samples((2, 3)) assert samples.dtype == np.float64 assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 / v2) p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2)) assert v1 / v2 - eps < p.draw_sample() < v1 / v2 + eps samples = p.draw_samples((2, 3)) assert samples.dtype == np.float64 assert np.allclose(samples, np.zeros((2, 3), dtype=np.float64) + v1 / v2) param = iap.Divide(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False) samples = param.draw_samples((10, 20)) assert samples.shape == (10, 20)
from __future__ import absolute_import import __builtin__ import re from textwrap import dedent from assemblyline.common.charset import translate_str from assemblyline.al.common.heuristics import Heuristic from assemblyline.al.common.result import Result, ResultSection, SCORE from assemblyline.al.common.result import TAG_TYPE as FC_VALUE_TYPE from assemblyline.al.common.result import TAG_WEIGHT as TAG_SCORE from assemblyline.al.service.base import ServiceBase from al_services.alsvc_cleaver.codepages import CODEPAGE_MAP # Initialize imports createParser = None guessParser = None OfficeRootEntry = None FragmentGroup = None MissingField = None Int8 = None RawBytes = None ParserError = None HEADER_SIZE = None CompObj = None SummaryParser = None PropertyContent = None PropertyIndex = None SummaryFieldSet = None extractMetadata = None RootSeekableFieldSet = None hachoir_log = None getBacktrace = None WordDocumentFieldSet = None StringInputStream = None hachoir_config = None DummyObject = None decode_lnk = None # Next, we have hijack the hachoir_parser.misc.msoffice.OfficeRootEntry.parseProperty # function to fix a problem, see the code below. 
# noinspection PyPep8Naming,PyCallingNonCallable def myParseProperty(self, property_index, p_property, name_prefix): ole2 = self.ole2 if not p_property["size"].value: return if p_property["size"].value >= ole2["header/threshold"].value: return name = "%s[]" % name_prefix first = None previous = None size = 0 start = p_property["start"].value chain = ole2.getChain(start, True) blocksize = ole2.ss_size desc_format = "Small blocks %s..%s (%s)" while True: try: block = chain.next() contiguous = False # buggy line: if not first: if first is None: # <-- fixed line first = block contiguous = True # buggy line: if previous and block == (previous+1): if previous is not None and block == (previous + 1): # <-- fixed line contiguous = True if contiguous: previous = block size += blocksize continue except StopIteration: block = None self.seekSBlock(first) desc = desc_format % (first, previous, previous - first + 1) size = min(size, p_property["size"].value * 8) if name_prefix in ("summary", "doc_summary"): yield SummaryFieldSet(self, name, desc, size=size) elif name_prefix == "word_doc": yield WordDocumentFieldSet(self, name, desc, size=size) elif property_index == 1: yield CompObj(self, "comp_obj", desc, size=size) else: yield RawBytes(self, name, size // 8, desc) if block is None: break first = block previous = block size = ole2.sector_size # We have hijack the hachoir_parser.misc.msoffice.FragmentGroup.createInputStream # function to fix a problem, see the code below. 
# noinspection PyPep8Naming,PyCallingNonCallable def myCreateInputStream(self): # FIXME: Use lazy stream creation data = [] for item in self.items: # bug here by not checking for None if item["rawdata"].value is not None: data.append(item["rawdata"].value) data = "".join(data) # FIXME: Use smarter code to send arguments args = {"ole2": self.items[0].root} tags = {"class": self.parser, "args": args} tags = tags.iteritems() return StringInputStream(data, "<fragment group>", tags=tags) # noinspection PyUnresolvedReferences,PyShadowingNames def do_delayed_imports(): global createParser, guessParser, OfficeRootEntry, FragmentGroup, MissingField, Int8, RawBytes, ParserError, \ HEADER_SIZE, CompObj, SummaryParser, PropertyContent, PropertyIndex, SummaryFieldSet, extractMetadata, \ RootSeekableFieldSet, hachoir_log, getBacktrace, WordDocumentFieldSet, StringInputStream from hachoir_parser.guess import createParser, guessParser from hachoir_parser.misc.msoffice import OfficeRootEntry, FragmentGroup from hachoir_core.field.field import MissingField from hachoir_core.field import Int8, RawBytes from hachoir_core.field.basic_field_set import ParserError from hachoir_parser.misc.ole2 import HEADER_SIZE from hachoir_parser.misc.msoffice_summary import CompObj, SummaryParser, PropertyContent, PropertyIndex, \ SummaryFieldSet from hachoir_metadata import extractMetadata from hachoir_core.field.seekable_field_set import RootSeekableFieldSet from hachoir_core.log import log as hachoir_log from hachoir_core.error import getBacktrace from hachoir_parser.misc.word_doc import WordDocumentFieldSet from hachoir_core.stream.input import StringInputStream import hachoir_parser.version import hachoir_core.version import hachoir_metadata.version import hachoir_core.config as hachoir_config from al_services.alsvc_cleaver.parse_lnk import decode_lnk FragmentGroup.createInputStream = myCreateInputStream OfficeRootEntry.parseProperty = myParseProperty PropertyIndex.DOCUMENT_PROPERTY[17] = 
"NumOfChars" # noinspection PyBroadException try: del PropertyIndex.DOCUMENT_PROPERTY[18] except: pass PropertyIndex.DOCUMENT_PROPERTY[19] = "SharedDoc" PropertyIndex.DOCUMENT_PROPERTY[20] = "LinkBase" PropertyIndex.DOCUMENT_PROPERTY[21] = "HLinks" PropertyIndex.DOCUMENT_PROPERTY[22] = "HyperLinksChanged" PropertyIndex.DOCUMENT_PROPERTY[23] = "Version" PropertyIndex.DOCUMENT_PROPERTY[24] = "VBASignature" PropertyIndex.DOCUMENT_PROPERTY[26] = "ContentType" PropertyIndex.DOCUMENT_PROPERTY[27] = "ContentStatus" PropertyIndex.DOCUMENT_PROPERTY[28] = "Language" PropertyIndex.DOCUMENT_PROPERTY[29] = "DocVersion" class DummyObject(Int8): # noinspection PyPep8Naming,PyMethodMayBeStatic def createValue(self): return 66 for k, v in locals().iteritems(): globals()[k] = v # # hachoirOpenFileHelper() & hachoirCloseFileHelper() are used to workaround defect #33 # in hachoir that results in file handles being left open # realOpenFunction = None hachoirOpenedFiles = {} # noinspection PyPep8Naming def hachoirOpenFileHelper(name, mode='r', buffering=-1): global realOpenFunction if realOpenFunction is None: raise Exception("*** Error: realOpenFunction() was not assigned! ***") fd = realOpenFunction(name, mode, buffering) hachoirOpenedFiles[name] = fd return fd # noinspection PyPep8Naming def hachoirCloseFileHelper(name): # noinspection PyBroadException try: fd = hachoirOpenedFiles[name] fd.close() return except: pass ######################################################### # Scan Execution Class # ######################################################### # noinspection PyPep8Naming,PyShadowingBuiltins,PyCallingNonCallable,PyTypeChecker class Cleaver(ServiceBase): AL_Cleaver_001 = Heuristic("AL_Cleaver_001", "OLE_SUMMARY_CODEPAGE", ".*", dedent("""\ Identifying the CodePage for the file. Used for identification purposes. 
""")) AL_Cleaver_002 = Heuristic("AL_Cleaver_002", "OLE_SUMMARY_LASTPRINTED", ".*", "") AL_Cleaver_003 = Heuristic("AL_Cleaver_003", "OLE_SUMMARY_CREATETIME", ".*", "") AL_Cleaver_004 = Heuristic("AL_Cleaver_004", "OLE_SUMMARY_LASTSAVEDTIME", ".*", "") AL_Cleaver_005 = Heuristic("AL_Cleaver_005", "OLE_SUMMARY_TITLE", ".*", "") AL_Cleaver_006 = Heuristic("AL_Cleaver_006", "OLE_SUMMARY_SUBJECT", ".*", "") AL_Cleaver_007 = Heuristic("AL_Cleaver_007", "OLE_SUMMARY_AUTHOR", ".*", "") AL_Cleaver_008 = Heuristic("AL_Cleaver_008", "OLE_SUMMARY_SUBJECT", ".*", "") AL_Cleaver_009 = Heuristic("AL_Cleaver_009", "OLE_SUMMARY_COMMENTS", ".*", "") AL_Cleaver_010 = Heuristic("AL_Cleaver_010", "OLE_SUMMARY_LASTSAVEDBY", ".*", "") AL_Cleaver_011 = Heuristic("AL_Cleaver_011", "OLE_SUMMARY_MANAGER", ".*", "") AL_Cleaver_012 = Heuristic("AL_Cleaver_012", "OLE_SUMMARY_COMPANY", ".*", "") AL_Cleaver_013 = Heuristic("AL_Cleaver_013", "Root[0] Does Not Exist", ".*", "") AL_Cleaver_014 = Heuristic("AL_Cleaver_014", "CLSID Not Null GUID", ".*", dedent("""\ For a root or storage class ID, checking to see if it isn't an NULL GUID GUID: 00000000-0000-0000-0000-000000000000 """)) AL_Cleaver_015 = Heuristic("AL_Cleaver_015", "OLE Creation Time", ".*", dedent("""\ Checking the creation time stamp against the standard 1601-01-01 00:00:00. If they don't match the time will be noted to the user. """)) AL_Cleaver_016 = Heuristic("AL_Cleaver_016", "OLE Lastmod Time", ".*", dedent("""\ Checking the lastmod time stamp against the standard 1601-01-01 00:00:00. If they don't match the time will be noted to the user. """)) AL_Cleaver_017 = Heuristic("AL_Cleaver_017", "CompObj", ".*", dedent("""\ Check if the name is CompObj and the type of the file is not stream type """)) AL_Cleaver_018 = Heuristic("AL_Cleaver_018", "Missing Field", ".*", dedent("""\ This is caused when an error is thrown when Hachoir lib could not get a field from the file. 
This file is either corrupted, patched or exploiting a vulnerability. """)) AL_Cleaver_019 = Heuristic("AL_Cleaver_019", "Cannot Find Property of Type", ".*", dedent("""\ This is caused when a parser error is thrown when Hachoir lib could not parse a property from the file. This file is either corrupted, patched or exploiting a vulnerability. """)) AL_Cleaver_020 = Heuristic("AL_Cleaver_020", "Overflowing Field", ".*", dedent("""\ This is caused when a parser error is thrown when Hachoir lib could not read a field from the file since it it overflowing. This file is either corrupted, patched or exploiting a vulnerability """)) AL_Cleaver_021 = Heuristic("AL_Cleaver_021", "Could not Access Field", ".*", dedent("""\ This is caused when a parser error is thrown when Hachoir lib could not access a field from the file. This file is either corrupted, patched or exploiting a vulnerability. """)) AL_Cleaver_022 = Heuristic("AL_Cleaver_022", "FAT Chain - Loop", ".*", dedent("""\ This is caused when a parser error is thrown when Hachoir lib found a loop when navigating through the file. It should be either BFAT or SFAT. This file is either corrupted, patched or exploiting a vulnerability. """)) AL_Cleaver_023 = Heuristic("AL_Cleaver_023", "SFAT Invalid Block Index", ".*", dedent("""\ This is caused when a parser error is thrown when Hachoir lib finds an invalid block index in the file. This file is either corrupted, patched or exploiting a vulnerability """)) AL_Cleaver_024 = Heuristic("AL_Cleaver_024", "OLE2: Invalid endian value", ".*", dedent("""\ The stream endian field is not valid. This file is either corrupted, patched or exploiting a vulnerability """)) AL_Cleaver_025 = Heuristic("AL_Cleaver_025", "Failure to Parse Whole File", ".*", dedent("""\ The Hachoir lib wasn't able to parse the whole file for some unknown reason. 
""")) SERVICE_CATEGORY = 'Static Analysis' SERVICE_DESCRIPTION = "This service extracts metadata from files, mostly OLE2 files," \ " using python's hachoir library." SERVICE_ENABLED = True SERVICE_REVISION = ServiceBase.parse_revision('$Id: 8dca8af77d35ec22146a535009fb07254adbc2f9 $') SERVICE_VERSION = '1' SERVICE_CPU_CORES = 0.25 SERVICE_RAM_MB = 128 def __init__(self, cfg=None): super(Cleaver, self).__init__(cfg) do_delayed_imports() self.additional_parsing_fields = {} self.ole2parser = None self.office_root_entry_parser = None self.children = {} self.parent = {} self.property_dict = {} self.current_file_res = None self.relative_path = "" self.filename = "" self.current_section = None self.current_codepage = None hachoir_log.use_buffer = True self.invalid_streams = [] self.invalid_properties_count = 0 self.bad_link_re = None def start(self): self.bad_link_re = re.compile("http[s]?://|powershell|cscript|wscript|mshta|<script") def get_parser(self, field_type): # from ol2 parser if field_type == 'Property': return self.parse_property elif field_type == 'CustomFragment': return self.parse_custom_fragment # from msoffice_summary parser elif field_type == 'SummaryFieldSet': return self.parse_summary_field_set elif field_type == 'SummarySection': return self.parse_summary_section elif field_type == 'PropertyContent': return self.parse_property_content elif field_type == 'CompObj': return self.parse_comp_obj elif field_type == 'SummaryParser': return self.parse_summary_field_set PARSING_MODE_CACHE = 0 PARSING_MODE_DISPLAY = 1 GUID_DESC = { "GUID v0 (0):
(Sequence[EvaluatableNode]) -> EvaluatableNode return Struct(self.type_, new_children) def _evaluate_node(self, values): # type: (List[TypedSeries]) -> TypedSeries value_types = [value.type_ for value in values] type_ = implicitly_coerce(self.type_, BQStructType([None] * len(values), value_types)) if self.type_: if not isinstance(type_, BQStructType): raise RuntimeError("STRUCT types coerced to non-STRUCT type {}".format(type_)) for i, (declared_type, coerced_type) in enumerate(zip(self.type_.types, type_.types)): if declared_type and declared_type != coerced_type: raise ValueError('Struct field {} has type {} which does not coerce to {}' .format(i + 1, # enumerate is 0-up, we want to report 1-up coerced_type, declared_type)) structs = list(zip(*[value.series for value in values])) return TypedSeries(pd.Series(structs, index=values[0].series.index), type_) @classmethod def create_from_typeless(cls, maybe_named_fields): # type: (Sequence[Tuple[EvaluatableNode, Union[_EmptyNode, str]]]) -> Struct '''Creates a Struct from the typeless grammar syntax STRUCT(value [AS name], ...) Args: maybe_named_fields: A list of pairs. The first element is a field's value, an expression (evaluatable node), the second is an optional name for the field. Returns: A Struct abstract syntax tree node. ''' children, maybe_names = list(zip(*maybe_named_fields)) maybe_names = tuple(maybe_name if not isinstance(maybe_name, _EmptyNode) else None for maybe_name in maybe_names) return Struct(BQStructType(maybe_names, [None] * len(maybe_named_fields)), children) @classmethod def create_from_typed(cls, type_, expressions): # type: (BQStructType, Sequence[EvaluatableNode]) -> Struct '''Creates a Struct from the typed grammar syntax STRUCT<[name] type, ...>(value, ...). Args: type_: A declared STRUCT type for this structure. expressions: The values of the fields of the structure. Returns: A Struct abstract syntax tree node. 
''' return Struct(type_, expressions) @classmethod def create_from_tuple(cls, first_expression, other_expressions): # type: (EvaluatableNode, Sequence[EvaluatableNode]) -> Struct '''Creates a Struct from the tuple grammar syntax (value, value, ...) Args: first_expression: The evaluatable node of the first field in the structure. other_expressions: The evaluatable nodes of the rest of the fields in the structure (if any). Returns: A Struct abstract syntax tree node. ''' return Struct(None, [first_expression] + list(other_expressions)) _SeriesMaybeGrouped = Union[pd.Series, pd.core.groupby.SeriesGroupBy] _FunctionType = Callable[[List[_SeriesMaybeGrouped]], _SeriesMaybeGrouped] _OverClauseType = Tuple[Union[_EmptyNode, Sequence[EvaluatableNode]], Union[_EmptyNode, List[Tuple[EvaluatableNode, Union[_EmptyNode, str]]]]] class _Function(object): '''Base class for functions.''' __metaclass__ = ABCMeta @classmethod def name(cls): # type: () -> str return cls.__name__.upper() # _result_type=None means that the result type will be the same as the argument types. e.g. # summing a column of floats gives a float, summing a column of ints gives an int. _result_type = None # type: Optional[BQType] def compute_result_type(self, argument_types): # type: (Sequence[BQType]) -> BQType '''Computes the type of the result of applying this function. Args: argument_types: The types of the evaluated arguments to this function. Returns: If specified, the result type for this function, otherwise returns a common type for all the arguments. ''' return self._result_type or implicitly_coerce(*argument_types) class _NonAggregatingFunction(_Function): '''Base class for regular functions (not aggregating).''' @abstractmethod def function(self, values): # type: (List[pd.Series]) -> pd.Series '''Computes a column from a list of argument columns. Args: values: A list of Pandas Serieses, i.e. columns of values to operate on. Returns: A single column of values as a Pandas Series. 
class Array_agg(_AggregatingFunction):
    '''An ARRAY_AGG function, aggregating a column of results into an ARRAY-valued cell.'''

    @classmethod
    def create_function_call(cls,
                             distinct,  # type: Union[_EmptyNode, str]
                             expression,  # type: EvaluatableNode
                             nulls,  # type: AbstractSyntaxTreeNode
                             order_by,  # type: AbstractSyntaxTreeNode
                             limit,  # type: AbstractSyntaxTreeNode
                             over_clause,  # type: _OverClauseType
                             ):
        # type: (...) -> EvaluatableNode
        '''Creates an ARRAY_AGG function call based on the inputs from the grammar.

        Args:
            distinct: 'DISTINCT' if the function should return only distinct rows,
                empty otherwise.
            expression: The column to aggregate.
            nulls: 'IGNORE' if NULLs should not be returned; 'RESPECT' or empty otherwise.
            order_by: A syntax subtree describing how to order the rows.  Not implemented.
            limit: A syntax subtree giving how many rows to return at most.  Not implemented.
            over_clause: The window to evaluate the function over as an analytic function.
        '''
        function = cls(distinct, nulls, order_by, limit)
        if isinstance(over_clause, _EmptyNode):
            return _AggregatingFunctionCall(function, [expression])
        return _AnalyticFunctionCall(function, [expression], over_clause)

    def __init__(self,
                 distinct,  # type: Union[_EmptyNode, str]
                 nulls,  # type: AbstractSyntaxTreeNode
                 order_by,  # type: AbstractSyntaxTreeNode
                 limit,  # type: AbstractSyntaxTreeNode
                 ):
        # type: (...) -> None
        # Normalize the DISTINCT modifier into a boolean flag.
        if distinct == 'DISTINCT':
            self.distinct = True
        else:
            if distinct is not EMPTY_NODE:
                raise ValueError("Invalid syntax: ARRAY_AGG({}...)".format(distinct))
            self.distinct = False
        # Normalize the IGNORE/RESPECT NULLS modifier into a boolean flag.
        if nulls == 'IGNORE':
            self.drop_nulls = True
        else:
            if nulls not in ('RESPECT', EMPTY_NODE):
                raise ValueError("Invalid Syntax: ARRAY_AGG(...{}...)".format(nulls))
            self.drop_nulls = False
        self.order_by = order_by
        self.limit = limit

    def compute_result_type(self, argument_types):
        # type: (Sequence[BQType]) -> BQType
        '''Returns an ARRAY type whose element type is the common argument type.'''
        element_type = implicitly_coerce(*argument_types)
        if not isinstance(element_type, BQScalarType):
            # TODO: support ARRAYs of structs.
            raise ValueError("ARRAYs are only supported of scalar types")
        return BQArray(element_type)

    def aggregating_function(self, values):
        # type: (List[pd.Series]) -> Tuple
        '''Collapses the single argument column into one tuple-valued cell.'''
        if len(values) != 1:
            raise ValueError("ARRAY_AGG takes exactly one argument, not {}"
                             .format(len(values)))
        column = values[0]
        if self.distinct:
            column = column.drop_duplicates()
        if self.drop_nulls:
            column = column.dropna()
        if self.order_by is not EMPTY_NODE:
            raise NotImplementedError("ARRAY_AGG(ORDER BY) is not implemented")
        if self.limit is not EMPTY_NODE:
            raise NotImplementedError("ARRAY_AGG(LIMIT) is not implemented")
        return tuple(column)
class Count(_AggregatingFunction):
    '''A COUNT function, for example: SELECT COUNT(*) FROM Table'''

    _result_type = BQScalarType.INTEGER

    @classmethod
    def create_count_function_call(cls,
                                   countee,  # type: _CounteeType
                                   over_clause  # type: _OverClauseType
                                   ):
        # type: (...) -> EvaluatableNode
        '''Creates a Count expression.

        COUNT has its own factory method, unlike other _Function subtypes,
        because its syntax is unique (COUNT(*), COUNT(DISTINCT expr)) and
        because the function carries state beyond its child nodes (whether to
        count only distinct rows).

        Args:
            countee: Either the single string '*' or a tuple of an optional
                'DISTINCT' modifier and the expression to count.
            over_clause: An optional OVER clause.
        Returns:
            An _AggregatingFunctionCall if the over clause is absent, otherwise
            an _AnalyticFunctionCall, either way wrapping a Count function.
        '''
        # COUNT(*) is equivalent to COUNT(1); normalize to the latter.
        if isinstance(countee, str):
            if countee != '*':
                raise ValueError("Invalid expression COUNT({})".format(countee))
            countee = (EMPTY_NODE, Value(1, BQScalarType.INTEGER))
        maybe_distinct, argument = countee
        if maybe_distinct == 'DISTINCT':
            distinct = True
        elif maybe_distinct == EMPTY_NODE:
            distinct = False
        else:
            raise NotImplementedError("Non-DISTINCT modifiers for COUNT are not implemented:"
                                      " {}".format(maybe_distinct))
        if isinstance(over_clause, _EmptyNode):
            return _AggregatingFunctionCall(cls(distinct), [argument])
        return _AnalyticFunctionCall(cls(distinct), [argument], over_clause)

    def __init__(self, distinct):
        # type: (bool) -> None
        self.distinct = distinct

    def aggregating_function(self, values):
        # type: (List[pd.Series]) -> int
        column, = values
        column = column.dropna()  # COUNT counts non-NULL rows
        if self.distinct:
            return len(set(column))
        return len(column)
class Sum(_AggregatingFunction):
    '''The sum of a column.'''

    def aggregating_function(self, values):
        # type: (List[pd.Series]) -> LiteralType
        column, = values
        return column.sum()


class Max(_AggregatingFunction):
    '''The maximum value of a column.'''

    def aggregating_function(self, values):
        # type: (List[pd.Series]) -> LiteralType
        column, = values
        return column.max()


class Min(_AggregatingFunction):
    '''The minimum value of a column.'''

    def aggregating_function(self, values):
        # type: (List[pd.Series]) -> LiteralType
        column, = values
        return column.min()


class Concat(_NonAggregatingFunction):
    '''The concatenation of a series of strings.'''

    _result_type = BQScalarType.STRING

    def function(self, values):
        # type: (List[pd.Series]) -> pd.Series
        # Element-wise string concatenation across all argument columns.
        return reduce(operator.add, values)


class Timestamp(_NonAggregatingFunction):
    '''The conversion of a column of values to timestamps.'''

    _result_type = BQScalarType.TIMESTAMP

    def function(self, values):
        # type: (List[pd.Series]) -> pd.Series
        return values[0].apply(pd.Timestamp)


class Current_Timestamp(_NonAggregatingFunction):
    '''The current time.'''

    _result_type = BQScalarType.TIMESTAMP

    def function(self, values):
        # No-argument functions are given a constant argument in order to
        # determine the number of rows to return, and with what index.
        constant, = values
        return pd.Series([pd.Timestamp.now()] * len(constant), index=constant.index)
class Client:
    """Client class to interact with the Azure SQL Management service API.

    Wraps a MicrosoftClient configured for the device-code OAuth flow and
    exposes thin helpers for the server/database/policy endpoints used by the
    integration commands.
    """

    @logger
    def __init__(self, app_id, subscription_id, resource_group_name, verify, proxy,
                 azure_ad_endpoint='https://login.microsoftonline.com'):
        self.resource_group_name = resource_group_name
        # An app_id of the form "<id>@<refresh token>" carries a refresh token
        # that must be stored in the integration context before authenticating.
        if '@' in app_id:
            app_id, refresh_token = app_id.split('@')
            integration_context = get_integration_context()
            integration_context.update(current_refresh_token=refresh_token)
            set_integration_context(integration_context)
        base_url = f'https://management.azure.com/subscriptions/{subscription_id}'
        client_args = {
            'self_deployed': True,  # We always set the self_deployed key as True because when not using a self
                                    # deployed machine, the DEVICE_CODE flow should behave somewhat like a self
                                    # deployed flow and most of the same arguments should be set, as we're !not!
                                    # using OProxy.
            'auth_id': app_id,
            'token_retrieval_url': 'https://login.microsoftonline.com/organizations/oauth2/v2.0/token',
            'grant_type': DEVICE_CODE,  # disable-secrets-detection
            'base_url': base_url,
            'verify': verify,
            'proxy': proxy,
            'resource': 'https://management.core.windows.net',  # disable-secrets-detection
            'scope': 'https://management.azure.com/user_impersonation offline_access user.read',
            'ok_codes': (200, 201, 202, 204),
            'azure_ad_endpoint': azure_ad_endpoint
        }
        self.ms_client = MicrosoftClient(**client_args)

    @logger
    def http_request(self, method: str, url_suffix: str = None, full_url: str = None,
                     params: dict = None, data: dict = None,
                     resp_type: str = 'json') -> requests.Response:
        """Performs an API request, adding the api-version query parameter
        unless a full URL was supplied.

        NOTE: the default for ``params`` was previously a mutable ``{}``; it was
        mutated below, so 'api-version' leaked into the shared default object
        across calls. ``None`` + per-call dict avoids that.
        """
        params = params if params is not None else {}
        if not full_url:
            params['api-version'] = API_VERSION
        return self.ms_client.http_request(method=method, url_suffix=url_suffix, full_url=full_url,
                                           json_data=data, params=params, resp_type=resp_type)

    @logger
    def azure_sql_servers_list(self):
        """Lists all SQL servers in the subscription."""
        return self.http_request('GET', '/providers/Microsoft.Sql/servers')

    @logger
    def azure_sql_db_list(self, server_name: str):
        """Lists all databases of the given server."""
        return self.http_request('GET', f'resourceGroups/{self.resource_group_name}/providers/Microsoft.Sql/servers/'
                                        f'{server_name}/databases')

    @logger
    def azure_sql_db_audit_policy_list(self, server_name: str, db_name: str):
        """Lists the auditing settings of the given database."""
        return self.http_request('GET', f'resourceGroups/{self.resource_group_name}/providers/Microsoft.Sql/servers/'
                                        f'{server_name}/databases/{db_name}/auditingSettings')

    @logger
    def azure_sql_db_threat_policy_get(self, server_name: str, db_name: str):
        """Gets the default threat detection policy of the given database."""
        return self.http_request('GET', f'resourceGroups/{self.resource_group_name}/providers/Microsoft.Sql/servers/'
                                        f'{server_name}/databases/{db_name}/securityAlertPolicies/default')

    @logger
    def azure_sql_db_audit_policy_create_update(self, server_name: str, db_name: str, state: str,
                                                audit_actions_groups: List[str],
                                                is_azure_monitor_target_enabled: bool,
                                                is_storage_secondary_key_in_use: bool,
                                                queue_delay_ms: str, retention_days: str,
                                                storage_account_access_key: str,
                                                storage_account_subscription_id: str,
                                                storage_endpoint: str):
        """Creates or updates the default auditing settings of the database.

        Empty arguments are dropped by assign_params so only supplied
        properties are sent to the API.
        """
        properties = assign_params(state=state,
                                   auditActionsAndGroups=audit_actions_groups,
                                   isAzureMonitorTargetEnabled=is_azure_monitor_target_enabled,
                                   isStorageSecondaryKeyInUse=is_storage_secondary_key_in_use,
                                   queueDelayMs=queue_delay_ms,
                                   retentionDays=retention_days,
                                   storageAccountAccessKey=storage_account_access_key,
                                   storageAccountSubscriptionId=storage_account_subscription_id,
                                   storageEndpoint=storage_endpoint)
        request_body = {'properties': properties} if properties else {}
        return self.http_request(method='PUT',
                                 url_suffix=f'resourceGroups/{self.resource_group_name}/providers'
                                            f'/Microsoft.Sql/servers/{server_name}/databases/'
                                            f'{db_name}/auditingSettings/default',
                                 data=request_body)

    def azure_sql_db_threat_policy_create_update(self, server_name: str, db_name: str, state: str,
                                                 disabled_alerts: List[str],
                                                 email_account_admins: str,
                                                 email_addresses: List[str],
                                                 retention_days: str,
                                                 storage_account_access_key: str,
                                                 use_server_default: str,
                                                 storage_endpoint: str):
        """Creates or updates the default threat detection policy of the database."""
        properties = assign_params(state=state,
                                   retentionDays=retention_days,
                                   storageAccountAccessKey=storage_account_access_key,
                                   storageEndpoint=storage_endpoint,
                                   disabledAlerts=disabled_alerts,
                                   emailAccountAdmins=email_account_admins,
                                   emailAddresses=email_addresses,
                                   useServerDefault=use_server_default)
        request_body = {'properties': properties} if properties else {}
        return self.http_request(method='PUT',
                                 url_suffix=f'resourceGroups/{self.resource_group_name}/providers'
                                            f'/Microsoft.Sql/servers/{server_name}/databases/'
                                            f'{db_name}/securityAlertPolicies/default',
                                 data=request_body)
@logger
def azure_sql_servers_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """azure-sql-servers-list command: returns the list of servers.

    Args:
        client: AzureSQLManagement Client to use.
        args: Command arguments; supports 'limit' (max servers shown in the
            War Room, default 50) and 'offset' (into the data set, default 0).

    Returns:
        A ``CommandResults`` object that is then passed to ``return_results``,
        containing the (paged) list of servers.
    """
    offset = int(args.get('offset', '0'))
    limit = int(args.get('limit', '50'))
    raw_response = client.azure_sql_servers_list()
    servers = copy.deepcopy(raw_response.get('value', '')[offset:offset + limit])
    # Flatten the nested 'properties' dict into each server entry.
    for server in servers:
        if properties := server.get('properties', {}):
            server.update(properties)
            del server['properties']
    readable = tableToMarkdown(name='Servers List', t=servers, headerTransform=pascalToSpace,
                               removeNull=True)
    return CommandResults(
        readable_output=readable,
        outputs_prefix='AzureSQL.Server',
        outputs_key_field='id',
        outputs=servers,
        raw_response=raw_response
    )
@logger
def azure_sql_db_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """azure-sql-db-list command returns a list of all databases for a server.

    Args:
        client: AzureSQLManagement Client to use.
        args: Command arguments:
            server_name: server name for which we want to receive the list of databases.
            limit: The maximum number of databases returned to the War Room. Default is 50.
            offset: Offset in the data set. Default is 0.

    Returns:
        A ``CommandResults`` object that is then passed to ``return_results``,
        that contains a list of all databases for the server.
    """
    offset_int = int(args.get('offset', '0'))
    limit_int = int(args.get('limit', '50'))
    database_list_raw = client.azure_sql_db_list(args.get('server_name'))
    database_list_fixed = copy.deepcopy(database_list_raw.get('value', '')[offset_int:(offset_int + limit_int)])
    # Flatten 'properties' into each entry — walrus form for consistency with
    # the other list commands in this integration.
    for db in database_list_fixed:
        if properties := db.get('properties', {}):
            db.update(properties)
            del db['properties']
    human_readable = tableToMarkdown(name='Database List', t=database_list_fixed,
                                     headers=['name', 'location', 'status', 'managedBy'],
                                     headerTransform=pascalToSpace, removeNull=True)
    return CommandResults(
        readable_output=human_readable,
        outputs_prefix='AzureSQL.DB',
        outputs_key_field='id',
        outputs=database_list_fixed,
        raw_response=database_list_raw
    )
@logger
def azure_sql_db_audit_policy_list_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """azure_sql_db_audit_policy_list command returns the auditing settings of a database.

    Args:
        client: AzureSQLManagement Client to use.
        args: Command arguments:
            server_name: server name for which we want the auditing settings.
            db_name: database for which we want the auditing settings.
            limit: The maximum number of audit policies returned to the War Room. Default is 50.
            offset: Offset in the data set. Default is 0.

    Returns:
        A ``CommandResults`` object that is then passed to ``return_results``,
        containing the database's auditing settings.
    """
    server_name = args.get('server_name')
    db_name = args.get('db_name')
    offset = int(args.get('offset', '0'))
    limit = int(args.get('limit', '50'))
    raw_response = client.azure_sql_db_audit_policy_list(server_name, db_name)
    policies = copy.deepcopy(raw_response.get('value', '')[offset:offset + limit])
    # Tag each policy with its server/database and flatten 'properties'.
    for policy in policies:
        policy['serverName'] = server_name
        policy['databaseName'] = db_name
        if properties := policy.get('properties', {}):
            policy.update(properties)
            del policy['properties']
    readable = tableToMarkdown(name='Database Audit Settings', t=policies,
                               headerTransform=pascalToSpace, removeNull=True)
    return CommandResults(
        readable_output=readable,
        outputs_prefix='AzureSQL.DBAuditPolicy',
        outputs_key_field='id',
        outputs=policies,
        raw_response=raw_response
    )
@logger
def azure_sql_db_audit_policy_create_update_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """azure_sql_db_audit_policy_create_update command creates or updates the
    audit policy of a database.

    Args:
        client: AzureSQLManagement Client to use.
        args: Command arguments:
            server_name / db_name: target server and database.
            state: state of the policy.
            audit_actions_groups: Comma-separated Actions-Groups and Actions to audit.
            is_azure_monitor_target_enabled: whether audit events are sent to Azure Monitor.
            is_storage_secondary_key_in_use: whether storage_account_access_key is the
                storage's secondary key.
            queue_delay_ms: Time in milliseconds before audit actions are forced
                to be processed.
            retention_days: Number of days to keep the policy in the audit logs.
            storage_account_access_key: identifier key of the auditing storage account.
            storage_account_subscription_id: storage subscription Id.
            storage_endpoint: Storage endpoint.

    Returns:
        A ``CommandResults`` object that is then passed to ``return_results``,
        containing the updated audit policy.
    """
    server_name = args.get('server_name')
    db_name = args.get('db_name')
    raw_response = client.azure_sql_db_audit_policy_create_update(
        server_name=server_name,
        db_name=db_name,
        state=args.get('state'),
        audit_actions_groups=argToList(args.get('audit_actions_groups', '')),
        is_azure_monitor_target_enabled=args.get('is_azure_monitor_target_enabled', ''),
        is_storage_secondary_key_in_use=args.get('is_storage_secondary_key_in_use', ''),
        queue_delay_ms=args.get('queue_delay_ms', ''),
        retention_days=args.get('retention_days', ''),
        storage_account_access_key=args.get('storage_account_access_key', ''),
        storage_account_subscription_id=args.get('storage_account_subscription_id', ''),
        storage_endpoint=args.get('storage_endpoint', ''))
    fixed_response = copy.deepcopy(raw_response)
    # Tag with server/database and flatten 'properties' for the outputs.
    if properties := fixed_response.get('properties', {}):
        fixed_response['serverName'] = server_name
        fixed_response['databaseName'] = db_name
        fixed_response.update(properties)
        del fixed_response['properties']
    human_readable = tableToMarkdown(name='Create Or Update Database Auditing Settings', t=fixed_response,
                                     headerTransform=pascalToSpace, removeNull=True)
    return CommandResults(
        readable_output=human_readable,
        outputs_prefix='AzureSQL.DBAuditPolicy',
        outputs_key_field='id',
        outputs=fixed_response,
        raw_response=raw_response
    )
@logger
def azure_sql_db_threat_policy_get_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """azure_sql_db_threat_policy_get command returns the threat detection policy of a database.

    Args:
        client: AzureSQLManagement Client to use.
        args: Command arguments:
            server_name: server name for which we want the threat detection policy.
            db_name: database for which we want the threat detection policy.

    Returns:
        A ``CommandResults`` object that is then passed to ``return_results``,
        containing the database's threat detection policy.
    """
    server_name = args.get('server_name')
    db_name = args.get('db_name')
    raw_response = client.azure_sql_db_threat_policy_get(server_name, db_name)
    policy = copy.deepcopy(raw_response)
    # Tag with server/database and flatten 'properties' for the outputs.
    if properties := policy.get('properties', {}):
        policy['serverName'] = server_name
        policy['databaseName'] = db_name
        policy.update(properties)
        del policy['properties']
    readable = tableToMarkdown(name='Database Threat Detection Policies', t=policy,
                               headerTransform=pascalToSpace, removeNull=True)
    return CommandResults(
        readable_output=readable,
        outputs_prefix='AzureSQL.DBThreatPolicy',
        outputs_key_field='id',
        outputs=policy,
        raw_response=raw_response
    )
@logger
def azure_sql_db_threat_policy_create_update_command(client: Client, args: Dict[str, str]) -> CommandResults:
    """azure_sql_db_threat_policy_create_update command updates or creates the
    threat detection policy of a database.

    Args:
        client: AzureSQLManagement Client to use.
        args: Command arguments:
            server_name / db_name: target server and database.
            state: state of the policy.
            disabled_alerts: Comma-separated list of alerts that are disabled,
                or "None" to disable no alerts.
            email_account_admins: The alert is sent to the account administrators.
            email_addresses: Comma-separated list of e-mail addresses to which
                the alert is sent.
            retention_days: Number of days to keep the policy in the audit logs.
            storage_account_access_key: identifier key of the auditing storage account.
            use_server_default: Whether to use the default server policy or not.
            storage_endpoint: Storage endpoint.

    Returns:
        A ``CommandResults`` object that is then passed to ``return_results``,
        containing the updated threat policy.
    """
    server_name = args['server_name']
    db_name = args['db_name']
    state = args['state']
    retention_days = args.get('retention_days', '')
    email_account_admins = args.get('email_account_admins', '')
    email_addresses = argToList(args.get('email_addresses', ''))
    storage_account_access_key = args.get('storage_account_access_key', '')
    use_server_default = args.get('use_server_default', '')
    storage_endpoint = args.get('storage_endpoint', '')
    # Parse once (the original parsed the argument twice); 'None' means
    # "disable no alerts", which the API expects as [""].
    disabled_alerts = argToList(args.get('disabled_alerts', ''))
    if 'None' in disabled_alerts:
        disabled_alerts = [""]

    raw_response = client.azure_sql_db_threat_policy_create_update(
        server_name=server_name,
        db_name=db_name,
        state=state,
        retention_days=retention_days,
        disabled_alerts=disabled_alerts,
        email_account_admins=email_account_admins,
        email_addresses=email_addresses,
        storage_account_access_key=storage_account_access_key,
        use_server_default=use_server_default,
        storage_endpoint=storage_endpoint)
    fixed_response = copy.deepcopy(raw_response)
    # Tag with server/database and flatten 'properties' for the outputs.
    if properties := fixed_response.get('properties', {}):
        fixed_response['serverName'] = server_name
        fixed_response['databaseName'] = db_name
        fixed_response.update(properties)
        del fixed_response['properties']
    human_readable = tableToMarkdown(name='Create Or Update Database Threat Detection Policies',
                                     t=fixed_response, headerTransform=pascalToSpace, removeNull=True)
    return CommandResults(
        readable_output=human_readable,
        outputs_prefix='AzureSQL.DBThreatPolicy',
        outputs_key_field='id',
        outputs=fixed_response,
        raw_response=raw_response
    )
#-----------------------------------------------------------------------------
# Test Class for update_account_access_rule
#-----------------------------------------------------------------------------
class TestUpdateAccountAccessRule():
    """Unit tests for the update_account_access_rule operation."""

    # Canned API response shared by every test in this class.
    _mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": {"id": "92f17202ed8bd63d69a66b86a49a8f6b", "notes": "This rule is set because of an event that occurred and caused X.", "allowed_modes": ["block"], "mode": "block", "scope": {"type": "account"}, "created_on": "2019-01-01T12:00:00", "modified_on": "2019-01-01T12:00:00", "configuration": {"target": "ip", "value": "ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ"}}}'

    # Preprocess the request URL to ensure the mock response will be found.
    def preprocess_url(self, request_url: str):
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        # Trailing slashes: match via a regex tolerant of repeated '/'.
        return re.compile(request_url.rstrip('/') + '/+')

    #--------------------------------------------------------
    # update_account_access_rule()
    #--------------------------------------------------------
    @responses.activate
    def test_update_account_access_rule_all_params(self):
        # Set up mock
        url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules/testString')
        responses.add(responses.PATCH,
                      url,
                      body=self._mock_response,
                      content_type='application/json',
                      status=200)

        # Set up parameter values
        accessrule_identifier = 'testString'
        mode = 'block'
        notes = 'This rule is added because of event X that occurred on date xyz'

        # Invoke method
        response = service.update_account_access_rule(
            accessrule_identifier,
            mode=mode,
            notes=notes,
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200
        # Validate body params
        req_body = json.loads(str(responses.calls[0].request.body, 'utf-8'))
        assert req_body['mode'] == 'block'
        assert req_body['notes'] == 'This rule is added because of event X that occurred on date xyz'

    #--------------------------------------------------------
    # test_update_account_access_rule_required_params()
    #--------------------------------------------------------
    @responses.activate
    def test_update_account_access_rule_required_params(self):
        # Set up mock
        url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules/testString')
        responses.add(responses.PATCH,
                      url,
                      body=self._mock_response,
                      content_type='application/json',
                      status=200)

        # Invoke method with only the required parameter
        response = service.update_account_access_rule(
            'testString',
            headers={}
        )

        # Check for correct operation
        assert len(responses.calls) == 1
        assert response.status_code == 200

    #--------------------------------------------------------
    # test_update_account_access_rule_value_error()
    #--------------------------------------------------------
    @responses.activate
    def test_update_account_access_rule_value_error(self):
        # Set up mock
        url = self.preprocess_url(base_url + '/v1/testString/firewall/access_rules/rules/testString')
        responses.add(responses.PATCH,
                      url,
                      body=self._mock_response,
                      content_type='application/json',
                      status=200)

        # Pass in all but one required param and check for a ValueError
        required_params = {
            "accessrule_identifier": 'testString',
        }
        for missing in required_params:
            call_args = {name: (None if name == missing else value)
                         for name, value in required_params.items()}
            with pytest.raises(ValueError):
                service.update_account_access_rule(**call_args)
#-----------------------------------------------------------------------------
# Test Class for AccountAccessRuleObjectConfiguration
#-----------------------------------------------------------------------------
class TestAccountAccessRuleObjectConfiguration():
    """Serialization round-trip tests for AccountAccessRuleObjectConfiguration."""

    def test_account_access_rule_object_configuration_serialization(self):
        # JSON representation of an AccountAccessRuleObjectConfiguration model.
        model_json = {}
        model_json['target'] = 'ip'
        model_json['value'] = 'ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ'

        # from_dict() must yield a real model instance.
        model = AccountAccessRuleObjectConfiguration.from_dict(model_json)
        assert model != False

        # Rebuilding from the instance's attributes must give an equal model.
        model_dict = AccountAccessRuleObjectConfiguration.from_dict(model_json).__dict__
        model2 = AccountAccessRuleObjectConfiguration(**model_dict)
        assert model == model2

        # to_dict() must round-trip back to the original JSON with no data loss.
        model_json2 = model.to_dict()
        assert model_json2 == model_json
#-----------------------------------------------------------------------------
# Test Class for AccountAccessRuleObjectScope
#-----------------------------------------------------------------------------
class TestAccountAccessRuleObjectScope():
    """Serialization round-trip tests for AccountAccessRuleObjectScope."""

    def test_account_access_rule_object_scope_serialization(self):
        # JSON representation of an AccountAccessRuleObjectScope model.
        model_json = {}
        model_json['type'] = 'account'

        # from_dict() must yield a real model instance.
        model = AccountAccessRuleObjectScope.from_dict(model_json)
        assert model != False

        # Rebuilding from the instance's attributes must give an equal model.
        model_dict = AccountAccessRuleObjectScope.from_dict(model_json).__dict__
        model2 = AccountAccessRuleObjectScope(**model_dict)
        assert model == model2

        # to_dict() must round-trip back to the original JSON with no data loss.
        model_json2 = model.to_dict()
        assert model_json2 == model_json
account_access_rule_object_scope_model_dict = AccountAccessRuleObjectScope.from_dict(account_access_rule_object_scope_model_json).__dict__ account_access_rule_object_scope_model2 = AccountAccessRuleObjectScope(**account_access_rule_object_scope_model_dict) # Verify the model instances are equivalent assert account_access_rule_object_scope_model == account_access_rule_object_scope_model2 # Convert model instance back to dict and verify no loss of data account_access_rule_object_scope_model_json2 = account_access_rule_object_scope_model.to_dict() assert account_access_rule_object_scope_model_json2 == account_access_rule_object_scope_model_json #----------------------------------------------------------------------------- # Test Class for DeleteAccountAccessRuleRespResult #----------------------------------------------------------------------------- class TestDeleteAccountAccessRuleRespResult(): #-------------------------------------------------------- # Test serialization/deserialization for DeleteAccountAccessRuleRespResult #-------------------------------------------------------- def test_delete_account_access_rule_resp_result_serialization(self): # Construct a json representation of a DeleteAccountAccessRuleRespResult model delete_account_access_rule_resp_result_model_json = {} delete_account_access_rule_resp_result_model_json['id'] = 'f1aba936b94213e5b8dca0c0dbf1f9cc' # Construct a model instance of DeleteAccountAccessRuleRespResult by calling from_dict on the json representation delete_account_access_rule_resp_result_model = DeleteAccountAccessRuleRespResult.from_dict(delete_account_access_rule_resp_result_model_json) assert delete_account_access_rule_resp_result_model != False # Construct a model instance of DeleteAccountAccessRuleRespResult by calling from_dict on the json representation delete_account_access_rule_resp_result_model_dict = DeleteAccountAccessRuleRespResult.from_dict(delete_account_access_rule_resp_result_model_json).__dict__ 
delete_account_access_rule_resp_result_model2 = DeleteAccountAccessRuleRespResult(**delete_account_access_rule_resp_result_model_dict) # Verify the model instances are equivalent assert delete_account_access_rule_resp_result_model == delete_account_access_rule_resp_result_model2 # Convert model instance back to dict and verify no loss of data delete_account_access_rule_resp_result_model_json2 = delete_account_access_rule_resp_result_model.to_dict() assert delete_account_access_rule_resp_result_model_json2 == delete_account_access_rule_resp_result_model_json #----------------------------------------------------------------------------- # Test Class for ListAccountAccessRulesRespResultInfo #----------------------------------------------------------------------------- class TestListAccountAccessRulesRespResultInfo(): #-------------------------------------------------------- # Test serialization/deserialization for ListAccountAccessRulesRespResultInfo #-------------------------------------------------------- def test_list_account_access_rules_resp_result_info_serialization(self): # Construct a json representation of a ListAccountAccessRulesRespResultInfo model list_account_access_rules_resp_result_info_model_json = {} list_account_access_rules_resp_result_info_model_json['page'] = 1 list_account_access_rules_resp_result_info_model_json['per_page'] = 2 list_account_access_rules_resp_result_info_model_json['count'] = 1 list_account_access_rules_resp_result_info_model_json['total_count'] = 200 # Construct a model instance of ListAccountAccessRulesRespResultInfo by calling from_dict on the json representation list_account_access_rules_resp_result_info_model = ListAccountAccessRulesRespResultInfo.from_dict(list_account_access_rules_resp_result_info_model_json) assert list_account_access_rules_resp_result_info_model != False # Construct a model instance of ListAccountAccessRulesRespResultInfo by calling from_dict on the json representation 
list_account_access_rules_resp_result_info_model_dict = ListAccountAccessRulesRespResultInfo.from_dict(list_account_access_rules_resp_result_info_model_json).__dict__ list_account_access_rules_resp_result_info_model2 = ListAccountAccessRulesRespResultInfo(**list_account_access_rules_resp_result_info_model_dict) # Verify the model instances are equivalent assert list_account_access_rules_resp_result_info_model == list_account_access_rules_resp_result_info_model2 # Convert model instance back to dict and verify no loss of data list_account_access_rules_resp_result_info_model_json2 = list_account_access_rules_resp_result_info_model.to_dict() assert list_account_access_rules_resp_result_info_model_json2 == list_account_access_rules_resp_result_info_model_json #----------------------------------------------------------------------------- # Test Class for AccountAccessRuleObject #----------------------------------------------------------------------------- class TestAccountAccessRuleObject(): #-------------------------------------------------------- # Test serialization/deserialization for AccountAccessRuleObject #-------------------------------------------------------- def test_account_access_rule_object_serialization(self): # Construct dict forms of any model objects needed in order to build this model. 
account_access_rule_object_configuration_model = {} # AccountAccessRuleObjectConfiguration account_access_rule_object_configuration_model['target'] = 'ip' account_access_rule_object_configuration_model['value'] = 'ip example 198.51.100.4; ip_range example 198.51.100.4/16 ; asn example AS12345; country example AZ' account_access_rule_object_scope_model = {} # AccountAccessRuleObjectScope account_access_rule_object_scope_model['type'] = 'account' # Construct a json representation of a AccountAccessRuleObject model account_access_rule_object_model_json = {} account_access_rule_object_model_json['id'] = '92f17202ed8bd63d69a66b86a49a8f6b' account_access_rule_object_model_json['notes'] = 'This rule is set because of an event that occurred and caused X.' account_access_rule_object_model_json['allowed_modes'] = ['block'] account_access_rule_object_model_json['mode'] = 'block' account_access_rule_object_model_json['scope'] = account_access_rule_object_scope_model account_access_rule_object_model_json['created_on'] = '2020-01-28T18:40:40.123456Z' account_access_rule_object_model_json['modified_on'] = '2020-01-28T18:40:40.123456Z' account_access_rule_object_model_json['configuration'] = account_access_rule_object_configuration_model # Construct a model instance of AccountAccessRuleObject by calling from_dict on the json representation account_access_rule_object_model = AccountAccessRuleObject.from_dict(account_access_rule_object_model_json) assert account_access_rule_object_model != False # Construct a model instance of AccountAccessRuleObject by calling from_dict on the json representation account_access_rule_object_model_dict = AccountAccessRuleObject.from_dict(account_access_rule_object_model_json).__dict__ account_access_rule_object_model2 = AccountAccessRuleObject(**account_access_rule_object_model_dict) # Verify the model instances are equivalent assert account_access_rule_object_model == account_access_rule_object_model2 # Convert model instance back to dict
<filename>scripts/functions.py import numpy as np import pandas as pd import matplotlib.pyplot as plt from tqdm import tqdm from statsmodels.graphics.tsaplots import plot_acf, plot_pacf from statsmodels.tsa.arima_model import ARIMA import statsmodels.api as sm from sklearn.neighbors import KNeighborsRegressor from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.model_selection import ( TimeSeriesSplit, cross_validate, cross_val_score, GridSearchCV, RandomizedSearchCV, ) from sklearn.metrics import mean_squared_error from sklearn.preprocessing import MinMaxScaler from lightgbm import LGBMRegressor from keras.models import Sequential from keras.layers import Dense from keras.layers import LSTM import tensorflow as tf from borrowed_functions import lag_df, ts_predict, plot_ts, hms_string SEED = 0 def split_data(data, num=30): """ Splits data into train, valid, test datasets Parameters: ----------- data: pandas.DataFrame the data to split num: int (default: 30) the number of observations in valid/test Return: -------- four pandas.DataFrame train, valid, test, train_valid datasets """ train_valid, test = ( data.iloc[: - num], data.iloc[- num:], ) train, valid = ( train_valid.iloc[: - num], train_valid.iloc[- num:] ) return train, valid, test, train_valid # class Splitter(): # def __init__(self, n_splits=5, n=30): # self.n_splits = n_splits # self.n = n # def split(self, data, y=None, groups=None): # splits = range(data.shape[0]) # splits = np.array_split(np.array(splits), self.n_splits) # splits = [(l[:- self.n], l[- self.n :]) for l in splits] # for X, y in splits: # yield X, y # def get_n_splits(self, X=None, y=None, groups=None): # return self.n_splits def plot_forecast(train, obs, fc_series, title, lower_series=None, upper_series=None): """ Plots forecast series and saves the plot Parameters: ----------- train: pandas.DataFrame the training data obs: pandas.DataFrame the observations to predict fc_series: pandas.Series the 
predicted series title: str the plot title lower_series: pandas.Series (default: None) the lower 95% confidential bound for predicted series upper_series: pandas.Series (default: None) the upper 95% confidential bound for predicted series """ # Plot plt.plot(train.close, label="training") plt.plot(obs.close, label="observation") plt.plot(fc_series, label="prediction") if lower_series is not None: plt.fill_between( lower_series.index, lower_series, upper_series, color="k", alpha=0.15 ) plt.title(f"{title} Observation vs Prediction") plt.xlabel('Exchange Rate') plt.ylabel('Day') plt.legend(loc="upper right", fontsize=16) plt.savefig(f'../results/{title.lower().replace(", ", "_").replace(" (", "_").replace("(", "_").replace(")", "").replace(" ", "_")}.png', bbox_inches='tight') plt.show() ############################################################################### ####################### Functions for ARIMA models ############################ ############################################################################### def arima_predict(model_fit, num, index, auto=False): """ Returns predictions from a ARIMA model Parameters: ----------- model_fit: ARIMA model the fitted ARIMA model num: int the number of observations to predict index: np.ndarray the index of predicted series auto: bool (default: False) whether the model is auto trained Return: -------- three pandas.Series prediction series, the lower 95% confidential bound for predicted series, the upper 95% confidential bound for predicted series """ if not auto: # Forecast with 95% conf fc, se, conf = model_fit.forecast(num, alpha=0.05) else: model_fit.plot_diagnostics(figsize=(10, 10)) plt.show() # Forecast try: fc, conf = model_fit.predict(n_periods=int(num), return_conf_int=True) except: fc = model_fit.predict(index[0], index[-1]) # Make as pandas series fc_series = pd.Series(fc, index=index) try: lower_series = pd.Series(conf[:, 0], index=index) upper_series = pd.Series(conf[:, 1], index=index) except: 
lower_series = None upper_series = None return fc_series, lower_series, upper_series def evaluate_model(pred, obs, index): """ Returns evaluation scores of MAPE, RMSE and Min-Max Error Parameters: ----------- pred: pandas.Series the predicted series obs: pandas.Series the observation series index: str the name of the model Return: -------- pandas.DataFrame evaluation scores """ scores = {} # Mean Absolute Percentage Error scores["MAPE"] = [np.mean(np.abs(pred - obs) / np.abs(obs))] # Root Mean Squared Error scores["RMSE"] = [np.mean((pred - obs) ** 2) ** 0.5] mins = np.amin(np.hstack([pred[:, None], obs[:, None]]), axis=1) maxs = np.amax(np.hstack([pred[:, None], obs[:, None]]), axis=1) scores["Min-Max Error"] = [1 - np.mean(mins / maxs)] return pd.DataFrame(scores, index=[index]) def plot_trans_train(trans_train, trans): """ Plot transformed data Parameters: ----------- trans_train: pandas.DataFrame the transformed data trans: str the transformation method """ plt.figure(figsize=(12, 5), dpi=100) plt.plot(trans_train.close, label="transformed train") plt.title(f"Train after differencing and {trans} transformation") plt.legend(loc="upper left", fontsize=8) plt.show() ############################################################################### ############## Functions for classic supervised learning models ############### ############################################################################### def cross_validation(key, n_splits, response_col, trans_train, param_grid): """ Returns the best model using cross-validation Parameters: ----------- key: str the model name n_splits: int the number of data splits response_col: list the name of the response column trans_train: pandas.DataFrame the transformed data param_grid: dict the hyperparameters and corresponding values to try Return: -------- machine learning model, int the trained best regressor, the number of lag used in the model """ regressor = { "RandomForestRegressor": 
RandomForestRegressor(random_state=SEED), "KNeighborsRegressor": KNeighborsRegressor(), "GradientBoostingRegressor": GradientBoostingRegressor(random_state=SEED), "LGBMRegressor": LGBMRegressor(random_state=SEED), } model = regressor[key] # tscv = Splitter(n_splits=n_splits) tscv = TimeSeriesSplit(n_splits=n_splits) l = [] cv_mean = [] cv_std = [] for lag in range(1, 21): df_lag = lag_df(trans_train, lag, response_col).dropna() cv_score = cross_validate( model, df_lag.drop(columns=response_col), df_lag[response_col[0]], cv=tscv, scoring="neg_root_mean_squared_error", ) l.append(lag) cv_mean.append(round(cv_score["test_score"].mean(), 3)) cv_std.append(round(cv_score["test_score"].std(), 3)) results = ( pd.DataFrame({"lag": l, "cv_mean": cv_mean, "cv_std": cv_std}) .set_index("lag") .sort_values(by="cv_mean", ascending=False) .head(5) ) print(results) lag = results.index[0] df_lag = lag_df(trans_train, lag, response_col).dropna() model = GridSearchCV( model, param_grid, scoring="neg_root_mean_squared_error", cv=tscv ) model.fit(df_lag.drop(columns=response_col), df_lag[response_col[0]]) print(f"The best hyperparameters when lag = {lag}:\n{model.best_params_}") return model, lag def regressor_predict( model, train_trans, lag, response_col, index, start, log=True, lamb=None ): """ Returns predictions from a regressor Parameters: ----------- model: machine learning model the regressor for prediction trans_train: pandas.DataFrame the transformed data lag: int the number of lag response_col: list the name of the response column index: np.ndarray the index of predicted series start: float the first input data for prediction log: bool (default: True) whether log transformation is used lamb: float (default: None) the lamb used for Box-Cox transformation Return: -------- pandas.Series prediction series """ df_lag = lag_df(train_trans, lag, response_col).dropna() # starting data for first prediction input_data = df_lag.iloc[-1, : lag].to_numpy() predictions = 
ts_predict(input_data, model, len(index)) if log: predict = start * np.exp(np.cumsum(predictions)) else: predict = np.exp(np.log(lamb * (start + np.cumsum(predictions)) + 1) / lamb) return pd.Series(predict, index=index) def analyze_regressor( regressor, title, train, test, n_splits, response_col, trans_train, param_grid, index, start, log=True, lamb=None, ): """ Returns the best regressor and evaluation scores Parameters: ----------- regressor: str the model name title: str the plot title train: pandas.DataFrame the training data test: pandas.DataFrame the testing data n_splits: int the number of data splits response_col: list the name of the response column trans_train: pandas.DataFrame the transformed data param_grid: dict the hyperparameters and corresponding values to try index: np.ndarray the index of predicted series start: float the first input data for prediction log: bool (default: True) whether log transformation is used lamb: float (default: None) the lamb used for Box-Cox transformation Return: -------- machine learning model, pandas.DataFrame the regressor, evaluation scores """ print( f"Performing cross-validation to optimzie the lag and", f"hyperparameters for the {regressor} regressor ...", ) model, lag = cross_validation( regressor, n_splits, response_col, trans_train, param_grid ) predict = regressor_predict( model, trans_train, lag, response_col, index, start, log=log, lamb=lamb, ) plot_forecast(train, test, predict, title) scores = evaluate_model(predict, test.squeeze(), title) return model, scores ############################################################################### ########################## Functions for LSTM models ########################## ############################################################################### def reshape_data(data): """ Reshapes data for training a LSTM model Parameters: ----------- data: np.ndarray the data to reshape Return: -------- np.ndarray, np.ndarray explanatory data, response data """ X = 
np.reshape(data[:, 1:], (data.shape[0], 1, data.shape[1] - 1)) return X, data[:, 0] def train_lstm(train_X, train_y, lag, epochs, verbose=0): """ Returns a trained LSTM model Parameters: ----------- train_X: np.ndarray the explanatory data to train train_y: np.ndarray the response data to train lag: int the number of lag epochs: int the number of epochs for training verbose: int (default: 0) the print options of model.fit Return: -------- tf.keras.Model the LSTM model """ # create and fit the LSTM network model = Sequential() # model.add(Dense(lag + 1, activation="relu")) model.add(LSTM(lag + 1, input_shape=(1, lag))) # model.add(Dense(1, activation="relu")) model.add(Dense(1)) model.compile(loss="mean_squared_error", optimizer="adam") model.fit(train_X, train_y, epochs=epochs, batch_size=1, verbose=verbose) return model def lstm_predict(start, model, scaler, index, lag, n=30, responses=1): """ Returns the LSTM model predictions Parameters: ----------- start: float the first input data for prediction model: tf.keras.Model the LSTM model scaler: sklearn.preprocessing.MinMaxScaler the scaler used for data transformation index: np.ndarray the index of predicted series lag: int the number of lag n: int (default: 30) the number of predictions responses: int (default: 1) the number of reponses Return: -------- pandas.Series prediction series """ predict = ts_predict(start, model, n=n, responses=responses, lag=lag) # invert predictions predict = scaler.inverse_transform(predict) fc_series = pd.Series(predict.flatten(), index=index) return fc_series def get_inversed_data(data, scaler): """ Returns inversed data Parameters: ----------- data: pandas.DataFrame the data
value):
        """
        The SNMP Write Community String is like a password. It is sent along with each
        SNMP Set-Request and allows (or denies) changing MIBs values.
        :type value: string
        """
        self.attributes['ApV2.SNMP Write Community'] = value

    # --- SNMP v3 credentials ---

    @property
    def snmp_v3_user(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.SNMP V3 User'] if 'ApV2.SNMP V3 User' in self.attributes else None

    @snmp_v3_user.setter
    def snmp_v3_user(self, value):
        """
        Relevant only in case SNMP V3 is in use.
        :type value: str
        """
        self.attributes['ApV2.SNMP V3 User'] = value

    @property
    def snmp_v3_password(self):
        """
        :rtype: string
        """
        return self.attributes['ApV2.SNMP V3 Password'] if 'ApV2.SNMP V3 Password' in self.attributes else None

    @snmp_v3_password.setter
    def snmp_v3_password(self, value):
        """
        Relevant only in case SNMP V3 is in use.
        :type value: string
        """
        self.attributes['ApV2.SNMP V3 Password'] = value

    @property
    def snmp_v3_private_key(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.SNMP V3 Private Key'] if 'ApV2.SNMP V3 Private Key' in self.attributes else None

    @snmp_v3_private_key.setter
    def snmp_v3_private_key(self, value):
        """
        Relevant only in case SNMP V3 is in use.
        :type value: str
        """
        self.attributes['ApV2.SNMP V3 Private Key'] = value

    @property
    def snmp_v3_authentication_protocol(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.SNMP V3 Authentication Protocol'] if 'ApV2.SNMP V3 Authentication Protocol' in self.attributes else None

    @snmp_v3_authentication_protocol.setter
    def snmp_v3_authentication_protocol(self, value='No Authentication Protocol'):
        """
        Relevant only in case SNMP V3 is in use.
        :type value: str
        """
        self.attributes['ApV2.SNMP V3 Authentication Protocol'] = value

    @property
    def snmp_v3_privacy_protocol(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.SNMP V3 Privacy Protocol'] if 'ApV2.SNMP V3 Privacy Protocol' in self.attributes else None

    @snmp_v3_privacy_protocol.setter
    def snmp_v3_privacy_protocol(self, value='No Privacy Protocol'):
        """
        Relevant only in case SNMP V3 is in use.
        :type value: str
        """
        self.attributes['ApV2.SNMP V3 Privacy Protocol'] = value

    # --- General SNMP configuration ---

    @property
    def snmp_version(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.SNMP Version'] if 'ApV2.SNMP Version' in self.attributes else None

    @snmp_version.setter
    def snmp_version(self, value=''):
        """
        The version of SNMP to use. Possible values are v1, v2c and v3.
        :type value: str
        """
        self.attributes['ApV2.SNMP Version'] = value

    @property
    def enable_snmp(self):
        """
        :rtype: bool
        """
        return self.attributes['ApV2.Enable SNMP'] if 'ApV2.Enable SNMP' in self.attributes else None

    @enable_snmp.setter
    def enable_snmp(self, value=True):
        """
        If set to True and SNMP isn't enabled yet in the device the Shell will
        automatically enable SNMP in the device when Autoload command is called.
        SNMP must be enabled on the device for the Autoload command to run successfully.
        True by default.
        :type value: bool
        """
        self.attributes['ApV2.Enable SNMP'] = value

    @property
    def disable_snmp(self):
        """
        :rtype: bool
        """
        return self.attributes['ApV2.Disable SNMP'] if 'ApV2.Disable SNMP' in self.attributes else None

    @disable_snmp.setter
    def disable_snmp(self, value=False):
        """
        If set to True SNMP will be disabled automatically by the Shell after
        the Autoload command execution is completed. False by default.
        :type value: bool
        """
        self.attributes['ApV2.Disable SNMP'] = value

    # --- Console / CLI access ---

    @property
    def console_server_ip_address(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.Console Server IP Address'] if 'ApV2.Console Server IP Address' in self.attributes else None

    @console_server_ip_address.setter
    def console_server_ip_address(self, value):
        """
        The IP address of the console server, in IPv4 format.
        :type value: str
        """
        self.attributes['ApV2.Console Server IP Address'] = value

    @property
    def console_user(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.Console User'] if 'ApV2.Console User' in self.attributes else None

    @console_user.setter
    def console_user(self, value):
        """
        :type value: str
        """
        self.attributes['ApV2.Console User'] = value

    @property
    def console_port(self):
        """
        :rtype: float
        """
        return self.attributes['ApV2.Console Port'] if 'ApV2.Console Port' in self.attributes else None

    @console_port.setter
    def console_port(self, value):
        """
        The port on the console server, usually TCP port, which the device is associated with.
        :type value: float
        """
        self.attributes['ApV2.Console Port'] = value

    @property
    def console_password(self):
        """
        :rtype: string
        """
        return self.attributes['ApV2.Console Password'] if 'ApV2.Console Password' in self.attributes else None

    @console_password.setter
    def console_password(self, value):
        """
        :type value: string
        """
        self.attributes['ApV2.Console Password'] = value

    @property
    def cli_connection_type(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.CLI Connection Type'] if 'ApV2.CLI Connection Type' in self.attributes else None

    @cli_connection_type.setter
    def cli_connection_type(self, value='Auto'):
        """
        The CLI connection type that will be used by the driver. Possible values are
        Auto, Console, SSH, Telnet and TCP. If Auto is selected the driver will choose
        the available connection type automatically. Default value is Auto.
        :type value: str
        """
        self.attributes['ApV2.CLI Connection Type'] = value

    @property
    def cli_tcp_port(self):
        """
        :rtype: float
        """
        return self.attributes['ApV2.CLI TCP Port'] if 'ApV2.CLI TCP Port' in self.attributes else None

    @cli_tcp_port.setter
    def cli_tcp_port(self, value):
        """
        TCP Port to use for CLI connection. If kept empty a default CLI port will be
        used based on the chosen protocol, for example Telnet will use port 23.
        :type value: float
        """
        self.attributes['ApV2.CLI TCP Port'] = value

    # --- Backup / restore configuration ---

    @property
    def backup_location(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.Backup Location'] if 'ApV2.Backup Location' in self.attributes else None

    @backup_location.setter
    def backup_location(self, value):
        """
        Used by the save/restore orchestration to determine where backups should be saved.
        :type value: str
        """
        self.attributes['ApV2.Backup Location'] = value

    @property
    def backup_type(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.Backup Type'] if 'ApV2.Backup Type' in self.attributes else None

    @backup_type.setter
    def backup_type(self, value='File System'):
        """
        Supported protocols for saving and restoring of configuration and firmware files.
        Possible values are 'File System', 'FTP' and 'TFTP'. Default value is 'File System'.
        :type value: str
        """
        self.attributes['ApV2.Backup Type'] = value

    @property
    def backup_user(self):
        """
        :rtype: str
        """
        return self.attributes['ApV2.Backup User'] if 'ApV2.Backup User' in self.attributes else None

    @backup_user.setter
    def backup_user(self, value):
        """
        Username for the storage server used for saving and restoring of configuration
        and firmware files.
        :type value: str
        """
        self.attributes['ApV2.Backup User'] = value

    @property
    def backup_password(self):
        """
        :rtype: string
        """
        return self.attributes['ApV2.Backup Password'] if 'ApV2.Backup Password' in self.attributes else None

    @backup_password.setter
    def backup_password(self, value):
        """
        Password for the storage server used for saving and restoring of configuration
        and firmware files.
        :type value: string
        """
        self.attributes['ApV2.Backup Password'] = value

    # --- CloudShell bookkeeping attributes (stored on the instance, not in `attributes`) ---

    @property
    def name(self):
        """
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, value):
        """
        :type value: str
        """
        self._name = value

    @property
    def cloudshell_model_name(self):
        """
        :rtype: str
        """
        return self._cloudshell_model_name

    @cloudshell_model_name.setter
    def cloudshell_model_name(self, value):
        """
        :type value: str
        """
        self._cloudshell_model_name = value

    # --- Generic resource inventory attributes ---

    @property
    def system_name(self):
        """
        :rtype: str
        """
        return self.attributes['CS_GenericResource.System Name'] if 'CS_GenericResource.System Name' in self.attributes else None

    @system_name.setter
    def system_name(self, value):
        """
        A unique identifier for the device, if exists in the device terminal/os.
        :type value: str
        """
        self.attributes['CS_GenericResource.System Name'] = value

    @property
    def vendor(self):
        """
        :rtype: str
        """
        return self.attributes['CS_GenericResource.Vendor'] if 'CS_GenericResource.Vendor' in self.attributes else None

    @vendor.setter
    def vendor(self, value=''):
        """
        The name of the device manufacture.
        :type value: str
        """
        self.attributes['CS_GenericResource.Vendor'] = value

    @property
    def contact_name(self):
        """
        :rtype: str
        """
        return self.attributes['CS_GenericResource.Contact Name'] if 'CS_GenericResource.Contact Name' in self.attributes else None

    @contact_name.setter
    def contact_name(self, value):
        """
        The name of a contact registered in the device.
        :type value: str
        """
        self.attributes['CS_GenericResource.Contact Name'] = value

    @property
    def location(self):
        """
        :rtype: str
        """
        return self.attributes['CS_GenericResource.Location'] if 'CS_GenericResource.Location' in self.attributes else None

    @location.setter
    def location(self, value=''):
        """
        The device physical location identifier. For example Lab1/Floor2/Row5/Slot4.
        :type value: str
        """
        self.attributes['CS_GenericResource.Location'] = value

    @property
    def model(self):
        """
        :rtype: str
        """
        return self.attributes['CS_GenericResource.Model'] if 'CS_GenericResource.Model' in self.attributes else None

    @model.setter
    def model(self, value=''):
        """
        The device model. This information is typically used for abstract resource filtering.
        :type value: str
        """
        self.attributes['CS_GenericResource.Model'] = value

    @property
    def model_name(self):
        """
        :rtype: str
        """
        return self.attributes['CS_GenericResource.Model Name'] if 'CS_GenericResource.Model Name' in self.attributes else None

    @model_name.setter
    def model_name(self, value=''):
        """
        The catalog name of the device model. This attribute will be displayed in
        CloudShell instead of the CloudShell model.
        :type value: str
        """
        self.attributes['CS_GenericResource.Model Name'] = value


class ResourcePort(object):
    # CloudShell autoload sub-resource representing a single port.
    def __init__(self, name):
        """
        :param name: the resource name
        :type name: str
        """
        self.attributes = {}
        self.resources = {}
        self._cloudshell_model_name = 'ApV2.ResourcePort'
        self._name = name

    def add_sub_resource(self, relative_path, sub_resource):
        # Register a child resource under the given relative address.
        self.resources[relative_path] = sub_resource

    @classmethod
    def create_from_context(cls, context):
        """
        Creates an instance of ResourcePort from the given context
        :param context: cloudshell.shell.core.driver_context.ResourceCommandContext
        :type context: cloudshell.shell.core.driver_context.ResourceCommandContext
        :return:
        :rtype ResourcePort
        """
        result = ResourcePort(name=context.resource.name)
        for attr in context.resource.attributes:
            result.attributes[attr] = context.resource.attributes[attr]
        return result

    def create_autoload_details(self, relative_path=''):
        """
        Build the AutoLoadDetails (resources + attributes) for this resource tree.
        :param relative_path:
        :type relative_path: str
        :return
        """
        resources = [AutoLoadResource(model=self.resources[r].cloudshell_model_name,
                                      name=self.resources[r].name,
                                      relative_address=self._get_relative_path(r, relative_path))
                     for r in self.resources]
        attributes = [AutoLoadAttribute(relative_path, a, self.attributes[a]) for a in self.attributes]
        autoload_details = AutoLoadDetails(resources, attributes)
        for r in self.resources:
            curr_path =
# Copyright (c) 2016 Baidu, Inc. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from py_paddle import swig_paddle, DataProviderWrapperConverter from paddle.trainer.PyDataProviderWrapper import DenseSlot from paddle.trainer.config_parser import parse_config TEST_DATA = [[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.215686, 0.533333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.67451, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.886275, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.192157, 0.070588, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.670588, 0.992157, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.117647, 0.933333, 0.858824, 0.313725, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.090196, 0.858824, 0.992157, 0.831373, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.141176, 0.992157, 0.992157, 0.611765, 0.054902, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.258824, 0.992157, 0.992157, 0.529412, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.368627, 0.992157, 0.992157, 0.419608, 0.003922, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.094118, 0.835294, 0.992157, 0.992157, 0.517647, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0.603922, 0.992157, 0.992157, 0.992157, 0.603922, 0.545098, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0.447059, 0.992157, 0.992157, 0.956863, 0.062745, 0, 0, 0, 0, 0, 0, 0, 0, 0.011765, 0.666667, 0.992157, 0.992157, 0.992157, 0.992157, 0.992157, 0.745098, 0.137255, 0, 0, 0, 0, 0, 0.152941, 0.866667, 0.992157, 0.992157, 0.521569, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157, 0.992157, 0.803922, 0.352941, 0.745098, 0.992157, 0.945098, 0.317647, 0, 0, 0, 0, 0.580392, 0.992157, 0.992157, 0.764706, 0.043137, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157, 0.776471, 0.043137, 0, 0.007843, 0.27451, 0.882353, 0.941176, 0.176471, 0, 0, 0.180392, 0.898039, 0.992157, 0.992157, 0.313725, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.070588, 0.992157, 0.992157, 0.713725, 0, 0, 0, 0, 0.627451, 0.992157, 0.729412, 0.062745, 0, 0.509804, 0.992157, 0.992157, 0.776471, 0.035294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.494118, 0.992157, 0.992157, 0.968627, 0.168627, 0, 0, 0, 0.423529, 0.992157, 0.992157, 0.364706, 0, 0.717647, 0.992157, 0.992157, 0.317647, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.533333, 0.992157, 0.984314, 0.945098, 0.603922, 0, 0, 0, 0.003922, 0.466667, 0.992157, 0.988235, 0.976471, 0.992157, 0.992157, 0.788235, 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.686275, 0.882353, 0.364706, 0, 0, 0, 0, 0, 0, 0.098039, 0.588235, 0.992157, 0.992157, 0.992157, 0.980392, 0.305882, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.101961, 0.67451, 0.321569, 0, 0, 0, 0, 0, 0, 0, 0.105882, 0.733333, 0.976471, 0.811765, 0.713725, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.65098, 0.992157, 0.321569, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.25098, 0.007843, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.94902, 0.219608, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.968627, 0.764706, 0.152941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.498039, 0.25098, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.298039, 0.333333, 0.333333, 0.333333, 0.337255, 0.333333, 0.333333, 0.109804, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.027451, 0.223529, 0.776471, 0.964706, 0.988235, 0.988235, 0.988235, 0.992157, 0.988235, 0.988235, 0.780392, 0.098039, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.14902, 0.698039, 0.988235, 0.992157, 0.988235, 0.901961, 0.87451, 0.568627, 0.882353, 0.976471, 0.988235, 0.988235, 0.501961, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.188235, 0.647059, 0.988235, 0.988235, 0.745098, 0.439216, 0.098039, 0, 0, 0, 0.572549, 0.988235, 0.988235, 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.933333, 0.992157, 0.941176, 0.247059, 0, 0, 0, 0, 0, 0, 0.188235, 0.898039, 0.992157, 0.992157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.039216, 0.639216, 0.933333, 0.988235, 0.913725, 0.278431, 0, 0, 0, 0, 0, 0, 0, 0.113725, 0.843137, 0.988235, 0.988235, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.235294, 0.988235, 0.992157, 0.988235, 0.815686, 0.07451, 0, 0, 0, 0, 0, 0, 0, 0.333333, 0.988235, 0.988235, 0.552941, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.211765, 0.878431, 0.988235, 0.992157, 0.701961, 0.329412, 0.109804, 0, 0, 0, 0, 0, 0, 0, 
0.698039, 0.988235, 0.913725, 0.145098, 0, 0, 0, 0, 0, 0, 0, 0, 0,
# NOTE(review): a new module starts here (COAsT tide gauge analysis); the
# preceding lines in this file belong to a different, truncated source file.
import numpy as np
import xarray as xr
from . import Tidegauge, general_utils
import matplotlib.dates as mdates
import utide as ut
import scipy.signal as signal
from coast import stats_util, crps_util


class TidegaugeAnalysis:
    """
    This contains analysis methods suitable for use with the dataset structure
    of Tidegauge()
    """

    def __init__(self):
        # Stateless container: all functionality is provided through
        # classmethods/staticmethods, so there is nothing to initialise.
        return

    @classmethod
    def match_missing_values(cls, data_array1, data_array2, fill_value=np.nan):
        """
        Will match any missing values between two tidegauge_multiple datasets.
        Where missing values (defined by fill_value) are found in either dataset
        they are also placed in the corresponding location in the other dataset.
        Returns two new tidegaugeMultiple objects containing the new ssh data.
        Datasets must contain ssh variables and only ssh will be masked.
        """

        # Ensure both arrays are (id_dim, t_dim) ordered: transpose whenever
        # the time dimension comes first.
        if data_array2.dims[0] == "t_dim":
            data_array2 = data_array2.transpose()
        if data_array1.dims[0] == "t_dim":
            data_array1 = data_array1.transpose()

        # Locate missing values in each array. NaN requires np.isnan because
        # NaN != NaN under ordinary equality comparison.
        if np.isnan(fill_value):
            ind1 = np.isnan(data_array1.values)
            ind2 = np.isnan(data_array2.values)
        else:
            ind1 = data_array1.values == fill_value
            ind2 = data_array2.values == fill_value

        # Mask each array wherever the *other* array has missing data, so the
        # two outputs share an identical missing-value pattern.
        ds1 = data_array1.where(~ind2)
        ds2 = data_array2.where(~ind1)

        return Tidegauge(dataset=ds1.to_dataset()), Tidegauge(dataset=ds2.to_dataset())

    @classmethod
    def harmonic_analysis_utide(
        cls,
        data_array,
        min_datapoints=1000,
        nodal=False,
        trend=False,
        method="ols",
        conf_int="linear",
        Rayleigh_min=0.95,
    ):
        """
        Does a harmonic analysis for each timeseries inside this object using
        the utide library. All arguments except min_datapoints are arguments
        that are passed to ut.solve(). Please see the utide website for more
        information:

        https://pypi.org/project/UTide/

        Utide will by default do its harmonic analysis using a set of harmonics
        determined using the Rayleigh criterion. This changes the number of
        harmonics depending on the length and frequency of the time series.

        Output from this routine is not a new dataset, but a list of utide
        analysis objects.
        These are structures containing, amongst other things, amplitudes,
        phases, constituent names and confidence intervals. This list can be
        passed to reconstruct_tide_utide() in this object to create a new
        TidegaugeMultiple object containing reconstructed tide data.

        INPUTS
         data_array     : Xarray data_array from a coast.Tidegauge() object
                          e.g. tidegauge.dataset.ssh
         min_datapoints : If a time series has less than this value number of
                          datapoints, then omit from the analysis.
         <all_others>   : Inputs to utide.solve(). See website above.

        OUTPUTS
         A list of utide structures from the solve() routine. If a location
         is omitted, it will contain [] for its entry.
        """
        # Make name shorter for computations and get dimension lengths
        ds = data_array
        n_port = ds.sizes["id_dim"]
        n_time = ds.sizes["t_dim"]

        # Harmonic analysis datenums -- for utide to work correctly
        time = mdates.date2num(ds.time.values)

        # Create empty list of analyses
        analyses = []

        # Loop over ports
        for pp in range(0, n_port):
            # Temporary in-loop datasets (.load() pulls the port's series into
            # memory, e.g. when backed by dask/lazy storage)
            ds_port = ds.isel(id_dim=pp).load()
            number_of_nan = np.sum(np.isnan(ds_port.values))

            # If not enough datapoints for analysis then append an empty list
            # as a placeholder, keeping list indices aligned with ports.
            if (n_time - number_of_nan) < min_datapoints:
                analyses.append([])
                continue

            # Do harmonic analysis using UTide
            uts_obs = ut.solve(
                time,
                ds_port.values,
                lat=ds_port.latitude.values,
                nodal=nodal,
                trend=trend,
                method=method,
                conf_int=conf_int,
                Rayleigh_min=Rayleigh_min,
            )

            analyses.append(uts_obs)

        return analyses

    @classmethod
    def reconstruct_tide_utide(cls, data_array, utide_solution_list, constit=None, output_name="reconstructed"):
        """
        Use the array of times to reconstruct a time series using a list
        of utide analysis objects. This list can be obtained using
        harmonic_analysis_utide(). Specify constituents to use in the
        reconstruction by passing a list of strings such as 'M2' to the constit
        argument. This won't work if a specified constituent is not present in
        the analysis.
""" # Get dimension lengths n_port = len(utide_solution_list) n_time = len(data_array.time) # Harmonic analysis datenums -- needed for utide time = mdates.date2num(data_array.time) # Get coordinates from data_array and convert to Dataset for output reconstructed = np.zeros((n_port, n_time)) * np.nan # Loop over ports for pp in np.arange(n_port): # Reconstruct full tidal signal using utide pp_solution = utide_solution_list[pp] if len(pp_solution) == 0: continue # Call utide.reconstruct tide = np.array(ut.reconstruct(time, pp_solution, constit=constit).h) reconstructed[pp] = tide # Create output dataset and return it in new Tidegauge object. ds_out = xr.Dataset(data_array.coords) ds_out[output_name] = (["id_dim", "t_dim"], reconstructed) return Tidegauge(dataset=ds_out) @classmethod def calculate_non_tidal_residuals( cls, data_array_ssh, data_array_tide, apply_filter=True, window_length=25, polyorder=3 ): """ Calculate non tidal residuals by subtracting values in data_array_tide from data_array_ssh. You may optionally apply a filter to the non tidal residual data by setting apply_filter = True. This uses the scipy.signal.savgol_filter function, which you ay pass window_length and poly_order. """ # NTR: Calculate non tidal residuals ntr = data_array_ssh - data_array_tide n_port = data_array_ssh.sizes["id_dim"] # NTR: Apply filter if wanted if apply_filter: for pp in range(n_port): ntr[pp, :] = signal.savgol_filter(ntr[pp, :], 25, 3) # Create output Tidegauge object and return ds_coords = data_array_ssh.coords.to_dataset() ds_coords["ntr"] = ntr return Tidegauge(dataset=ds_coords) @classmethod def threshold_statistics(cls, dataset, thresholds=np.arange(-0.4, 2, 0.1), peak_separation=12): """ Do some threshold statistics for all variables with a time dimension inside this tidegauge_multiple object. Specifically, this routine will calculate: peak_count : The number of indepedent peaks over each specified threshold. 
Independent peaks are defined using the peak_separation argument. This is the number of datapoints either side of a peak within which data is ommited for further peak search. time_over_threshold : The total time spent over each threshold This is NOT an integral, but simple a count of all points over threshold. dailymax_count : A count of the number of daily maxima over each threshold monthlymax_count : A count of the number of monthly maxima over each threshold. Output is a xarray dataset containing analysed variables. The name of each analysis variable is constructed using the original variable name and one of the above analysis categories. """ # Set up working datasets and lists ds = dataset ds_thresh = xr.Dataset(ds.coords) ds_thresh["threshold"] = ("threshold", thresholds) var_list = list(ds.keys()) n_thresholds = len(thresholds) n_port = ds.sizes["id_dim"] # Loop over vars in the input dataset for vv in var_list: empty_thresh = np.zeros((n_port, n_thresholds)) * np.nan ds_thresh["peak_count_" + vv] = (["id_dim", "threshold"], np.array(empty_thresh)) ds_thresh["time_over_threshold_" + vv] = (["id_dim", "threshold"], np.array(empty_thresh)) ds_thresh["dailymax_count_" + vv] = (["id_dim", "threshold"], np.array(empty_thresh)) ds_thresh["monthlymax_count_" + vv] = (["id_dim", "threshold"], np.array(empty_thresh)) for pp in range(n_port): # Identify NTR peaks for threshold analysis data_pp = ds[vv].isel(id_dim=pp) if np.sum(np.isnan(data_pp.values)) == ds.sizes["t_dim"]: continue pk_ind, _ = signal.find_peaks(data_pp.values.copy(), distance=peak_separation) pk_values = data_pp[pk_ind] # Threshold Analysis for nn in range(0, n_thresholds): # Calculate daily and monthly maxima for threshold analysis ds_daily = data_pp.groupby("time.day") ds_daily_max = ds_daily.max(skipna=True) ds_monthly = data_pp.groupby("time.month") ds_monthly_max = ds_monthly.max(skipna=True) threshn = thresholds[nn] # NTR: Threshold Frequency (Peaks) ds_thresh["peak_count_" + vv][pp, nn] = 
np.sum(pk_values >= threshn) # NTR: Threshold integral (Time over threshold) ds_thresh["time_over_threshold_" + vv][pp, nn] = np.sum(data_pp >= threshn) # NTR: Number of daily maxima over threshold ds_thresh["dailymax_count_" + vv][pp, nn] = np.sum(ds_daily_max.values >= threshn) # NTR: Number of monthly maxima over threshold ds_thresh["monthlymax_count_" + vv][pp, nn] = np.sum(ds_monthly_max.values >= threshn) return ds_thresh @staticmethod def demean_timeseries(dataset): """ Subtract time means from all variables within this tidegauge_multiple object. This is done independently for each id_dim location. """ demeaned = dataset - dataset.mean(dim="t_dim") return Tidegauge(dataset=demeaned) @classmethod def difference(cls, dataset1, dataset2, absolute_diff=True, square_diff=True): """ Calculates differences between two tide gauge objects datasets. Will calculate differences, absolute differences and square differences between all common variables within each object. Each object should have the same sized dimensions. When calling this routine, the differencing is done as follows: dataset1.difference(dataset2) This will do dataset1 - dataset2. Output is a new tidegauge object containing differenced variables. 
""" # Get all differences and save coordintes for later differenced = dataset1 - dataset2 diff_vars = list(differenced.keys()) save_coords = list(dataset1.coords.keys()) # Loop oer all variables for vv in diff_vars: differenced = differenced.rename({vv: "diff_" + vv}) # Calculate absolute differences maybe if absolute_diff: abs_tmp = np.fabs(differenced) diff_vars = list(abs_tmp.keys()) for vv in diff_vars: abs_tmp = abs_tmp.rename({vv: "abs_" + vv}) else: abs_tmp = xr.Dataset() # Calculate squared differences maybe if square_diff: sq_tmp = np.square(differenced) diff_vars = list(sq_tmp.keys()) for vv in diff_vars: sq_tmp = sq_tmp.rename({vv: "square_" + vv}) else: sq_tmp = xr.Dataset() # Merge all differences into one differenced = xr.merge((differenced, abs_tmp, sq_tmp, dataset1[save_coords])) return Tidegauge(dataset=differenced) @staticmethod def find_high_and_low_water(data_array, method="comp", **kwargs): """ Finds high and low water for a given variable. Returns in a new TIDEGAUGE object with similar data format to a TIDETABLE. If this Tidegauge object contains more than one location (id_dim > 1) then a list
        MangadexClient
        """
        # NOTE(review): the docstring fragment above belongs to a classmethod
        # whose header lies outside this chunk; it loads client configuration
        # from a file and builds a client instance from it.
        with open(file_name, "r", encoding="utf-8") as file:
            data = load(file)
        username = data.get("username", None)
        password = data.get("password", None)
        refresh_token = data.get("refresh_token", None)
        api_url = data.get("api_url", DEFAULT_API_URL)
        # Booleans are stored as strings in the config file; normalise case
        # before converting.
        anonymous = cls._boolean(str(data.get("anonymous", "false")).lower())
        sleep_on_ratelimit = cls._boolean(str(data.get("sleep_on_ratelimit", "true")).lower())
        return cls(
            username=username,
            password=password,
            refresh_token=refresh_token,
            sleep_on_ratelimit=sleep_on_ratelimit,
            api_url=api_url,
            anonymous=anonymous,
        )

    # Dunder methods

    def __init__(
        self,
        *,
        username: Optional[str] = None,
        password: Optional[str] = None,
        refresh_token: Optional[str] = None,
        sleep_on_ratelimit: bool = True,
        session: aiohttp.ClientSession = None,
        api_url: str = DEFAULT_API_URL,
        anonymous: bool = False,
        **session_kwargs,
    ):
        self.username = username
        self.password = password
        # Username and password are only meaningful together; exactly one
        # being set is a configuration error.
        if (username, password).count(None) == 1:
            raise ValueError("Either both username and password have to be specified or neither have to be specified.")
        self.refresh_token = refresh_token
        self.sleep_on_ratelimit = sleep_on_ratelimit
        self.api_base = api_url
        # Reuse a caller-supplied session if given, otherwise create one.
        self.session = session or aiohttp.ClientSession(**session_kwargs)
        # Anonymous mode is implied when no credentials of any kind are given.
        self.anonymous_mode = anonymous or not (username or password or refresh_token)
        if anonymous:
            # Explicit anonymous mode discards any credentials that were given.
            self.username = self.password = self.refresh_token = None
        self.ratelimits = Ratelimits(*ratelimit_data)
        self.tag_cache = TagDict()
        self.user = ClientUser(self)
        # State for the client-side "requests per second" throttle used in
        # request().
        self._request_count = 0
        self._request_second_start = datetime.utcnow()  # Use utcnow to keep everything using UTC+0; also avoids
        # daylight-savings discontinuities.
        self._request_lock = asyncio.Lock()
        self._session_token: Optional[str] = None
        self._session_token_acquired: Optional[datetime] = datetime(year=2000, month=1, day=1)
        # This is the time when the token is acquired. The client will automatically vacate the token at 15 minutes
        # and 10 seconds.
        self._request_tried_refresh_token = False
        # This is needed so the request method does not enter an infinite loop with the refresh token

    async def __aenter__(self):
        """Allow the client to be used with ``async with`` syntax similar to :class:`aiohttp.ClientSession`."""
        await self.session.__aenter__()
        return self

    async def __aexit__(
        self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
    ):
        """Exit the client. This will also close the underlying session object."""
        # Drop all credentials and fall back to anonymous mode before closing.
        self.username = self.password = self.refresh_token = self.session_token = None
        self.anonymous_mode = True
        await self.session.__aexit__(exc_type, exc_val, exc_tb)

    def __repr__(self) -> str:
        """Provide a string representation of the client.

        :return: The string representation
        :rtype: str
        """
        return f"{type(self).__name__}(anonymous={self.anonymous_mode!r}, username={self.username!r})"

    # Request methods

    async def request(
        self,
        method: str,
        url: str,
        *,
        params: Optional[Mapping[str, Optional[Union[str, Sequence[str], bool, float]]]] = None,
        json: Any = None,
        with_auth: bool = True,
        retries: int = 3,
        allow_non_successful_codes: bool = False,
        add_includes: bool = False,
        **session_request_kwargs,
    ) -> aiohttp.ClientResponse:
        """Perform a request.

        .. warning::
            All requests have to be released, otherwise connections will not be reused. Make sure to call
            :meth:`aiohttp.ClientResponse.release` on the object returned by the method if you do not read data from
            the response.

        .. note::
            The request method will log all URLs that are requested. Enable logging on the ``asyncdex`` logger to
            view them. These requests are made under the ``INFO`` level. Retries are also logged on the ``WARNING``
            level.

        .. versionchanged:: 0.3
            Added a global (shared between all requests made in the client) ratelimit.

        .. versionchanged:: 0.4
            Added better handling of string items.

        :param method: The HTTP method to use for the request.
        :type method: str
        :param url: The URL to use for the request. May be either an absolute URL or a URL relative to the base
            MangaDex API URL.
        :type url: str
        :param params: Optional query parameters to append to the URL. If one of the values of the parameters is an
            array, the elements will be automatically added to the URL in the order that the array elements appear
            in.
        :type params: Mapping[str, Union[str, Sequence[str]]]
        :param json: JSON data to pass in a POST request.
        :type json: Any
        :param with_auth: Whether or not to append the session token to the request headers. Requests made without
            the header will behave as if the client is in anonymous mode. Defaults to ``True``.
        :type with_auth: bool
        :param retries: The amount of times to retry. The function will recursively call itself, subtracting ``1``
            from the original count until retries run out.
        :type retries: int
        :param allow_non_successful_codes: Whether or not to allow non-success codes (4xx codes that aren't 401/429)
            to pass through instead of raising an error. Defaults to ``False``.
        :type allow_non_successful_codes: bool
        :param add_includes: Whether or not to add the list of allowed reference expansions to the request. Defaults
            to ``False``.
        :type add_includes: bool
        :param session_request_kwargs: Optional keyword arguments to pass to :meth:`aiohttp.ClientSession.request`.
        :raises: :class:`.Unauthorized` if the endpoint requires authentication and sufficient parameters for
            authentication were not provided to the client.
        :raises: :class:`aiohttp.ClientResponseError` if the response is a 4xx or 5xx code after multiple retries or
            if it will not be retried and ``allow_non_successful_codes`` is ``False``.
        :return: The response.
        :rtype: aiohttp.ClientResponse
        """
        if url.startswith("/"):
            # Add the base URL if the base URL is not an absolute URL.
            url = self.api_base + url
        # Copy so we never mutate the caller's mapping.
        params = dict(params) if params else {}
        if add_includes:
            # Reference expansions are filtered by the user's permissions.
            includes = []
            for key, val in permission_model_mapping.items():
                if self.user.permission_check(key):
                    includes.append(val)
            params["includes"] = includes
        if params:
            # Strategy: Put all the parts into a list, and then use
            # "&".join(<arr>) to add all the parts together
            param_parts = []
            for name, value in params.items():
                # Sequences become repeated "name[]=item" entries; strings are
                # checked first because they are also iterable.
                if not isinstance(value, str) and hasattr(value, "__iter__"):
                    for item in value:
                        param_parts.append(f"{name}[]={item}")
                elif isinstance(value, str):
                    param_parts.append(f"{name}={value}")
                else:
                    param_parts.append(f"{name}={convert_obj_to_json(value)}")
            url += "?" + "&".join(param_parts)
        headers = {}
        if with_auth and not self.anonymous_mode:
            # Lazily obtain a session token the first time it is needed.
            if self.session_token is None:
                await self.get_session_token()
            headers["Authorization"] = f"Bearer {self.session_token}"
        path_obj = None
        if url.startswith(self.api_base):
            # We only want the ratelimit to only apply to the API urls.
            async with self._request_lock:
                # I decided not to throw exceptions for these 1-second
                # ratelimits.
                self._request_count += 1
                time_now = datetime.utcnow()
                time_difference = (time_now - self._request_second_start).total_seconds()
                if time_difference <= 1.25 and self._request_count >= 5:
                    # Hopefully this will stop excess retries which cripple
                    # pagers.
                    logger.warning("Sleeping for 1.25 seconds.")
                    await asyncio.sleep(1.25)
                elif time_difference > 1:
                    # Start a fresh 1-second counting window.
                    self._request_count = 0
                    self._request_second_start = time_now
                # Per-path ratelimits: either sleep until clear, or raise if
                # sleeping is disabled and the limit is exhausted.
                if self.sleep_on_ratelimit:
                    path_obj = await self.ratelimits.sleep(remove_prefix(self.api_base, url), method)
                else:
                    time_to_sleep, path_obj = await self.ratelimits.check(remove_prefix(self.api_base, url), method)
                    if time_to_sleep > 0 and path_obj:
                        raise Ratelimit(path_obj.path.name, path_obj.ratelimit_amount, path_obj.ratelimit_expires)
        logger.info("Making %s request to %s", method, url)
        resp = await self.session.request(method, url, headers=headers, json=json, **session_request_kwargs)
        if path_obj:
            # Feed the response's ratelimit headers back into the tracker.
            path_obj.update(resp)
        do_retry = False
        if url.startswith(self.api_base):
            try:
                await resp.read()
            except Exception:
                # Body may be unreadable (e.g. connection reset); status-code
                # handling below still proceeds.
                pass
            if resp.status == 401:  # Unauthorized
                if self.refresh_token and not self._request_tried_refresh_token:  # Invalid session token
                    # Guard flag prevents an infinite refresh loop.
                    self._request_tried_refresh_token = True
                    await self.get_session_token()
                    do_retry = True
                    self._request_tried_refresh_token = False
                elif self.username and self.password:  # Invalid refresh token
                    await self.login()
                    # NOTE(review): path_obj can be None here when
                    # sleep/check returned nothing -- verify this cannot
                    # raise AttributeError.
                    if path_obj.path.name == "/auth/refresh":
                        # Just drop it for now because the login endpoint took
                        # care of it.
                        # NOTE(review): this returns None although the method
                        # is annotated to return a ClientResponse.
                        return
                    do_retry = True
                else:
                    try:
                        raise Unauthorized(method, url, resp)
                    finally:
                        self._request_tried_refresh_token = False
                        resp.close()
            elif resp.status in [403, 412]:
                # MangaDex signals a captcha challenge via this header.
                site_key = resp.headers.get("X-Captcha-Sitekey", "")
                if site_key:
                    raise Captcha(site_key, method, url, resp)
            elif resp.status == 429:
                # Ratelimit error. This should be handled by ratelimits but
                # I'll handle it here as well.
                if resp.headers.get("x-ratelimit-retry-after", ""):
                    diff = (
                        datetime.utcfromtimestamp(int(resp.headers["x-ratelimit-retry-after"])) - datetime.utcnow()
                    ).total_seconds()
                    logger.warning("Sleeping for %s seconds.", diff)
                    await asyncio.sleep(diff)
                else:
                    logger.warning("Sleeping for 1.25 seconds.")
                    await asyncio.sleep(
                        1.25
                    )  # This is probably the result of multiple devices, so sleep for a second. Will
                    # give up on the 4th try though if it is persistent.
                do_retry = True
            if resp.status // 100 == 5:  # 5xx
                do_retry = True
            if do_retry:
                if retries > 0:
                    logger.warning("Retrying %s request to %s because of HTTP code %s", method, url, resp.status)
                    # NOTE(review): params/add_includes are already baked into
                    # `url` at this point, but allow_non_successful_codes is
                    # NOT forwarded to the retry -- confirm that is intended.
                    return await self.request(
                        method, url, json=json, with_auth=with_auth, retries=retries - 1, **session_request_kwargs
                    )
                else:
                    json_data = None
                    try:
                        json_data = await resp.json()
                    except Exception:
                        pass
                    finally:
                        raise HTTPException(method, url, resp, json=json_data)
            elif not allow_non_successful_codes:
                if not resp.ok:
                    json_data = None
                    try:
                        json_data = await resp.json()
                    except Exception as e:
                        logger.warning("%s while trying to see response: %s", type(e).__name__, e)
                    finally:
                        raise HTTPException(method, url, resp, json=json_data)
        return resp

    async def _one_off(self, method, url, *, params=None, json=None, with_auth=True, retries=3, **kwargs):
        """Use for one-off requests where we do not care about the response."""
        r = await self.request(method, url, params=params, json=json, with_auth=with_auth, retries=retries, **kwargs)
        r.close()

    async def _get_json(self, method, url, *, params=None, json=None, with_auth=True, retries=3, **kwargs):
        """Used for getting the json quickly when we don't care about request
""" This module provides PKCS#11 integration for pyHanko, by providing a wrapper for `python-pkcs11 <https://github.com/danni/python-pkcs11>`_ that can be seamlessly plugged into a :class:`~.signers.PdfSigner`. """ import asyncio import binascii import logging from typing import Optional, Set from asn1crypto import x509 from asn1crypto.algos import RSASSAPSSParams from cryptography.hazmat.primitives import hashes from pyhanko.config import PKCS11SignatureConfig from pyhanko.sign.general import ( CertificateStore, SigningError, SimpleCertificateStore, get_pyca_cryptography_hash, ) from pyhanko.sign.signers import Signer try: from pkcs11 import Attribute, ObjectClass, PKCS11Error, Session from pkcs11 import lib as pkcs11_lib except ImportError as e: # pragma: nocover raise ImportError( "pyhanko.sign.pkcs11 requires pyHanko to be installed with " "the [pkcs11] option. You can install missing " "dependencies by running \"pip install 'pyHanko[pkcs11]'\".", e ) __all__ = [ 'PKCS11Signer', 'open_pkcs11_session', 'PKCS11SigningContext' ] logger = logging.getLogger(__name__) def open_pkcs11_session(lib_location, slot_no=None, token_label=None, user_pin=None) -> Session: """ Open a PKCS#11 session :param lib_location: Path to the PKCS#11 module. :param slot_no: Slot number to use. If not specified, the first slot containing a token labelled ``token_label`` will be used. :param token_label: Label of the token to use. If ``None``, there is no constraint. :param user_pin: User PIN to use. .. note:: Some PKCS#11 implementations do not require PIN when the token is opened, but will prompt for it out-of-band when signing. :return: An open PKCS#11 session object. 
""" lib = pkcs11_lib(lib_location) slots = lib.get_slots() token = None if slot_no is None: for slot in slots: try: token = slot.get_token() if token_label is None or token.label == token_label: break except PKCS11Error: continue if token is None: raise PKCS11Error( f'No token with label {token_label} found' if token_label is not None else 'No token found' ) else: token = slots[slot_no].get_token() if token_label is not None and token.label != token_label: raise PKCS11Error(f'Token in slot {slot_no} is not {token_label}.') kwargs = {} if user_pin is not None: kwargs['user_pin'] = user_pin return token.open(**kwargs) def _pull_cert(pkcs11_session: Session, label: Optional[str] = None, cert_id: Optional[bytes] = None): query_params = { Attribute.CLASS: ObjectClass.CERTIFICATE } if label is not None: query_params[Attribute.LABEL] = label if cert_id is not None: query_params[Attribute.ID] = cert_id q = pkcs11_session.get_objects(query_params) # need to run through the full iterator to make sure the operation # terminates results = list(q) if len(results) == 1: cert_obj = results[0] return x509.Certificate.load(cert_obj[Attribute.VALUE]) else: info_strs = [] if label is not None: info_strs.append(f"label '{label}'") if cert_id is not None: info_strs.append( f"ID '{binascii.hexlify(cert_id).decode('ascii')}'" ) qualifier = f" with {', '.join(info_strs)}" if info_strs else "" if not results: err = f"Could not find cert{qualifier}." else: err = f"Found more than one cert{qualifier}." raise PKCS11Error(err) def _hash_fully(digest_algorithm): md_spec = get_pyca_cryptography_hash(digest_algorithm) def _h(data: bytes): h = hashes.Hash(md_spec) h.update(data) return h.finalize() return _h # TODO: perhaps attempt automatic key discovery if the labels aren't provided? class PKCS11Signer(Signer): """ Signer implementation for PKCS11 devices. :param pkcs11_session: The PKCS11 session object to use. 
:param cert_label: The label of the certificate that will be used for signing, to be pulled from the PKCS#11 token. :param cert_id: ID of the certificate object that will be used for signing, to be pulled from the PKCS#11 token. :param signing_cert: The signer's certificate. If the signer's certificate is provided via this parameter, the ``cert_label`` and ``cert_id`` parameters will not be used to retrieve the signer's certificate. :param ca_chain: Set of other relevant certificates (as :class:`.asn1crypto.x509.Certificate` objects). :param key_label: The label of the key that will be used for signing. Defaults to the value of ``cert_label`` if left unspecified and ``key_id`` is also unspecified. .. note:: At least one of ``key_id``, ``key_label`` and ``cert_label`` must be supplied. :param key_id: ID of the private key object (optional). :param other_certs_to_pull: List labels of other certificates to pull from the PKCS#11 device. Defaults to the empty tuple. If ``None``, pull *all* certificates. :param bulk_fetch: Boolean indicating the fetching strategy. If ``True``, fetch all certs and filter the unneeded ones. If ``False``, fetch the requested certs one by one. Default value is ``True``, unless ``other_certs_to_pull`` has one or fewer elements, in which case it is always treated as ``False``. :param use_raw_mechanism: Use the 'raw' equivalent of the selected signature mechanism. This is useful when working with tokens that do not support a hash-then-sign mode of operation. .. note:: This functionality is only available for ECDSA at this time. Support for other signature schemes will be added on an as-needed basis. """ def __init__(self, pkcs11_session: Session, cert_label: Optional[str] = None, signing_cert: x509.Certificate = None, ca_chain=None, key_label: Optional[str] = None, prefer_pss=False, other_certs_to_pull=(), bulk_fetch=True, key_id: Optional[bytes] = None, cert_id: Optional[bytes] = None, use_raw_mechanism=False): """ Initialise a PKCS11 signer. 
""" if signing_cert is None and cert_id is None and cert_label is None: raise SigningError( "Please specify a signer's certificate through the " "'cert_id', 'signing_cert' and/or 'cert_label' options" ) self.cert_label = cert_label self.key_id = key_id self.cert_id = cert_id self._signing_cert = signing_cert if key_id is None and key_label is None: if cert_label is None: raise SigningError( "If 'cert_label' is None, then 'key_label' or 'key_id' " "must be provided." ) key_label = cert_label self.key_label = key_label self.pkcs11_session = pkcs11_session cs = SimpleCertificateStore() self._cert_registry: CertificateStore = cs if ca_chain is not None: cs.register_multiple(ca_chain) if signing_cert is not None: cs.register(signing_cert) self.other_certs = other_certs_to_pull self._other_certs_loaded = False if other_certs_to_pull is not None and len(other_certs_to_pull) <= 1: self.bulk_fetch = False else: self.bulk_fetch = bulk_fetch self.use_raw_mechanism = use_raw_mechanism self._key_handle = None self._loaded = False self.__loading_event = None super().__init__(prefer_pss=prefer_pss) def _init_cert_registry(self): # it's conceivable that one might want to load this separately from # the key data, so we allow for that. 
if not self._other_certs_loaded: certs = self._load_other_certs() self._cert_registry.register_multiple(certs) self._other_certs_loaded = True return self._cert_registry cert_registry = property(_init_cert_registry) @property def signing_cert(self): self._load_objects() return self._signing_cert async def async_sign_raw(self, data: bytes, digest_algorithm: str, dry_run=False) -> bytes: if dry_run: # allocate 4096 bits for the fake signature return b'0' * 512 await self.ensure_objects_loaded() from pkcs11 import MGF, Mechanism, SignMixin kh: SignMixin = self._key_handle kwargs = {} digest_algorithm = digest_algorithm.lower() signature_mechanism = self.get_signature_mechanism(digest_algorithm) signature_algo = signature_mechanism.signature_algo pre_sign_transform = None post_sign_transform = None if signature_algo == 'rsassa_pkcs1v15': if self.use_raw_mechanism: raise NotImplementedError( "RSASSA-PKCS1v15 not available in raw mode" ) kwargs['mechanism'] = { 'sha1': Mechanism.SHA1_RSA_PKCS, 'sha224': Mechanism.SHA224_RSA_PKCS, 'sha256': Mechanism.SHA256_RSA_PKCS, 'sha384': Mechanism.SHA384_RSA_PKCS, 'sha512': Mechanism.SHA512_RSA_PKCS, }[digest_algorithm] elif signature_algo == 'dsa': if self.use_raw_mechanism: raise NotImplementedError("DSA not available in raw mode") kwargs['mechanism'] = { 'sha1': Mechanism.DSA_SHA1, 'sha224': Mechanism.DSA_SHA224, 'sha256': Mechanism.DSA_SHA256, # These can't be used in CMS IIRC (since the key sizes required # to meaningfully use them are ridiculous), # but they're in the PKCS#11 spec, so let's add them for # completeness 'sha384': Mechanism.DSA_SHA384, 'sha512': Mechanism.DSA_SHA512, }[digest_algorithm] from pkcs11.util.dsa import encode_dsa_signature post_sign_transform = encode_dsa_signature elif signature_algo == 'ecdsa': if self.use_raw_mechanism: kwargs['mechanism'] = Mechanism.ECDSA pre_sign_transform = _hash_fully(digest_algorithm) else: # TODO test these (unsupported in SoftHSMv2 right now) kwargs['mechanism'] = { 'sha1': 
Mechanism.ECDSA_SHA1, 'sha224': Mechanism.ECDSA_SHA224, 'sha256': Mechanism.ECDSA_SHA256, 'sha384': Mechanism.ECDSA_SHA384, 'sha512': Mechanism.ECDSA_SHA512, }[digest_algorithm] from pkcs11.util.ec import encode_ecdsa_signature post_sign_transform = encode_ecdsa_signature elif signature_algo == 'rsassa_pss': if self.use_raw_mechanism: raise NotImplementedError( "RSASSA-PSS not available in raw mode" ) params: RSASSAPSSParams = signature_mechanism['parameters'] assert digest_algorithm == \ params['hash_algorithm']['algorithm'].native # unpack PSS parameters into PKCS#11 language kwargs['mechanism'] = { 'sha1': Mechanism.SHA1_RSA_PKCS_PSS, 'sha224': Mechanism.SHA224_RSA_PKCS_PSS, 'sha256': Mechanism.SHA256_RSA_PKCS_PSS, 'sha384': Mechanism.SHA384_RSA_PKCS_PSS, 'sha512': Mechanism.SHA512_RSA_PKCS_PSS, }[digest_algorithm] pss_digest_param = { 'sha1': Mechanism.SHA_1, 'sha224': Mechanism.SHA224, 'sha256': Mechanism.SHA256, 'sha384': Mechanism.SHA384, 'sha512': Mechanism.SHA512, }[digest_algorithm] pss_mgf_param = { 'sha1': MGF.SHA1, 'sha224': MGF.SHA224, 'sha256': MGF.SHA256, 'sha384': MGF.SHA384, 'sha512': MGF.SHA512 }[params['mask_gen_algorithm']['parameters']['algorithm'].native] pss_salt_len = params['salt_length'].native kwargs['mechanism_param'] = ( pss_digest_param, pss_mgf_param, pss_salt_len ) else: raise PKCS11Error( f"Signature algorithm '{signature_algo}' is not supported." ) if pre_sign_transform is not None: data = pre_sign_transform(data) def _perform_signature(): signature = kh.sign(data, **kwargs) if post_sign_transform is not None: signature = post_sign_transform(signature) return signature loop = asyncio.get_running_loop() return await loop.run_in_executor(None, _perform_signature) def _load_other_certs(self) -> Set[x509.Certificate]: return set(self.__pull()) def __pull(self): other_cert_labels = self.other_certs if other_cert_labels is not None and len(other_cert_labels) == 0: # if there's nothing to fetch, bail. 
# Recall: None -> fetch everything, so we check the length # explicitly return if other_cert_labels is None or self.bulk_fetch: # first, query all certs q = self.pkcs11_session.get_objects({ Attribute.CLASS: ObjectClass.CERTIFICATE }) logger.debug("Pulling all certificates from PKCS#11 token...") for cert_obj in q: label = cert_obj[Attribute.LABEL] if other_cert_labels is None or
import warnings import networkx as nx import numpy as np import scipy.sparse as sp from sklearn import metrics class DataUtils: def __init__(self, graph_file): with np.load(graph_file, allow_pickle=True) as loader: loader = dict(loader) self.A = sp.csr_matrix((loader['adj_data'], loader['adj_indices'], loader['adj_indptr']), shape=loader['adj_shape']) self.X = sp.csr_matrix((loader['attr_data'], loader['attr_indices'], loader['attr_indptr']), shape=loader['attr_shape']) self.labels = loader['labels'] self.val_edges = loader['val_edges'] self.val_ground_truth = loader['val_ground_truth'] self.test_edges = loader['test_edges'] self.test_ground_truth = loader['test_ground_truth'] self.g = nx.from_scipy_sparse_matrix(self.A) self.num_of_nodes = self.g.number_of_nodes() self.num_of_edges = self.g.number_of_edges() self.edges_raw = self.g.edges(data=True) # edges_arr = np.array([(a, b) for a, b, c in self.edges_raw]) # self.edges_is_hom = self.labels[edges_arr[:, 0]] == self.labels[edges_arr[:, 1]] self.nodes_raw = self.g.nodes(data=True) self.edge_distribution = np.array([attr['weight'] for _, _, attr in self.edges_raw], dtype=np.float32) self.edge_distribution /= np.sum(self.edge_distribution) self.edge_sampling = AliasSampling(prob=self.edge_distribution) self.node_negative_distribution = np.power( np.array([self.g.degree(node, weight='weight') for node, _ in self.nodes_raw], dtype=np.float32), 0.75) self.node_negative_distribution /= np.sum(self.node_negative_distribution) self.node_sampling = AliasSampling(prob=self.node_negative_distribution) self.node_index = {} self.node_index_reversed = {} for index, (node, _) in enumerate(self.nodes_raw): self.node_index[node] = index self.node_index_reversed[index] = node self.edges = [(self.node_index[u], self.node_index[v]) for u, v, _ in self.edges_raw] def fetch_next_batch(self, labels_to_use, batch_size=16, K=10): u_i = [] u_j = [] label = [] is_hom = [] for edge_index in self.edge_sampling.sampling(batch_size): edge = 
class AliasSampling:
    """Walker's alias method for O(1) draws from a discrete distribution.

    # Reference: LINE source code from https://github.com/snowkylin/line
    # Reference: https://en.wikipedia.org/wiki/Alias_method

    Two tables are prepared in __init__: ``U`` (scaled acceptance
    probabilities) and ``K`` (alias indices).  A draw picks a cell
    uniformly and either keeps it or falls through to its alias.
    """

    def __init__(self, prob):
        """Build the acceptance (U) and alias (K) tables from ``prob``."""
        self.n = len(prob)
        # Scale probabilities so a perfectly uniform input maps to all-ones.
        self.U = np.array(prob) * self.n
        self.K = list(range(self.n))
        # Cells above 1 donate mass to cells below 1 until both lists drain.
        overfull = [idx for idx, u in enumerate(self.U) if u > 1]
        underfull = [idx for idx, u in enumerate(self.U) if u < 1]
        while overfull and underfull:
            over, under = overfull.pop(), underfull.pop()
            self.K[under] = over
            self.U[over] -= 1 - self.U[under]
            # The donor may now itself be over- or under-full; requeue it.
            if self.U[over] > 1:
                overfull.append(over)
            elif self.U[over] < 1:
                underfull.append(over)

    def sampling(self, n=1):
        """Draw ``n`` samples; returns a scalar for n == 1, else a list."""
        draws = np.random.rand(n)
        cell = np.floor(self.n * draws)
        frac = self.n * draws - cell  # position inside the chosen cell
        cell = cell.astype(np.int32)
        out = [cell[k] if frac[k] < self.U[cell[k]] else self.K[cell[k]]
               for k in range(n)]
        return out[0] if n == 1 else out
shape=loader['attr_shape']) if 'labels' in loader.keys(): labels = loader['labels'] else: labels = None train_ones, val_ones, val_zeros, test_ones, test_zeros = _train_val_test_split_adjacency(A=A, p_test=p_test, p_val=p_val, neg_mul=1, every_node=True, connected=False, undirected=( A != A.T).nnz == 0) if p_val > 0: val_edges = np.row_stack((val_ones, val_zeros)) val_ground_truth = A[val_edges[:, 0], val_edges[:, 1]].A1 val_ground_truth = np.where(val_ground_truth > 0, 1, val_ground_truth) if p_test > 0: test_edges = np.row_stack((test_ones, test_zeros)) test_ground_truth = A[test_edges[:, 0], test_edges[:, 1]].A1 test_ground_truth = np.where(test_ground_truth > 0, 1, test_ground_truth) if p_val == 0: val_edges = test_edges val_ground_truth = test_ground_truth A = edges_to_sparse(train_ones, A.shape[0]) return A, X, labels, val_edges, val_ground_truth, test_edges, test_ground_truth def _train_val_test_split_adjacency(A, p_val=0.10, p_test=0.05, seed=0, neg_mul=1, every_node=True, connected=False, undirected=False, use_edge_cover=True, set_ops=True, asserts=False): # Reference: G2G source code from https://github.com/abojchevski/graph2gauss assert p_val + p_test > 0 assert A.min() == 0 # no negative edges assert A.diagonal().sum() == 0 # no self-loops assert not np.any(A.sum(0).A1 + A.sum(1).A1 == 0) # no dangling nodes is_undirected = (A != A.T).nnz == 0 if undirected: assert is_undirected # make sure is directed A = sp.tril(A).tocsr() # consider only upper triangular A.eliminate_zeros() else: if is_undirected: warnings.warn('Graph appears to be undirected. 
Did you forgot to set undirected=True?') np.random.seed(seed) E = A.nnz N = A.shape[0] s_train = int(E * (1 - p_val - p_test)) idx = np.arange(N) # hold some edges so each node appears at least once if every_node: if connected: assert sp.csgraph.connected_components(A)[0] == 1 # make sure original graph is connected A_hold = sp.csgraph.minimum_spanning_tree(A) else: A.eliminate_zeros() # makes sure A.tolil().rows contains only indices of non-zero elements d = A.sum(1).A1 if use_edge_cover: hold_edges = edge_cover(A) # make sure the training percentage is not smaller than len(edge_cover)/E when every_node is set to True min_size = hold_edges.shape[0] if min_size > s_train: raise ValueError('Training percentage too low to guarantee every node. Min train size needed {:.2f}' .format(min_size / E)) else: # make sure the training percentage is not smaller than N/E when every_node is set to True if N > s_train: raise ValueError('Training percentage too low to guarantee every node. Min train size needed {:.2f}' .format(N / E)) hold_edges_d1 = np.column_stack( (idx[d > 0], np.row_stack(map(np.random.choice, A[d > 0].tolil().rows)))) if np.any(d == 0): hold_edges_d0 = np.column_stack((np.row_stack(map(np.random.choice, A[:, d == 0].T.tolil().rows)), idx[d == 0])) hold_edges = np.row_stack((hold_edges_d0, hold_edges_d1)) else: hold_edges = hold_edges_d1 if asserts: assert np.all(A[hold_edges[:, 0], hold_edges[:, 1]]) assert len(np.unique(hold_edges.flatten())) == N A_hold = edges_to_sparse(hold_edges, N) A_hold[A_hold > 1] = 1 A_hold.eliminate_zeros() A_sample = A - A_hold s_train = s_train - A_hold.nnz else: A_sample = A idx_ones = np.random.permutation(A_sample.nnz) ones = np.column_stack(A_sample.nonzero()) train_ones = ones[idx_ones[:s_train]] test_ones = ones[idx_ones[s_train:]] # return back the held edges if every_node: train_ones = np.row_stack((train_ones, np.column_stack(A_hold.nonzero()))) n_test = len(test_ones) * neg_mul if set_ops: # generate slightly more 
completely random non-edge indices than needed and discard any that hit an edge # much faster compared a while loop # in the future: estimate the multiplicity (currently fixed 1.3/2.3) based on A_obs.nnz if undirected: random_sample = np.random.randint(0, N, [int(2.3 * n_test), 2]) random_sample = random_sample[random_sample[:, 0] > random_sample[:, 1]] else: random_sample = np.random.randint(0, N, [int(1.3 * n_test), 2]) random_sample = random_sample[random_sample[:, 0] != random_sample[:, 1]] # discard ones random_sample = random_sample[A[random_sample[:, 0], random_sample[:, 1]].A1 == 0] # discard duplicates random_sample = random_sample[np.unique(random_sample[:, 0] * N + random_sample[:, 1], return_index=True)[1]] # only take as much as needed test_zeros = np.row_stack(random_sample)[:n_test] assert test_zeros.shape[0] == n_test else: test_zeros = [] while len(test_zeros) < n_test: i, j = np.random.randint(0, N, 2) if A[i, j] == 0 and (not undirected or i > j) and (i, j) not in test_zeros: test_zeros.append((i, j)) test_zeros = np.array(test_zeros) # split the test set into validation and test set s_val_ones = int(len(test_ones) * p_val / (p_val + p_test)) s_val_zeros = int(len(test_zeros) * p_val / (p_val + p_test)) val_ones = test_ones[:s_val_ones] test_ones = test_ones[s_val_ones:] val_zeros = test_zeros[:s_val_zeros] test_zeros = test_zeros[s_val_zeros:] if undirected: # put (j, i) edges for every (i, j) edge in the respective sets and form back original A symmetrize = lambda x: np.row_stack((x, np.column_stack((x[:, 1], x[:, 0])))) train_ones = symmetrize(train_ones) val_ones = symmetrize(val_ones) val_zeros = symmetrize(val_zeros) test_ones = symmetrize(test_ones) test_zeros = symmetrize(test_zeros) A = A.maximum(A.T) if asserts: set_of_train_ones = set(map(tuple, train_ones)) assert train_ones.shape[0] + test_ones.shape[0] + val_ones.shape[0] == A.nnz assert (edges_to_sparse(np.row_stack((train_ones, test_ones, val_ones)), N) != A).nnz == 0 assert 
set_of_train_ones.intersection(set(map(tuple, test_ones))) == set() assert set_of_train_ones.intersection(set(map(tuple, val_ones))) == set() assert set_of_train_ones.intersection(set(map(tuple, test_zeros))) == set() assert set_of_train_ones.intersection(set(map(tuple, val_zeros))) == set() assert len(set(map(tuple, test_zeros))) == len(test_ones) * neg_mul assert len(set(map(tuple, val_zeros))) == len(val_ones) * neg_mul assert not connected or sp.csgraph.connected_components(A_hold)[0] == 1 assert not every_node or ((A_hold - A) > 0).sum() == 0 return train_ones, val_ones, val_zeros, test_ones, test_zeros def edge_cover(A): # Reference: G2G source code from https://github.com/abojchevski/graph2gauss N = A.shape[0] d_in = A.sum(0).A1 d_out = A.sum(1).A1 # make sure to include singleton nodes (nodes with one incoming or one outgoing edge) one_in = np.where((d_in == 1) & (d_out == 0))[0] one_out = np.where((d_in == 0) & (d_out == 1))[0] edges = [] edges.append(np.column_stack((A[:, one_in].argmax(0).A1, one_in))) edges.append(np.column_stack((one_out, A[one_out].argmax(1).A1))) edges = np.row_stack(edges) edge_cover_set = set(map(tuple, edges)) nodes = set(edges.flatten()) # greedly add other edges such that both end-point are not yet in the edge_cover_set cands = np.column_stack(A.nonzero()) for u, v in cands[d_in[cands[:, 1]].argsort()]: if u not in nodes and v not in nodes and u != v: edge_cover_set.add((u, v)) nodes.add(u) nodes.add(v) if len(nodes) == N: break # add a single edge for the rest of the nodes not covered so far not_covered = np.setdiff1d(np.arange(N), list(nodes)) edges = [list(edge_cover_set)] not_covered_out = not_covered[d_out[not_covered] > 0] if len(not_covered_out) > 0: edges.append(np.column_stack((not_covered_out, A[not_covered_out].argmax(1).A1))) not_covered_in = not_covered[d_out[not_covered] ==
class MyError:
    """ Mixin raising ValueError for improper (negative) numeric input.

    NOTE(review): this file is Python 2 syntax (print statements,
    ``raise E, msg``); a commented-out Python 3 port follows further
    down in the file.
    """

    def _negativeNumberException(self,*args):
        """ Utility method to raise a negative number exception

        Checks every positional argument and raises ValueError on the
        first one that is negative; non-numeric arguments surface as
        TypeError from the ``< 0`` comparison, which callers catch.
        """

        for item in args:
            if item <0:

                raise ValueError,\
                " <The value %s should be a positive number " % item
self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.Ke) return "%2.2f" % (((self.dividend_just_paid* (1+float(self.g_extrapolatingModel)))/(self.Ke-float(self.g_extrapolatingModel)))) #Raise TypeError if input is not numerical except TypeError: print "\n<The entered value is not a number" #division by zero raises ZeroDivisionError exception except ZeroDivisionError: raise ZeroDivisionError, "\n<Please check and re-enter the values" @property def costOfEquity(self): """ Compute cost of equity using DVM Model """ try: #Test for negative numbers input and raise the exception self._negativeNumberException(self.dividend_just_paid,self.share_price) return "%2.1f" % ((((self.dividend_just_paid* (1+float(self.g_extrapolatingModel))/self.share_price))+ float(self.g_extrapolatingModel))*100) #Raise TypeError if input is not numerical except TypeError: print "\n<The entered value is not a number" #division by zero raises ZeroDivisionError exception except ZeroDivisionError: raise ZeroDivisionError, "\n<Please check and re-enter the values" def __str__(self): """ String representation of DVMeModel""" if self.Ke == None: return "\n< Extrapolating Growth Model g = %s\n \ \n< Cost of equity Ke = %s \n\ \n< Market value of the share Po = %s" % \ (self.g_extrapolatingModel,(self.costOfEquity+'%'),('$'+ str(self.share_price))) else: return "\n< Extrapolating Growth Model g = %s\n \ \n< Cost of equity Ke = %s \n\ \n< Market value of the share Po = %s" % \ (self.g_extrapolatingModel,self.Ke,('$'+ str(self.valueOfShare))) class DVMgordonsModel(namedtuple('DVMgordonsModel','dividend_just_paid,return_on_equity,dividend_payout,share_price,Ke'),MyError): """ DVMgModel class inherits from tuple and MyError classes """ #set __slots__ to an empty tuple keep memory requirements low __slots__ = () #Pick Myerror method _negativeNumberException =MyError._negativeNumberException @property def g_gordonsModel(self): """ Compute g using Gordons growth Model """ try: #Test 
class CAPM(namedtuple('CAPM','Rf,Beta,Rm'),MyError):
    """ CAPM class inherits from tuple and MyError class

    Capital Asset Pricing Model: Ke = Rf + Beta * (Rm - Rf), where Rf
    is the risk-free rate, Beta the share's systematic risk and Rm the
    expected market return.  All three fields come from the namedtuple.
    """

    #set __slots__ to an empty tuple keep memory requirements low
    __slots__ = ()

    #Pick Myerror method
    _negativeNumberException =MyError._negativeNumberException

    @property
    def Ke(self):
        """ Compute cost of equity using CAPM model

        Returns Rf + Beta*(Rm - Rf).  Negative inputs raise ValueError
        via the MyError mixin; non-numeric inputs trigger TypeError,
        which is reported and leaves the property returning None.
        """
        try:
            #Test for negative numbers input and raise the exception
            self._negativeNumberException(self.Rf,self.Beta,self.Rm)
            return self.Rf + self.Beta*(self.Rm - self.Rf)

        # TypeError is raised when inputs are not numeric; report and
        # fall through (property then implicitly returns None)
        except TypeError:
            print "\n<The entered value is not a number"

    def __str__(self):
        """ String representation of CAPM"""
        # %-formatting binds tighter than +, so this is
        # ("\n< Ke = %s" % self.Ke) + "%"
        return "\n< Ke = %s" % self.Ke+"%"
= 0.06 #< Cost of equity Ke = 7.5% #< Market value of the share Po = $17.5 #♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦ #< Gordon's Growth Model g = 0.189 #< Cost of equity Ke = 30.3% #< Market value of the share Po = $1.77 #♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦♦ #0.06 #7.5 #0.189 #30.3 #♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣♣ #0.215 #c:\Python26> ########################################################################################## #Version : Python 3.2 #import math as m #from collections import namedtuple #class MyError: # """ Demonstrate imporper operation on negative number""" # def _negativeNumberException(self,*args): # """ Utility method to raise a negative number exception""" # # for item in args: # if item <0: # # raise ValueError(" <The value %s should be a positive number " % item) # #class DVMextrapolating(namedtuple('DVMextrapolating','dividend_just_paid,dividend_n_years,n,share_price,Ke'),MyError): # """ DVMeModel class inherits from tuple and MyError class """ # # #set __slots__ to an empty tuple keep memory requirements low # __slots__ = () # # #Pick Myerror method # _negativeNumberException =MyError._negativeNumberException # # @property # def g_extrapolatingModel(self): # """ Compute g using extrapolating """ # # try: # #Test for negative numbers input and raise the exception # self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.n) # return "%2.2f" % ((float(m.pow((self.dividend_just_paid/self.dividend_n_years),(1/float(self.n)))) -1)) # # #Raise TypeError if input is not numerical # except TypeError: # print("\n<The entered value is not a number") # # #division by zero raises ZeroDivisionError exception # except ZeroDivisionError: # raise ZeroDivisionError("\n<Please check and re-enter the values") # # @property # def valueOfShare(self): # """ Compute the share value """ # # try: # #Test for negative numbers input and raise the exception # 
self._negativeNumberException(self.dividend_just_paid,self.dividend_n_years,self.Ke) # return "%2.2f" % (((self.dividend_just_paid* # (1+float(self.g_extrapolatingModel)))/(self.Ke-float(self.g_extrapolatingModel)))) # # #Raise TypeError if input is not numerical # except TypeError: # print("\n<The entered value is not a number") # # #division by zero raises ZeroDivisionError exception # except ZeroDivisionError: # raise ZeroDivisionError("\n<Please check and re-enter the values") # # @property # def costOfEquity(self): # """ Compute cost of equity using DVM Model """ # # try: # #Test for negative numbers input and raise the exception # self._negativeNumberException(self.dividend_just_paid,self.share_price) # return "%2.1f" % ((((self.dividend_just_paid* # (1+float(self.g_extrapolatingModel))/self.share_price))+ float(self.g_extrapolatingModel))*100) # # #Raise TypeError if input is not numerical # except TypeError: # print("\n<The entered value is not a number") # # #division by zero raises ZeroDivisionError exception # except ZeroDivisionError: # raise ZeroDivisionError("\n<Please check and re-enter the values") # # def __str__(self): # """ String representation of DVMeModel""" # # if self.Ke == None: # return "\n< Extrapolating Growth Model g = %s\n \ # \n< Cost of equity Ke = %s \n\ # \n< Market value of the share Po = %s" % \ # (self.g_extrapolatingModel,(self.costOfEquity+'%'),('$'+ str(self.share_price))) # else: # return "\n<
######## Class############
class DoLogging():
    """ Logging class, to have some possibility debug code in the future

    Writes timestamped records to ``gaia_api_connector.elg`` next to
    this script.  Fixes over the previous revision:

    * ``logging.basicConfig`` is only effective on its first call, so
      re-invoking it (plus the urllib3 logger setup) on every message
      was wasted work — configuration now happens exactly once.
    * ``logging.info(requests_log)`` used to write the *logger object's
      repr* to the log on every call; that noise line is dropped.
    """

    # class-level guard so the logging subsystem is configured only once
    _configured = False

    def __init__(self) -> None:
        """ Constructor configures logging on first instantiation only """
        if not DoLogging._configured:
            # if needed change to DEBUG for more data
            current_path = os.path.dirname(os.path.abspath(__file__))
            log = '{0}/gaia_api_connector.elg'.format(current_path)
            logging.basicConfig(filename=log, level=logging.DEBUG)
            # keep verbose urllib3/requests wire logging enabled
            requests_log = logging.getLogger("requests.packages.urllib3")
            requests_log.setLevel(logging.DEBUG)
            requests_log.propagate = True
            DoLogging._configured = True

    def do_logging(self, msg: str) -> None:
        """ Log appropriate message into log file """
        # lazy %-style args: the string is only built if the record is emitted
        logging.info('TIME:%s:%s', datetime.now(), msg)
self.headers_default = { 'content-type': "application/json", 'Accept': "*/*", } # headers for usage in instance methods - with self.SID - will be filled up in constructor self.headers = {} self.url=url self.payload_list = payload # default only username and passowrd done=False counter=0 # loop to handle connection interuption while not done: counter +=1 if counter == 5: DoLogging().do_logging ('Connector() - init() - connection to API can not be established even in loop, check your credentials or IP connectivity') sys.exit(1) try: self.response = requests.post(self.url+"login", json=self.payload_list, headers=self.headers_default, verify=False) DoLogging().do_logging('Connector() - init() - login OK: {}'.format(self.url)) DoLogging().do_logging('Connector() - init() - login data: {}'.format(self.response.text)) if self.response.status_code == 200: #print(json.loads(self.response.text)) try: sid_out=json.loads(self.response.text) self.sid = sid_out['sid'] self.headers = { 'content-type': "application/json", 'Accept': "*/*", 'x-chkp-sid': self.sid, } DoLogging().do_logging('Connector() - init() - Connection to API is okay') except Exception as e: DoLogging().do_logging(' Connector() - init() - API is not running probably: {}..'.format(e)) else: a = json.loads(self.response.text) DoLogging().do_logging("Connector() - init() - Exception occured: {}".format(a)) DoLogging().do_logging('Connector() - init() - There is no SID, connection problem to API gateway, trying again..') time.sleep (1) continue except Exception as e: DoLogging().do_logging(' Connector() - init() - exception occured..can not connect to mgmt server, check IP connectivity or ssl certificates!!! 
: {}'.format(e)) else: done=True def logout(self) -> None: """ Logout method for correct disconenction from API """ done=False counter=0 while not done: counter +=1 if counter == 5: DoLogging().do_logging('Connector() - logout() - logout can not be done because connection to mgmt is lost and reconnect does not work...') sys.exit(1) else: try: payload_list={} self.response = requests.post(self.url+"logout", json=payload_list, headers=self.headers, verify=False) if self.response.status_code == 200: DoLogging().do_logging ('Connector() - logout() - logout from API is okay') return self.response.json() else: out = json.loads(self.response.text) DoLogging().do_logging (" ") DoLogging().do_logging(out) DoLogging().do_logging (" ") return self.response.json() except Exception as e: DoLogging().do_logging ('Connector() - logout() - connection to gateway is broken, trying again: {}'.format(e)) @staticmethod def base64_ascii(base64resp:str) -> str: """Converts base64 to ascii for run command/showtask.""" try: return base64.b64decode(base64resp).decode('utf-8') except Exception as e: DoLogging().do_logging("base64 error:{}".format(e)) def run_script(self, payload:dict) -> str: """ run script method is responsible for running script on target (ls -la, df -lh etc. 
basic linux commands) """ payload_list=payload headers = { 'content-type': "application/json", 'Accept': "*/*", 'x-chkp-sid': self.sid, } return_string = '' done=False counter=0 while not done: counter +=1 if counter == 5: DoLogging().do_logging('Connector() - run_script() - discard can not be done because connection to mgmt is lost and reconnect does not work...') sys.exit(1) else: try: self.response = requests.post(self.url+"run-script", json=payload_list, headers=headers, verify=False) task=json.loads(self.response.text) while True: show_task=Connector.task_method(self.sid,self.url,task['task-id']) show_task_text=json.loads(show_task.text) #DoLogging().do_logging ("Connector() - run_script() - :{}".format(show_task_text)) time.sleep (5) if show_task_text['tasks'][0]['progress-percentage'] == 100: base64resp = (str(self.send_cmd('show-task', payload={"task-id":show_task_text['tasks'][0]['task-id']})['tasks'][0]['task-details'][0]['output'])) asciiresp = self.base64_ascii(base64resp) return_string=return_string+"\n\n"+"Data for target:"+"\n"+asciiresp+"\n\n\n\n\n\n" #DoLogging().do_logging ("Connector() - run_script() - :{}".format(show_task_text)) break else: continue return return_string except Exception as e: DoLogging().do_logging ("Connector() - run_script() - Exception in run_script method, some data not returned, continue: {} {}".format(e, tasks)) else: done=True def send_cmd(self, cmd:str, payload:dict) -> dict: """ Core method, all data are exchanged via this method via cmd variable, you can show, add data etc. 
""" done=False counter=0 while not done: counter +=1 if counter == 5: DoLogging().do_logging ("Connector() - send_cmd() - Can not send API cmd in loop, there are some problems, changes are unpublished, check it manually..") self.logout() sys.exit(1) else: try: payload_list=payload self.response = requests.post(self.url + cmd, json=payload_list, headers=self.headers, verify=False) if self.response.status_code == 200: #uncomment for TSHOOT purposes DoLogging().do_logging ('Connector() - send_cmd() - send cmd is okay') #out = json.loads(self.response.text) #DoLogging().do_logging ('Connector() - send_cmd() - send cmd response is 200 :{}'.format(out)) return self.response.json() else: out = json.loads(self.response.text) DoLogging().do_logging(" Connector() - send_cmd() - response code is not 200 :{}".format(out)) return self.response.json() except Exception as e: DoLogging().do_logging ("Connector() - send_cmd() - POST operation to API is broken due connectivity flap or issue.. trying again..: {}".format(e)) ######## Class############ class Interactive_Init_Handler(): """ Init class for getting basic data about user/pwd/GW IP and establishing connector for API """ def __init__(self) -> None: self.user='' self.password='' self.IP='' self.node1IP='' self.node2IP='' self.connector=None self.connectors=[] self.version='' self.data = None self.path='' @staticmethod def validate_ip(ip:str) -> bool: """ validate ip format to avoid adding crazy data for IP based variable """ check = True try: data = ip.split(":") ip = ipaddress.ip_address(data[0]) return check except Exception as e: check= False print ("IP validation failed for some reason!: {}".format(e)) return check def _single(self, singleIP=None)-> None: """ establishing single connector to appropriate gateways via special class Connector() object depends on the call from Interactive_Init_Handler().run() it is asking for path to json or not """ try: if singleIP == None: self.IP=input("Enter GW IP: ") else: self.IP = 
singleIP if not self.user or not self.password or not self.IP: print ("Empty username or password or server IP, finish..") sys.exit(1) else: if self.validate_ip(self.IP): payload ={ "user":self.user, "password":<PASSWORD> } try: connector = Connector('https://{}/gaia_api/'.format(self.IP), payload) self.connector = connector except Exception as e: print ("Can not establish connector, check logcp_gaia_api.elg : {}".format(e)) else: print ("Wrong IP for single GW, exit") raise Exception ("Wrong IP for single GW, exit") except Exception as e: raise Exception ("Error in Interactive_Init_Handler()._single() method") print ("Connector to single gw is established") def _cluster (self, nodeIP1=None, nodeIP2=None) -> None: """ establishing cluster connectors to appropriate gateways via special class Connector() object depends on the call from Interactive_Init_Handler().run() it is asking for path to json or not """ try: if nodeIP1 == None and nodeIP2 == None: self.node1IP=input("Enter node1 IP: ") self.node2IP=input("Enter node2 IP: ") else: self.node1IP = nodeIP1 self.node2IP = nodeIP2 if not self.user or not self.password or not self.node1IP or not self.node2IP: print ("Empty username or password or server IP, finish..") sys.exit(1) else: if self.validate_ip(self.node1IP): payload ={ "user":self.user, "password":<PASSWORD> } try: connector = Connector('https://{}/gaia_api/'.format(self.node1IP), payload) self.connectors.append(connector) except Exception as e: print ("Can not establish connector, check logcp_gaia_api.elg : {}".format(e)) if self.validate_ip(self.node2IP): payload ={ "user":self.user, "password":self.password } try: connector = Connector('https://{}/gaia_api/'.format(self.node2IP), payload) self.connectors.append(connector) except Exception as e: print ("Can not establish connector, check logcp_gaia_api.elg : {}".format(e)) else: print ("Wrong IP for single GW, exit") raise Exception ("Wrong IP for single GW, exit") except Exception as e: raise Exception 
("Error in Interactive_Init_Handler()._cluster() method") print ("Connectors to cluster established") def _load_data(self, path=None)-> dict: """ load json data via separate object via class Load_Data() depends on the call from Interactive_Init_Handler().run() it is asking for path to json or not """ try: if path == None: # interactive mode path=input("Where is your json file with data for vlan manipulation?\n If no path specified, I count file data.json is in same folder as script\n") if not path: data = Load_Data() return data.load_data() else: data = Load_Data(path) return data.load_data() else: # mode with args data = Load_Data(path) return data.load_data() except Exception as e: raise Exception def run(self) -> None: """ handle user input at the beginning // handle argparse
                # NOTE(review): tail of a CPD/nCPD contraction helper cut at
                # the top of this chunk — the opening of this expression (and
                # the enclosing `if`) is above this view.
                * np.dot(lam_top.T, lam_bottom)
    else:
        return s


def cpd_symmetrize(factors, permdict, adjust_scale=True, weights=None):
    """
    Produce a symmetric CPD decomposition.

    :param factors: CPD factors
    :param permdict: dictionary of tuple: tuple pairs.
           keys are tuples containing the permutation
           values are tuples of any of ('ident', 'neg', 'conj')
           Identity permutation has to be excluded (added internally)
    :param weights: list, default None
           weights of the symmetry elements. If set adjust_scale will turn off
    :param adjust_scale: bool, default True
           If factors have to be scaled by the order of the permutation group

    Returns
    -------
    symm_factors: ndarray list, symmetrized CPD factors

    >>> a = cpd_initialize([3, 3, 4, 4], 3)
    >>> t1 = cpd_rebuild(a)
    >>> ts = 1/4 * (t1 - t1.transpose([1, 0, 2, 3]) + np.conj(t1.transpose([1, 0, 3, 2])) - t1.transpose([0, 1, 3, 2]))
    >>> k = cpd_symmetrize(a, {(1, 0, 2, 3): ('neg', ), (1, 0, 3, 2): ('conj', ), (0, 1, 3, 2): ('neg', )})
    >>> np.allclose(ts, cpd_rebuild(k))
    True
    >>> a = cpd_initialize([3, 3, 4, 4], 3)
    >>> t1 = cpd_rebuild(a)
    >>> ts = 1/2 * (t1 + t1.transpose([1, 0, 3, 2]))
    >>> k = cpd_symmetrize(a, {(1, 0, 3, 2): ('ident', ),})
    >>> np.allclose(ts, cpd_rebuild(k))
    True
    >>> ts = 2 * t1 - t1.transpose([1, 0, 3, 2])
    >>> k = cpd_symmetrize(a, {(1, 0, 3, 2): ('neg', ),}, weights=[2, 1])
    >>> np.allclose(ts, cpd_rebuild(k))
    True
    """
    # Size of the symmetry group: the supplied permutations plus identity.
    nsym = len(permdict) + 1
    nfactors = len(factors)

    if weights is not None:
        if len(weights) != nsym:
            raise ValueError('len(weights) != len(permdict)+1')
        # Spread each symmetry weight evenly over the factors, so the product
        # over all factors reproduces the original weight.
        weights = [pow(w, 1 / nfactors) for w in weights]
        adjust_scale = False
    else:
        # NOTE(review): this fallback has len(factors) entries while the loop
        # below consumes weights[0] plus one weight per permutation (nsym - 1
        # of them); if nfactors < nsym, zip() silently truncates and drops
        # permutations. Harmless when all weights are 1 only if
        # nfactors >= nsym — confirm intended length (nsym?).
        weights = [1 for ii in range(len(factors))]

    if adjust_scale:
        # 1/nsym overall, spread evenly over the factors.
        scaling_factor = pow(1 / nsym, 1 / nfactors)
    else:
        scaling_factor = 1

    new_factors = []

    # Elementary per-factor operations applied for each symmetry element.
    def ident(x):
        return x

    def neg(x):
        return -1 * x

    def conj(x):
        return np.conj(x)

    def make_scaler(weight):
        # Bind `weight` now so each symmetry element scales independently.
        return lambda x: x * weight

    from functools import reduce
    n_factors = len(factors)
    new_factors = []
    # Each output factor is the horizontal stack of the (scaled) original
    # factor and one transformed copy per symmetry element, i.e. the rank of
    # the decomposition grows by a factor of nsym.
    for idx, factor in enumerate(factors):
        new_factor = factor * scaling_factor * weights[0]
        for (perm, operations), weight in zip(permdict.items(),
                                              weights[1:]):
            transforms = []
            for operation in operations:
                if operation == 'ident':
                    transforms.append(ident)
                elif operation == 'neg':
                    if n_factors % 2 == 0 and idx == 0:
                        # We don't want to negate an even number of times:
                        # apply the sign flip to all factors but the first.
                        transforms.append(ident)
                    else:
                        transforms.append(neg)
                elif operation == 'conj':
                    transforms.append(conj)
                else:
                    raise ValueError(
                        'Unknown operation: {}'.format(
                            operation))
            transforms.append(make_scaler(weight))
            # Apply the transform pipeline to the permuted source factor and
            # append the result column-wise.
            new_factor = np.hstack(
                (new_factor,
                 scaling_factor * reduce((lambda x, y: y(x)),
                                         transforms,
                                         factors[perm[idx]])))
        new_factors.append(new_factor)

    return new_factors


def ncpd_symmetrize(norm_factors, permdict, weights=None):
    """
    Produce a symmetric nCPD decomposition.

    :param norm_factors: norm and normalized CPD factors
    :param permdict: dictionary of tuple: tuple pairs.
           keys are tuples containing the permutation
           values are tuples of any of ('ident', 'neg', 'conj')
           Identity permutation has to be excluded (added internally)
    :param weights: list, default None
           weights of the symmetry elements.

    Returns
    -------
    symm_norm_factors: ndarray list, symmetrized nCPD factors

    >>> a = ncpd_initialize([3, 3, 4], 3)
    >>> t1 = ncpd_rebuild(a)
    >>> ts = 1/2 * (t1 + t1.transpose([1, 0, 2]))
    >>> k = ncpd_symmetrize(a, {(1, 0, 2): ('ident', )})
    >>> np.allclose(ts, ncpd_rebuild(k))
    True
    >>> ts = (2 * t1 - 3 * t1.transpose([1, 0, 2]))
    >>> k = ncpd_symmetrize(a, {(1, 0, 2): ('neg', )}, [2, 3])
    >>> np.allclose(ts, ncpd_rebuild(k))
    True
    """
    # First element is the norm vector (lambda); the rest are the factors.
    lam = norm_factors[0]
    factors = norm_factors[1:]
    nsym = len(permdict) + 1

    if weights is not None:
        if len(weights) != nsym:
            raise ValueError('len(weights) != len(permdict)+1')
        # Explicit weights carry the scale themselves; no extra 1/nsym.
        scaling_factor = 1
    else:
        weights = [1 for ii in range(nsym)]
        scaling_factor = 1 / nsym

    # Factors are symmetrized unweighted/unscaled; all magnitude information
    # is folded into the stacked norm vector below.
    new_factors = cpd_symmetrize(factors, permdict, adjust_scale=False,
                                 weights=None)

    # NOTE(review): passing a generator to np.hstack is deprecated in newer
    # NumPy — a list comprehension would be the safe spelling; confirm the
    # supported NumPy versions.
    new_lam = scaling_factor * np.hstack(
        (lam * weight for weight in weights))

    return [new_lam, ] + new_factors


def unfold(tensor, mode=0):
    """Returns the mode-`mode` unfolding of `tensor` with modes starting
    at `0`.

    Parameters
    ----------
    tensor : ndarray
    mode : int, default is 0
        indexing starts at 0, therefore mode is in
        ``range(0, tensor.ndim)``. If -1 is passed, then `tensor` is
        flattened

    Returns
    -------
    ndarray
        unfold_tensor of shape ``(tensor.shape[mode], -1)``
    """
    if mode > -1:
        # Bring `mode` to the front, then flatten the remaining axes.
        return np.moveaxis(tensor, mode, 0).reshape((tensor.shape[mode], -1))
    elif mode == -1:
        # Full flattening as a single row vector.
        return tensor.reshape([1, -1])
    else:
        raise ValueError('Wrong mode: {}'.format(mode))


def fold(unfolded_tensor, mode, shape):
    """Refolds the mode-`mode` unfolding into a tensor of shape `shape`

    In other words, refolds the n-mode unfolded tensor into the original
    tensor of the specified shape.

    Parameters
    ----------
    unfolded_tensor : ndarray
        unfolded tensor of shape ``(shape[mode], -1)``
    mode : int
        the mode of the unfolding
    shape : tuple
        shape of the original tensor before unfolding

    Returns
    -------
    ndarray
        folded_tensor of shape `shape`

    >>> a = np.random.rand(3, 12)
    >>> b = fold(a, 0, [3, 3, 4])
    >>> b.shape == (3, 3, 4)
    True
    >>> np.allclose(fold(a.ravel(), -1, [3, 3, 4]), b)
    True
    """
    if mode > -1:
        # Rebuild the permuted shape used by unfold(), then move the axis back.
        full_shape = list(shape)
        mode_dim = full_shape.pop(mode)
        full_shape.insert(0, mode_dim)
        return np.moveaxis(unfolded_tensor.reshape(full_shape), 0, mode)
    elif mode == -1:
        return unfolded_tensor.reshape(shape)
    else:
        raise ValueError('Wrong mode: {}'.format(mode))


def als_contract_cpd(factors_top, tensor_cpd, skip_factor,
                     conjugate=False, tensor_format='cpd'):
    """
    Performs the first part of the ALS step on an (already CPD decomposed)
    tensor, which is to contract "external" indices of the tensor with all
    CPD factors of the decomposition except of one, denoted by skip_factor

    :param factors_top: iterable with CPD decomposition of the top (left)
           tensor
    :param tensor_cpd: iterable with CPD decomposition of the bottom
           (right) tensor
    :param skip_factor: int
           skip the factor number skip_factor
    :param conjugate: bool, default: False
           conjugate the top (left) factors
    :param tensor_format: str, default 'cpd'
           Format of the decomposition ('cpd' or 'ncpd') for both
           tensor_cpd and factors_top

    Returns
    -------
    matrix
    """
    # Pick the contraction routine matching the decomposition format.
    if tensor_format == 'cpd':
        contractor = cpd_contract_free_cpd
    elif tensor_format == 'ncpd':
        contractor = ncpd_contract_free_ncpd
    else:
        raise ValueError('Unknown tensor_format: {}'.format(tensor_format))

    s = contractor(factors_top, tensor_cpd, skip_factor=skip_factor,
                   conjugate=conjugate)
    return np.dot(tensor_cpd[skip_factor], s.T)


def als_contract_dense(factors_top, tensor, skip_factor,
                       conjugate=False, tensor_format='cpd'):
    """
    Performs the first part of the ALS step on a dense tensor, which is to
    contract "external" indices of the tensor with all nCPD factors of
    the decomposition except of one, denoted by skip_factor

    :param factors_top: iterable with CPD decomposition of the top (left)
           tensor
    :param tensor: tensor to contract with
    :param skip_factor: int
           skip the factor number skip_factor
    :param conjugate: bool, default: False
           conjugate the top (left) factors
    :param tensor_format: str, default 'cpd'
           Format of the decomposition ('cpd' or 'ncpd')

    Returns
    -------
    matrix

    >>> kn = ncpd_initialize([3, 3, 4], 4)
    >>> k = kn[1:]
    >>> t = cpd_rebuild(k)
    >>> s1 = als_contract_dense(kn, t, skip_factor=0, tensor_format='ncpd')
    >>> s2 = np.dot(t.ravel(), khatrirao(k))
    >>> np.allclose(s1, s2)
    True
    >>> kn1 = ncpd_renormalize(kn)
    >>> s3 = als_contract_dense(kn, t, skip_factor=1, tensor_format='ncpd')
    >>> s4 = als_contract_dense(kn1, t, skip_factor=1, tensor_format='ncpd')
    >>> np.allclose(s3, s4)
    True
    """
    if conjugate:
        factors_top = [factor.conjugate() for factor in factors_top]

    # In ncpd format factors_top[0] is the norm vector, so the tensor mode
    # corresponding to factor k is k - 1.
    if tensor_format == 'cpd':
        mode = skip_factor
    elif tensor_format == 'ncpd':
        mode = skip_factor - 1
    else:
        raise ValueError('Unknown tensor_format: {}'.format(tensor_format))

    return np.dot(unfold(tensor, mode=mode),
                  khatrirao(factors_top, skip_matrix=skip_factor))


def als_pseudo_inverse(factors_top, factors_bottom, skip_factor,
                       conjugate=False, thresh=1e-10):
    """
    Calculates the pseudo inverse needed in the ALS algorithm.

    :param factors_top: iterable with CPD decomposition of the top (left)
           tensor
    :param factors_bottom: iterable with CPD decomposition of the bottom
           (right) tensor
    :param skip_factor: int
           skip the factor number skip_factor
    :param conjugate: bool, default: False
           conjugate the top (left) factors
    :param thresh: float, default: 1e-10
           threshold used to calculate pseudo inverse

    Returns
    -------
    matrix

    >>> a = cpd_initialize([3, 3, 4], 3)
    >>> b = cpd_initialize([3, 3, 4], 4)
    >>> r = cpd_contract_free_cpd(a, b, skip_factor=2)
    >>> s = als_pseudo_inverse(a, b, skip_factor=2)
    >>> np.allclose(np.linalg.pinv(r), s)
    True
    >>> a = ncpd_initialize([3, 3, 4], 3)
    >>> b = ncpd_initialize([3, 3, 4], 4)
    >>> r = ncpd_contract_free_ncpd(a, b, skip_factor=2)
    >>> s = als_pseudo_inverse(a, b, skip_factor=2)
    >>> np.allclose(np.linalg.pinv(r), s)
    True
    """
    rank1 = factors_top[0].shape[1]
    rank2 = factors_bottom[0].shape[1]
    if conjugate:
        factors_top = [factor.conjugate() for factor in factors_top]

    # Hadamard product of the Gram matrices of all non-skipped factor pairs.
    pseudo_inverse = np.ones((rank1, rank2))
    for ii, (factor1, factor2) in enumerate(zip(factors_top,
                                                factors_bottom)):
        if ii != skip_factor:
            pseudo_inverse *= np.dot(factor1.T, factor2)

    # `thresh` is passed positionally as np.linalg.pinv's rcond cutoff.
    return np.linalg.pinv(pseudo_inverse, thresh)


def als_step_cpd(factors_top, tensor_cpd, skip_factor,
                 conjugate=False, tensor_format='cpd'):
    """
    Performs one ALS update of the factor skip_factor for a CPD decomposed
    tensor

    :param factors_top: iterable with CPD decomposition of the top (left)
           tensor
31, 16, 876, 532], [706, 99, 684, 613, 93, 504, 584, 599, 513, 638, 645, 334, 448, 148, 802, 805, 255, 759, 176], [262, 671, 68, 389, 36, 561, 104, 285, 968, 896, 20, 912, 215, 161, 564, 476, 828, 815, 331], [74, 29, 857, 758, 382, 578, 150, 745, 684, 558, 384, 439, 118, 599, 779, 378, 816, 996, 206], [83, 545, 645, 856, 457, 736, 454, 105, 282, 587, 180, 436, 188, 477, 503, 377, 696, 918, 592]]), [279, 149, 635, 162, 437, 751, 73, 382, 918, 994, 660, 832, 818, 312, 381, 306, 375, 87, 245, 162, 768, 161, 656, 457, 421, 136, 852, 668, 671, 227, 172, 784, 532, 176, 331, 206, 592, 918, 696, 377, 503, 477, 188, 436, 180, 587, 282, 105, 454, 736, 457, 856, 645, 545, 83, 74, 262, 706, 1000, 839, 976, 249, 412, 424, 191, 812, 442, 450, 138, 164, 966, 54, 599, 406, 599, 951, 888, 231, 723, 287, 692, 617, 275, 719, 445, 361, 954, 583, 951, 694, 186, 491, 906, 448, 764, 854, 87, 636, 171, 777, 694, 876, 759, 815, 996, 816, 378, 779, 599, 118, 439, 384, 558, 684, 745, 150, 578, 382, 758, 857, 29, 671, 99, 249, 795, 808, 695, 431, 5, 591, 736, 235, 129, 179, 463, 522, 282, 502, 739, 889, 323, 635, 486, 477, 231, 502, 471, 524, 566, 189, 91, 998, 772, 108, 727, 234, 200, 139, 333, 258, 422, 160, 16, 255, 828, 476, 564, 161, 215, 912, 20, 896, 968, 285, 104, 561, 36, 389, 68, 684, 558, 83, 967, 992, 428, 610, 992, 968, 249, 723, 498, 961, 850, 665, 898, 53, 331, 507, 69, 164, 99, 435, 418, 104, 868, 468, 665, 849, 34, 778, 943, 390, 517, 338, 78, 31, 805, 802, 148, 448, 334, 645, 638, 513, 599, 584, 504, 93, 613, 794, 698, 898, 731, 777, 711, 189, 113, 838, 662, 106, 803, 338, 361, 631, 370, 805, 156, 583, 102, 486, 989, 81, 693, 530, 225, 706, 746, 165, 656, 827, 140, 2, 359, 516, 938, 452, 18, 399, 564, 668, 891, 557, 78, 15, 880, 796, 987, 205, 26, 665, 9, 227, 23, 222, 199, 111, 556, 897, 4, 544, 701, 408, 324, 833, 738, 954, 669, 671, 514, 886, 858, 89, 382, 452, 584, 231, 843, 971, 952, 162, 680, 861, 927, 55, 260, 9, 140, 495, 478, 891, 410, 406, 115, 83, 6, 809, 421, 
611, 696, 182, 563, 567, 931, 899, 784, 936, 699, 733, 830, 760, 301, 919, 720, 1000, 601, 804, 942, 762, 332, 966, 192, 566, 788, 983, 584, 953, 63, 510, 281, 643, 399]) def test_snail_028(self): self.assertEqual(snail([[694, 584, 826, 873, 217, 367, 668, 234, 472, 306, 498, 94, 613, 797], [712, 162, 246, 54, 330, 345, 797, 656, 949, 377, 907, 79, 246, 655], [393, 162, 490, 233, 843, 794, 437, 391, 266, 639, 553, 518, 364, 569], [844, 274, 883, 549, 545, 431, 169, 974, 129, 186, 605, 391, 354, 562], [439, 363, 626, 800, 507, 849, 391, 701, 310, 374, 946, 329, 720, 188], [110, 517, 124, 454, 546, 362, 238, 717, 444, 560, 620, 885, 732, 631], [849, 531, 960, 464, 448, 802, 101, 755, 69, 843, 256, 543, 728, 839], [538, 525, 681, 672, 849, 637, 688, 939, 393, 184, 675, 434, 361, 557], [483, 832, 588, 542, 124, 605, 146, 492, 359, 465, 278, 352, 815, 884], [837, 448, 77, 252, 291, 313, 816, 79, 919, 188, 845, 26, 918, 190], [994, 349, 148, 613, 557, 269, 695, 471, 944, 90, 2, 167, 136, 926], [596, 304, 727, 835, 858, 635, 727, 136, 179, 266, 171, 679, 985, 945], [152, 294, 615, 139, 465, 165, 578, 914, 232, 953, 268, 143, 847, 663], [355, 96, 458, 217, 834, 690, 302, 691, 470, 344, 567, 66, 479, 144]]), [694, 584, 826, 873, 217, 367, 668, 234, 472, 306, 498, 94, 613, 797, 655, 569, 562, 188, 631, 839, 557, 884, 190, 926, 945, 663, 144, 479, 66, 567, 344, 470, 691, 302, 690, 834, 217, 458, 96, 355, 152, 596, 994, 837, 483, 538, 849, 110, 439, 844, 393, 712, 162, 246, 54, 330, 345, 797, 656, 949, 377, 907, 79, 246, 364, 354, 720, 732, 728, 361, 815, 918, 136, 985, 847, 143, 268, 953, 232, 914, 578, 165, 465, 139, 615, 294, 304, 349, 448, 832, 525, 531, 517, 363, 274, 162, 490, 233, 843, 794, 437, 391, 266, 639, 553, 518, 391, 329, 885, 543, 434, 352, 26, 167, 679, 171, 266, 179, 136, 727, 635, 858, 835, 727, 148, 77, 588, 681, 960, 124, 626, 883, 549, 545, 431, 169, 974, 129, 186, 605, 946, 620, 256, 675, 278, 845, 2, 90, 944, 471, 695, 269, 557, 613, 252, 542, 672, 464, 
454, 800, 507, 849, 391, 701, 310, 374, 560, 843, 184, 465, 188, 919, 79, 816, 313, 291, 124, 849, 448, 546, 362, 238, 717, 444, 69, 393, 359, 492, 146, 605, 637, 802, 101, 755, 939, 688]) def test_snail_029(self): self.assertEqual(snail([[823, 448, 897, 244, 584, 461, 96], [645, 751, 213, 852, 812, 16, 617], [341, 284, 208, 458, 28, 238, 767], [773, 348, 159, 197, 957, 501, 818], [932, 118, 964, 418, 423, 847, 430], [545, 667, 931, 75, 818, 645, 45], [923, 151, 732, 63, 520, 681, 627]]), [823, 448, 897, 244, 584, 461, 96, 617, 767, 818, 430, 45, 627, 681, 520, 63, 732, 151, 923, 545, 932, 773, 341, 645, 751, 213, 852, 812, 16, 238, 501, 847, 645, 818, 75, 931, 667, 118, 348, 284, 208, 458, 28, 957, 423, 418, 964, 159, 197]) def test_snail_030(self): self.assertEqual(snail([[491, 432, 751, 729, 722, 964, 386, 710, 130, 369, 227, 487, 395, 914, 468, 885, 81, 569, 868, 900], [925, 992, 601, 188, 204, 640, 239, 6, 26, 451, 26, 630, 429, 830, 38, 905, 555, 630, 296, 840], [401, 86, 682, 405, 960, 499, 290, 765, 513, 376, 331, 78, 471, 999, 3, 328, 896, 758, 56, 75], [542, 905, 880, 788, 546, 879, 658, 836, 787, 912, 968, 988, 98, 461, 973, 469, 371, 178, 984, 431], [584, 627, 404, 160, 875, 721, 409, 163, 30, 127, 499, 300, 869, 690, 69, 260, 751, 151, 288, 319], [748, 508, 826, 682, 70, 215, 89, 186, 418, 386, 474, 42, 389, 599, 872, 534, 181, 496, 186, 21], [546, 745, 446, 346, 449, 807, 863, 996, 605, 427, 845, 182, 932, 282, 544, 650, 123, 188, 505, 745], [107, 963, 507, 886, 162, 321, 597, 90, 576, 101, 818, 394, 542, 276, 578, 417, 797, 89, 366, 771], [904, 230, 474, 400, 921, 749, 277, 826, 638, 294, 520, 617, 405, 983, 437, 87, 940, 492, 561, 407], [877, 195, 809, 714, 64, 362, 585, 4, 995, 949, 383, 172, 55, 468, 637, 229, 746, 208, 91, 708], [663, 758, 330, 359, 996, 67, 409, 169, 660, 688, 11, 50, 191, 88, 802, 834, 559, 139, 490, 412], [310, 464, 204, 408, 801, 352, 18, 167, 815, 753, 758, 833, 85, 731, 253, 655, 290, 493, 356, 396], [424, 931, 222, 6, 67, 
347, 450, 528, 353, 444, 283, 971, 925, 76, 208, 101, 989, 64, 209, 875], [903, 651, 952, 356, 647, 99, 895, 868, 203, 620, 147, 200, 657, 839, 745, 260, 916, 552, 896, 209], [721, 17, 825, 638, 691, 971, 95, 844, 75, 203, 692, 210, 618, 113, 518, 82, 493, 463, 647, 122], [335, 97, 438, 636, 568, 329, 681, 998, 316, 679, 597, 547, 505, 283, 748, 299, 800, 828, 521, 139], [209, 110, 325, 990, 706, 379, 897, 133, 457, 573, 653, 863, 452, 819, 801, 756, 590, 925, 583, 731],
    if ``pn`` is close to 1, this approximation gives incorrect results.

    Here we calculate this probability by inverting the Binomial problem.
    Given that (see ``p_multitrial_from_single_trial``)
    the probability of getting more than one hit in n trials,
    given the single-trial probability *p*, is

    .. math ::

        P (k \geq 1) = 1 - (1 - p)^n,

    we get the single trial probability from the multi-trial one from

    .. math ::

        p = 1 - (1 - P)^{(1/n)}

    This is also known as Šidák correction.

    Parameters
    ----------
    pn : float
        The significance at which we want to reject the null hypothesis
        after multiple trials
    n : int
        The number of trials

    Returns
    -------
    p1 : float
        The significance at which we reject the null hypothesis on
        each single trial.
    """
    # Work in log space for numerical stability when pn is close to 0 or 1.
    logp = _logp_single_trial_from_logp_multitrial(
        np.log(pn).astype(np.float64), n)

    if np.any(np.isnan(logp)):
        # NaNs signal an ill-conditioned inversion; warn but still return
        # them so the caller can decide how to react.
        if np.any(1 - pn < np.finfo(np.double).resolution * 1000):
            warnings.warn("Multi-trial probability is very close to 1.")
            warnings.warn("The problem is ill-conditioned. Returning NaN")
    return np.exp(logp)


def fold_profile_probability(stat, nbin, ntrial=1):
    """Calculate the probability of a certain folded profile, due to noise.

    Parameters
    ----------
    stat : float
        The epoch folding statistics
    nbin : int
        The number of bins in the profile

    Other Parameters
    ----------------
    ntrial : int
        The number of trials executed to find this profile

    Returns
    -------
    p : float
        The probability that the profile has been produced by noise
    """
    # Under the null hypothesis the epoch-folding statistic follows a
    # chi-squared distribution with nbin - 1 degrees of freedom.
    p1 = stats.chi2.sf(stat, (nbin - 1))
    return p_multitrial_from_single_trial(p1, ntrial)


def fold_profile_logprobability(stat, nbin, ntrial=1):
    """Calculate the probability of a certain folded profile, due to noise.

    Parameters
    ----------
    stat : float
        The epoch folding statistics
    nbin : int
        The number of bins in the profile

    Other Parameters
    ----------------
    ntrial : int
        The number of trials executed to find this profile

    Returns
    -------
    logp : float
        The log-probability that the profile has been produced by noise
    """
    # Log-space version of fold_profile_probability, safe for tiny p-values.
    p1 = chi2_logp(stat, (nbin - 1))
    return _logp_multitrial_from_single_logp(p1, ntrial)


def fold_detection_level(nbin, epsilon=0.01, ntrial=1):
    """Return the detection level for a folded profile.

    See Leahy et al. (1983).

    Parameters
    ----------
    nbin : int
        The number of bins in the profile
    epsilon : float, default 0.01
        The fractional probability that the signal has been produced
        by noise

    Other Parameters
    ----------------
    ntrial : int
        The number of trials executed to find this profile

    Returns
    -------
    detlev : float
        The epoch folding statistics corresponding to a probability
        epsilon * 100 % that the signal has been produced by noise
    """
    # Convert the multi-trial significance to single-trial, then invert the
    # chi-squared survival function.
    epsilon = p_single_trial_from_p_multitrial(epsilon, ntrial)
    return stats.chi2.isf(epsilon.astype(np.double), nbin - 1)


def z2_n_probability(z2, n, ntrial=1, n_summed_spectra=1):
    """Calculate the probability of a certain folded profile, due to noise.

    Parameters
    ----------
    z2 : float
        A Z^2_n statistics value
    n : int, default 2
        The ``n`` in $Z^2_n$ (number of harmonics, including the
        fundamental)

    Other Parameters
    ----------------
    ntrial : int
        The number of trials executed to find this profile
    n_summed_spectra : int
        Number of Z_2^n periodograms that were averaged to obtain z2

    Returns
    -------
    p : float
        The probability that the Z^2_n value has been produced by noise
    """
    # Summed Z^2_n statistics follow chi-squared with 2*n dof per spectrum.
    epsilon_1 = stats.chi2.sf(z2 * n_summed_spectra,
                              2 * n * n_summed_spectra)
    epsilon = p_multitrial_from_single_trial(epsilon_1, ntrial)
    return epsilon


def z2_n_logprobability(z2, n, ntrial=1, n_summed_spectra=1):
    """Calculate the probability of a certain folded profile, due to noise.

    Parameters
    ----------
    z2 : float
        A Z^2_n statistics value
    n : int, default 2
        The ``n`` in $Z^2_n$ (number of harmonics, including the
        fundamental)

    Other Parameters
    ----------------
    ntrial : int
        The number of trials executed to find this profile
    n_summed_spectra : int
        Number of Z_2^n periodograms that were averaged to obtain z2

    Returns
    -------
    p : float
        The probability that the Z^2_n value has been produced by noise
    """
    # Log-space version of z2_n_probability, safe for tiny p-values.
    epsilon_1 = chi2_logp(np.double(z2 * n_summed_spectra),
                          2 * n * n_summed_spectra)
    epsilon = _logp_multitrial_from_single_logp(epsilon_1, ntrial)
    return epsilon


def z2_n_detection_level(n=2, epsilon=0.01, ntrial=1, n_summed_spectra=1):
    """Return the detection level for the Z^2_n statistics.

    See Buccheri et al. (1983), Bendat and Piersol (1971).

    Parameters
    ----------
    n : int, default 2
        The ``n`` in $Z^2_n$ (number of harmonics, including the
        fundamental)
    epsilon : float, default 0.01
        The fractional probability that the signal has been produced
        by noise

    Other Parameters
    ----------------
    ntrial : int
        The number of trials executed to find this profile
    n_summed_spectra : int
        Number of Z_2^n periodograms that are being averaged

    Returns
    -------
    detlev : float
        The epoch folding statistics corresponding to a probability
        epsilon * 100 % that the signal has been produced by noise
    """
    epsilon = p_single_trial_from_p_multitrial(epsilon, ntrial)
    # Invert the chi-squared survival function, then rescale back to the
    # averaged-spectrum normalization.
    retlev = stats.chi2.isf(epsilon.astype(np.double),
                            2 * n_summed_spectra * n) / (n_summed_spectra)
    return retlev


def pds_probability(level, ntrial=1, n_summed_spectra=1, n_rebin=1):
    r"""Give the probability of a given power level in PDS.

    Return the probability of a certain power level in a Power Density
    Spectrum of nbins bins, normalized a la Leahy (1983), based on the 2-dof
    :math:`{\chi}^2` statistics, corrected for rebinning (n_rebin)
    and multiple PDS averaging (n_summed_spectra)

    Parameters
    ----------
    level : float or array of floats
        The power level for which we are calculating the probability

    Other Parameters
    ----------------
    ntrial : int
        The number of *independent* trials (the independent bins of the PDS)
    n_summed_spectra : int
        The number of power density spectra that have been averaged to
        obtain this power level
    n_rebin : int
        The number of power density bins that have been averaged to obtain
        this power level

    Returns
    -------
    epsilon : float
        The probability value(s)
    """
    # Averaging M spectra and rebinning by R gives chi-squared with 2*M*R
    # dof (scaled); rescale the level accordingly before the sf lookup.
    epsilon_1 = stats.chi2.sf(level * n_summed_spectra * n_rebin,
                              2 * n_summed_spectra * n_rebin)
    epsilon = p_multitrial_from_single_trial(epsilon_1, ntrial)
    return epsilon


def pds_logprobability(level, ntrial=1, n_summed_spectra=1, n_rebin=1):
    r"""Give the probability of a given power level in PDS.

    Return the probability of a certain power level in a Power Density
    Spectrum of nbins bins, normalized a la Leahy (1983), based on the 2-dof
    :math:`{\chi}^2` statistics, corrected for rebinning (n_rebin)
    and multiple PDS averaging (n_summed_spectra)

    Parameters
    ----------
    level : float or array of floats
        The power level for which we are calculating the probability

    Other Parameters
    ----------------
    ntrial : int
        The number of *independent* trials (the independent bins of the PDS)
    n_summed_spectra : int
        The number of power density spectra that have been averaged to
        obtain this power level
    n_rebin : int
        The number of power density bins that have been averaged to obtain
        this power level

    Returns
    -------
    epsilon : float
        The probability value(s)
    """
    # Log-space version of pds_probability, safe for tiny p-values.
    epsilon_1 = chi2_logp(level * n_summed_spectra * n_rebin,
                          2 * n_summed_spectra * n_rebin)
    epsilon = _logp_multitrial_from_single_logp(epsilon_1, ntrial)
    return epsilon


def pds_detection_level(epsilon=0.01, ntrial=1, n_summed_spectra=1,
                        n_rebin=1):
    r"""Detection level for a PDS.

    Return the detection level (with probability 1 - epsilon) for a Power
    Density Spectrum of nbins bins, normalized a la Leahy (1983), based on
    the 2-dof :math:`{\chi}^2` statistics, corrected for rebinning (n_rebin)
    and multiple PDS averaging (n_summed_spectra)

    Parameters
    ----------
    epsilon : float
        The single-trial probability value(s)

    Other Parameters
    ----------------
    ntrial : int
        The number of *independent* trials (the independent bins of the PDS)
    n_summed_spectra : int
        The number of power density spectra that have been averaged to
        obtain this power level
    n_rebin : int
        The number of power density bins that have been averaged to obtain
        this power level

    Examples
    --------
    >>> np.isclose(pds_detection_level(0.1), 4.6, atol=0.1)
    True
    >>> np.allclose(pds_detection_level(0.1, n_rebin=[1]), [4.6], atol=0.1)
    True
    """
    epsilon = p_single_trial_from_p_multitrial(epsilon, ntrial)
    epsilon = epsilon.astype(np.double)
    # n_rebin may be a scalar or an iterable of rebin factors; the latter
    # returns one detection level per factor.
    if isinstance(n_rebin, Iterable):
        retlev = [stats.chi2.isf(epsilon, 2 * n_summed_spectra * r)
                  / (n_summed_spectra * r) for r in n_rebin]
        retlev = np.array(retlev)
    else:
        r = n_rebin
        retlev = stats.chi2.isf(epsilon, 2 * n_summed_spectra * r) \
            / (n_summed_spectra * r)
    return retlev


def classical_pvalue(power, nspec):
    """
    Note:
    This is stingray's original implementation of the probability
    distribution for the power spectrum. It is superseded by the
    implementation in pds_probability for practical purposes, but remains
    here for backwards compatibility and for its educational value as a
    clear, explicit implementation of the correct probability distribution.

    Compute the probability of detecting the current power under
    the assumption that there is no periodic oscillation in the data.

    This computes the single-trial p-value that the power was
    observed under the null hypothesis that there is no signal in
    the data.

    Important:
ChanX = ['_']*self.chanx_len _ = [self._set_bit(ChanX, e.attrib['index']) for e in chan.findall('./driver_node[@type="CHANX"]')] ChanY = ['_']*self.chany_len _ = [self._set_bit(ChanY, e.attrib['index']) for e in chan.findall('./driver_node[@type="CHANY"]')] flags = ChanX if side in ['top', 'bottom'] else ChanY arr = np.vstack([arr, flags]) if show: self.render_ipin(arr) return arr def report_incoming_channels(self, side): """ This prints incoming channels in the given switch box from the given direction ``index, Mux, ChanX, Chany, OPIN_L, OPIN_R, OPIN_T, OPIN_B`` """ format_str = "{:^6s} {:^6s} {:^45s} {:^45s} {:^10s} {:^10s} {:^10s} {:^10s}" print("= = "*40) print(format_str.format("index", "Mux", "ChanX", "Chany", "OPIN_L", "OPIN_R", "OPIN_T", "OPIN_B")) print("= = "*40) items = {"left": self.chanx_l, "right": self.chanx_r, "top": self.chany_t, "bottom": self.chany_b}[side] for chan in items: ChanX = ['_']*self.chanx_len _ = [self._set_vbit(ChanX, e.attrib['index']) for e in chan.findall('./driver_node[@type="CHANX"]')] ChanY = ['_']*self.chany_len ChanY = ['_']*40 _ = [self._set_hbit(ChanY, e.attrib['index']) for e in chan.findall('./driver_node[@type="CHANY"]')] # OPIN_L = ['_']*self.opin_l_len # _ = [self._set_bit(OPIN_L, e.attrib['index']) # for e in chan.findall('./driver_node[@type="OPIN"][@side="left"]')] # OPIN_R = ['_']*self.opin_l_len # _ = [self._set_bit(OPIN_R, e.attrib['index']) # for e in chan.findall('./driver_node[@type="OPIN"][@side="right"]')] # OPIN_T = ['_']*self.opin_l_len # _ = [self._set_bit(OPIN_T, e.attrib['index']) # for e in chan.findall('./driver_node[@type="OPIN"][@side="top"]')] # OPIN_B = ['_']*self.opin_l_len # _ = [self._set_bit(OPIN_B, e.attrib['index']) # for e in chan.findall('./driver_node[@type="OPIN"][@side="bottom"]')] OPIN_L = ["_", "_"] OPIN_R = ["_", "_"] OPIN_T = ["_", "_"] OPIN_B = ["_", "_"] print(format_str.format( chan.attrib["index"], chan.attrib["mux_size"], ''.join(ChanX), ''.join(ChanY), ''.join(OPIN_L), 
''.join(OPIN_R), ''.join(OPIN_T), ''.join(OPIN_B))) def report_outgoing_channels(self, side): """ This prints the channel information of given switch box for a given direction channels """ format_str = "{:^6s} {:^6s} {:^45s} {:^45s} {:^10s} {:^10s} {:^10s} {:^10s}" print("= = "*40) print(format_str.format("index", "Mux", "ChanX", "Chany", "OPIN_L", "OPIN_R", "OPIN_T", "OPIN_B")) print("= = "*40) items = {"left": self.chanx_l, "right": self.chanx_r, "top": self.chany_t, "bottom": self.chany_b}[side] for chan in items: ChanX = ['_']*self.chanx_len _ = [self._set_vbit(ChanX, e.attrib['index']) for e in chan.findall('./driver_node[@type="CHANX"]')] ChanY = ['_']*self.chany_len _ = [self._set_hbit(ChanY, e.attrib['index']) for e in chan.findall('./driver_node[@type="CHANY"]')] OPIN_L = ['_']*self.opin_l_len _ = [self._set_bit(OPIN_L, e.attrib['index']) for e in chan.findall('./driver_node[@type="OPIN"][@side="left"]')] OPIN_R = ['_']*self.opin_l_len _ = [self._set_bit(OPIN_R, e.attrib['index']) for e in chan.findall('./driver_node[@type="OPIN"][@side="right"]')] OPIN_T = ['_']*self.opin_l_len _ = [self._set_bit(OPIN_T, e.attrib['index']) for e in chan.findall('./driver_node[@type="OPIN"][@side="top"]')] OPIN_B = ['_']*self.opin_l_len _ = [self._set_bit(OPIN_B, e.attrib['index']) for e in chan.findall('./driver_node[@type="OPIN"][@side="bottom"]')] print(format_str.format( chan.attrib["index"], chan.attrib["mux_size"], ''.join(ChanX), ''.join(ChanY), ''.join(OPIN_L), ''.join(OPIN_R), ''.join(OPIN_T), ''.join(OPIN_B))) def extract_info(self): """ Extracts insformation from provided general switch box file """ root = self.root self.chanx = sorted(root.findall("CHANX"), key=lambda x: int(x.attrib['index'])) self.chanx_len = len(self.chanx) self.chanx_l = root.findall("CHANX[@side='left']") self.chanx_l_len = len(self.chanx_l) self.chanx_r = root.findall("CHANX[@side='right']") self.chanx_r_len = len(self.chanx_r) self.chanx_l_out_map = [0]*self.chanx_len 
        # NOTE(review): original indentation was lost; structure below is
        # reconstructed from syntax — confirm against upstream before relying
        # on loop/body boundaries.
        self.chanx_r_out_map = [0]*self.chanx_len
        self.chanx_drivers = self.chanx_l + self.chanx_r
        # NOTE(review): the assignment above is immediately overwritten by the
        # XPath query below (same for chany_drivers further down) — the first
        # value is dead code.
        self.chanx_drivers = root.findall('.//CHANX/driver_node[@type="CHANX"]') + \
            root.findall('.//CHANY/driver_node[@type="CHANX"]')
        # Vertical (CHANY) channel nodes, split by side and sorted by index.
        self.chany_t = root.findall("CHANY[@side='top']")
        self.chany_t_len = len(self.chany_t)
        self.chany_b = root.findall("CHANY[@side='bottom']")
        self.chany_b_len = len(self.chany_b)
        self.chany = sorted(root.findall("CHANY"), key=lambda x: int(x.attrib['index']))
        self.chany_len = self.chany_t_len + self.chany_b_len
        # Per-channel output coordinate maps, filled in by the _add_* renderers.
        self.chany_t_out_map = [0]*self.chany_len
        self.chany_b_out_map = [0]*self.chany_len
        self.chany_drivers = self.chany_t + self.chany_b
        self.chany_drivers = root.findall('.//CHANY/driver_node[@type="CHANY"]') + \
            root.findall('.//CHANX/driver_node[@type="CHANY"]')
        # Input pins per side; lengths come from the highest index seen, not
        # the element count.
        self.ipin_l = root.findall("IPIN[@side='left']")
        self.ipin_l_len = self._get_max_index(self.ipin_l)
        self.ipin_r = root.findall("IPIN[@side='right']")
        self.ipin_r_len = self._get_max_index(self.ipin_r)
        self.ipin_t = root.findall("IPIN[@side='top']")
        self.ipin_t_len = self._get_max_index(self.ipin_t)
        self.ipin_b = root.findall("IPIN[@side='bottom']")
        self.ipin_b_len = self._get_max_index(self.ipin_b)
        # Collect Feedthrough: a channel with exactly one child is treated as
        # a pass-through (no switching inside this box).
        self.ft_left = [chan for chan in self.chanx_l if len(
            chan.getchildren()) == 1]
        self.ft_left_len = len(set((e.attrib["index"] for e in self.ft_left)))
        self.ft_right = [chan for chan in self.chanx_r if len(
            chan.getchildren()) == 1]
        self.ft_right_len = len(
            set((e.attrib["index"] for e in self.ft_right)))
        self.ft_top = [chan for chan in self.chany_t if len(
            chan.getchildren()) == 1]
        self.ft_top_len = len(set((e.attrib["index"] for e in self.ft_top)))
        self.ft_bottom = [chan for chan in self.chany_b if len(
            chan.getchildren()) == 1]
        self.ft_bottom_len = len(
            set((e.attrib["index"] for e in self.ft_bottom)))
        # Left side OPins (driver nodes), further split by the grid side the
        # driving block sits on.
        self.opin_l = self._get_driver_node(root, "*", "OPIN", "left")
        self.opin_l_len = self._get_max_index(self.opin_l)
        self.opin_l_t = self._filter_attrib(self.opin_l, "grid_side", "top")
        self.opin_l_t_len = self._get_max_index(self.opin_l_t)
        self.opin_l_b = self._filter_attrib(self.opin_l, "grid_side", "bottom")
        self.opin_l_b_len = self._get_max_index(self.opin_l_b)
        # right side OPins
        self.opin_r = self._get_driver_node(root, "*", "OPIN", "right")
        self.opin_r_len = self._get_max_index(self.opin_r)
        self.opin_r_t = self._filter_attrib(self.opin_r, "grid_side", "top")
        self.opin_r_t_len = self._get_max_index(self.opin_r_t)
        self.opin_r_b = self._filter_attrib(self.opin_r, "grid_side", "bottom")
        self.opin_r_b_len = self._get_max_index(self.opin_r_b)
        # top side OPins
        self.opin_t = self._get_driver_node(root, "*", "OPIN", "top")
        self.opin_t_len = self._get_max_index(self.opin_t)
        self.opin_t_l = self._filter_attrib(self.opin_t, "grid_side", "left")
        self.opin_t_l_len = self._get_max_index(self.opin_t_l)
        self.opin_t_r = self._filter_attrib(self.opin_t, "grid_side", "right")
        self.opin_t_r_len = self._get_max_index(self.opin_t_r)
        # Bottom side OPins
        self.opin_b = self._get_driver_node(root, "*", "OPIN", "bottom")
        self.opin_b_len = self._get_max_index(self.opin_b)
        self.opin_b_l = self._filter_attrib(self.opin_b, "grid_side", "left")
        self.opin_b_l_len = self._get_max_index(self.opin_b_l)
        self.opin_b_r = self._filter_attrib(self.opin_b, "grid_side", "right")
        self.opin_b_r_len = self._get_max_index(self.opin_b_r)

    def get_stats(self, print_header=False, noprint=False):
        """ Prints switch box statistics

        Builds one formatted row of channel/IPIN/OPIN counts for this switch
        box. The row is printed unless ``noprint`` is set and is always
        returned, so callers can collect rows into a table.
        """
        if print_header:
            self._print_stat_header()
        msg = ("%15s %8s %8s %8s %8s %8s %8s %8s %8s %15s %15s %15s %15s" %
               (self.name,
                self.chanx_l_len, self.chanx_r_len,
                self.chany_t_len, self.chany_b_len,
                self.ipin_l_len, self.ipin_r_len,
                self.ipin_t_len, self.ipin_b_len,
                f"{self.opin_l_len:3} [{self.opin_l_t_len:3},{self.opin_l_b_len:3}]",
                f"{self.opin_r_len:3} [{self.opin_r_t_len:3},{self.opin_r_b_len:3}]",
                f"{self.opin_t_len:3} [{self.opin_t_l_len:3},{self.opin_t_r_len:3}]",
                f"{self.opin_b_len:3} [{self.opin_b_l_len:3},{self.opin_b_r_len:3}]"))
        if not noprint:
            print(msg)
        return msg

    def save(self, filename=None, viewbox=None):
        """ Save SVG file

        Computes a viewbox (with a fixed margin, y-axis flipped) unless one is
        supplied, then writes the drawing to ``filename`` (defaults to
        ``_<name>.svg``).
        """
        # NOTE(review): method name `_add_stylehseet` looks like a typo for
        # `_add_stylesheet` — it must match the definition elsewhere in this
        # file; confirm before renaming.
        self._add_stylehseet()
        filename = filename or "_"+self.name+".svg"
        margin = 200
        width, height = self.x_max_4-self.x_min_4, self.y_max_4-self.y_min_4
        # y is negated because the drawing uses a scale(1,-1) coordinate flip.
        viewbox = viewbox or (self.x_min_4-margin, -1*(self.y_max_4+margin),
                              width+2*margin, height+2*margin)
        self.dwg.viewbox(*viewbox)
        # NOTE(review): f-string has no placeholder — probably meant to log
        # the output filename.
        logger.debug(f"Saving svg (unknown)")
        self.dwg.saveas(filename, pretty=True)

    def _add_left_connection_box(self, pinmap=None, channel_map=None):
        # Draws one horizontal line per CHANX track on the left edge.
        # Direction (and CSS class/marker) flips when the track index appears
        # among the left driver nodes.
        self.chanx_l_out_map = []
        left_drivers = [e.attrib["index"] for e in self.chanx_l]
        for index in range(self.chanx_len):
            # ``pinmap`` remaps track index -> display slot (identity by default).
            offset = self.x_min_0+self.spacing + pinmap(index)*self.scale
            self.chanx_l_out_map.append(offset)
            marker = self.marker_blue
            start = (self.y_min_4, offset)
            end = (self.y_min_3, offset)
            class_ = "lr"
            if str(index) in left_drivers:
                marker = self.marker_red
                start, end = end, start
                class_ = "rl"
            self.dwgShapes.add(shapes.Line(start=start, end=end,
                                           marker_end=marker.get_funciri(),
                                           class_=f"channel {class_}_chan"))
            # Track index labels at both ends (y negated for the flipped axis).
            self.dwgText.add(Text(index, transform="scale(1,-1)",
                                  class_=f"{class_}_text",
                                  insert=(start[0], -1*start[-1])))
            self.dwgText.add(Text(index, transform="scale(1,-1)",
                                  class_=f"{class_}_text",
                                  insert=(end[0], -1*end[-1])))
        self._add_ipins(side="left", channel_map=channel_map)

    def _add_top_connection_box(self, pinmap=None, channel_map=None):
        # Same idea as _add_left_connection_box, but vertical lines on the
        # top edge for CHANY tracks.
        self.chany_t_out_map = []
        left_drivers = [e.attrib["index"] for e in self.chany_t]
        for index in range(self.chany_len):
            offset = self.y_min_0+self.spacing + pinmap(index)*self.scale
            self.chany_t_out_map.append(offset)
            marker = self.marker_blue
            start = (offset, self.x_max_4)
            end = (offset, self.x_max_3)
            class_ = "lr"
            if str(index) in left_drivers:
                marker = self.marker_red
                start, end = end, start
                class_ = "rl"
            self.dwgShapes.add(shapes.Line(start=start, end=end,
                                           marker_end=marker.get_funciri(),
                                           class_=f"channel {class_}_chan"))
            # Track index labels at both ends of the channel stub.
            self.dwgText.add(Text(index, transform="scale(1,-1)",
                                  class_=f"{class_}_text",
                                  insert=(start[0], -1*start[-1])))
            self.dwgText.add(Text(index, transform="scale(1,-1)",
                                  class_=f"{class_}_text",
                                  insert=(end[0], -1*end[-1])))
        self._add_ipins(side="top", channel_map=channel_map)

    def render_connection_box(self, side, pinmap=None, channel_map=None,
                              filename=None):
        """ Render connections box in SVG format

        Draws the connection box for ``side`` ("top" or anything else =>
        left). ``pinmap`` remaps track indices (identity by default). When
        ``filename`` is given the drawing is also saved with a side-dependent
        viewbox.
        """
        self._setup_svg()
        self._add_origin_marker()
        pinmap = pinmap or (lambda x: x)
        if side == "top":
            self._add_top_connection_box(pinmap=pinmap,
                                         channel_map=channel_map)
        else:
            self._add_left_connection_box(pinmap=pinmap,
                                          channel_map=channel_map)
        if filename:
            margin = 200
            width = (
                self.x_max_4-self.x_min_4) if side == 'top' else (self.x_max_4-self.x_max_3)
            height = (
                self.y_max_4-self.y_min_4) if side == 'left' else (self.y_max_4-self.y_max_3)
            llx = self.x_min_4-margin
            lly = self.y_max_4+margin
            # lly negated: drawing coordinates are y-flipped via scale(1,-1).
            viewbox = (llx, -1*lly, width+(2*margin), height+(2*margin))
            self.save(filename, viewbox=viewbox)

    def render_switch_pattern(self):
        """ Create SVG object rendering all the switchs from switch box """
        self._setup_svg()
        self.add_partitions()
        self._add_origin_marker()
        self._add_channels()
        self._add_opins()
        # ====================================
        # Create channels
        # ====================================
        self._add_left_channels()
        self._add_right_channels()
        self._add_top_channels()
        self._add_bottom_channels()
        # ====================================
        # Added Input Pins
        # ====================================
        self._add_ipins(side='left')
        self._add_ipins(side='top')

    def _add_left_channels(self):
        """ Creates horizontal channels

        For every left CHANX track that is not a pure feedthrough, draws the
        L-shaped (vertical + horizontal) channel, its label, and a switch dot
        for each connection recorded under the track's XML element.
        """
        term_indx = 0
        for ele in self.chanx_l:
            chan = int(ele.attrib["index"])
            # Right to left channels; feedthrough tracks are skipped entirely.
            if not chan in [int(e.attrib["index"]) for e in self.ft_left]:
                # Add connecting Vertical line
                x_line = self.x_min_1 - term_indx*self.scale - self.spacing
                y_line = self.y_min_0 - term_indx*self.scale - self.spacing
                self.dwgShapes.add(shapes.Line(start=(x_line, self.y_max_0),
                                               end=(x_line, y_line),
                                               marker_start=self.marker_red.get_funciri(),
                                               marker_end=self.marker_terminate.get_funciri(),
                                               class_="channel rl_chan"))
                # Add connecting horizontal line
                self.dwgShapes.add(shapes.Line(start=(self.x_max_0, y_line),
                                               end=(self.x_min_4, y_line),
                                               marker_start=self.marker_red.get_funciri(),
                                               class_="channel rl_chan"))
                # Short marker at the corner where the two segments meet.
                self._add_short_at(x_line, y_line)
                # Add Text
                self.dwgText.add(Text(ele.attrib["index"],
                                      transform="scale(1,-1)",
                                      class_="rl_text",
                                      insert=(self.x_min_4, -1*y_line)))
                self.chanx_l_out_map[int(ele.attrib["index"])] = y_line
                # Add Switches: child elements describe the programmable
                # connections; placement depends on the driven node type.
                for switch in ele.getchildren():
                    sw_type = switch.attrib["type"]
                    side = switch.attrib["side"]
                    index = int(switch.attrib["index"])
                    grid_side = switch.attrib.get("grid_side", "")
                    offset = index*self.scale
                    if sw_type == "CHANX":
                        self._add_switch_at(
                            x_line, self.x_min_0 + offset + self.spacing)
                    elif sw_type == "CHANY":
                        self._add_switch_at(
                            self.y_min_0 + offset + self.spacing, y_line)
                    elif sw_type == "OPIN":
                        self._add_switch_at(
                            self.x_min_2 - offset - self.spacing, y_line)
                term_indx += 1

    def _add_right_channels(self):
        """ Creates horizontal channels

        Mirror image of _add_left_channels for the right-hand CHANX tracks
        (left-to-right direction, blue markers).
        """
        term_indx = 0
        offset_0 = self.y_max_0-self.spacing-self.scale
        for ele in self.chanx_r:
            chan = int(ele.attrib["index"])
            # NOTE(review): ``offset`` is computed here but the switch loop
            # below recomputes its own offsets — verify whether this value is
            # still needed.
            offset = offset_0 + int(ele.attrib["index"])*self.scale
            # left to right channels
            if not chan in [int(e.attrib["index"]) for e in self.ft_right]:
                # Add connecting Vertical line
                x_line = self.x_max_1 + term_indx*self.scale + self.spacing
                y_line = self.y_max_0 + term_indx*self.scale + self.spacing
                self.dwgShapes.add(shapes.Line(start=(x_line, self.y_min_0),
                                               end=(x_line, y_line),
                                               marker_start=self.marker_blue.get_funciri(),
                                               marker_end=self.marker_terminate.get_funciri(),
                                               class_="channel lr_chan"))
                # Add connecting horizontal line
                self.dwgShapes.add(shapes.Line(start=(self.x_min_0, y_line),
                                               end=(self.x_max_4, y_line),
                                               marker_start=self.marker_terminate.get_funciri(),
                                               marker_end=self.marker_blue.get_funciri(),
                                               class_="channel lr_chan"))
                self._add_short_at(x_line, y_line)
                # Add Text
                self.dwgText.add(Text(ele.attrib["index"],
                                      transform="scale(1,-1)",
                                      class_="lr_text",
                                      insert=(self.x_max_4, -1*y_line)))
                self.chanx_r_out_map[int(ele.attrib["index"])] = x_line
                # Add Switches
                for switch in ele.getchildren():
                    sw_type = switch.attrib["type"]
                    side = switch.attrib["side"]
                    index = int(switch.attrib["index"])
                    grid_side = switch.attrib.get("grid_side", "")
                    offset = index*self.scale
                    if sw_type == "CHANX":
                        self._add_switch_at(
                            x_line, self.x_min_0 + offset + self.spacing)
                    elif sw_type == "CHANY":
                        self._add_switch_at(
                            self.y_min_0 + offset + self.spacing, y_line)
                    elif sw_type == "OPIN":
                        self._add_switch_at(
# NOTE(review): file seam — the start of this method (and the head of this
# elif chain, selecting the x-axis for the line profile by the
# 'Axis of interest' parameter) lies outside the visible chunk.
3: print("This projection does not calculate that axis, using qr/(sqrt(h^2+k^2)) instead")
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qx, self.imgLeft)
            #axis = qr
            elif self.p.param('Profile tools', 'Axis of interest').value() == 3:
                #h-k
                if projection == 0:
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qr, self.imgLeft)
                #h-l
                elif projection == 1:
                    print("This axis could be misleading showing h/qx instead")
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qx, self.imgLeft)
                #k-l
                elif projection == 2:
                    print("This axis could be misleading showing k/qk instead")
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qx, self.imgLeft)
                #h^2 + k ^2
                elif projection == 3:
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qx, self.imgLeft)
            #axis = L
            elif self.p.param('Profile tools', 'Axis of interest').value() == 4:
                #h-k
                if projection == 0:
                    print("This projection does not calculate that axis, using qx/h instead")
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qx, self.imgLeft)
                #h-l
                elif projection == 1:
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qy, self.imgLeft)
                #k-l
                elif projection == 2:
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qy, self.imgLeft)
                #h^2 + k ^2
                elif projection == 3:
                    xRoiData=self.ctrROI.getArrayRegion(self.grid_qy, self.imgLeft)
            # Profile = mean of the ROI across its short axis; x-axis comes
            # from the coordinate grid selected above.
            ROIData=self.ctrROI.getArrayRegion(self.showData, self.imgLeft)
            xdata=np.mean(xRoiData,axis=1)
            self.roixdata = xdata
            self.roiydata = np.mean(ROIData,axis=1)
            self.CTRCurve.setData(x=xdata,y=self.roiydata)#-np.min(ROIData[0],axis=1))
        #pixel view
        else:
            # No coordinate transform active: plot against pixel index.
            ROIData=self.ctrROI.getArrayRegion(self.showData, self.imgLeft)
            self.roiydata = np.mean(ROIData,axis=1)
            self.roixdata = range(0,len(np.mean(ROIData,axis=1)))
            self.CTRCurve.setData(self.roiydata)

    def detectorMode(self):
        """Go back to pixel coordinate mode"""
        # ImgState 0 == raw detector pixel view (see makeFigure's dispatch).
        self.ImgState=0
        self.select_l_slice = True
        self.angleRegion.setMovable(True)
        #self.hist.setImageItem(self.imgLeft)
        #self.imgLeft.show()
        self.imgLeft.resetTransform()
        self.updateRegion()
        # NOTE(review): attribute access without call — almost certainly
        # meant ``self.ROICalc()``; as written this line is a no-op. Confirm
        # before changing, the surrounding commented-out lines suggest
        # deliberate experimentation.
        self.ROICalc
        # Reset axes back to raw pixel scaling/labels.
        self.p3.getAxis('bottom').setScale(None)
self.p3.getAxis('left').setScale(None) self.p3.getAxis('bottom').setGrid(0) self.p3.getAxis('left').setGrid(0) self.p3.getAxis('bottom').setLabel("PIXELS") self.p3.getAxis('left').setLabel("PIXELS") self.statusLabel.setText('Viewing in pixel coordinates') self.p3.getViewBox().setAspectLocked(False) def data_format_changed(self): """We update the interface if a beamline preset is chosen""" #ID31 Beamline if self.experiment.param('Beamline Preset').value() == 4: self.p.param('File', 'Select Dark Image').hide() self.p.param('File', 'Select Background Dark Image').hide() self.p.param('File', 'Select Data Images').setName("Select HDF5 file") self.p.param('File', 'Select Log File').hide() self.p.param('Data Processing', 'Use 2nd detector').setValue(False) self.p.param('Data Processing', 'Use 2nd detector').hide() #P07 Beamline if self.experiment.param('Beamline Preset').value() == 3: self.p.param('File', 'Select Log File').hide() self.p.param('Experiment', 'Y Pixels').setValue(2048) self.p.param('Experiment', 'X Pixels').setValue(2048) self.p.param('Experiment', 'Pixel Size').setValue(200e-6) self.p.param('Experiment', 'Energy').setValue(73700) self.p.param('Experiment', 'Sample-Detector Dist.').setValue(1.6) #There was no 2nd detector and dark images subtracted normaly self.p.param('Data Processing', 'Use 2nd detector').hide() self.p.param('File', 'Select Dark Image').hide() self.p.param('File', 'Select Background Dark Image').hide() #P21.2 Beamline if self.experiment.param('Beamline Preset').value() == 1 or self.experiment.param('Beamline Preset').value() == 2: self.p.param('Experiment', 'Y Pixels').setValue(2880) self.p.param('Experiment', 'X Pixels').setValue(2880) self.p.param('Experiment', 'Pixel Size').setValue(150e-6) if self.experiment.param('Beamline Preset').value() == 5: self.experiment.param('Manual Start Angle').show() self.experiment.param('Manual End Angle').show() #self.p.param('File', 'Select Dark Image').show() #self.p.param('Data Processing', 'Use 2nd 
detector').show() #self.p.param('File', 'Select Background Dark Image').show() self.p.param('File', 'Select Log File').hide() else: self.experiment.param('Manual Start Angle').hide() self.experiment.param('Manual End Angle').hide() def sample_preset(self): """Apply presets for sample if changed, eventually this should be read from a text file""" #Au 111 (surface units) if self.p.param('Crystal','Preset').value() == 1: self.p.param('Crystal', 'a₁').setValue(2.885) self.p.param('Crystal', 'a₂').setValue(2.885) self.p.param('Crystal', 'a₃').setValue(7.064) self.p.param('Crystal', 'α₁').setValue(90) self.p.param('Crystal', 'α₂').setValue(90) self.p.param('Crystal', 'α₃').setValue(120) #Au 100 (surface units) if self.p.param('Crystal','Preset').value() == 2: self.p.param('Crystal', 'a₁').setValue(2.88) self.p.param('Crystal', 'a₂').setValue(2.88) self.p.param('Crystal', 'a₃').setValue(4.08) self.p.param('Crystal', 'α₁').setValue(90) self.p.param('Crystal', 'α₂').setValue(90) self.p.param('Crystal', 'α₃').setValue(90) #TiO2 if self.p.param('Crystal','Preset').value() == 3: self.p.param('Crystal', 'a₁').setValue(6.496) self.p.param('Crystal', 'a₂').setValue(2.959) self.p.param('Crystal', 'a₃').setValue(6.496) self.p.param('Crystal', 'α₁').setValue(90) self.p.param('Crystal', 'α₂').setValue(90) self.p.param('Crystal', 'α₃').setValue(90) def saveprofile(self, paramHandle, filename=None): if type(paramHandle) == str: filename = paramHandle if not filename: options = QFileDialog.Options() filename, _ = QFileDialog.getSaveFileName(self,"Chose file name", "","csv (*.csv);;All Files (*)", options=options) data = np.asarray([self.roixdata,self.roiydata]) np.savetxt(filename,np.transpose(data),fmt='%10.5f', delimiter=',',newline='\n') def saverocks(self, paramHandle, folderName=None): if type(paramHandle) == str: folderName = paramHandle if not folderName: folderName = str(QFileDialog.getExistingDirectory(self, "Select Directory")) #get the image indices to use if 
self.image_stack.angle_mode == False: from_image=int(np.round(self.angleRegion.getRegion()[0])) to_image=int(np.round(self.angleRegion.getRegion()[1])) else: from_image = int(np.floor(self.image_stack.angle2image(self.angleRegion.getRegion()[0]))) to_image = int(np.ceil(self.image_stack.angle2image(self.angleRegion.getRegion()[1]))) #images in our angular range tmp = self.image_stack.image_data[from_image:to_image+1,:,:] profiles = [] angles = [] for i,image in enumerate(tmp): ROIData=self.ctrROI.getArrayRegion(image, self.imgLeft) profiles.append(np.sum(ROIData,axis=1)) angles.append(self.image_stack.start_angle+i*self.image_stack.get_step()) profiles = np.transpose(np.asarray(profiles)) res=int(self.p.param('Data Processing', 'hkl Resolution').value()) binning=int(self.p.param('Data Processing', 'Binning').value()) second_det = self.p.param('Data Processing', 'Use 2nd detector').value() self.pixMaps=self.experiment.dector_frame_to_hkl_frame(self,binning,second_det,0) print(np.min(self.pixMaps[2]),np.max(self.pixMaps[2])) xRoiData=self.ctrROI.getArrayRegion(np.rot90(self.pixMaps[2],3), self.imgLeft) qz = np.mean(xRoiData,axis=1) np.savetxt(folderName+'/axis.csv',qz, delimiter=',',newline='\n') for i in range(len(profiles)): fileName = str(folderName)+'/'+str(i)+'.csv' tmp2 = profiles[i] data = np.transpose([angles,tmp2]) np.savetxt(fileName,data, delimiter=',',newline='\n') print(i, "rocking scans saved in folder: ", folderName) def saveroi(self, paramHandle, filename=None): if type(paramHandle) == str: filename = paramHandle if not filename: options = QFileDialog.Options() filename, _ = QFileDialog.getSaveFileName(self,"Chose file name", "","roi (*.roi);;All Files (*)", options=options) if filename: state = [self.ctrROI.saveState(),self.angleRegion.getRegion()] pickle.dump(state, open(filename, "wb" )) def loadroi(self, paramHandle, filename=None): if type(paramHandle) == str: filename = paramHandle if not filename: options = QFileDialog.Options() filename, _ = 
QFileDialog.getOpenFileName(self,"Select a ROI file too use", "","ROI File (*.roi);;All Files (*)", options=options) if filename: state = pickle.load(open(filename, "rb" )) self.ctrROI.setState(state[0]) #self.angleRegion.setRegion(state[1]) def load(self): if self.image_stack.images_read: self.image_stack.load_aux_files() self.image_stack.load_images() if self.image_stack.angle_mode: self.angleRegion.setRegion((self.image_stack.start_angle,self.image_stack.end_angle)) self.angleRegion.setBounds((self.image_stack.start_angle,self.image_stack.end_angle)) self.showData = self.image_stack.get_image(self.image_stack.start_angle,self.image_stack.end_angle) self.imgLeft.setImage(self.showData) self.imgLeft.show() self.hist.setImageItem(self.imgLeft) self.p2.setLabel('bottom',text='Angle (degrees)') else: self.angleRegion.setRegion((1,self.image_stack.number_of_images)) self.angleRegion.setBounds((1,self.image_stack.number_of_images)) self.showData = self.image_stack.get_image(0,self.image_stack.number_of_images) self.imgLeft.setImage(self.showData) self.imgLeft.show() self.hist.setImageItem(self.imgLeft) else: self.statusLabel.setText('Background that will be subtracted. 
Select images to load') self.image_stack.load_aux_files() self.imgLeft.setImage(self.image_stack.subtract_image) self.imgLeft.show() self.hist.setImageItem(self.imgLeft) self.ImgState = 5 def makeFigure(self, paramHandle, filename=None): projection = self.p.param('View Mode', 'Select Projection').value() if type(paramHandle) == str: filename = paramHandle """This function reloads plotting.py and runs the correct plotting function to generate a figure with matplotlib""" if not filename: options = QFileDialog.Options() filename, _ = QFileDialog.getSaveFileName(self,"Chose file name", "","png (*.png);;All Files (*)", options=options) if filename: importlib.reload(plotting) if self.ImgState == 2: cmin,cmax=self.imgLeft.getLevels() self.grid_qy if projection == 0: plotting.plot_projection_hk(self, self.grid_qx,self.grid_qy,self.showData,cmin,cmax, filename) print("In-plane qx/qy map saved as: ",filename) if projection == 1: plotting.plot_projection_hl(self, self.grid_qx,self.grid_qy,self.showData,cmin,cmax, filename) print("qx/qz map saved as: ",filename) if projection == 2: plotting.plot_projection_kl(self, self.grid_qx,self.grid_qy,self.showData,cmin,cmax, filename) print("qy/qz map saved as: ",filename) if projection == 3: plotting.plot_projection_qrl(self, self.grid_qx,self.grid_qy,self.showData,cmin,cmax, filename) print("qr/qz map saved as: ",filename) if self.ImgState == 1: cmin,cmax=self.imgLeft.getLevels() plotting.plot_transformed_detector(self.grid_hk,self.grid_l,self.showData,cmin,cmax, filename) print("Transfomred Detector View saved as: ",filename) if self.ImgState == 0: cmin,cmax=self.imgLeft.getLevels() plotting.plot_out_of_plane(self.showData,cmin,cmax, filename) print("Image saved as: ",filename) def runScript(self): importlib.reload(script) script.script_main(self) def addMask(self): #when the button is pressed add a new ROI i = len(self.mask_list) self.mask_list.append(pg.RectROI([0, 5], [1, 1], pen=(i,9))) self.mask_list[i].show() 
        # (tail of addMask) show the freshly created ROI in the plot.
        self.p3.addItem(self.mask_list[i])

    def convertMasks(self):
        #this converts the ROIs into bounds for the binning algorithm
        # Each rectangular mask becomes [xmin, xmax, ymin, ymax] appended to
        # self.binBounds (note: appends — callers reset binBounds first).
        for mask in self.mask_list:
            xmin = mask.pos()[0]
            xmax = mask.size()[0] + xmin
            ymin = mask.pos()[1]
            ymax = mask.size()[1] + ymin
            self.binBounds.append([xmin,xmax,ymin,ymax])

    def clearMasks(self):
        # Remove all masks from the plot and drop their bounds.
        self.binBounds = []
        for mask in self.mask_list:
            self.p3.removeItem(mask)
        self.mask_list = []

    def hideMasks(self):
        # Detach masks from the plot but keep them in self.mask_list.
        for mask in self.mask_list:
            self.p3.removeItem(mask)

    def showMasks(self):
        # Re-attach previously hidden masks.
        for mask in self.mask_list:
            self.p3.addItem(mask)

    def saveMasks(self, paramHandle, filename=None):
        # Pickle the ROI states; paramHandle may itself be the filename when
        # invoked programmatically (e.g. from the scripting interface).
        if type(paramHandle) == str:
            filename = paramHandle
        if not filename:
            options = QFileDialog.Options()
            filename, _ = QFileDialog.getSaveFileName(self,"Chose file name", "","msk (*.msk);;All Files (*)", options=options)
        if filename:
            mask_states = []
            for mask in self.mask_list:
                mask_states.append(mask.saveState())
            pickle.dump(mask_states, open( filename, "wb" ))

    def loadMasks(self, paramHandle, filename=None):
        # Inverse of saveMasks: recreate one RectROI per pickled state and
        # immediately refresh binBounds.
        # NOTE(review): pickle.load on a user-chosen file executes arbitrary
        # code if the file is untrusted.
        if type(paramHandle) == str:
            filename = paramHandle
        if not filename:
            options = QFileDialog.Options()
            filename, _ = QFileDialog.getOpenFileName(self,"Select a Mask file too use", "","Mask File (*.msk);;All Files (*)", options=options)
        if filename:
            mask_states = pickle.load(open(filename, "rb" ))
            for mask_state in mask_states:
                self.mask_list.append(pg.RectROI([0, 5], [1, 1], pen=(len(mask_states),9)))
                self.mask_list[-1].setState(mask_state)
                self.mask_list[-1].show()
                self.p3.addItem(self.mask_list[-1])
            self.convertMasks()

    def makeProjection(self):
        # Rebuild mask bounds, then run the (name-mangled) projection worker.
        self.binBounds = []
        self.convertMasks()
        self.__makeProjection()

    def save(self):
        # Persist parameter tree + image-stack selection to fixed filenames
        # in the working directory.
        state = self.p.saveState()
        pickle.dump(state, open( 'params.bin', "wb" ))
        pickle.dump(self.image_stack.save_state(),open('imagestack.bin', "wb" ) )
        print("Parameters and file selection saved\n")

    def restore(self):
        # Counterpart of save() for the parameter tree only.
        self.p.restoreState(pickle.load(open( 'params.bin', "rb" )),removeChildren=False)
        print("Saved parameters restored\n")

    def restoreImageStack(self):
        # Counterpart of save() for the image-stack selection.
        print("Restoring file selection:")
        self.image_stack.restore_state(pickle.load(open( 'imagestack.bin', "rb" )))
        self.image_stack.load_aux_files()
        print(self.image_stack.flags)
        print(self.image_stack.number_of_images, "images selected")
        print("Complete (Press Load)\n")

    def initUI(self):
        # Build the main window: parameter tree definition starts here.
        self.setWindowTitle(self.title)
        gridLayout = QGridLayout(self)
        self.setLayout(gridLayout)
        params = [
            self.experiment,
            self.crystal,
            {'name': 'Data Processing', 'type': 'group', 'children': [
                {'name': 'Binning', 'type': 'int', 'value': 4, 'step': 1},
                {'name': 'hkl Resolution', 'type': 'int', 'value': 1000, 'step': 1},
                {'name': 'Divide Bins By Frequency', 'type': 'bool', 'value': True},
                {'name': 'Grid Size', 'type': 'int', 'value': 800, 'step': 1},
                {'name': 'White Background', 'type': 'bool', 'value': False},
                {'name': 'Mean Images Instead of Max', 'type': 'bool',},
                {'name': 'Acceleration', 'type': 'list', 'values': {"none": 0, "numba (cpu)": 1, "cuda (gpu)": 2},'value': 1},
                {'name': 'Bin From Full Images', 'type': 'bool', 'value': False},
                {'name': 'Apply Intensity Corrections', 'type': 'bool', 'value': False},
                {'name': 'Correct for Refraction', 'type': 'bool', 'value': False},
                {'name': 'Intensity Offset', 'type': 'float', 'value': 0, 'step':10},
                {'name': 'Multiply intensity by', 'type': 'float', 'value': 1, 'step':0.1},
                {'name': 'Image Rotation', 'type': 'list', 'values': {"0 degrees": 0, "90 degrees": 1, "180 degrees": 2,"270 degrees": 3},'value': 0},
                {'name': 'Image Flip U/D', 'type': 'list', 'values': {"True": True,"False": False},'value': False},
                {'name': 'Image Flip L/R', 'type': 'list', 'values': {"True":
# vim:fileencoding=utf-8:noet from __future__ import (unicode_literals, division, absolute_import, print_function) import itertools import re from copy import copy from powerline.lib.unicode import unicode from powerline.lint.markedjson.error import echoerr, DelayedEchoErr, NON_PRINTABLE_STR from powerline.lint.selfcheck import havemarks NON_PRINTABLE_RE = re.compile( NON_PRINTABLE_STR.translate({ ord('\t'): None, ord('\n'): None, 0x0085: None, }) ) class Spec(object): '''Class that describes some JSON value In powerline it is only used to describe JSON values stored in powerline configuration. :param dict keys: Dictionary that maps keys that may be present in the given JSON dictionary to their descriptions. If this parameter is not empty it implies that described value has dictionary type. Non-dictionary types must be described using ``Spec()``: without arguments. .. note:: Methods that create the specifications return ``self``, so calls to them may be chained: ``Spec().type(unicode).re('^\w+$')``. This does not apply to functions that *apply* specification like :py:meth`Spec.match`. .. note:: Methods starting with ``check_`` return two values: first determines whether caller should proceed on running other checks, second determines whether there were any problems (i.e. whether error was reported). One should not call these methods directly: there is :py:meth:`Spec.match` method for checking values. .. note:: In ``check_`` and ``match`` methods specifications are identified by their indexes for the purpose of simplyfying :py:meth:`Spec.copy` method. Some common parameters: ``data``: Whatever data supplied by the first caller for checker functions. Is not processed by :py:class:`Spec` methods in any fashion. ``context``: :py:class:`powerline.lint.context.Context` instance, describes context of the value. :py:class:`Spec` methods only use its ``.key`` methods for error messages. ``echoerr``: Callable that should be used to echo errors. 
Is supposed to take four optional keyword arguments: ``problem``, ``problem_mark``, ``context``, ``context_mark``. ``value``: Checked value. ''' def __init__(self, **keys): self.specs = [] self.keys = {} self.checks = [] self.cmsg = '' self.isoptional = False self.uspecs = [] self.ufailmsg = lambda key: 'found unknown key: {0}'.format(key) self.did_type = False self.update(**keys) def update(self, **keys): '''Describe additional keys that may be present in given JSON value If called with some keyword arguments implies that described value is a dictionary. If called without keyword parameters it is no-op. :return: self. ''' for k, v in keys.items(): self.keys[k] = len(self.specs) self.specs.append(v) if self.keys and not self.did_type: self.type(dict) self.did_type = True return self def copy(self, copied=None): '''Deep copy the spec :param dict copied: Internal dictionary used for storing already copied values. This parameter should not be used. :return: New :py:class:`Spec` object that is a deep copy of ``self``. ''' copied = copied or {} try: return copied[id(self)] except KeyError: instance = self.__class__() copied[id(self)] = instance return self.__class__()._update(self.__dict__, copied) def _update(self, d, copied): '''Helper for the :py:meth:`Spec.copy` function Populates new instance with values taken from the old one. :param dict d: ``__dict__`` of the old instance. :param dict copied: Storage for already copied values. ''' self.__dict__.update(d) self.keys = copy(self.keys) self.checks = copy(self.checks) self.uspecs = copy(self.uspecs) self.specs = [spec.copy(copied) for spec in self.specs] return self def unknown_spec(self, keyfunc, spec): '''Define specification for non-static keys This method should be used if key names cannot be determined at runtime or if a number of keys share identical spec (in order to not repeat it). 
:py:meth:`Spec.match` method processes dictionary in the given order: * First it tries to use specifications provided at the initialization or by the :py:meth:`Spec.update` method. * If no specification for given key was provided it processes specifications from ``keyfunc`` argument in order they were supplied. Once some key matches specification supplied second ``spec`` argument is used to determine correctness of the value. :param Spec keyfunc: :py:class:`Spec` instance or a regular function that returns two values (the same :py:meth:`Spec.match` returns). This argument is used to match keys that were not provided at initialization or via :py:meth:`Spec.update`. :param Spec spec: :py:class:`Spec` instance that will be used to check keys matched by ``keyfunc``. :return: self. ''' if isinstance(keyfunc, Spec): self.specs.append(keyfunc) keyfunc = len(self.specs) - 1 self.specs.append(spec) self.uspecs.append((keyfunc, len(self.specs) - 1)) return self def unknown_msg(self, msgfunc): '''Define message which will be used when unknown key was found “Unknown” is a key that was not provided at the initialization and via :py:meth:`Spec.update` and did not match any ``keyfunc`` provided via :py:meth:`Spec.unknown_spec`. :param msgfunc: Function that takes that unknown key as an argument and returns the message text. Text will appear at the top (start of the sentence). :return: self. ''' self.ufailmsg = msgfunc return self def context_message(self, msg): '''Define message that describes context :param str msg: Message that describes context. Is written using the :py:meth:`str.format` syntax and is expected to display keyword parameter ``key``. :return: self. ''' self.cmsg = msg for spec in self.specs: if not spec.cmsg: spec.context_message(msg) return self def check_type(self, value, context_mark, data, context, echoerr, types): '''Check that given value matches given type(s) :param tuple types: List of accepted types. 
Since :py:class:`Spec` is supposed to describe JSON values only ``dict``, ``list``, ``unicode``, ``bool``, ``float`` and ``NoneType`` types make any sense. :return: proceed, hadproblem. ''' havemarks(value) if type(value.value) not in types: echoerr( context=self.cmsg.format(key=context.key), context_mark=context_mark, problem='{0!r} must be a {1} instance, not {2}'.format( value, ', '.join((t.__name__ for t in types)), type(value.value).__name__ ), problem_mark=value.mark ) return False, True return True, False def check_func(self, value, context_mark, data, context, echoerr, func, msg_func): '''Check value using given function :param function func: Callable that should accept four positional parameters: #. checked value, #. ``data`` parameter with arbitrary data (supplied by top-level caller), #. current context and #. function used for echoing errors. This callable should return three values: #. determines whether ``check_func`` caller should proceed calling other checks, #. determines whether ``check_func`` should echo error on its own (it should be set to False if ``func`` echoes error itself) and #. determines whether function has found some errors in the checked value. :param function msg_func: Callable that takes checked value as the only positional parameter and returns a string that describes the problem. Only useful for small checker functions since it is ignored when second returned value is false. :return: proceed, hadproblem. ''' havemarks(value) proceed, echo, hadproblem = func(value, data, context, echoerr) if echo and hadproblem: echoerr(context=self.cmsg.format(key=context.key), context_mark=context_mark, problem=msg_func(value), problem_mark=value.mark) return proceed, hadproblem def check_list(self, value, context_mark, data, context, echoerr, item_func, msg_func): '''Check that each value in the list matches given specification :param function item_func: Callable like ``func`` from :py:meth:`Spec.check_func`. 
Unlike ``func`` this callable is called for each value in the list and may be a :py:class:`Spec` object index. :param func msg_func: Callable like ``msg_func`` from :py:meth:`Spec.check_func`. Should accept one problematic item and is not used for :py:class:`Spec` object indices in ``item_func`` method. :return: proceed, hadproblem. ''' havemarks(value) i = 0 hadproblem = False for item in value: havemarks(item) if isinstance(item_func, int): spec = self.specs[item_func] proceed, fhadproblem = spec.match( item, value.mark, data, context.enter_item('list item ' + unicode(i), item), echoerr ) else: proceed, echo, fhadproblem = item_func(item, data, context, echoerr) if echo and fhadproblem: echoerr(context=self.cmsg.format(key=context.key + '/list item ' + unicode(i)), context_mark=value.mark, problem=msg_func(item), problem_mark=item.mark) if fhadproblem: hadproblem = True if not proceed: return proceed, hadproblem i += 1 return True, hadproblem def check_either(self, value, context_mark, data, context, echoerr, start, end): '''Check that given value matches one of the given specifications :param int start: First specification index. :param int end: Specification index that is greater by 1 then last specification index. This method does not give an error if any specification from ``self.specs[start:end]`` is matched by the given value. ''' havemarks(value) new_echoerr = DelayedEchoErr( echoerr, 'One of the either variants failed. Messages from the first variant:', 'messages from the next variant:' ) hadproblem = False for spec in self.specs[start:end]: proceed, hadproblem = spec.match(value, value.mark, data, context, new_echoerr) new_echoerr.next_variant() if not proceed: break if not hadproblem: return True, False new_echoerr.echo_all() return False, hadproblem def check_tuple(self, value, context_mark, data, context, echoerr, start, end): '''Check that given value is a list with items matching specifications :param int start: First specification index. 
:param int end: Specification index that is greater by 1 then last specification index. This method checks that each item in the value list matches specification with index ``start + item_number``. ''' havemarks(value) hadproblem = False for (i, item, spec) in zip(itertools.count(), value, self.specs[start:end]): proceed, ihadproblem = spec.match( item, value.mark, data, context.enter_item('tuple item ' + unicode(i), item), echoerr ) if ihadproblem: hadproblem = True if not proceed: return False, hadproblem return True, hadproblem def check_printable(self, value, context_mark, data, context, echoerr, _): '''Check that given unicode string contains only printable characters ''' hadproblem = False for match in NON_PRINTABLE_RE.finditer(value): hadproblem = True echoerr( context=self.cmsg.format(key=context.key), context_mark=value.mark, problem='found not printable character U+{0:04x} in a configuration string'.format( ord(match.group(0))), problem_mark=value.mark.advance_string(match.start() + 1) ) return True, hadproblem def printable(self, *args): self.type(unicode) self.checks.append(('check_printable', args)) return self def type(self, *args): '''Describe value that has one of the types given in arguments :param args: List of accepted types. Since :py:class:`Spec` is supposed to describe JSON values only ``dict``, ``list``, ``unicode``, ``bool``, ``float`` and ``NoneType`` types make any sense. :return: self. ''' self.checks.append(('check_type', args)) return self cmp_funcs = { 'le': lambda x, y: x <= y, 'lt': lambda x, y: x < y, 'ge': lambda x, y: x >= y, 'gt': lambda x, y: x > y, 'eq': lambda x, y: x == y, } cmp_msgs = { 'le': 'lesser or equal to', 'lt': 'lesser then', 'ge': 'greater or equal to', 'gt': 'greater then', 'eq': 'equal to', } def len(self, comparison, cint, msg_func=None): '''Describe value that has given length :param str comparison: Type of the comparison. Valid values: ``le``, ``lt``, ``ge``, ``gt``, ``eq``. 
:param int cint: Integer with which length is compared. :param function msg_func: Function that should accept checked value and return message that describes the problem with this value. Default
# <filename>speakeasy/winenv/api/kernelmode/wdfldr.py
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.

import uuid

import speakeasy.winenv.arch as e_arch
import speakeasy.winenv.defs.nt.ddk as ddk
import speakeasy.winenv.defs.wdf as wdf
import speakeasy.winenv.defs.usb as usbdefs
import speakeasy.winenv.defs.nt.ntoskrnl as ntos

from .. import api


class WdfDriver(object):
    """Book-keeping for one emulated WDF driver (keyed by its DriverGlobals ptr)."""

    def __init__(self):
        self.reg_path = None               # service registry path read from RegistryPath
        self.typed_context_worker = None   # lazily-allocated context buffer address
        self.queues = {}                   # WDFQUEUE handle -> WDF_IO_QUEUE_CONFIG
        self.driver_object_addr = None     # emulated address of the DRIVER_OBJECT
        self.driver_object = None          # parsed DRIVER_OBJECT structure


class WdfDevice(object):
    """Book-keeping for one emulated WDFDEVICE handle."""

    def __init__(self):
        self.device_object_addr = None     # emulated address of the DEVICE_OBJECT
        self.device_object = None          # parsed DEVICE_OBJECT structure


class WdfUsbDevice(object):
    """Book-keeping for one emulated WDFUSBDEVICE handle."""

    def __init__(self):
        self.num_interfaces = 0
        self.config_desc = None            # raw USB configuration descriptor bytes


class WdfUsbInterface(object):
    """Book-keeping for one emulated WDFUSBINTERFACE handle."""

    def __init__(self):
        self.config_desc = 0
        self.iface_index = 0
        self.setting_index = 0


class WdfUsbPipe(object):
    """Book-keeping for one emulated WDFUSBPIPE handle."""

    def __init__(self):
        self.interface = None
        self.index = 0


class Wdfldr(api.ApiHandler):
    """
    Implements the Windows Driver Framework (WDK)
    """

    name = "wdfldr"
    apihook = api.ApiHandler.apihook
    impdata = api.ApiHandler.impdata

    def __init__(self, emu):
        super(Wdfldr, self).__init__(emu)

        self.funcs = {}
        # WDF handles are opaque integers; start at 4 and hand them out in steps of 4
        self.curr_handle = 4
        self.pnp_device = None
        self.data = {}
        self.emu = emu

        # Handle -> object maps for each WDF object class this handler emulates
        self.wdf_drivers = {}
        self.wdf_devices = {}
        self.usb_devices = {}
        self.usb_pipes = {}
        self.usb_interfaces = {}
        self.handles = {}

        self.types = wdf
        # WDFFUNCTIONS is the big function-pointer table the framework hands to drivers
        self.func_table = self.types.WDFFUNCTIONS(emu.get_ptr_size())
        self.func_table_ptr = None      # emulated address of the table (set on first bind)
        self.component_globals = None   # emulated WDF_COMPONENT_GLOBALS address

        super(Wdfldr, self).__get_hook_attrs__(self)

    def get_handle(self):
        """Return a fresh opaque WDF handle value (monotonic, step 4)."""
        self.curr_handle += 4
        return self.curr_handle

    def set_func_table(self, emu):
        """Populate the emulated WDFFUNCTIONS table.

        Each pfn* slot is pointed at an emulator callback that dispatches to
        the matching hook method below, then the whole struct is written to
        the memory previously allocated at ``self.func_table_ptr``.
        """
        addr = emu.add_callback(Wdfldr.name, self.WdfDriverCreate.__apihook__[0])
        self.func_table.pfnWdfDriverCreate = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfDeviceInitSetPnpPowerEventCallbacks.__apihook__[0]
        )
        self.func_table.pfnWdfDeviceInitSetPnpPowerEventCallbacks = addr
        addr = emu.add_callback(
            Wdfldr.name,
            self.WdfDeviceInitSetRequestAttributes.__apihook__[0]
        )
        self.func_table.pfnWdfDeviceInitSetRequestAttributes = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfDeviceInitSetFileObjectConfig.__apihook__[0]
        )
        self.func_table.pfnWdfDeviceInitSetFileObjectConfig = addr
        addr = emu.add_callback(Wdfldr.name, self.WdfDeviceInitSetIoType.__apihook__[0])
        self.func_table.pfnWdfDeviceInitSetIoType = addr
        addr = emu.add_callback(Wdfldr.name, self.WdfDeviceCreate.__apihook__[0])
        self.func_table.pfnWdfDeviceCreate = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfObjectGetTypedContextWorker.__apihook__[0]
        )
        self.func_table.pfnWdfObjectGetTypedContextWorker = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfDriverOpenParametersRegistryKey.__apihook__[0]
        )
        self.func_table.pfnWdfDriverOpenParametersRegistryKey = addr
        addr = emu.add_callback(Wdfldr.name, self.WdfRegistryQueryULong.__apihook__[0])
        self.func_table.pfnWdfRegistryQueryULong = addr
        addr = emu.add_callback(Wdfldr.name, self.WdfRegistryClose.__apihook__[0])
        self.func_table.pfnWdfRegistryClose = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfDeviceSetPnpCapabilities.__apihook__[0]
        )
        self.func_table.pfnWdfDeviceSetPnpCapabilities = addr
        addr = emu.add_callback(Wdfldr.name, self.WdfIoQueueCreate.__apihook__[0])
        self.func_table.pfnWdfIoQueueCreate = addr
        addr = emu.add_callback(Wdfldr.name, self.WdfIoQueueReadyNotify.__apihook__[0])
        self.func_table.pfnWdfIoQueueReadyNotify = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfDeviceCreateDeviceInterface.__apihook__[0]
        )
        self.func_table.pfnWdfDeviceCreateDeviceInterface = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfDeviceWdmGetAttachedDevice.__apihook__[0]
        )
        self.func_table.pfnWdfDeviceWdmGetAttachedDevice = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfDeviceWdmGetDeviceObject.__apihook__[0]
        )
        self.func_table.pfnWdfDeviceWdmGetDeviceObject = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbTargetDeviceCreateWithParameters.__apihook__[0]
        )
        self.func_table.pfnWdfUsbTargetDeviceCreateWithParameters = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbTargetDeviceGetDeviceDescriptor.__apihook__[0]
        )
        self.func_table.pfnWdfUsbTargetDeviceGetDeviceDescriptor = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbTargetDeviceRetrieveConfigDescriptor.__apihook__[0]
        )
        self.func_table.pfnWdfUsbTargetDeviceRetrieveConfigDescriptor = addr
        addr = emu.add_callback(Wdfldr.name, self.WdfMemoryCreate.__apihook__[0])
        self.func_table.pfnWdfMemoryCreate = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbTargetDeviceSelectConfig.__apihook__[0]
        )
        self.func_table.pfnWdfUsbTargetDeviceSelectConfig = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbTargetDeviceGetNumInterfaces.__apihook__[0]
        )
        self.func_table.pfnWdfUsbTargetDeviceGetNumInterfaces = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbTargetDeviceRetrieveInformation.__apihook__[0]
        )
        self.func_table.pfnWdfUsbTargetDeviceRetrieveInformation = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbInterfaceGetNumSettings.__apihook__[0]
        )
        self.func_table.pfnWdfUsbInterfaceGetNumSettings = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbInterfaceSelectSetting.__apihook__[0]
        )
        self.func_table.pfnWdfUsbInterfaceSelectSetting = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbInterfaceGetNumConfiguredPipes.__apihook__[0]
        )
        self.func_table.pfnWdfUsbInterfaceGetNumConfiguredPipes = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbInterfaceGetConfiguredPipe.__apihook__[0]
        )
        self.func_table.pfnWdfUsbInterfaceGetConfiguredPipe = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbTargetPipeGetInformation.__apihook__[0]
        )
        self.func_table.pfnWdfUsbTargetPipeGetInformation = addr
        addr = emu.add_callback(
            Wdfldr.name, self.WdfUsbInterfaceGetInterfaceNumber.__apihook__[0]
        )
        self.func_table.pfnWdfUsbInterfaceGetInterfaceNumber = addr

        # Commit the populated table into emulated memory
        self.mem_write(self.func_table_ptr, self.func_table.get_bytes())

    def parse_usb_config(self, data):
        """Parse raw USB configuration descriptor bytes.

        Walks the descriptor blob sequentially (config -> interfaces ->
        endpoints, each advanced by its own bLength field) and returns a list
        of ``[interface_descriptor, [endpoint_descriptors]]`` pairs.
        """
        interfaces = []

        # Get the USB config descriptor
        cd = usbdefs.USB_CONFIGURATION_DESCRIPTOR().cast(data)
        ifaces = cd.bNumInterfaces
        data = data[cd.bLength :]
        for i in range(ifaces):
            endpoints = []
            _id = usbdefs.USB_INTERFACE_DESCRIPTOR().cast(data)
            data = data[_id.bLength :]
            for j in range(_id.bNumEndpoints):
                ep = usbdefs.USB_ENDPOINT_DESCRIPTOR().cast(data)
                data = data[ep.bLength :]
                endpoints.append(ep)
            interfaces.append([_id, endpoints])
        return interfaces

    @apihook("WdfVersionBind", argc=4)
    def WdfVersionBind(self, emu, argv, ctx={}):
        """
        NTSTATUS WdfVersionBind(
            __in PDRIVER_OBJECT DriverObject,
            __in PUNICODE_STRING RegistryPath,
            __inout PWDF_BIND_INFO BindInfo,
            __out PWDF_COMPONENT_GLOBALS* ComponentGlobals
        );
        """
        rv = ddk.STATUS_SUCCESS
        drv, reg_path, BindInfo, comp_globals = argv

        wbi = self.types.WDF_BIND_INFO(emu.get_ptr_size())
        wbi = self.mem_cast(wbi, BindInfo)

        # Lazily allocate the WDFFUNCTIONS table once, then point the
        # driver-supplied BindInfo->FuncTable at it
        if not self.func_table_ptr:
            size = self.func_table.sizeof()
            self.func_table_ptr = self.mem_alloc(size, tag="api.struct.WDFFUNCTIONS")
            self.mem_write(
                wbi.FuncTable,
                (self.func_table_ptr).to_bytes(emu.get_ptr_size(), "little"),
            )

        # Likewise allocate component globals once and return the pointer to
        # the caller through *ComponentGlobals
        if not self.component_globals:
            components = self.types.WDF_COMPONENT_GLOBALS(emu.get_ptr_size())
            self.component_globals = self.mem_alloc(
                components.sizeof(), tag="api.struct.WDF_COMPONENT_GLOBALS"
            )

            self.mem_write(
                comp_globals,
                (self.component_globals).to_bytes(emu.get_ptr_size(), "little"),
            )

        self.set_func_table(emu)

        # For now, just leave the handle open so we can reference it later
        return rv

    @apihook("WdfDriverCreate", argc=6)
    def WdfDriverCreate(self, emu, argv, ctx={}):
        """
        NTSTATUS WdfDriverCreate(
            PWDF_DRIVER_GLOBALS DriverGlobals,
            PDRIVER_OBJECT DriverObject,
            PCUNICODE_STRING RegistryPath,
            PWDF_OBJECT_ATTRIBUTES DriverAttributes,
            PWDF_DRIVER_CONFIG DriverConfig,
            WDFDRIVER *Driver
        );
        """
        (
            DriverGlobals,
            DriverObject,
            RegistryPath,
            DriverAttributes,
            DriverConfig,
            Driver,
        ) = argv

        # Track the new driver under its DriverGlobals pointer so later
        # framework calls can find it
        driver = WdfDriver()
        driver.reg_path = self.read_unicode_string(RegistryPath)
        driver.driver_object_addr = DriverObject
        driver.driver_object = self.mem_cast(
            ntos.DRIVER_OBJECT(emu.get_ptr_size()), DriverObject
        )
        self.wdf_drivers.update({DriverGlobals: driver})

        if DriverConfig:
            # Parsed but currently unused; kept for side-effect-free inspection
            config = self.mem_cast(
                self.types.WDF_DRIVER_CONFIG(emu.get_ptr_size()), DriverConfig  # noqa
            )

        rv = 0
        return rv

    @apihook("WdfDeviceInitSetPnpPowerEventCallbacks", argc=3)
    def WdfDeviceInitSetPnpPowerEventCallbacks(self, emu, argv, ctx={}):
        """
        void WdfDeviceInitSetPnpPowerEventCallbacks(
            PWDFDEVICE_INIT DeviceInit,
            PWDF_PNPPOWER_EVENT_CALLBACKS PnpPowerEventCallbacks
        );
        """
        # No-op stub: callbacks are not dispatched by the emulator
        DriverGlobals, DeviceInit, PnpPowerEventCallbacks = argv
        return

    @apihook("WdfDeviceInitSetRequestAttributes", argc=3)
    def WdfDeviceInitSetRequestAttributes(self, emu, argv, ctx={}):
        """
        void WdfDeviceInitSetRequestAttributes(
            PWDFDEVICE_INIT DeviceInit,
            PWDF_OBJECT_ATTRIBUTES RequestAttributes
        );
        """
        # No-op stub
        DriverGlobals, DeviceInit, RequestAttributes = argv
        return

    @apihook("WdfDeviceInitSetFileObjectConfig", argc=4)
    def WdfDeviceInitSetFileObjectConfig(self, emu, argv, ctx={}):
        """
        void WdfDeviceInitSetFileObjectConfig(
            PWDFDEVICE_INIT DeviceInit,
            PWDF_FILEOBJECT_CONFIG FileObjectConfig,
            PWDF_OBJECT_ATTRIBUTES FileObjectAttributes
        );
        """
        # No-op stub
        DriverGlobals, DeviceInit, FileObjectConfig, FileObjectAttributes = argv
        return

    @apihook("WdfDeviceInitSetIoType", argc=3)
    def WdfDeviceInitSetIoType(self, emu, argv, ctx={}):
        """
        void WdfDeviceInitSetIoType(
            PWDFDEVICE_INIT DeviceInit,
            WDF_DEVICE_IO_TYPE IoType
        );
        """
        # No-op stub
        DriverGlobals, DeviceInit, IoType = argv
        return

    @apihook("WdfDeviceCreate", argc=4)
    def WdfDeviceCreate(self, emu, argv, ctx={}):
        """
        NTSTATUS WdfDeviceCreate(
            PWDFDEVICE_INIT *DeviceInit,
            PWDF_OBJECT_ATTRIBUTES DeviceAttributes,
            WDFDEVICE *Device
        );
        """
        DriverGlobals, DeviceInit, DeviceAttributes, Device = argv
        rv = ddk.STATUS_SUCCESS

        if Device:
            # Hand out a WDFDEVICE handle and back it with an emulated
            # DEVICE_OBJECT whose DriverObject points at the owning driver
            handle = self.get_handle()
            dev = WdfDevice()
            self.wdf_devices.update({handle: dev})
            self.mem_write(Device, (handle).to_bytes(emu.get_ptr_size(), "little"))
            do = ntos.DEVICE_OBJECT(emu.get_ptr_size())
            dev.device_object_addr = self.mem_alloc(
                do.sizeof(), tag="api.struct.DEVICE_OBJECT"
            )
            dev.device_object = do
            driver = self.wdf_drivers.get(DriverGlobals)
            if driver:
                dev.device_object.DriverObject = driver.driver_object_addr
            self.mem_write(dev.device_object_addr, dev.device_object.get_bytes())
        return rv

    @apihook("WdfObjectGetTypedContextWorker", argc=3, conv=e_arch.CALL_CONV_FASTCALL)
    def WdfObjectGetTypedContextWorker(self, emu, argv, ctx={}):
        """
        PVOID WdfObjectGetTypedContextWorker(
            WDFOBJECT Handle,
            PCWDF_OBJECT_CONTEXT_TYPE_INFO TypeInfo
        );
        """
        DriverGlobals, Handle, TypeInfo = argv

        # NOTE(review): assumes DriverGlobals was registered via WdfDriverCreate;
        # an unknown pointer would make `driver` None here — confirm callers.
        driver = self.wdf_drivers.get(DriverGlobals)
        if not driver.typed_context_worker:
            # One shared context buffer per driver, allocated on first request
            size = self.types.WDF_COMPONENT_GLOBALS(emu.get_ptr_size()).sizeof()
            driver.typed_context_worker = self.mem_alloc(
                size, tag="api.struct.WDF_TYPED_CONTEXT_WORKER"
            )

        rv = driver.typed_context_worker
        return rv

    @apihook("WdfDriverOpenParametersRegistryKey", argc=5)
    def WdfDriverOpenParametersRegistryKey(self, emu, argv, ctx={}):
        """
        NTSTATUS WdfDriverOpenParametersRegistryKey(
            WDFDRIVER Driver,
            ACCESS_MASK DesiredAccess,
            PWDF_OBJECT_ATTRIBUTES KeyAttributes,
            WDFKEY *Key
        );
        """
        DriverGlobals, Driver, DesiredAccess, KeyAttributes, pKey = argv
        rv = ddk.STATUS_OBJECT_NAME_NOT_FOUND

        # Open "<service reg path>\Parameters" in the emulated registry
        driver = self.wdf_drivers.get(DriverGlobals)
        hnd = emu.reg_open_key(driver.reg_path + "\\Parameters")
        if hnd:
            rv = ddk.STATUS_SUCCESS
        if pKey:
            self.mem_write(pKey, (hnd).to_bytes(emu.get_ptr_size(), "little"))

        return rv

    @apihook("WdfRegistryQueryULong", argc=4)
    def WdfRegistryQueryULong(self, emu, argv, ctx={}):
        """
        NTSTATUS WdfRegistryQueryULong(
            WDFKEY Key,
            PCUNICODE_STRING ValueName,
            PULONG Value
        );
        """
        DriverGlobals, Key, ValueName, Value = argv
        rv = ddk.STATUS_OBJECT_NAME_NOT_FOUND

        wkey = emu.reg_get_key(Key)
        if wkey:
            val_name = self.read_unicode_string(ValueName)
            # Replace the raw pointer with the decoded name for logging
            argv[2] = val_name
            value = wkey.get_value(val_name)
            if value:
                ulong = value.get_data()
                # ULONG output is always 4 bytes regardless of pointer size
                self.mem_write(Value, (ulong).to_bytes(4, "little"))
                rv = ddk.STATUS_SUCCESS

        return rv

    @apihook("WdfRegistryClose", argc=2)
    def WdfRegistryClose(self, emu, argv, ctx={}):
        """
        void WdfRegistryClose(
            WDFKEY Key
        );
        """
        # No-op stub: emulated keys are not reference-counted
        DriverGlobals, Key = argv
        return

    @apihook("WdfDeviceSetPnpCapabilities", argc=3)
    def WdfDeviceSetPnpCapabilities(self, emu, argv, ctx={}):
        """
        void WdfDeviceSetPnpCapabilities(
            WDFDEVICE Device,
            PWDF_DEVICE_PNP_CAPABILITIES PnpCapabilities
        );
        """
        # No-op stub
        DriverGlobals, Device, PnpCapabilities = argv
        return

    @apihook("WdfIoQueueReadyNotify", argc=4)
    def WdfIoQueueReadyNotify(self, emu, argv, ctx={}):
        """
        NTSTATUS WdfIoQueueReadyNotify(
            WDFQUEUE Queue,
            PFN_WDF_IO_QUEUE_STATE QueueReady,
            WDFCONTEXT Context
        );
        """
        # Stub: accept the notification registration and report success
        DriverGlobals, Queue, QueueReady, Context = argv
        rv = ddk.STATUS_SUCCESS
        return rv

    @apihook("WdfDeviceCreateDeviceInterface", argc=4)
    def WdfDeviceCreateDeviceInterface(self, emu, argv, ctx={}):
        """
        NTSTATUS WdfDeviceCreateDeviceInterface(
            WDFDEVICE Device,
            const GUID *InterfaceClassGUID,
            PCUNICODE_STRING ReferenceString
        );
        """
        DriverGlobals, Device, InterfaceClassGUID, ReferenceString = argv
        rv = ddk.STATUS_SUCCESS

        # Decode the GUID / reference string purely so the API log shows
        # readable values instead of raw pointers
        if InterfaceClassGUID:
            guid = self.mem_read(InterfaceClassGUID, 16)
            guid = uuid.UUID(bytes_le=guid)
            argv[2] = str(guid)

        if ReferenceString:
            ref = self.read_unicode_string(ReferenceString)
            argv[3] = ref

        return rv

    @apihook("WdfIoQueueCreate", argc=5)
    def WdfIoQueueCreate(self, emu, argv, ctx={}):
        """
        NTSTATUS WdfIoQueueCreate(
            WDFDEVICE Device,
            PWDF_IO_QUEUE_CONFIG Config,
            PWDF_OBJECT_ATTRIBUTES QueueAttributes,
            WDFQUEUE *Queue
        );
        """
        DriverGlobals, Device, Config, QueueAttributes, Queue = argv
        rv = ddk.STATUS_SUCCESS

        # Record the queue config under a fresh handle on the owning driver
        queue_config = self.types.WDF_IO_QUEUE_CONFIG(emu.get_ptr_size())
        queue_config = self.mem_cast(queue_config, Config)

        hnd = self.get_handle()
        driver = self.wdf_drivers.get(DriverGlobals)
        driver.queues.update({hnd: queue_config})

        if Queue:
            self.mem_write(Queue, (hnd).to_bytes(emu.get_ptr_size(), "little"))

        return rv
@apihook("WdfDeviceWdmGetAttachedDevice", argc=2) def WdfDeviceWdmGetAttachedDevice(self, emu, argv, ctx={}): """ PDEVICE_OBJECT WdfDeviceWdmGetAttachedDevice( WDFDEVICE Device ); """ DriverGlobals, Device = argv if not self.pnp_device: do = ntos.DEVICE_OBJECT(emu.get_ptr_size()) self.pnp_device = self.mem_alloc( do.sizeof(), tag="api.struct.DEVICE_OBJECT" ) rv = self.pnp_device return rv @apihook("WdfUsbTargetDeviceCreateWithParameters", argc=5) def WdfUsbTargetDeviceCreateWithParameters(self, emu, argv, ctx={}): """ NTSTATUS WdfUsbTargetDeviceCreateWithParameters( WDFDEVICE Device, PWDF_USB_DEVICE_CREATE_CONFIG Config, PWDF_OBJECT_ATTRIBUTES Attributes, WDFUSBDEVICE *UsbDevice ); """ DriverGlobals, Device, Config, Attributes, UsbDevice = argv rv = ddk.STATUS_SUCCESS handle = self.get_handle() usb = WdfUsbDevice() self.usb_devices.update({handle: usb}) self.mem_write(UsbDevice, (handle).to_bytes(emu.get_ptr_size(), "little")) return rv @apihook("WdfDeviceWdmGetDeviceObject", argc=2) def WdfDeviceWdmGetDeviceObject(self, emu, argv, ctx={}): """ PDEVICE_OBJECT WdfDeviceWdmGetDeviceObject( WDFDEVICE Device ); """ DriverGlobals, Device = argv rv = 0 dev = self.wdf_devices.get(Device) if dev: rv = dev.device_object_addr return rv @apihook("WdfUsbTargetDeviceGetDeviceDescriptor", argc=3) def WdfUsbTargetDeviceGetDeviceDescriptor(self, emu, argv, ctx={}): """ void WdfUsbTargetDeviceGetDeviceDescriptor( WDFUSBDEVICE UsbDevice, PUSB_DEVICE_DESCRIPTOR UsbDeviceDescriptor ); """ DriverGlobals, UsbDevice, UsbDeviceDescriptor = argv dev = self.usb_devices.get(UsbDevice) if dev: dd = usbdefs.USB_DEVICE_DESCRIPTOR(emu.get_ptr_size()) self.mem_write(UsbDeviceDescriptor, dd.get_bytes()) return @apihook("WdfMemoryCreate", argc=7) def WdfMemoryCreate(self, emu, argv, ctx={}): """ NTSTATUS WdfMemoryCreate( PWDF_OBJECT_ATTRIBUTES Attributes, POOL_TYPE PoolType, ULONG PoolTag, size_t BufferSize, WDFMEMORY *Memory, PVOID *Buffer ); """ DriverGlobals, Attributes, PoolType, PoolTag, 
BufferSize, Mem, Buf = argv rv = ddk.STATUS_SUCCESS ptr = self.mem_alloc(BufferSize, tag="api.struct.WDFMEMORY") if Mem: self.mem_write(Mem, (ptr).to_bytes(emu.get_ptr_size(), "little")) if Buf:
#!/usr/bin/env python import argparse import concurrent.futures import logging import os import re import threading import time import cv2 import numpy as np import tensorboardX import torch from scipy import ndimage from robot import SimRobot from trainer import Trainer from utils import utils, viz from utils.logger import Logger class LearnManipulation: def __init__(self, args): # --------------- Setup options --------------- self.is_sim = args.is_sim sim_port = args.sim_port obj_mesh_dir = os.path.abspath(args.obj_mesh_dir) if self.is_sim else None num_obj = args.num_obj if self.is_sim else None # Cols: min max, Rows: x y z (define workspace limits in robot coordinates) if self.is_sim: self.workspace_limits = np.asarray([[-0.724, -0.276], [-0.224, 0.224], [-0.0001, 0.8]]) else: self.workspace_limits = np.asarray([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]]) self.heightmap_resolution = args.heightmap_resolution random_seed = args.random_seed force_cpu = args.force_cpu # ------------- Algorithm options ------------- network = args.network num_rotations = args.num_rotations self.future_reward_discount = args.future_reward_discount self.explore_actions = args.explore_actions self.explore_type = args.explore_type self.explore_rate_decay = args.explore_rate_decay self.LAE_sigma = 0.33 self.LAE_beta = 0.25 self.experience_replay_disabled = args.experience_replay_disabled self.push_enabled = args.push_enabled self.place_enabled = args.place_enabled self.max_iter = args.max_iter self.reward_type = args.reward_type self.filter_type = args.filter_type self.place_reward_scale = args.place_reward_scale self.goal_stack_height = args.goal_stack_height # -------------- Testing options -------------- self.is_testing = args.is_testing self.max_test_trials = args.max_test_trials test_preset_cases = args.test_preset_cases test_preset_file = os.path.abspath(args.test_preset_file) if test_preset_cases else None # ------ Pre-loading and logging options ------ if 
args.logging_directory and not args.snapshot_file: logging_directory = os.path.abspath(args.logging_directory) self.snapshot_file = os.path.join(logging_directory, 'models/snapshot-backup.pth') elif args.snapshot_file: logging_directory = os.path.abspath(args.logging_directory) self.snapshot_file = os.path.abspath(args.snapshot_file) else: logging_directory = None self.snapshot_file = None self.save_visualizations = args.save_visualizations # Initialize pick-and-place system (camera and robot) if self.is_sim: self.robot = SimRobot(sim_port, obj_mesh_dir, num_obj, self.workspace_limits, self.is_testing, test_preset_cases, test_preset_file, self.place_enabled) else: raise NotImplementedError # Initialize data logger self.logger = Logger(logging_directory, args) self.logger.save_camera_info(self.robot.cam_intrinsics, self.robot.cam_pose, self.robot.cam_depth_scale) self.logger.save_heightmap_info(self.workspace_limits, self.heightmap_resolution) # Tensorboard self.tb = tensorboardX.SummaryWriter(logging_directory) # Initialize trainer self.trainer = Trainer(network, force_cpu, self.push_enabled, self.place_enabled, num_rotations) # Find last executed iteration of pre-loaded log, and load execution info and RL variables if self.logger.logging_directory_exists and not self.is_testing: self.trainer.preload(self.logger.transitions_directory) self.trainer.load_snapshot(self.snapshot_file) elif args.snapshot_file: self.trainer.load_snapshot(self.snapshot_file) # Set random seed np.random.seed(random_seed) # Initialize variables for heuristic bootstrapping and exploration probability self.no_change_count = [2, 2] if not self.is_testing else [0, 0] self.explore_prob = 0.5 if not self.is_testing else 0.0 self.mission_complete = False self.execute_action = False self.shutdown_called = False self.prev_primitive_action = None self.prev_grasp_success = None self.prev_push_success = None self.prev_place_success = None self.prev_color_heightmap = None self.prev_depth_heightmap = 
None self.prev_best_pix_ind = None self.prev_stack_height = 0 self.last_task_complete = 0 self.push_predictions = None self.grasp_predictions = None self.place_predictions = None self.color_heightmap = None self.depth_heightmap = None self.primitive_action = None self.best_pix_ind = None self.predicted_value = None def policy(self): """ Determine whether grasping or pushing or placing should be executed based on network predictions """ best_push_conf = np.max(self.push_predictions) best_grasp_conf = np.max(self.grasp_predictions) best_place_conf = np.max(self.place_predictions) logging.info('Primitive confidence scores: %f (push), %f (grasp), %f (place)' % ( best_push_conf, best_grasp_conf, best_place_conf)) # Exploitation (do best action) vs exploration (do other action) if self.explore_actions and not self.is_testing: explore_actions = np.random.uniform() < self.explore_prob logging.info('Strategy: explore (exploration probability: %f)' % self.explore_prob) else: explore_actions = False self.trainer.is_exploit_log.append([0 if explore_actions else 1]) self.logger.write_to_log('is-exploit', self.trainer.is_exploit_log) # Select action type self.primitive_action = 'grasp' if self.place_enabled and self.prev_primitive_action == 'grasp' and self.prev_grasp_success: self.primitive_action = 'place' elif self.push_enabled: if best_push_conf > best_grasp_conf: self.primitive_action = 'push' if explore_actions: self.primitive_action = 'push' if np.random.randint(0, 2) == 0 else 'grasp' # Get pixel location and rotation with highest affordance prediction (rotation, y, x) if self.primitive_action == 'push': self.compute_action(explore_actions, self.push_predictions) elif self.primitive_action == 'grasp': self.compute_action(explore_actions, self.grasp_predictions) elif self.primitive_action == 'place': self.compute_action(explore_actions, self.place_predictions) else: raise NotImplementedError('Primitive action type {} is not implemented'.format(self.primitive_action)) # 
Save predicted confidence value self.trainer.predicted_value_log.append([self.predicted_value]) self.logger.write_to_log('predicted-value', self.trainer.predicted_value_log) def compute_action(self, explore_actions, predictions): if explore_actions: maximas = utils.k_largest_index_argpartition(predictions, k=10) self.best_pix_ind = maximas[np.random.choice(maximas.shape[0])] else: self.best_pix_ind = np.unravel_index(np.argmax(predictions), predictions.shape) self.predicted_value = predictions[self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]] def agent(self): """ Parallel thread to process network output and execute actions """ while not self.shutdown_called and self.trainer.iteration <= self.max_iter: if self.execute_action: # Select action based on policy self.policy() # Compute 3D position of pixel logging.info( 'Action: %s at (%d, %d, %d)' % ( self.primitive_action, self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2])) best_rotation_angle = np.deg2rad(self.best_pix_ind[0] * (360.0 / self.trainer.model.num_rotations)) best_pix_x = self.best_pix_ind[2] best_pix_y = self.best_pix_ind[1] primitive_position = [best_pix_x * self.heightmap_resolution + self.workspace_limits[0][0], best_pix_y * self.heightmap_resolution + self.workspace_limits[1][0], self.depth_heightmap[best_pix_y][best_pix_x] + self.workspace_limits[2][0]] # If pushing, adjust start position, and make sure z value is safe and not too low if self.primitive_action == 'push' or self.primitive_action == 'place': finger_width = 0.02 safe_kernel_width = int(np.round((finger_width / 2) / self.heightmap_resolution)) local_region = self.depth_heightmap[ max(best_pix_y - safe_kernel_width, 0):min(best_pix_y + safe_kernel_width + 1, self.depth_heightmap.shape[0]), max(best_pix_x - safe_kernel_width, 0):min(best_pix_x + safe_kernel_width + 1, self.depth_heightmap.shape[1])] if local_region.size == 0: safe_z_position = self.workspace_limits[2][0] else: safe_z_position = 
np.max(local_region) + self.workspace_limits[2][0] primitive_position[2] = safe_z_position # Save executed primitive if self.primitive_action == 'push': self.trainer.executed_action_log.append( [0, self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]]) # 0 - push elif self.primitive_action == 'grasp': self.trainer.executed_action_log.append( [1, self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]]) # 1 - grasp elif self.primitive_action == 'place': self.trainer.executed_action_log.append( [2, self.best_pix_ind[0], self.best_pix_ind[1], self.best_pix_ind[2]]) # 2 - place self.logger.write_to_log('executed-action', self.trainer.executed_action_log) # Visualize executed primitive, and affordances grasp_pred_vis = viz.get_prediction_vis(self.grasp_predictions, self.color_heightmap, self.best_pix_ind, 'grasp') imgs = torch.from_numpy(grasp_pred_vis).permute(2, 0, 1) self.tb.add_image('grasp_pred', imgs, self.trainer.iteration) # grasp_pred_vis = viz.get_prediction_full_vis(self.grasp_predictions, self.color_heightmap, self.best_pix_ind) # imgs = torch.from_numpy(grasp_pred_vis).permute(2, 0, 1) # self.tb.add_image('grasp_pred_full', imgs, self.trainer.iteration) if self.push_enabled: push_pred_vis = viz.get_prediction_vis(self.push_predictions, self.color_heightmap, self.best_pix_ind, 'push') imgs = torch.from_numpy(push_pred_vis).permute(2, 0, 1) self.tb.add_image('push_pred', imgs, self.trainer.iteration) if self.place_enabled: place_pred_vis = viz.get_prediction_vis(self.place_predictions, self.color_heightmap, self.best_pix_ind, 'place') imgs = torch.from_numpy(place_pred_vis).permute(2, 0, 1) self.tb.add_image('place_pred', imgs, self.trainer.iteration) if self.save_visualizations: if self.primitive_action == 'push': self.logger.save_visualizations(self.trainer.iteration, push_pred_vis, 'push') elif self.primitive_action == 'grasp': self.logger.save_visualizations(self.trainer.iteration, grasp_pred_vis, 'grasp') elif self.primitive_action 
== 'place': self.logger.save_visualizations(self.trainer.iteration, place_pred_vis, 'place') # Initialize variables that influence reward push_success = False grasp_success = False place_success = False # Execute primitive pool = concurrent.futures.ThreadPoolExecutor() try: if self.primitive_action == 'push': future = pool.submit(self.robot.push, primitive_position, best_rotation_angle) push_success = future.result(timeout=60) logging.info('Push successful: %r' % push_success) elif self.primitive_action == 'grasp': future = pool.submit(self.robot.grasp, primitive_position, best_rotation_angle) grasp_success = future.result(timeout=60) logging.info('Grasp successful: %r' % grasp_success) elif self.primitive_action == 'place': future = pool.submit(self.robot.place, primitive_position, best_rotation_angle) place_success = future.result(timeout=60) logging.info('Place successful: %r' % place_success) except concurrent.futures.TimeoutError: logging.error('Robot execution timeout!') self.mission_complete = False else: self.mission_complete = True # Save information for next training step self.prev_color_heightmap = self.color_heightmap.copy() self.prev_depth_heightmap = self.depth_heightmap.copy() self.prev_grasp_success = grasp_success self.prev_push_success = push_success self.prev_place_success = place_success self.prev_primitive_action = self.primitive_action self.prev_best_pix_ind = self.best_pix_ind self.execute_action = False else: time.sleep(0.1) def compute_reward(self, change_detected, stack_height): # Compute current reward current_reward = 0 if self.prev_primitive_action == 'push' and self.prev_push_success: if change_detected: if self.reward_type == 3: current_reward = 0.75 else: current_reward = 0.5 else: self.prev_push_success = False elif self.prev_primitive_action == 'grasp' and self.prev_grasp_success: if self.reward_type < 4: if (self.place_enabled and stack_height >= self.prev_stack_height) or (not self.place_enabled): current_reward = 1.0 else: 
self.prev_grasp_success = False elif self.reward_type == 4: if self.place_enabled: if stack_height >= self.prev_stack_height: current_reward = 1.0 else: self.prev_grasp_success = False current_reward = -0.5 else: current_reward = 1.0 elif self.prev_primitive_action == 'place' and self.prev_place_success: if stack_height > self.prev_stack_height: current_reward = self.place_reward_scale * stack_height else: self.prev_place_success = False # Compute future reward if self.place_enabled and not change_detected and not self.prev_grasp_success and not self.prev_place_success: future_reward = 0 elif not self.place_enabled and not change_detected and not self.prev_grasp_success: future_reward = 0 elif self.reward_type > 1 and current_reward == 0: future_reward = 0 else: future_reward = self.predicted_value expected_reward = current_reward + self.future_reward_discount * future_reward return expected_reward, current_reward, future_reward def reward_function(self): # Detect changes depth_diff = abs(self.depth_heightmap - self.prev_depth_heightmap) depth_diff[np.isnan(depth_diff)] = 0 depth_diff[depth_diff > 0.3] = 0 depth_diff[depth_diff < 0.01] = 0 depth_diff[depth_diff > 0] = 1 change_threshold = 300 change_value = np.sum(depth_diff) change_detected = change_value > change_threshold or self.prev_grasp_success logging.info('Change detected: %r (value: %d)' % (change_detected, change_value)) if change_detected: if self.prev_primitive_action == 'push': self.no_change_count[0] = 0 elif self.prev_primitive_action == 'grasp' or self.prev_primitive_action == 'place': self.no_change_count[1] = 0 else: if self.prev_primitive_action == 'push': self.no_change_count[0] += 1 elif self.prev_primitive_action == 'grasp': self.no_change_count[1] += 1 # Check stack height img_median = ndimage.median_filter(self.depth_heightmap, size=5) max_z = np.max(img_median) if max_z <= 0.069: stack_height = 1 elif (max_z > 0.069) and (max_z <= 0.11): stack_height = 2 elif (max_z > 0.11) and (max_z <= 
0.156): stack_height = 3 elif (max_z > 0.156) and (max_z <= 0.21): stack_height = 4 else: stack_height = 0 if self.place_enabled: logging.info('Current stack height is {}'.format(stack_height)) self.tb.add_scalar('stack_height', stack_height, self.trainer.iteration) #
for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]] would setup constant (uniform) multiplier parameters for recharge for stress period 1,5,11,and 16. temporal_list_props ([[`str`,[`int`]]]): list-type input stress-period level multiplier parameters. A nested list of list-type input elements to parameterize using name, iterable pairs. The iterable is zero-based stress-period indices. For example, to setup multipliers for WEL flux and for RIV conductance, temporal_list_props = [["wel.flux",[0,1,2]],["riv.cond",None]] would setup multiplier parameters for well flux for stress periods 1,2 and 3 and would setup one single river conductance multiplier parameter that is applied to all stress periods spatial_list_props ([[`str`,[`int`]]]): list-type input for spatial multiplier parameters. A nested list of list-type elements to parameterize using names (e.g. [["riv.cond",0],["wel.flux",1] to setup up cell-based parameters for each list-type element listed. These multiplier parameters are applied across all stress periods. For this to work, there must be the same number of entries for all stress periods. If more than one list element of the same type is in a single cell, only one parameter is used to multiply all lists in the same cell. grid_props ([[`str`,[`int`]]]): grid-based (every active model cell) multiplier parameters. A nested list of grid-scale model properties to parameterize using name, iterable pairs. For 3D properties, the iterable is zero-based layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier parameter for layer property file horizontal hydraulic conductivity for model layers 1,2, and 3 in every active model cell). For time-varying properties (e.g. recharge), the iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]] would setup grid-based multiplier parameters in every active model cell for recharge for stress period 1,5,11,and 16. 
sfr_pars (`bool`): setup parameters for the stream flow routing modflow package. If list is passed it defines the parameters to set up. sfr_temporal_pars (`bool`): flag to include stress-period level spatially-global multiplier parameters in addition to the spatially-discrete `sfr_pars`. Requires `sfr_pars` to be passed. Default is False grid_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to build the prior parameter covariance matrix elements for grid-based parameters. If None, a generic GeoStruct is created using an "a" parameter that is 10 times the max cell size. Default is None pp_space (`int`): number of grid cells between pilot points. If None, use the default in pyemu.pp_utils.setup_pilot_points_grid. Default is None zone_props ([[`str`,[`int`]]]): zone-based multiplier parameters. A nested list of zone-based model properties to parameterize using name, iterable pairs. For 3D properties, the iterable is zero-based layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier parameter for layer property file horizontal hydraulic conductivity for model layers 1,2, and 3 for unique zone values in the ibound array. For time-varying properties (e.g. recharge), the iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]] would setup zone-based multiplier parameters for recharge for stress period 1,5,11,and 16. pp_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to use for building the prior parameter covariance matrix for pilot point parameters. If None, a generic GeoStruct is created using pp_space and grid-spacing information. Default is None par_bounds_dict (`dict`): a dictionary of model property/boundary condition name, upper-lower bound pairs. For example, par_bounds_dict = {"hk":[0.01,100.0],"flux":[0.5,2.0]} would set the bounds for horizontal hydraulic conductivity to 0.01 and 100.0 and set the bounds for flux parameters to 0.5 and 2.0.
For parameters not found in par_bounds_dict, `pyemu.helpers.wildass_guess_par_bounds_dict` is used to set somewhat meaningful bounds. Default is None temporal_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to build the prior parameter covariance matrix for time-varying list-type multiplier parameters. This GeoStruct expresses the time correlation so that the 'a' parameter is the length of time that boundary condition multiplier parameters are correlated across. If None, then a generic GeoStruct is created that uses an 'a' parameter of 3 stress periods. Default is None spatial_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to build the prior parameter covariance matrix for spatially-varying list-type multiplier parameters. If None, a generic GeoStruct is created using an "a" parameter that is 10 times the max cell size. Default is None. remove_existing (`bool`): a flag to remove an existing new_model_ws directory. If False and new_model_ws exists, an exception is raised. If True and new_model_ws exists, the directory is destroyed - user beware! Default is False. k_zone_dict (`dict`): a dictionary of zero-based layer index, zone array pairs. e.g. {lay: np.2darray} Used to override using ibound zones for zone-based parameterization. If None, use ibound values greater than zero as zones. Alternatively a dictionary of dictionaries can be passed to allow different zones to be defined for different parameters. e.g. {"upw.hk" {lay: np.2darray}, "extra.rc11" {lay: np.2darray}} or {"hk" {lay: np.2darray}, "rc11" {lay: np.2darray}} use_pp_zones (`bool`): a flag to use ibound zones (or k_zone_dict, see above) as pilot point zones. If False, ibound values greater than zero are treated as a single zone for pilot points. Default is False obssim_smp_pairs ([[`str`,`str`]]: a list of observed-simulated PEST-type SMP file pairs to get observations from and include in the control file.
Default is [] external_tpl_in_pairs ([[`str`,`str`]]: a list of existing template file, model input file pairs to parse parameters from and include in the control file. Default is [] external_ins_out_pairs ([[`str`,`str`]]: a list of existing instruction file, model output file pairs to parse observations from and include in the control file. Default is [] extra_pre_cmds ([`str`]): a list of preprocessing commands to add to the forward_run.py script commands are executed with os.system() within forward_run.py. Default is None. redirect_forward_output (`bool`): flag for whether to redirect forward model output to text files (True) or allow model output to be directed to the screen (False). Default is True extra_post_cmds ([`str`]): a list of post-processing commands to add to the forward_run.py script. Commands are executed with os.system() within forward_run.py. Default is None. tmp_files ([`str`]): a list of temporary files that should be removed at the start of the forward run script. Default is []. model_exe_name (`str`): binary name to run modflow. If None, a default from flopy is used, which is dangerous because of the non-standard binary names (e.g. MODFLOW-NWT_x64, MODFLOWNWT, mfnwt, etc). Default is None. build_prior (`bool`): flag to build prior covariance matrix. Default is True sfr_obs (`bool`): flag to include observations of flow and aquifer exchange from the sfr ASCII output file hfb_pars (`bool`): add HFB parameters. uses pyemu.gw_utils.write_hfb_template(). the resulting HFB pars have parval1 equal to the values in the original file and use the spatial_list_geostruct to build geostatistical covariates between parameters kl_props ([[`str`,[`int`]]]): karhunen-loeve based multiplier parameters. A nested list of KL-based model properties to parameterize using name, iterable pairs. 
For 3D properties, the iterable is zero-based layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier parameter for layer property file horizontal hydraulic conductivity for model layers 1,2, and 3 for unique zone values in the ibound array. For time-varying properties (e.g. recharge), the iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]] would setup zone-based multiplier parameters for recharge for stress period 1,5,11,and 16. kl_num_eig (`int`): the number of KL-based eigenvector multiplier parameters to use for each KL parameter set. default is 100 kl_geostruct (`pyemu.geostats.Geostruct`): the geostatistical structure to build the prior parameter covariance matrix elements for KL-based parameters. If None, a generic GeoStruct is created using an "a" parameter that is 10 times the max cell size. Default is None Note: Setup up multiplier parameters for an existing MODFLOW model. Does all kinds of coolness like building a meaningful prior, assigning somewhat meaningful parameter groups and bounds, writes a forward_run.py script with all the calls need to implement multiplier parameters, run MODFLOW and post-process. 
Works a lot better if TEMPCHEK, INSCHEK and PESTCHEK are available in the system path variable """ def __init__( self, model, new_model_ws, org_model_ws=None, pp_props=[], const_props=[], temporal_bc_props=[], temporal_list_props=[], grid_props=[], grid_geostruct=None, pp_space=None, zone_props=[], pp_geostruct=None, par_bounds_dict=None, sfr_pars=False, temporal_sfr_pars=False, temporal_list_geostruct=None, remove_existing=False, k_zone_dict=None, mflist_waterbudget=True, mfhyd=True, hds_kperk=[], use_pp_zones=False, obssim_smp_pairs=None, external_tpl_in_pairs=None, external_ins_out_pairs=None, extra_pre_cmds=None, extra_model_cmds=None, extra_post_cmds=None, redirect_forward_output=True, tmp_files=None, model_exe_name=None, build_prior=True, sfr_obs=False, spatial_bc_props=[], spatial_list_props=[], spatial_list_geostruct=None, hfb_pars=False, kl_props=None, kl_num_eig=100, kl_geostruct=None, ):
import bisect import collections import os import queue import random import subprocess import threading import time import traceback from hydrus.core import HydrusData from hydrus.core import HydrusExceptions from hydrus.core import HydrusGlobals as HG NEXT_THREAD_CLEAROUT = 0 THREADS_TO_THREAD_INFO = {} THREAD_INFO_LOCK = threading.Lock() def CheckIfThreadShuttingDown(): if IsThreadShuttingDown(): raise HydrusExceptions.ShutdownException( 'Thread is shutting down!' ) def ClearOutDeadThreads(): with THREAD_INFO_LOCK: all_threads = list( THREADS_TO_THREAD_INFO.keys() ) for thread in all_threads: if not thread.is_alive(): del THREADS_TO_THREAD_INFO[ thread ] def GetThreadInfo( thread = None ): global NEXT_THREAD_CLEAROUT if HydrusData.TimeHasPassed( NEXT_THREAD_CLEAROUT ): ClearOutDeadThreads() NEXT_THREAD_CLEAROUT = HydrusData.GetNow() + 600 if thread is None: thread = threading.current_thread() with THREAD_INFO_LOCK: if thread not in THREADS_TO_THREAD_INFO: thread_info = {} thread_info[ 'shutting_down' ] = False THREADS_TO_THREAD_INFO[ thread ] = thread_info return THREADS_TO_THREAD_INFO[ thread ] def IsThreadShuttingDown(): if HG.controller.DoingFastExit(): return True me = threading.current_thread() if isinstance( me, DAEMON ): if HG.started_shutdown: return True else: if HG.model_shutdown: return True thread_info = GetThreadInfo() return thread_info[ 'shutting_down' ] def ShutdownThread( thread ): thread_info = GetThreadInfo( thread ) thread_info[ 'shutting_down' ] = True def SubprocessCommunicate( process: subprocess.Popen ): def do_test(): if HG.model_shutdown: try: process.kill() except: pass raise HydrusExceptions.ShutdownException( 'Application is shutting down!' 
) do_test() while True: try: return process.communicate( timeout = 10 ) except subprocess.TimeoutExpired: do_test() class DAEMON( threading.Thread ): def __init__( self, controller, name ): threading.Thread.__init__( self, name = name ) self._controller = controller self._name = name self._event = threading.Event() self._controller.sub( self, 'wake', 'wake_daemons' ) self._controller.sub( self, 'shutdown', 'shutdown' ) def _DoPreCall( self ): if HG.daemon_report_mode: HydrusData.ShowText( self._name + ' doing a job.' ) def GetCurrentJobSummary( self ): return 'unknown job' def GetName( self ): return self._name def shutdown( self ): ShutdownThread( self ) self.wake() def wake( self ): self._event.set() class DAEMONWorker( DAEMON ): def __init__( self, controller, name, callable, topics = None, period = 3600, init_wait = 3, pre_call_wait = 0 ): if topics is None: topics = [] DAEMON.__init__( self, controller, name ) self._callable = callable self._topics = topics self._period = period self._init_wait = init_wait self._pre_call_wait = pre_call_wait for topic in topics: self._controller.sub( self, 'set', topic ) self.start() def _CanStart( self ): return self._ControllerIsOKWithIt() def _ControllerIsOKWithIt( self ): return True def _DoAWait( self, wait_time, event_can_wake = True ): time_to_start = HydrusData.GetNow() + wait_time while not HydrusData.TimeHasPassed( time_to_start ): if event_can_wake: event_was_set = self._event.wait( 1.0 ) if event_was_set: self._event.clear() return else: time.sleep( 1.0 ) CheckIfThreadShuttingDown() def _WaitUntilCanStart( self ): while not self._CanStart(): time.sleep( 1.0 ) CheckIfThreadShuttingDown() def GetCurrentJobSummary( self ): return self._callable def run( self ): try: self._DoAWait( self._init_wait ) while True: CheckIfThreadShuttingDown() self._DoAWait( self._pre_call_wait, event_can_wake = False ) CheckIfThreadShuttingDown() self._WaitUntilCanStart() CheckIfThreadShuttingDown() self._DoPreCall() try: self._callable( 
self._controller ) except HydrusExceptions.ShutdownException: return except Exception as e: HydrusData.ShowText( 'Daemon ' + self._name + ' encountered an exception:' ) HydrusData.ShowException( e ) self._DoAWait( self._period ) except HydrusExceptions.ShutdownException: return def set( self, *args, **kwargs ): self._event.set() # Big stuff like DB maintenance that we don't want to run while other important stuff is going on, like user interaction or vidya on another process class DAEMONBackgroundWorker( DAEMONWorker ): def _ControllerIsOKWithIt( self ): return self._controller.GoodTimeToStartBackgroundWork() # Big stuff that we want to run when the user sees, but not at the expense of something else, like laggy session load class DAEMONForegroundWorker( DAEMONWorker ): def _ControllerIsOKWithIt( self ): return self._controller.GoodTimeToStartForegroundWork() class THREADCallToThread( DAEMON ): def __init__( self, controller, name ): DAEMON.__init__( self, controller, name ) self._callable = None self._queue = queue.Queue() self._currently_working = True # start off true so new threads aren't used twice by two quick successive calls def CurrentlyWorking( self ): return self._currently_working def GetCurrentJobSummary( self ): return self._callable def put( self, callable, *args, **kwargs ): self._currently_working = True self._queue.put( ( callable, args, kwargs ) ) self._event.set() def run( self ): try: while True: while self._queue.empty(): CheckIfThreadShuttingDown() self._event.wait( 10.0 ) self._event.clear() CheckIfThreadShuttingDown() try: try: ( callable, args, kwargs ) = self._queue.get( 1.0 ) except queue.Empty: # https://github.com/hydrusnetwork/hydrus/issues/750 # this shouldn't happen, but... 
# even if we assume we'll never get this, we don't want to make a business of hanging forever on things continue self._DoPreCall() self._callable = ( callable, args, kwargs ) if HG.profile_mode: summary = 'Profiling CallTo Job: {}'.format( callable ) HydrusData.Profile( summary, 'callable( *args, **kwargs )', globals(), locals(), min_duration_ms = HG.callto_profile_min_job_time_ms ) else: callable( *args, **kwargs ) self._callable = None del callable except HydrusExceptions.ShutdownException: return except Exception as e: HydrusData.Print( traceback.format_exc() ) HydrusData.ShowException( e ) finally: self._currently_working = False time.sleep( 0.00001 ) except HydrusExceptions.ShutdownException: return class JobScheduler( threading.Thread ): def __init__( self, controller ): threading.Thread.__init__( self, name = 'Job Scheduler' ) self._controller = controller self._waiting = [] self._waiting_lock = threading.Lock() self._new_job_arrived = threading.Event() self._current_job = None self._cancel_filter_needed = threading.Event() self._sort_needed = threading.Event() self._controller.sub( self, 'shutdown', 'shutdown' ) def _FilterCancelled( self ): with self._waiting_lock: self._waiting = [ job for job in self._waiting if not job.IsCancelled() ] def _GetLoopWaitTime( self ): with self._waiting_lock: if len( self._waiting ) == 0: return 0.2 next_job = self._waiting[0] time_delta_until_due = next_job.GetTimeDeltaUntilDue() return min( 1.0, time_delta_until_due ) def _NoWorkToStart( self ): with self._waiting_lock: if len( self._waiting ) == 0: return True next_job = self._waiting[0] if next_job.IsDue(): return False else: return True def _SortWaiting( self ): # sort the waiting jobs in ascending order of expected work time with self._waiting_lock: # this uses __lt__ to sort self._waiting.sort() def _StartWork( self ): jobs_started = 0 while True: with self._waiting_lock: if len( self._waiting ) == 0: break if jobs_started >= 10: # try to avoid spikes break next_job 
= self._waiting[0] if not next_job.IsDue(): # front is not due, so nor is the rest of the list break next_job = self._waiting.pop( 0 ) if next_job.IsCancelled(): continue if next_job.SlotOK(): # important this happens outside of the waiting lock lmao! next_job.StartWork() jobs_started += 1 else: # delay is automatically set by SlotOK with self._waiting_lock: bisect.insort( self._waiting, next_job ) def AddJob( self, job ): with self._waiting_lock: bisect.insort( self._waiting, job ) self._new_job_arrived.set() def ClearOutDead( self ): with self._waiting_lock: self._waiting = [ job for job in self._waiting if not job.IsDead() ] def GetName( self ): return 'Job Scheduler' def GetCurrentJobSummary( self ): with self._waiting_lock: return HydrusData.ToHumanInt( len( self._waiting ) ) + ' jobs'
= [n for n in list(parallel.sub) if n.kind == 'Reversed'][0] nets['lstm2'] = rev.sub[0] hidden = int(nets['lstm1'].attribute[0].value) weights = {} # type: Dict[str, torch.Tensor] for n in nets: weights[n] = {} for w in list(nets[n].weights): weights[n][w.name] = torch.Tensor(w.value).view(list(w.dim)) if mode == 'clstm_compat': weightnames = ('.WGI', '.WGF', '.WCI', '.WGO') weightname_softm = '.W' else: weightnames = ('WGI', 'WGF', 'WCI', 'WGO') weightname_softm = 'W1' # input hidden and hidden-hidden weights are in one matrix. also # CLSTM/ocropy likes 1-augmenting every other tensor so the ih weights # are input+1 in one dimension. t = torch.cat(list(w for w in [weights['lstm1'][wn] for wn in weightnames])) weight_ih_l0 = t[:, :input+1] weight_hh_l0 = t[:, input+1:] t = torch.cat(list(w for w in [weights['lstm2'][wn] for wn in weightnames])) weight_ih_l0_rev = t[:, :input+1] weight_hh_l0_rev = t[:, input+1:] weight_lin = weights['softm'][weightname_softm] if mode == 'clstm_compat': weight_lin = torch.cat([torch.zeros(len(weight_lin), 1), weight_lin], 1) # build vgsl spec and set weights nn = cls('[1,1,0,{} Lbxc{} O1ca{}]'.format(input, hidden, len(net.codec))) nn.nn.L_0.layer.weight_ih_l0 = torch.nn.Parameter(weight_ih_l0) nn.nn.L_0.layer.weight_hh_l0 = torch.nn.Parameter(weight_hh_l0) nn.nn.L_0.layer.weight_ih_l0_reverse = torch.nn.Parameter(weight_ih_l0_rev) nn.nn.L_0.layer.weight_hh_l0_reverse = torch.nn.Parameter(weight_hh_l0_rev) nn.nn.O_1.lin.weight = torch.nn.Parameter(weight_lin) nn.add_codec(codec) return nn @classmethod def load_model(cls, path: Union[str, pathlib.Path]): """ Deserializes a VGSL model from a CoreML file. Args: path: CoreML file Returns: A TorchVGSLModel instance. Raises: KrakenInvalidModelException if the model data is invalid (not a string, protobuf file, or without appropriate metadata). FileNotFoundError if the path doesn't point to a file. 
""" if isinstance(path, pathlib.Path): path = path.as_posix() try: mlmodel = MLModel(path) except TypeError as e: raise KrakenInvalidModelException(str(e)) except DecodeError as e: raise KrakenInvalidModelException('Failure parsing model protobuf: {}'.format(str(e))) if 'vgsl' not in mlmodel.user_defined_metadata: raise KrakenInvalidModelException('No VGSL spec in model metadata') vgsl_spec = mlmodel.user_defined_metadata['vgsl'] nn = cls(vgsl_spec) def _deserialize_layers(name, layer): logger.debug(f'Deserializing layer {name} with type {type(layer)}') if type(layer) in (layers.MultiParamParallel, layers.MultiParamSequential): for name, l in layer.named_children(): _deserialize_layers(name, l) else: layer.deserialize(name, mlmodel.get_spec()) _deserialize_layers('', nn.nn) if 'codec' in mlmodel.user_defined_metadata: nn.add_codec(PytorchCodec(json.loads(mlmodel.user_defined_metadata['codec']))) nn.user_metadata = {'accuracy': [], 'seg_type': 'bbox', 'one_channel_mode': '1', 'model_type': None, 'hyper_params': {}} # type: dict[str, str] if 'kraken_meta' in mlmodel.user_defined_metadata: nn.user_metadata.update(json.loads(mlmodel.user_defined_metadata['kraken_meta'])) return nn @property def one_channel_mode(self): return self.user_metadata['one_channel_mode'] @one_channel_mode.setter def one_channel_mode(self, val: str): if val not in ['1', 'L', None]: raise ValueError('one_channel_mode {} is not one of [1, L, None]'.format(val)) self.user_metadata['one_channel_mode'] = val @property def model_type(self): return self.user_metadata['model_type'] @model_type.setter def model_type(self, val: str): if val not in ['recognition', 'segmentation']: raise ValueError('model_type {} is not one of [recognition, segmentation]'.format(val)) self.user_metadata['model_type'] = val @property def seg_type(self): return self.user_metadata['seg_type'] @seg_type.setter def seg_type(self, val: str): if val not in ['bbox', 'baselines', None]: raise ValueError('segmentation type {} is not 
one of [bbox, baselines, None]'.format(val)) self.user_metadata['seg_type'] = val @property def hyper_params(self, **kwargs): return self.user_metadata['hyper_params'] @hyper_params.setter def hyper_params(self, val: Dict[str, Any]): self.user_metadata['hyper_params'].update(val) def save_model(self, path: str): """ Serializes the model into path. Args: path (str): Target destination """ inputs = [('input', datatypes.Array(*self.input))] outputs = [('output', datatypes.Array(*self.output))] net_builder = NeuralNetworkBuilder(inputs, outputs) input = 'input' prev_device = next(self.nn.parameters()).device try: self.nn.to('cpu') def _serialize_layer(net, input, net_builder): for name, l in net.named_children(): logger.debug(f'Serializing layer {name} with type {type(l)}') if type(l) in (layers.MultiParamParallel, layers.MultiParamSequential): _serialize_layer(l, input, net_builder) else: l.serialize(name, input, net_builder) _serialize_layer(self.nn, input, net_builder) mlmodel = MLModel(net_builder.spec) mlmodel.short_description = 'kraken model' mlmodel.user_defined_metadata['vgsl'] = '[' + ' '.join(self.named_spec) + ']' if self.codec: mlmodel.user_defined_metadata['codec'] = json.dumps(self.codec.c2l) if self.user_metadata: mlmodel.user_defined_metadata['kraken_meta'] = json.dumps(self.user_metadata) mlmodel.save(path) finally: self.nn.to(prev_device) def add_codec(self, codec: PytorchCodec) -> None: """ Adds a PytorchCodec to the model. """ self.codec = codec def init_weights(self, idx: slice = slice(0, None)) -> None: """ Initializes weights for all or a subset of layers in the graph. LSTM/GRU layers are orthogonally initialized, convolutional layers uniformly from (-0.1,0.1). Args: idx (slice): A slice object representing the indices of layers to initialize. 
""" def _wi(m): if isinstance(m, torch.nn.Linear): torch.nn.init.xavier_uniform_(m.weight.data) torch.nn.init.constant_(m.bias.data, 0) elif isinstance(m, torch.nn.LSTM): for p in m.parameters(): # weights if p.data.dim() == 2: torch.nn.init.orthogonal_(p.data) # initialize biases to 1 (jozefowicz 2015) else: torch.nn.init.constant_(p.data[len(p)//4:len(p)//2], 1.0) elif isinstance(m, torch.nn.GRU): for p in m.parameters(): torch.nn.init.orthogonal_(p.data) elif isinstance(m, torch.nn.Conv2d): for p in m.parameters(): torch.nn.init.uniform_(p.data, -0.1, 0.1) self.nn[idx].apply(_wi) def resize_output(self, output_size: int, del_indices: Optional[Iterable] = None) -> None: """ Resizes an output layer. Args: output_size (int): New size/output channels of last layer del_indices (list): list of outputs to delete from layer """ if type(self.nn[-1]) not in [layers.ActConv2D, layers.LinSoftmax]: raise ValueError('last layer is neither linear nor convolutional layer') logger.debug('Resizing output layer to {}'.format(output_size)) self.nn[-1].resize(output_size, del_indices) pattern = re.compile(r'(O)(?P<name>{\w+})?(?P<dim>2|1|0)(?P<type>l|s|c)(?P<aug>a)?(?P<out>\d+)') m = pattern.match(self.named_spec[-1]) if not m: raise ValueError('Output specification is not parsable') aug = m.group('aug') if m.group('aug') else '' self.named_spec[-1] = 'O{}{}{}{}{}'.format(m.group('name'), m.group('dim'), m.group('type'), aug, output_size) self.spec = '[' + ' '.join(self.named_spec) + ']' def build_rnn(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds an LSTM/GRU layer returning number of outputs and layer. 
""" pattern = re.compile(r'(?P<type>L|G)(?P<dir>f|r|b)(?P<dim>x|y)(?P<sum>s)?(?P<legacy>c|o)?(?P<name>{\w+})?(?P<out>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None type = m.group('type') direction = m.group('dir') dim = m.group('dim') == 'y' summarize = m.group('sum') == 's' legacy = None if m.group('legacy') == 'c': legacy = 'clstm' elif m.group('legacy') == 'o': legacy = 'ocropy' hidden = int(m.group(7)) fn = layers.TransposedSummarizingRNN(input[1], hidden, direction, dim, summarize, legacy) self.idx += 1 logger.debug(f'{self.idx}\t\trnn\tdirection {direction} transposed {dim} ' f'summarize {summarize} out {hidden} legacy {legacy}') return fn.get_shape(input), [VGSLBlock(blocks[idx], type, m.group('name'), self.idx)], fn def build_dropout(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: pattern = re.compile(r'(?P<type>Do)(?P<name>{\w+})?(?P<p>(\d+(\.\d*)?|\.\d+))?(,(?P<dim>\d+))?') m = pattern.match(blocks[idx]) if not m: return None, None, None prob = float(m.group('p')) if m.group('p') else 0.5 dim = int(m.group('dim')) if m.group('dim') else 1 fn = layers.Dropout(prob, dim) self.idx += 1 logger.debug('{}\t\tdropout\tprobability {} dims {}'.format(self.idx, prob, dim)) return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_addition(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: pattern = re.compile(r'(?P<type>A)(?P<name>{\w+})?(?P<dim>\d+),(?P<chunk_size>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None dim_map = {0: 0, 1: 2, 2: 3, 3: 1} dim = int(m.group('dim')) chunk_size = int(m.group('chunk_size')) if dim > 3: raise ValueError(f'Invalid dimension {dim} in addition block') dim = dim_map[dim] fn = layers.Addition(dim=dim, chunk_size=chunk_size) 
self.idx += 1 logger.debug(f'{self.idx}\t\taddition dim: {dim} chunk: {chunk_size}') return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_identity(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: pattern = re.compile(r'(?P<type>I)(?P<name>{\w+})?') m = pattern.match(blocks[idx]) if not m: return None, None, None fn = layers.Identity() self.idx += 1 logger.debug(f'{self.idx}\t\tidentity') return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_groupnorm(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: pattern = re.compile(r'(?P<type>Gn)(?P<name>{\w+})?(?P<groups>\d+)') m = pattern.match(blocks[idx]) if not m: return None, None, None groups = int(m.group('groups')) fn = layers.GroupNorm(input[1], groups) self.idx += 1 logger.debug('{}\t\tgroupnorm\tgroups {}'.format(self.idx, groups)) return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_conv(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a 2D convolution layer. 
""" pattern = re.compile(r'(?P<type>C)(?P<nl>s|t|r|l|m)(?P<name>{\w+})?(\d+),' r'(\d+),(?P<out>\d+)(,(?P<stride_y>\d+),(?P<stride_x>\d+))?') m = pattern.match(blocks[idx]) if not m: return None, None, None kernel_size = (int(m.group(4)), int(m.group(5))) filters = int(m.group('out')) stride = (int(m.group('stride_y')), int(m.group('stride_x'))) if m.group('stride_x') else (1, 1) nl = m.group('nl') fn = layers.ActConv2D(input[1], filters, kernel_size, stride, nl) self.idx += 1 logger.debug(f'{self.idx}\t\tconv\tkernel {kernel_size[0]} x {kernel_size[1]} ' f'filters {filters} stride {stride} activation {nl}') return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_maxpool(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a maxpool layer. """ pattern = re.compile(r'(?P<type>Mp)(?P<name>{\w+})?(\d+),(\d+)(?:,(\d+),(\d+))?') m = pattern.match(blocks[idx]) if not m: return None, None, None kernel_size = (int(m.group(3)), int(m.group(4))) stride = (kernel_size[0] if not m.group(5) else int(m.group(5)), kernel_size[1] if not m.group(6) else int(m.group(6))) fn = layers.MaxPool(kernel_size, stride) self.idx += 1 logger.debug(f'{self.idx}\t\tmaxpool\tkernel {kernel_size[0]} x {kernel_size[1]} stride {stride[0]} x {stride[1]}') return fn.get_shape(input), [VGSLBlock(blocks[idx], m.group('type'), m.group('name'), self.idx)], fn def build_reshape(self, input: Tuple[int, int, int, int], blocks: List[str], idx: int) -> Union[Tuple[None, None, None], Tuple[Tuple[int, int, int, int], str, Callable]]: """ Builds a reshape
ML_SSE_m128_v4float32), type_strict_match(*(3*(ML_SSE_m128_v2float64,))): XmmIntrin("_mm_or_pd", arity = 2, output_precision = ML_SSE_m128_v2float64), }, }, }, Conversion: { None: { lambda _: True: { # not supported in SSE (else fallback on generic erroneous # implementation) type_strict_match(ML_Int32, ML_SSE_m128_v1int32): ERROR_OPERATOR, type_strict_match(ML_UInt32, ML_SSE_m128_v1int32): ERROR_OPERATOR, type_strict_match(ML_SSE_m128_v1float32, ML_Binary32): _mm_set_ss, type_strict_match(ML_Binary32, ML_SSE_m128_v1float32): _mm_cvtss_f32, type_strict_match(ML_SSE_m128_v1float64, ML_Binary64): _mm_set_sd, type_strict_match(ML_Binary64, ML_SSE_m128_v1float64): _mm_cvtsd_f64, # m128 float vector from ML's generic vector format type_strict_match(ML_SSE_m128_v4float32, v4float32): IdentityOperator(), # m128 float vector from ML's generic vector format type_strict_match(ML_SSE_m128_v4uint32, v4uint32): SymbolOperator("(__m128i)", arity=1), # m128 float vector to ML's generic vector format type_strict_match(v4float32, ML_SSE_m128_v4float32): IdentityOperator(), type_strict_match(v4uint32, ML_SSE_m128_v4uint32): SymbolOperator("(ml_uint4_t)", arity=1), # signed integer format type_strict_match(ML_SSE_m128_v4int32, v4int32): SymbolOperator("(__m128i)", arity=1), type_strict_match(v4int32, ML_SSE_m128_v4int32): SymbolOperator("(ml_int4_t)", arity=1), # identity operators lambda dst_type, src_type, **kwords: dst_type == src_type: IdentityOperator(), # signed/unsigned conversions type_strict_match(ML_SSE_m128_v4int32, ML_SSE_m128_v4uint32): TransparentOperator(), type_strict_match(ML_SSE_m128_v4uint32, ML_SSE_m128_v4int32): TransparentOperator(), }, }, }, ReciprocalSeed: { None: { lambda _: True: { type_strict_match(ML_SSE_m128_v4float32, ML_SSE_m128_v4float32): _mm_rcp_ps(FO_Arg(0)), type_strict_match(ML_SSE_m128_v1float32, ML_SSE_m128_v1float32): _mm_rcp_ss(FO_Arg(0)), type_strict_match(ML_Binary32, ML_Binary32): _mm_cvtss_f32(_mm_rcp_ss(_mm_set_ss(FO_Arg(0)))), }, }, }, 
Multiplication: { None: { lambda _: True: { type_strict_match(*(3*(ML_SSE_m128_v4float32,))): XmmIntrin("_mm_mul_ps", arity = 2, output_precision = ML_SSE_m128_v4float32), type_strict_match(*(3*(ML_SSE_m128_v1float32,))): _mm_mul_ss(FO_Arg(0), FO_Arg(1)), type_strict_match(ML_Binary32, ML_Binary32, ML_Binary32): _mm_cvtss_f32(_mm_mul_ss(_mm_set_ss(FO_Arg(0)), _mm_set_ss(FO_Arg(1)))), # vector multiplication type_strict_match(*(3*(ML_SSE_m128_v4float32,))): XmmIntrin("_mm_mul_ps", arity = 2), }, }, }, NearestInteger: { None: { lambda optree: True: { type_strict_match(ML_Int32, ML_Binary32): _mm_cvt_ss2si(_mm_set_ss(FO_Arg(0))), }, }, }, Negation: { None: { lambda optree: True: { # Float negation type_strict_match(*(2*(ML_SSE_m128_v4float32,))): XmmIntrin("_mm_xor_ps", arity = 2)( FO_Arg(0), FO_Value("_mm_set1_ps(-0.0f)", ML_SSE_m128_v4float32) ), }, }, }, Subtraction: { None: { lambda _: True: { type_strict_match(*(3*(ML_SSE_m128_v4float32,))): XmmIntrin("_mm_sub_ps", arity = 2), type_strict_match(*(3*(ML_SSE_m128_v1float32,))): XmmIntrin("_mm_sub_ss", arity = 2), }, }, }, TableLoad: { None: { lambda optree: True: { # XMM version type_custom_match(FSM(ML_SSE_m128_v1float32), TCM(ML_TableFormat), FSM(ML_Int32)): XmmIntrin("_mm_load_ss", arity = 1, output_precision = ML_SSE_m128_v1float32)( TemplateOperatorFormat( "(float*)&{}[{}]", arity=2, output_precision=ML_Pointer_Format(ML_Binary32) ) ), type_custom_match(FSM(ML_SSE_m128_v1float32), TCM(ML_TableFormat), FSM(ML_SSE_m128_v1int32)): XmmIntrin("_mm_load_ss", arity = 1, output_precision = ML_SSE_m128_v1float32)( TemplateOperatorFormat( "(float*)&{}[_mm_cvtsi128_si32({})]", arity=2, output_precision=ML_Pointer_Format(ML_Binary32) ) ), }, }, }, TypeCast: { None: { lambda _: True: { type_strict_match(ML_SSE_m128_v4bool, ML_SSE_m128_v4int32): IdentityOperator(), type_strict_match(ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32): IdentityOperator(), }, }, }, } sse2_c_code_generation_table = { Select: { None: { 
not_pred_vector_select_one_zero: { type_strict_match(ML_SSE_m128_v4int32, ML_SSE_m128_v4bool, ML_SSE_m128_v4int32, ML_SSE_m128_v4int32): ComplexOperator(convert_select_to_logic), type_strict_match(ML_SSE_m128_v4uint32, ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32, ML_SSE_m128_v4uint32): ComplexOperator(convert_select_to_logic), }, }, }, Addition: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v1int32,))): EmmIntrin("_mm_add_epi32", arity = 2), type_strict_match(*(3*(ML_SSE_m128_v4int32,))): EmmIntrin("_mm_add_epi32", arity = 2), type_strict_match(*(3*(ML_SSE_m128_v4uint32,))): EmmIntrin("_mm_add_epi32", arity = 2), }, }, }, BitLogicAnd: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v4int32,))): EmmIntrin("_mm_and_si128", arity = 2), type_strict_match(*(3*(ML_SSE_m128_v4uint32,))): EmmIntrin("_mm_and_si128", arity = 2), }, }, }, BitLogicOr: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v4int32,))): EmmIntrin("_mm_or_si128", arity = 2, output_precision = ML_SSE_m128_v4int32), type_strict_match(*(3*(ML_SSE_m128_v4uint32,))): EmmIntrin("_mm_or_si128", arity = 2, output_precision = ML_SSE_m128_v4uint32), type_strict_match(*(3*(ML_SSE_m128_v2int64,))): EmmIntrin("_mm_or_si128", arity = 2, output_precision = ML_SSE_m128_v2int64), type_strict_match(*(3*(ML_SSE_m128_v2uint64,))): EmmIntrin("_mm_or_si128", arity = 2, output_precision = ML_SSE_m128_v2uint64), }, }, }, BitLogicNegate: { None: { lambda _: True: { type_strict_match(ML_SSE_m128_v4int32, ML_SSE_m128_v4int32): ImmIntrin("_mm_andnot_si128", arity = 2)( FO_Arg(0), FO_Value("_mm_set1_epi32(-1)", ML_SSE_m128_v4int32) ), }, }, }, LogicalNot: { None: { lambda _: True: { type_strict_match(ML_SSE_m128_v4bool, ML_SSE_m128_v4bool): ImmIntrin("_mm_andnot_si128", arity = 2)( FO_Arg(0), FO_Value("_mm_set1_epi32(-1)", ML_SSE_m128_v4bool) ), }, }, }, LogicalOr: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v4bool,))): XmmIntrin("_mm_or_si128", 
arity = 2, output_precision = ML_SSE_m128_v4bool), }, }, }, LogicalAnd: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v4bool,))): XmmIntrin("_mm_and_si128", arity = 2, output_precision = ML_SSE_m128_v4bool), }, }, }, BitLogicLeftShift: { None: { uniform_shift_check: { type_strict_match_list( [ML_SSE_m128_v4int32, ML_SSE_m128_v4uint32,], [ML_SSE_m128_v4int32, ML_SSE_m128_v4uint32,], [ML_Int32, ML_UInt32,]): EmmIntrin("_mm_slli_epi32", arity = 2, arg_map = {0: FO_Arg(0), 1: FO_Arg(1)})( FO_Arg(0), FO_Arg(1)), # TODO the last argument is a scalar on 64 bits here, see # documentation on _mm_sll_epi32. We need to make sure that the # last vector is a constant that can be changed into either an # imm8 (above) or an ML_SSE_m128_v1[u]int64. type_strict_match_list(*(3*([ ML_SSE_m128_v4int32, ML_SSE_m128_v4uint32, ],))): EmmIntrin("_mm_sll_epi32", arity = 2, arg_map = {0: FO_Arg(0), 1: FO_Arg(1)})( FO_Arg(0), FO_Arg(1)), type_strict_match_list(*(3*([ ML_SSE_m128_v2int64, ML_SSE_m128_v2uint64, ],))): EmmIntrin("_mm_sll_epi64", arity = 2, arg_map = {0: FO_Arg(0), 1: FO_Arg(1)})( FO_Arg(0), FO_Arg(1)), }, }, }, BitLogicRightShift: { None: { uniform_shift_check: { type_strict_match_list( [ML_SSE_m128_v4int32, ML_SSE_m128_v4uint32,], [ML_SSE_m128_v4int32, ML_SSE_m128_v4uint32,], [ML_Int32, ML_UInt32,]): EmmIntrin("_mm_srli_epi32", arity = 2, arg_map = {0: FO_Arg(0), 1: FO_Arg(1)})( FO_Arg(0), FO_Arg(1) ), # TODO the last argument is a scalar here, see documentation on # _mm_srl_epi32. 
We need to make sure that the last vector is a # constant that can be changed into either an imm8 (above) or # an ML_SSE_m128_v1int32 type_strict_match(*(3*(ML_SSE_m128_v4int32,))): EmmIntrin("_mm_srl_epi32", arity = 2, arg_map = {0: FO_Arg(0), 1: FO_Arg(1)})( FO_Arg(0), FO_Arg(1) ), # TODO: using signed primitives for unsigned formats type_strict_match(*(3*(ML_SSE_m128_v4uint32,))): EmmIntrin("_mm_srl_epi32", arity = 2, arg_map = {0: FO_Arg(0), 1: FO_Arg(1)})( FO_Arg(0), FO_Arg(1) ), }, }, }, BitArithmeticRightShift: { None: { uniform_shift_check: { type_strict_match_list( [ ML_SSE_m128_v4int32, ML_SSE_m128_v4uint32 ], [ ML_SSE_m128_v4int32, ML_SSE_m128_v4uint32 ], [ ML_Int32, ML_UInt32 ] ): EmmIntrin("_mm_srai_epi32", arity = 2, arg_map = {0: FO_Arg(0), 1: FO_Arg(1)})( FO_Arg(0), FO_Arg(1) ), # TODO the last argument is a scalar here, see documentation on # _mm_srl_epi32. We need to make sure that the last vector is a # constant that can be changed into either an imm8 (above) or # an ML_SSE_m128_v1int32 type_strict_match(*(3*(ML_SSE_m128_v4int32,))): EmmIntrin("_mm_sra_epi32", arity = 2, arg_map = {0: FO_Arg(0), 1: FO_Arg(1)})( FO_Arg(0), FO_Arg(1) ), }, }, }, Conversion: { None: { lambda optree: True: { type_strict_match(ML_SSE_m128_v4float32, ML_SSE_m128_v4int32): EmmIntrin("_mm_cvtepi32_ps", arity = 1), type_strict_match(ML_SSE_m128_v4int32, ML_SSE_m128_v4float32): EmmIntrin("_mm_cvtps_epi32", arity = 1), type_strict_match(ML_SSE_m128_v1int32, ML_Int32): _mm_set1_epi32, type_strict_match(ML_Int32, ML_SSE_m128_v1int32): EmmIntrin("_mm_cvtsi128_si32", arity = 1), type_strict_match(v4int32, ML_SSE_m128_v4int32): SymbolOperator("(ml_int4_t)", arity=1), type_strict_match(*((ML_SSE_m128_v4int32,) + 4*(ML_Int32,))): XmmIntrin("_mm_set_epi32", arity = 4), #type_strict_match(ML_SSE_m128_v4int32, v4int32): # ComplexOperator(optree_modifier = v4_to_m128_modifier), type_strict_match(ML_SSE_m128_v4int32, v4int32): SymbolOperator("(__m128i)", arity=1), #IdentityOperator(), 
# broadcast implemented as conversions type_strict_match(ML_SSE_m128_v4int32, ML_Int32): XmmIntrin("_mm_set1_epi32", arity = 1), type_strict_match(ML_SSE_m128_v4float32, ML_Binary32): XmmIntrin("_mm_set1_ps", arity = 1), # boolean vectors type_strict_match(v4bool, ML_SSE_m128_v4bool): SymbolOperator("(ml_bool4_t)", arity=1), # dummy implementation type_strict_match(ML_SSE_m128_v4bool, v4bool): IdentityOperator(), }, }, }, NearestInteger: { None: { lambda optree: True: { type_strict_match(ML_Int64, ML_Binary64): _mm_cvtsd_si64(_mm_set_sd(FO_Arg(0))), type_strict_match(ML_Int32, ML_Binary64): _mm_cvtsd_si32(_mm_set_sd(FO_Arg(0))), }, }, }, Negation: { None: { lambda optree: True: { # binary32 negation is in the X86_SSE_Processor type_strict_match(*(2*(ML_SSE_m128_v2float64,))): EmmIntrin("_mm_xor_pd", arity = 2)( FO_Value("_mm_set1_pd(-0.0f)", ML_SSE_m128_v2float64), FO_Arg(0) ), # Integer negation type_strict_match(*(2*(ML_SSE_m128_v4int32,))): EmmIntrin("_mm_sub_epi32", arity = 2)( FO_Value("_mm_set1_epi32(0)", ML_SSE_m128_v4int32), FO_Arg(0) ), type_strict_match(*(2*(ML_SSE_m128_v2int64,))): EmmIntrin("_mm_sub_epi64", arity = 2)( FO_Value("_mm_set1_epi64x(0)", ML_SSE_m128_v2int64), FO_Arg(0) ), }, }, }, Subtraction: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v4int32,))): EmmIntrin("_mm_sub_epi32", arity = 2), type_strict_match(*(3*(ML_SSE_m128_v2int64,))): EmmIntrin("_mm_sub_epi64", arity = 2), }, }, }, TypeCast: { None: { lambda optree: True: { # 32-bit signed, unsigned and bool version type_strict_match_list( [ML_SSE_m128_v4float32], [ML_SSE_m128_v4int32, ML_SSE_m128_v4bool, ML_SSE_m128_v4uint32]): EmmIntrin("_mm_castsi128_ps", arity=1, output_precision=ML_SSE_m128_v4float32), type_strict_match(ML_SSE_m128_v4int32, ML_SSE_m128_v4float32): EmmIntrin("_mm_castps_si128", arity = 1, output_precision = ML_SSE_m128_v4int32), type_strict_match(ML_SSE_m128_v4bool, ML_SSE_m128_v4float32): EmmIntrin("_mm_castps_si128", arity = 1, output_precision 
## generates a check function which, from a Constant vector node,
#  verifies that the constant value is uniform across every vector lane
def uniform_constant_check(optree):
    """Return True iff every lane of the Constant vector node holds the
    same value (i.e. the constant is uniform across lanes).

    Args:
        optree: a Constant node whose value is a sequence of lane values

    Returns:
        bool: True for an empty or uniform value vector, False otherwise
    """
    assert isinstance(optree, Constant)
    value_v = optree.get_value()
    if len(value_v) == 0:
        # an empty lane vector is vacuously uniform (the original
        # reduce()-based version raised IndexError on value_v[0] here)
        return True
    init_value = value_v[0]
    # all() short-circuits on the first mismatching lane, unlike the
    # previous reduce() which always scanned every lane
    return all(value == init_value for value in value_v)
SSE2 instructions # 32-bit integer negation using SSSE3 sign_epi32 instruction type_strict_match(*(2*(ML_SSE_m128_v4int32,))): TmmIntrin("_mm_sign_epi32", arity = 2)( FO_Arg(0), FO_Value("_mm_set1_epi32(-1)", ML_SSE_m128_v4int32) ), }, }, }, } sse41_c_code_generation_table = { Test: { Test.IsMaskNotAnyZero: { lambda optree: True: { type_strict_match(ML_Bool, ML_SSE_m128_v4bool): ImmIntrin("_mm_test_all_ones", arg_map={0: FO_Arg(0), 1: FO_Arg(0)}, arity=1), }, }, Test.IsMaskAllZero: { lambda optree: True: { type_strict_match(ML_Bool, ML_SSE_m128_v4bool): ImmIntrin("_mm_testz_si128", arg_map={0: FO_Arg(0), 1: FO_Arg(0)}, arity=1), }, }, }, Max: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v4int32,))): ImmIntrin("_mm_max_epi32", arity = 2), type_strict_match(*(3*(ML_SSE_m128_v4uint32,))): ImmIntrin("_mm_max_epu32", arity = 2), }, }, }, Min: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v4int32,))): ImmIntrin("_mm_min_epi32", arity = 2), type_strict_match(*(3*(ML_SSE_m128_v4uint32,))): ImmIntrin("_mm_min_epu32", arity = 2), }, }, }, Multiplication: { None: { lambda optree: True: { type_strict_match(*(3*(ML_SSE_m128_v4int32,))): _mm_mullo_epi32, }, }, },
- start_t)) if not self.it in dset.iterations: raise ValueError("it: {} is missing in dset for v_n: {}\n{}" .format(self.it, key, dset.iterations)) # saving data for iteration outfname = self.outpath + str(self.it) + '_' + key + ".h5" dfile = h5py.File(outfname, "w") if self.description is not None: dfile.create_dataset("description", data=np.string_(self.description)) print("\t Saving {}...".format(outfname)), for rl in range(len(self.grid)): gname = "reflevel=%d" % rl dfile.create_group(gname) dfile[gname].attrs.create("delta", self.grid[rl].delta) dfile[gname].attrs.create("extent", self.grid[rl].extent()) dfile[gname].attrs.create("iteration", self.it) dfile[gname].attrs.create("reflevel", rl) dfile[gname].attrs.create("time", dset.get_time(self.it)) # found = False # for entry in dset.contents.keys(): # print("\tNot found {} in {}".format(val, entry.split())) # if val in entry.split() \ # and "it={}".format(self.it) in entry.split() \ # and 'c=0' in entry.split(): # found = True # print("\tFound {} -> {}".format(val, entry)) # break # if found == False: # raise KeyError("Check for found failed.") # self.grid[rl] # print("\t\tdset.contents : {}".format(dset.iterations)) data = dset.get_reflevel_data(self.grid[rl], iteration=int(self.it), variable=val, timelevel=0, dtype=np.float32) try: data = dset.get_reflevel_data(self.grid[rl], iteration=int(self.it), variable=val, timelevel=0, dtype=np.float32) except KeyError: raise KeyError("Failed to extract data from {} file \n" "Data: rl: {} it: {} v_n: {}\n" "" .format(files[0], rl, self.it, val)) dfile[gname].create_dataset(key, data=data) dfile.close() print("done! 
(%.2f sec)" % (time.time() - start_t)) dset.close_files() gc.collect() def interpolate_save_eos_quantity(self, v_n, dset_rho, dset_temp, dset_ye, eostable): print("\t Insterpolating/saving {} ...".format(v_n)) start_t = time.time() dfile = h5py.File(self.outpath + str(self.it) + '_' + v_n + ".h5", "w") if self.description is not None: dfile.create_dataset("description", data=np.string_(self.description)) for rl in range(self.nlevels): print("\t\trl:{}".format(rl)) print("\t\t extracting rho, temp, ye...") group_rho = dset_rho["reflevel={}".format(rl)] group_temp = dset_temp["reflevel={}".format(rl)] group_ye = dset_ye["reflevel={}".format(rl)] arr_rho = np.array(group_rho["rho"]) arr_temp = np.array(group_temp["temperature"]) arr_ye = np.array(group_ye["Y_e"]) # arr_rho_ = units.conv_dens(units.cactus, units.cgs, arr_rho) # arr_temp_ = units.conv_temperature(units.cactus, units.cgs, arr_temp) # print("\t interpolating eos rl:{}".format(rl)) print("\t\t evaluating {}".format(Names.eos[v_n])) data_arr = eostable.evaluate(Names.eos[v_n], arr_rho, arr_temp, arr_ye) print("\t\t converting units for {}".format(Names.eos[v_n])) if v_n == 'eps': data_arr = ut.conv_spec_energy(ut.cgs, ut.cactus, data_arr) elif v_n == 'press': data_arr = ut.conv_press(ut.cgs, ut.cactus, data_arr) elif v_n == 'entropy': data_arr = data_arr else: raise NameError("EOS quantity: {}".format(v_n)) gname = "reflevel=%d" % rl dfile.create_group(gname) dfile[gname].attrs.create("delta", group_rho.attrs["delta"]) dfile[gname].attrs.create("extent", group_rho.attrs["extent"]) dfile[gname].attrs.create("iteration", group_rho.attrs["iteration"]) dfile[gname].attrs.create("reflevel", rl) dfile[gname].attrs.create("time", group_rho.attrs["time"]) dfile[gname].create_dataset(v_n, data=data_arr, dtype=np.float32) del arr_rho del group_temp del group_ye dfile.close() print("done! 
(%.2f sec)" % (time.time() - start_t)) gc.collect() def inter_save_eos_vars(self): # from scivis import eostable o_eos = EOSTable() o_eos.read_table(self.eos_fpath) data_rho = h5py.File(self.outpath + str(self.it) + '_' + "rho" + ".h5", "r") data_temp = h5py.File(self.outpath + str(self.it) + '_' + "temperature" + ".h5", "r") data_ye = h5py.File(self.outpath + str(self.it) + '_' + "Y_e" + ".h5", "r") for v_n in Names.eos.keys(): print("\t{}...".format(v_n)) self.interpolate_save_eos_quantity(v_n, data_rho, data_temp, data_ye, o_eos) return data_rho @staticmethod def merge_two_dicts(x, y): z = x.copy() # start with x's keys and values z.update(y) # modifies z with y's keys and values & returns None return z def load_combine_save(self): all_in_names = self.merge_two_dicts(Names.dattar, Names.eos) print("\t Combining data into the module_profile {}.h5...".format(self.it)), start_t = time.time() dfile = h5py.File(self.outpath + str(self.it) + ".h5", "w") if self.description is not None: dfile.create_dataset("description", data=np.string_(self.description)) for rl in range(self.nlevels): gname = "reflevel=%d" % rl dfile.create_group(gname) dfile[gname].attrs.create("delta", self.default_data["reflevel={}".format(rl)].attrs["delta"]) dfile[gname].attrs.create("extent", self.default_data["reflevel={}".format(rl)].attrs["extent"]) dfile[gname].attrs.create("iteration", self.default_data["reflevel={}".format(rl)].attrs["iteration"]) dfile[gname].attrs.create("reflevel", rl) dfile[gname].attrs.create("time", self.default_data["reflevel={}".format(rl)].attrs["time"]) for key, val in all_in_names.iteritems(): # loading the input h5 dfile__ = h5py.File(self.outpath + str(self.it) + '_' + key + ".h5") data = np.array(dfile__["reflevel={}".format(rl)][key]) if key in Names.out.keys(): key = Names.out[key] dfile[gname].create_dataset(key, data=data, dtype=np.float32) dfile.close() print("done! 
(%.2f sec)" % (time.time() - start_t)) class ExtractNuProfile: def __init__(self, it, output, inpath, outpath, def_nu_v_n ="thc_M0_abs_energy", overwrite=False): self.it = it self.output = output self.inpath = inpath self.outpath = outpath self.description = None self.overwrite = overwrite outfname = self.outpath + str(self.it) + "nu.h5" if (not os.path.isfile(outfname)) or \ (os.path.isfile(outfname) and self.overwrite): # get reflevel for future use default_dset = h5.dataset(FileWork.get_filelist(def_nu_v_n, self.inpath, self.output)) reflevel = default_dset.get_reflevel() nrad = reflevel.n[0] ntheta = int(round(sqrt(float(reflevel.n[1] / 2)))) nphi = 2 * ntheta if ntheta * nphi != reflevel.n[1]: raise ValueError("The leakage grid is inconsistent") for key, val in Names.nu_dattar.iteritems(): print("\tProcessing key'{}' val:'{}'".format(key, val)) files = FileWork.get_filelist(key, self.inpath, self.output) assert len(files) dset = h5.dataset(files) data = dset.get_reflevel_data(reflevel=reflevel, iteration=int(self.it), variable=val, timelevel=0, dtype=np.float32) # print(data) # output fname = self.outpath + str(self.it) + '_' + key + ".h5" dfile = h5py.File(fname, "w") # dfile.attrs.create("delta", reflevel.delta) # dfile.attrs.create("extent", reflevel.extent()) dfile.attrs.create("iteration", self.it) dfile.attrs.create("time", default_dset.get_time(self.it)) dfile.attrs.create("nrad", nrad) dfile.attrs.create("ntheta", ntheta) dfile.attrs.create("nphi", nphi) print(data.shape) # print('delta: {}'.format(reflevel.delta)) # print('extent:{}'.format(reflevel.extent())) # print('iteration:{}'.format(self.it)) # print('time:{}'.format(dset.get_time(self.it))) # print('nrad:{}'.format(nrad)) # print('ntheta:{}'.format(ntheta)) # print('nphi:{}'.format(nphi)) # exit(1) dfile.create_dataset(key, data=data) dset.close_files() dfile.close() print("\tFinished key'{}' val:'{}'".format(key, val)) # print("done! 
def find_nearest_index(array, value):
    """Return the index of the element of `array` closest to `value`."""
    idx = (np.abs(array - value)).argmin()
    return idx

def get_output_for_time(time, output_it_time_dic, it_time):
    """Locate the output-xxxx directory that contains the iteration whose
    time is closest to the requested `time`.

    Args:
        time (float): target time in seconds
        output_it_time_dic (dict): {output_dir: (it, time) 2D array}
        it_time: global (it, time) 2D array spanning all outputs

    Returns:
        str: name of the matching output directory

    Raises:
        ValueError: if `time` lies outside the simulated interval, or if
            no output contains the closest iteration
    """
    it_time = np.array(it_time)
    if time > it_time[:,1].max():
        raise ValueError("time {:.3f}s beyond the simulation length ({:.3f}s)".format(time, it_time[:,1].max()))
    if time < it_time[:,1].min():
        raise ValueError("time {:.3f}s is too small, minimum is ({:.3f}s)".format(time, it_time[:,1].min()))
    closest_iteration = it_time[find_nearest_index(it_time[:,1], time), 0]
    output = ''
    # loop variable renamed: the original shadowed the `it_time` argument;
    # .items() instead of .iteritems() works on both Python 2 and 3
    for output_dir, it_time_arr in output_it_time_dic.items():
        if closest_iteration in it_time_arr[:, 0]:
            output = output_dir
    if output == '':
        raise ValueError("output was not found")
    print("\t required time:{} found in {} output".format(time, output))
    return output
def set_it_output_map(inpath, outpath, it_time_fname = "dens.norm1.asc"):
    """
    Loads set of it_time_files that have '1:it 2:time ...' structure to get
    a map of what output-xxxx contains what iteration (and time)

    Args:
        inpath (str): simulation root containing the output-xxxx directories
        outpath (str): results directory (must already exist)
        it_time_fname (str): name of the per-output it/time table file

    Returns:
        (dict, ndarray): {output_dir: (it, time) array} mapping, and a
        deduplicated (it, time) table stacked over all outputs

    Raises:
        AssertionError: if inpath/outpath are missing or no table file found
        NameError: if a table path lacks an output-xxxx component
        ValueError: if deduplication leaves it/time lists of unequal length
    """
    assert os.path.isdir(inpath)
    assert os.path.isdir(outpath)
    it_time_files = glob(inpath + "output-*" + "/data/" + it_time_fname)
    assert len(it_time_files) > 0
    print('-' * 25 + 'LOADING it list ({})'
          .format(it_time_fname) + '-' * 25)
    print("\t loading from: {}, {} it_time_files".format(inpath, len(it_time_files)))
    output_it_time_dic = {}
    pieces = []  # per-output (it, time) arrays, stacked once at the end
    for fpath in it_time_files:  # renamed: `file` shadows the py2 builtin
        o_dir = ''
        for o_part in fpath.split('/'):
            if 'output-' in o_part:  # idiomatic `in` instead of __contains__
                o_dir = o_part
        if o_dir == '':
            raise NameError("Did not find output-xxxx in {}".format(fpath.split('/')))
        it_time_i = np.loadtxt(fpath, usecols=(0, 1))
        it_time_i[:, 1] *= 0.004925794970773136 * 1e-3  # time is seconds
        output_it_time_dic[o_dir] = it_time_i
        pieces.append(it_time_i)
    # single vstack replaces the original np.zeros(2) / vstack / delete dance
    it_time = np.vstack(pieces)
    print('outputs:{} its:{} [{}->{}] time:[{}->{:.3f}]'.format(len(it_time_files),
                                                                len(it_time[:, 0]),
                                                                int(it_time[:, 0].min()),
                                                                int(it_time[:, 0].max()),
                                                                float(it_time[:, 1].min()),
                                                                float(it_time[:, 1].max())))
    if len(it_time[:, 0]) != len(set(it_time[:, 0])):
        print("Warning: repetitions found in the loaded iterations")
        iterations = np.unique(it_time[:, 0])
        timestpes = np.unique(it_time[:, 1])
        if not len(iterations) == len(timestpes):
            raise ValueError("Failed attmept to remove repetitions from "
                             "\t it and time lists. Wrong lengths: {} {}"
                             .format(len(iterations), len(timestpes)))
    else:
        print("\t repetitions are not found in loaded it list, continue nurmally")
        iterations = np.unique(it_time[:, 0])
        timestpes = np.unique(it_time[:, 1])
    print('-' * 30 + '------DONE-----' + '-' * 30)
    return output_it_time_dic, np.vstack((iterations, timestpes)).T
# -*- coding: utf-8 -*- import logging import numpy as np import torch from torch import nn from ..layers import TextEncoder, ImageEncoder, VectorDecoder from ..layers import FeatureEncoder, MaxMargin, FF from ..layers import BiLSTMp from ..layers import SimpleGRUDecoder, ConditionalDecoder, ZSpaceAtt from ..utils.misc import get_n_params from ..vocabulary import Vocabulary from ..utils.topology import Topology from ..utils.ml_metrics import Loss from ..utils.device import DEVICE from ..utils.misc import pbar from ..datasets import MultimodalDataset from ..metrics import Metric from ..utils.nn import mean_pool from ..utils.scheduler import Scheduler logger = logging.getLogger('nmtpytorch') class MultitaskAtt(nn.Module): supports_beam_search = True def set_defaults(self): self.defaults = { # ------------- Model generic options 'direction': None, # Network directionality, i.e. en->de 'max_len': 80, # Reject sentences where 'bucket_by' length > 80 'bucket_by': None, # A key like 'en' to define w.r.t which dataset # the batches will be sorted 'bucket_order': None, # None, ascending or descending for curriculum learning 'val_tasks': None, # dictionary of {id:direction} pairs for validation (None|{}) # ------------- Options for text encoder (bidir RNN) 'te_emb_dim': 128, # Source and target embedding sizes 'te_enc_dim': 128, # Encoder hidden size 'te_enc_type': 'gru', # Encoder type (gru|lstm) 'te_dropout_emb': 0, # Simple dropout to source embeddings 'te_dropout_ctx': 0, # Simple dropout to source encodings 'te_dropout_enc': 0, # Intra-encoder dropout if n_encoders > 1 'te_n_encoders': 1, # Number of stacked encoders 'te_emb_maxnorm': None, # Normalize embeddings l2 norm to 1 'te_emb_gradscale': False, # Scale embedding gradients w.r.t. 
batch frequency # ------------- Options for decoder with attention 'td_type': 'simple', # Decoder type (simple/conditional) 'td_emb_dim': 128, # Input size 'td_dec_dim': 128, # Decoder hidden size 'td_tied_emb': False, # Share decoder embeddings 'td_dec_init': 'mean_ctx', # How to initialize decoder (zero/mean_ctx/feats) 'td_att_type': 'mlp', # Attention type (mlp|dot) 'td_att_temp': 1., # Attention temperature 'td_att_activ': 'tanh', # Attention non-linearity (all torch nonlins) 'td_att_transform_ctx': True, # Transform annotations before attention 'td_att_mlp_bias': False, # Enables bias in attention mechanism 'td_att_bottleneck': 'ctx', # Bottleneck dimensionality (ctx|hid) 'td_dropout_out': 0, # Simple dropout to decoder output 'td_emb_maxnorm': None, # Normalize embeddings l2 norm to 1 'td_emb_gradscale': False, # Scale embedding gradients w.r.t. batch frequency # ------------- Additional options for conditional decoder 'td_dec_type': 'gru', # Decoder type (gru|lstm) 'td_dec_init_size': None, # feature vector dimensionality for dec_init == 'feats' 'td_dec_init_activ': 'tanh', # Decoder initialization activation func 'td_dropout': 0, # Generic dropout overall the architecture # ------------- Options for image CNN encoder 'ie_cnn_type': 'resnet50', # A variant of VGG or ResNet 'ie_cnn_pretrained': True, # Should we use pretrained imagenet weights 'ie_cnn_layer': 'res5c_relu', # From where to extract features 'ie_dropout_img': 0., # a 2d dropout over conv features 'ie_pool': None, # ('Avg|Max', kernel_size, stride_size) 'ie_cnn_finetune': None, # Should we finetune part or all of CNN 'ie_l2_norm': False, # L2 normalize features # NOTE those options are not provided to create the image encoder but found initialized in amnmt.py #'ie_l2_norm_dim': -1, # Which dimension to L2 normalize #'ie_resize': 256, # resize width, height for images #'ie_crop': 224, # center crop size after resize # ------------- Options for video encoder 've_dim': 2048, # Video frame input size 
've_proj_size': 512, # Video frame embedding size 've_enc_dim': 256, # Encoder hidden size 've_enc_type': 'gru', # Encoder type (gru|lstm) 've_dropout_emb': 0, # Simple dropout to source embeddings 've_dropout_ctx': 0, # Simple dropout to source encodings 've_dropout_enc': 0, # Intra-encoder dropout if n_encoders > 1 've_n_encoders': 1, # Number of stacked encoders 've_bidirectional': True, # Enable bidirectional encoder # ------------- Options for video decoder 'vd_emb_dim': 256, # Source and target embedding sizes 'vd_vid_dim': 2048, # Video frame input size 'vd_proj_size': 512, # Video frame embedding size 'vd_emb_maxnorm': None, # Normalize embeddings l2 norm to 1 'vd_emb_gradscale': False, # Scale embedding gradients w.r.t. batch frequency 'vd_dec_dim': 512, # Decoder hidden size 'vd_dec_type': 'gru', # Decoder type (gru|lstm) 'vd_dec_init': 'mean_ctx', # How to initialize decoder (zero/mean_ctx/feats) 'vd_dec_init_size': None, # feature vector dimensionality for # dec_init == 'feats' 'vd_att_type': 'mlp', # Attention type (mlp|dot) 'vd_att_temp': 1., # Attention temperature 'vd_att_activ': 'tanh', # Attention non-linearity (all torch nonlins) 'vd_att_mlp_bias': False, # Enables bias in attention mechanism 'vd_att_bottleneck': 'ctx', # Bottleneck dimensionality (ctx|hid) 'vd_att_transform_ctx': True, # Transform annotations before attention 'vd_bidirectional': True, # Whether the encoder is bidirectional or not 'vd_dropout_emb': 0, # Simple dropout to source embeddings 'vd_dropout_out': 0, # Simple dropout to decoder output 'vd_loss_type': 'SmoothL1', # Loss type (MSE_loss | SmoothL1) # ------------- Options for BiLSTMp speech encoder 'se_feat_dim': 43, # Speech features dimensionality 'se_enc_dim': 256, # Encoder hidden size 'se_dropout': 0, # Generic dropout overall the architecture 'se_enc_layers': '1_1_2_2_1_1', # Subsampling & layer architecture 'se_proj_dim': 320, # Intra-LSTM projection layer dim # ------------- Options for the shared z-space 'z_size': 
256, # size of hidden state of z-space 'z_len': 10, # how many latent states to produce 'z_transform': None, # how to transform input contexts (None|linear|tanh|sigmoid) 'z_in_size': 256, # input size of the ZSpace layer 'z_merge': 'sum', # How to merge the attended vector to feed the ZSpace layer # ------------- Options for the scheduler 'schedule_type_enc': None, # drop encoder(s) randomly (None|random|random_1) 'schedule_type_dec': None, # drop decoder(s) randomly (None|random|random_1) 'droptask_prob': 1, # probability of dropping encoder(s)/decoder(s) # (only used for non-None schedule_type_enc/dec) 'droptask_e_delay': None, # number of completed epochs before droptask 'manual_schedule': None, # dictionary of {id:direction@num_batches} pairs to cycle thru (None|{}) 'loss_scaling': None, # dictionary with same keys as manual_schedule for loss scaling constants # ------------- Options for mutual projection networks 'use_z': True, # whether to use z-space or decode directly from encoders 'use_mpn': False, # whether to use auxiliary max-margin loss objective 'use_decmpn': False, # use auxiliary max-margin objective in the decoder 'pooling_type': 'mean', # pooling method to be used before max-margin layer (mean) 'margin': 0.1, # max-margin layer "alpha" 'max_violation': False, # max-margin hinge type (True: max-of-hinges, False: sum-of-hinges) 'sim_function': 'cosine' # max-margin similarity function } def __init__(self, opts): super().__init__() # opts -> config file sections {.model, .data, .vocabulary, .train} self.opts = opts # Langs, Vocabulary and Vocab Length objects self.vocabs = {} # all vocabularies self.slangs = [] # source languages IDs self.svocabs = {} # source vocabs self.n_svocabs = {} # sizes of source vocabs self.tlangs = [] # target languages IDs self.tvocabs = {} # target vocabs self.n_tvocabs = {} # sizes of sources vocabs self.val_refs = {} self.ctx_sizes = {} # Each auxiliary loss should be stored inside this dictionary # in order to be taken 
into account by the mainloop for multi-tasking self.aux_loss = {} # Setup options self.opts.model = self.set_model_options(opts.model) # Parse topology & languages self.topology = Topology(self.opts.model['direction']) # Load vocabularies here for name, fname in self.opts.vocabulary.items(): self.vocabs[name] = Vocabulary(fname, name=name) # Inherently non multi-lingual aware <-- Let's change that! slangs = self.topology.get_src_langs() tlangs = self.topology.get_trg_langs() for sl in slangs: self.slangs.append(sl) self.svocabs[sl] = self.vocabs[sl] self.n_svocabs[sl] = len(self.svocabs[sl]) for tl in tlangs: self.tlangs.append(tl) self.tvocabs[tl] = self.vocabs[tl] self.n_tvocabs[tl] = len(self.tvocabs[tl]) # NOTE: for language-specific evaluation metrics (e.g. BLEU), # this will be overwritten by the 0th topology in 'val_tasks' in the conf file self.val_refs[tl] = self.opts.data['val_set'][tl] # Textual context size is always equal to enc_dim * 2 since # it is the concatenation of forward and backward hidden states if 'te_enc_dim' in self.opts.model: for sl in slangs: self.ctx_sizes[str(sl)] = self.opts.model['te_enc_dim'] * 2 # Check tying option if self.opts.model['td_tied_emb'] not in [False, '2way']: raise RuntimeError( "'{}' not recognized for td_tied_emb.".format(self.opts.model['td_tied_emb'])) self.td_type = self.opts.model['td_type'] # FIXME: this small hack because of string mismatch between Simple and Cond decoder # FIXME: this should be changed in cond_decoder.py if self.td_type == 'conditional' and self.opts.model['td_dec_init'] == 'mean': self.opts.model['td_dec_init'] = 'mean_ctx' # TODO: VISION generic init # TODO: SPEECH generic init # MPN options init self.use_z = self.opts.model['use_z'] self.use_mpn = self.opts.model['use_mpn'] self.use_decmpn = self.opts.model['use_decmpn'] self.pooling_type = self.opts.model['pooling_type'] ############################ # Create the max-margin loss ############################ if self.use_mpn or 
self.use_decmpn: assert len(self.topology.srcs) >= 2, \ "For MPN, there must be at least two different encoders defined in the overall topology." self.mm_loss = MaxMargin( margin=self.opts.model['margin'], # sim_function=self.opts.model['sim_function'], max_violation=self.opts.model['max_violation']) # Latent space options init self.z_size = self.opts.model['z_size'] self.z_len = self.opts.model['z_len'] self.z_transform = self.opts.model['z_transform'] self.z_in_size =
<filename>pde/tools/parameters.py
"""
Infrastructure for managing classes with parameters

One aim is to allow easy management of inheritance of parameters.

.. autosummary::
   :nosignatures:

   Parameter
   DeprecatedParameter
   HideParameter
   Parameterized
   get_all_parameters

.. codeauthor:: <NAME> <<EMAIL>>
"""

import logging
from typing import Any, Dict, Sequence, Union

import numpy as np

from . import output
from .misc import hybridmethod, import_class


class Parameter:
    """class representing a single parameter"""

    def __init__(
        self,
        name: str,
        default_value=None,
        cls=object,
        description: str = "",
        hidden: bool = False,
        extra: Dict[str, Any] = None,
    ):
        """initialize a parameter

        Args:
            name (str): The name of the parameter
            default_value: The default value
            cls: The type of the parameter, which is used for conversion
            description (str): A string describing the impact of this
                parameter. This description appears in the parameter help
            hidden (bool): Whether the parameter is hidden in the description
                summary
            extra (dict): Extra arguments that are stored with the parameter
        """
        self.name = name
        self.default_value = default_value
        self.cls = cls
        self.description = description
        self.hidden = hidden
        # avoid a shared mutable default by creating a fresh dict per instance
        self.extra = {} if extra is None else extra

        if cls is not object:
            # check whether the default value is of the correct type
            # NOTE(review): cls(default_value) may itself raise if the default
            # is not convertible at all — only inexact round-trips are logged
            converted_value = cls(default_value)
            if isinstance(converted_value, np.ndarray):
                # element-wise comparison; arrays cannot be compared with ==
                valid_default = np.allclose(converted_value, default_value)
            else:
                valid_default = converted_value == default_value
            if not valid_default:
                # warn (not raise): a lossy default is suspicious but allowed
                logging.warning(
                    "Default value `%s` does not seem to be of type `%s`",
                    name,
                    cls.__name__,
                )

    def __repr__(self) -> str:
        return (
            f'{self.__class__.__name__}(name="{self.name}", default_value='
            f'"{self.default_value}", cls="{self.cls.__name__}", '
            f'description="{self.description}", hidden={self.hidden})'
        )

    __str__ = __repr__

    def __getstate__(self):
        # replace the object class by its class path
        # NOTE(review): object.__module__ is always "builtins", so the stored
        # path is only correct for builtin types (float, int, ...); confirm
        # this is intended for non-builtin `cls` values
        return {
            "name": str(self.name),
            "default_value": self.convert(),
            "cls": object.__module__ + "." + self.cls.__name__,
            "description": self.description,
            "hidden": self.hidden,
            "extra": self.extra,
        }

    def __setstate__(self, state):
        # restore the object from the class path
        state["cls"] = import_class(state["cls"])
        # restore the state
        self.__dict__.update(state)

    def convert(self, value=None):
        """converts a `value` into the correct type for this parameter. If
        `value` is not given, the default value is converted.

        Note that this does not make a copy of the values, which could lead to
        unexpected effects where the default value is changed by an instance.

        Args:
            value: The value to convert

        Returns:
            The converted value, which is of type `self.cls`
        """
        if value is None:
            value = self.default_value

        if self.cls is object:
            # no type was specified, so the value is passed through unchanged
            return value
        else:
            try:
                return self.cls(value)
            except ValueError:
                # only ValueError is translated into a more informative
                # message; a TypeError from cls(value) propagates unchanged
                raise ValueError(
                    f"Could not convert {value!r} to {self.cls.__name__} for parameter "
                    f"'{self.name}'"
                )


class DeprecatedParameter(Parameter):
    """a parameter that can still be used normally but is deprecated"""

    pass


class HideParameter:
    """a helper class that allows hiding parameters of the parent classes"""

    def __init__(self, name: str):
        """
        Args:
            name (str): The name of the parameter
        """
        self.name = name


# type alias for the `parameters_default` attribute of Parameterized classes
ParameterListType = Sequence[Union[Parameter, HideParameter]]


class Parameterized:
    """a mixin that manages the parameters of a class"""

    parameters_default: ParameterListType = []
    # registry of all subclasses by name; __init_subclass__ stores the class
    # object itself, so the values are classes, not instances as the
    # annotation suggests — NOTE(review): annotation should be Dict[str, type]
    _subclasses: Dict[str, "Parameterized"] = {}

    def __init__(self, parameters: Dict[str, Any] = None):
        """initialize the parameters of the object

        Args:
            parameters (dict):
                A dictionary of parameters to change the defaults. The allowed
                parameters can be obtained from
                :meth:`~Parameterized.get_parameters` or displayed by calling
                :meth:`~Parameterized.show_parameters`.
        """
        # set logger if this has not happened, yet
        # (hasattr guards make repeated __init__ calls, e.g. in cooperative
        # multiple inheritance, not overwrite already-initialized state)
        if not hasattr(self, "_logger"):
            self._logger = logging.getLogger(self.__class__.__name__)

        # set parameters if they have not been initialized, yet
        if not hasattr(self, "parameters"):
            self.parameters = self._parse_parameters(
                parameters, include_deprecated=True
            )

    def __init_subclass__(cls, **kwargs):  # @NoSelf
        """register all subclasses to reconstruct them later"""
        # normalize the parameters_default attribute
        if hasattr(cls, "parameters_default") and isinstance(
            cls.parameters_default, dict
        ):
            # default parameters are given as a dictionary
            cls.parameters_default = [
                Parameter(*args) for args in cls.parameters_default.items()
            ]

        # register the subclasses
        super().__init_subclass__(**kwargs)
        cls._subclasses[cls.__name__] = cls

    @classmethod
    def get_parameters(
        cls,
        include_hidden: bool = False,
        include_deprecated: bool = False,
        sort: bool = True,
    ) -> Dict[str, Parameter]:
        """return a dictionary of parameters that the class supports

        Args:
            include_hidden (bool): Include hidden parameters
            include_deprecated (bool): Include deprecated parameters
            sort (bool): Return ordered dictionary with sorted keys

        Returns:
            dict: a dictionary of instance of :class:`Parameter` with their
            names as keys.
        """
        # collect the parameters from the class hierarchy
        # NOTE: the loop variable shadows the classmethod's `cls` argument
        # from here on; the MRO is walked base-first so subclasses override
        parameters: Dict[str, Parameter] = {}
        for cls in reversed(cls.__mro__):
            if hasattr(cls, "parameters_default"):
                for p in cls.parameters_default:
                    if isinstance(p, HideParameter):
                        if include_hidden:
                            parameters[p.name].hidden = True
                        else:
                            # NOTE(review): raises KeyError if no ancestor
                            # actually defined the parameter being hidden
                            del parameters[p.name]

                    else:
                        parameters[p.name] = p

        # filter parameters based on hidden and deprecated flags
        def show(p: Parameter) -> bool:
            """helper function to decide whether parameter will be shown"""
            # show based on hidden flag?
            show1 = include_hidden or not p.hidden
            # show based on deprecated flag?
            show2 = include_deprecated or not isinstance(p, DeprecatedParameter)
            return show1 and show2

        # filter parameters based on `show`
        result = {
            name: parameter for name, parameter in parameters.items() if show(parameter)
        }

        if sort:
            result = dict(sorted(result.items()))
        return result

    @classmethod
    def _parse_parameters(
        cls,
        parameters: Dict[str, Any] = None,
        check_validity: bool = True,
        allow_hidden: bool = True,
        include_deprecated: bool = False,
    ) -> Dict[str, Any]:
        """parse parameters

        Args:
            parameters (dict):
                A dictionary of parameters that will be parsed.
            check_validity (bool):
                Determines whether a `ValueError` is raised if there are keys in
                parameters that are not in the defaults. If `False`, additional
                items are simply stored in `self.parameters`
            allow_hidden (bool):
                Allow setting hidden parameters
            include_deprecated (bool):
                Include deprecated parameters
        """
        if parameters is None:
            parameters = {}
        else:
            parameters = parameters.copy()  # do not modify the original

        # obtain all possible parameters
        param_objs = cls.get_parameters(
            include_hidden=allow_hidden, include_deprecated=include_deprecated
        )

        # initialize parameters with default ones from all parent classes
        result: Dict[str, Any] = {}
        for name, param_obj in param_objs.items():
            if not allow_hidden and param_obj.hidden:
                continue  # skip hidden parameters
            # take value from parameters or set default value
            # (pop() so that anything left in `parameters` afterwards is
            # an unknown key, checked below)
            result[name] = param_obj.convert(parameters.pop(name, None))

        # update parameters with the supplied ones
        if check_validity and parameters:
            raise ValueError(
                f"Parameters `{sorted(parameters.keys())}` were provided for an "
                f"instance but are not defined for the class `{cls.__name__}`"
            )
        else:
            result.update(parameters)  # add remaining parameters

        return result

    def get_parameter_default(self, name: str):
        """return the default value for the parameter with `name`

        Args:
            name (str): The parameter name
        """
        # walk the MRO derived-first so the most specific default wins
        for cls in self.__class__.__mro__:
            if hasattr(cls, "parameters_default"):
                for p in cls.parameters_default:
                    if isinstance(p, Parameter) and p.name == name:
                        return p.default_value

        raise KeyError(f"Parameter `{name}` is not defined")

    @classmethod
    def _show_parameters(
        cls,
        description: bool = None,
        sort: bool = False,
        show_hidden: bool = False,
        show_deprecated: bool = False,
        parameter_values: Dict[str, Any] = None,
    ):
        """private method showing all parameters in human readable format

        Args:
            description (bool):
                Flag determining whether the parameter description is shown. The
                default is to show the description only when we are in a jupyter
                notebook environment.
            sort (bool):
                Flag determining whether the parameters are sorted
            show_hidden (bool):
                Flag determining whether hidden parameters are shown
            show_deprecated (bool):
                Flag determining whether deprecated parameters are shown
            parameter_values (dict):
                A dictionary with values to show. Parameters not in this
                dictionary are shown with their default value.

        All flags default to `False`.
        """
        # determine whether we are in a jupyter notebook and can return HTML
        in_notebook = output.in_jupyter_notebook()
        if description is None:
            description = in_notebook  # show only in notebook by default

        # set the templates for displaying the data
        if in_notebook:
            writer: output.OutputBase = output.JupyterOutput(
                '<style type="text/css">dl.py-pde_params dd {padding-left:2em}</style>'
                '<dl class="py-pde_params">',
                "</dl>",
            )
            # templates for HTML output
            template = "<dt>{name} = {value!r}</dt>"
            if description:
                template += "<dd>{description}</dd>"
            template_object = template
        else:
            # template for normal output
            writer = output.BasicOutput()
            template = "{name}: {type} = {value!r}"
            template_object = "{name} = {value!r}"
            if description:
                template += " ({description})"
                template_object += " ({description})"

        # iterate over all parameters
        params = cls.get_parameters(
            include_hidden=show_hidden, include_deprecated=show_deprecated, sort=sort
        )
        for param in params.values():
            # initialize the data to show
            data = {
                "name": param.name,
                "type": param.cls.__name__,
                "description": param.description,
            }

            # determine the value to show
            if parameter_values is None:
                data["value"] = param.default_value
            else:
                data["value"] = parameter_values[param.name]

            # print the data to stdout
            # (untyped parameters use the template without the type field)
            if param.cls is object:
                writer(template_object.format(**data))
            else:
                writer(template.format(**data))

        writer.show()

    @hybridmethod
    def show_parameters(  # @NoSelf
        cls,
        description: bool = None,  # @NoSelf
        sort: bool = False,
        show_hidden: bool = False,
        show_deprecated: bool = False,
    ):
        """show all parameters in human readable format

        Args:
            description (bool): Flag
"""peewee-validates is a validator module designed to work with the Peewee ORM.""" import datetime import re from decimal import Decimal from decimal import InvalidOperation from inspect import isgeneratorfunction from inspect import isgenerator from collections import Iterable import peewee from dateutil.parser import parse as dateutil_parse try: from playhouse.fields import ManyToManyField except ImportError: from peewee import ManyToManyField __version__ = '1.0.8' __all__ = [ 'Field', 'Validator', 'ModelValidator', 'ValidationError', 'StringField', 'FloatField', 'IntegerField', 'DecimalField', 'DateField', 'TimeField', 'DateTimeField', 'BooleanField', 'ModelChoiceField', 'ManyModelChoiceField', ] PEEWEE3 = peewee.__version__ >= '3.0.0' DEFAULT_MESSAGES = { 'required': 'This field is required.', 'empty': 'This field must not be blank.', 'one_of': 'Must be one of the choices: {choices}.', 'none_of': 'Must not be one of the choices: {choices}.', 'equal': 'Must be equal to {other}.', 'regexp': 'Must match the pattern {pattern}.', 'matches': 'Must match the field {other}.', 'email': 'Must be a valid email address.', 'function': 'Failed validation for {function}.', 'length_high': 'Must be at most {high} characters.', 'length_low': 'Must be at least {low} characters.', 'length_between': 'Must be between {low} and {high} characters.', 'length_equal': 'Must be exactly {equal} characters.', 'range_high': 'Must be at most {high}.', 'range_low': 'Must be at least {low}.', 'range_between': 'Must be between {low} and {high}.', 'coerce_decimal': 'Must be a valid decimal.', 'coerce_date': 'Must be a valid date.', 'coerce_time': 'Must be a valid time.', 'coerce_datetime': 'Must be a valid datetime.', 'coerce_float': 'Must be a valid float.', 'coerce_int': 'Must be a valid integer.', 'related': 'Unable to find object with {field} = {values}.', 'list': 'Must be a list of values', 'unique': 'Must be a unique value.', 'index': 'Fields must be unique together.', } class 
ValidationError(Exception): """An exception class that should be raised when a validation error occurs on data.""" def __init__(self, key, *args, **kwargs): self.key = key self.kwargs = kwargs super().__init__(*args) def validate_required(): """ Validate that a field is present in the data. :raises: ``ValidationError('required')`` """ def required_validator(field, data): if field.value is None: raise ValidationError('required') return required_validator def validate_not_empty(): """ Validate that a field is not empty (blank string). :raises: ``ValidationError('empty')`` """ def empty_validator(field, data): if isinstance(field.value, str) and not field.value.strip(): raise ValidationError('empty') return empty_validator def validate_length(low=None, high=None, equal=None): """ Validate the length of a field with either low, high, or equal. Should work with anything that supports len(). :param low: Smallest length required. :param high: Longest length required. :param equal: Exact length required. :raises: ``ValidationError('length_low')`` :raises: ``ValidationError('length_high')`` :raises: ``ValidationError('length_between')`` :raises: ``ValidationError('length_equal')`` """ def length_validator(field, data): if field.value is None: return if equal is not None and len(field.value) != equal: raise ValidationError('length_equal', equal=equal) if low is not None and len(field.value) < low: key = 'length_low' if high is None else 'length_between' raise ValidationError(key, low=low, high=high) if high is not None and len(field.value) > high: key = 'length_high' if low is None else 'length_between' raise ValidationError(key, low=low, high=high) return length_validator def validate_one_of(values): """ Validate that a field is in one of the given values. :param values: Iterable of valid values. 
:raises: ``ValidationError('one_of')`` """ def one_of_validator(field, data): if field.value is None: return options = values if callable(options): options = options() if field.value not in options: raise ValidationError('one_of', choices=', '.join(map(str, options))) return one_of_validator def validate_none_of(values): """ Validate that a field is not in one of the given values. :param values: Iterable of invalid values. :raises: ``ValidationError('none_of')`` """ def none_of_validator(field, data): options = values if callable(options): options = options() if field.value in options: raise ValidationError('none_of', choices=str.join(', ', options)) return none_of_validator def validate_range(low=None, high=None): """ Validate the range of a field with either low, high, or equal. Should work with anything that supports '>' and '<' operators. :param low: Smallest value required. :param high: Longest value required. :raises: ``ValidationError('range_low')`` :raises: ``ValidationError('range_high')`` :raises: ``ValidationError('range_between')`` """ def range_validator(field, data): if field.value is None: return if low is not None and field.value < low: key = 'range_low' if high is None else 'range_between' raise ValidationError(key, low=low, high=high) if high is not None and field.value > high: key = 'range_high' if high is None else 'range_between' raise ValidationError(key, low=low, high=high) return range_validator def validate_equal(value): """ Validate the field value is equal to the given value. Should work with anything that supports '==' operator. :param value: Value to compare. :raises: ``ValidationError('equal')`` """ def equal_validator(field, data): if field.value is None: return if not (field.value == value): raise ValidationError('equal', other=value) return equal_validator def validate_matches(other): """ Validate the field value is equal to another field in the data. Should work with anything that supports '==' operator. 
:param value: Field key to compare. :raises: ``ValidationError('matches')`` """ def matches_validator(field, data): if field.value is None: return if not (field.value == data.get(other)): raise ValidationError('matches', other=other) return matches_validator def validate_regexp(pattern, flags=0): """ Validate the field matches the given regular expression. Should work with anything that supports '==' operator. :param pattern: Regular expresion to match. String or regular expression instance. :param pattern: Flags for the regular expression. :raises: ``ValidationError('equal')`` """ regex = re.compile(pattern, flags) if isinstance(pattern, str) else pattern def regexp_validator(field, data): if field.value is None: return if regex.match(str(field.value)) is None: raise ValidationError('regexp', pattern=pattern) return regexp_validator def validate_function(method, **kwargs): """ Validate the field matches the result of calling the given method. Example:: def myfunc(value, name): return value == name validator = validate_function(myfunc, name='tim') Essentially creates a validator that only accepts the name 'tim'. :param method: Method to call. :param kwargs: Additional keyword arguments passed to the method. :raises: ``ValidationError('function')`` """ def function_validator(field, data): if field.value is None: return if not method(field.value, **kwargs): raise ValidationError('function', function=method.__name__) return function_validator def validate_email(): """ Validate the field is a valid email address. 
:raises: ``ValidationError('email')`` """ user_regex = re.compile( r"(^[-!#$%&'*+/=?^`{}|~\w]+(\.[-!#$%&'*+/=?^`{}|~\w]+)*$" r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]' r'|\\[\001-\011\013\014\016-\177])*"$)', re.IGNORECASE | re.UNICODE) domain_regex = re.compile( r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+' r'(?:[A-Z]{2,6}|[A-Z0-9-]{2,})$' r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)' r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE | re.UNICODE) domain_whitelist = ('localhost',) def email_validator(field, data): if field.value is None: return value = str(field.value) if '@' not in value: raise ValidationError('email') user_part, domain_part = value.rsplit('@', 1) if not user_regex.match(user_part): raise ValidationError('email') if domain_part in domain_whitelist: return if not domain_regex.match(domain_part): raise ValidationError('email') return email_validator def validate_model_unique(lookup_field, queryset, pk_field=None, pk_value=None): """ Validate the field is a unique, given a queryset and lookup_field. Example:: validator = validate_model_unique(User.email, User.select()) Creates a validator that can validate the uniqueness of an email address. :param lookup_field: Peewee model field that should be used for checking existing values. :param queryset: Queryset to use for lookup. :param pk_field: Field instance to use when excluding existing instance. :param pk_value: Field value to use when excluding existing instance. :raises: ``ValidationError('unique')`` """ def unique_validator(field, data): # If we have a PK, ignore it because it represents the current record. query = queryset.where(lookup_field == field.value) if pk_field and pk_value: query = query.where(~(pk_field == pk_value)) if query.count(): raise ValidationError('unique') return unique_validator def coerce_single_instance(lookup_field, value): """ Convert from whatever value is given to a scalar value for lookup_field. 
If value is a dict, then lookup_field.name is used to get the value from the dict. Example: lookup_field.name = 'id' value = {'id': 123, 'name': 'tim'} returns = 123 If value is a model, then lookup_field.name is extracted from the model. Example: lookup_field.name = 'id' value = <User id=123 name='tim'> returns = 123 Otherwise the value is returned as-is. :param lookup_field: Peewee model field used for getting name from value. :param value: Some kind of value (usually a dict, Model instance, or scalar). """ if isinstance(value, dict): return value.get(lookup_field.name) if isinstance(value, peewee.Model): return getattr(value, lookup_field.name) return value def isiterable_notstring(value): """ Returns True if the value is iterable but not a string. Otherwise returns False. :param value: Value to check. """ if isinstance(value, str): return False return isinstance(value, Iterable) or isgeneratorfunction(value) or isgenerator(value) class Field: """ Base class from which all other fields should be derived. :param default: Is this field required? :param default: Default value to be used if no incoming value is provided. :param validators: List of validator functions to run. """ __slots__ = ('value', 'required', 'default', 'validators') def __init__(self, required=False, default=None, validators=None): self.default = default self.value = None self.validators = validators or [] if required: self.validators.append(validate_required()) def coerce(self, value): """ Coerce the given value into some type. By default a no-op. Used by sub-classes to enforce specific types. If there is a problem with the
< len(target)-1: self.bytecodes.append(bytecode.dup()) self.bytecodes.append(bytecode.pushbyte(idx)) self.bytecodes.append(bytecode.getproperty( abc.MultinameL(abc.NamespaceSetInfo(abc.NSPackage(''))))) self.do_assign(node) else: raise NotImplementedError(target) def do_assign(self, target): with self.assign(target, _swap=True): pass def visit_assign(self, node, void): assert void == True if self.mode == 'class_body' and isinstance(node.target, parser.Name)\ and node.target.value == '__slots__': return with self.assign(node.target): if node.operator.value != '=': if isinstance(node.target, parser.Name): reg = self.namespace[node.target.value] if isinstance(reg, Register): self.bytecodes.append(bytecode.getlocal(reg)) elif isinstance(reg, ClosureSlot): self.bytecodes.append(bytecode.dup()) self.bytecodes.append(bytecode.getslot(reg.index)) else: raise NotImplementedError(reg) elif isinstance(node.target, parser.GetAttr): self.bytecodes.append(bytecode.dup()) self.bytecodes.append(bytecode.getproperty( self.qname(node.target.name.value))) else: raise NotImplementedError(node.target) self.push_value(node.expr) if node.operator.value == '=': pass elif node.operator.value == '+=': self.bytecodes.append(bytecode.add()) elif node.operator.value == '-=': self.bytecodes.append(bytecode.subtract()) elif node.operator.value == '*=': self.bytecodes.append(bytecode.multiply()) elif node.operator.value == '/=': self.bytecodes.append(bytecode.divide()) elif node.operator.value == '%=': self.bytecodes.append(bytecode.modulo()) else: raise NotImplementedError(node.operator) def visit_delete(self, node, void): assert void == True self._delete(node.expr) def _delete(self, node): if isinstance(node, parser.Name): reg = self.namespace[node.value] if isinstance(reg, ClosureSlot): self.bytecodes.append(bytecode.getlocal(self.activation)) self.bytecodes.append(bytecode.pushundefined()) self.bytecodes.append(bytecode.setslot(reg.index)) elif isinstance(reg, Property): 
self.bytecodes.append(bytecode.getscopeobject(0)) self.bytecodes.append(bytecode.deleteproperty( reg.property_name)) self.bytecodes.append(bytecode.pop()) elif isinstance(reg, Register): self.bytecodes.append(bytecode.kill(reg)) else: raise NotImplementedError(reg) elif isinstance(node, parser.GetAttr): self.push_value(node.expr) self.bytecodes.append(bytecode.deleteproperty( self.qname(node.name.value))) self.bytecodes.append(bytecode.pop()) elif isinstance(node, parser.Subscr): self.push_value(node.expr) self.push_value(node.index) self.bytecodes.append(bytecode.deleteproperty( abc.MultinameL(abc.NamespaceSetInfo(abc.NSPackage(''))))) self.bytecodes.append(bytecode.pop()) elif isinstance(node, parser.Tuple): for n in node: self._delete(n) else: raise NotImplementedError(node) def visit_call(self, node, void): name = node.expr if isinstance(name, parser.Name): name = name.value val = self.find_name(name, node.expr) if isinstance(val, (Class, NewClass)): self.bytecodes.append(bytecode.getlex(val.property_name)) for i in node.arguments: self.push_value(i) self.bytecodes.append(bytecode.construct( len(node.arguments))) if void: self.bytecodes.append(bytecode.pop()) elif isinstance(val, ClsRegister): self.bytecodes.append(bytecode.getlocal(val)) for i in node.arguments: self.push_value(i) self.bytecodes.append(bytecode.construct( len(node.arguments))) if void: self.bytecodes.append(bytecode.pop()) elif isinstance(val, Builtin): getattr(self, 'call_' + val.name)(node, void) else: self.push_value(node.expr) self.bytecodes.append(bytecode.pushnull()) for i in node.arguments: self.push_value(i) self.bytecodes.append(bytecode.call(len(node.arguments))) if void: self.bytecodes.append(bytecode.pop()) else: if isinstance(name, parser.Call) and name.expr.value == 'Class'\ and len(name.arguments) == 1: self.push_value(name.arguments[0]) self.bytecodes.append(bytecode.coerce( abc.QName(abc.NSPackage(''), 'Class'))) for i in node.arguments: self.push_value(i) 
self.bytecodes.append(bytecode.construct(len(node.arguments))) if void: self.bytecodes.append(bytecode.pop()) return self.push_value(node.expr) self.bytecodes.append(bytecode.pushnull()) for i in node.arguments: self.push_value(i) self.bytecodes.append(bytecode.call(len(node.arguments))) if void: self.bytecodes.append(bytecode.pop()) def visit_varname(self, node, void): if void: return name = node.value for ns in chain((self,), self.parent_namespaces): if name in ns.namespace: break else: if self.mode == 'eval': self.namespace[name] = LocalProperty(self.qname(name)) ns = self elif self.mode == 'evalchildfunc': self.namespace[name] = Property(self.qname(name)) ns = self else: raise NameError(name, filename=self.filename, lineno=node.lineno, column=node.col) val = ns.namespace[name] if isinstance(val, bool): if val: self.bytecodes.append(bytecode.pushtrue()) else: self.bytecodes.append(bytecode.pushfalse()) elif isinstance(val, Register): assert ns is self self.bytecodes.append(bytecode.getlocal(val)) elif isinstance(val, ClosureSlot): if val.name in self.namespace: self.bytecodes.append(bytecode.getlocal(self.activation)) self.bytecodes.append(bytecode.getslot(val.index)) else: self.bytecodes.append(bytecode.getlex(self.qpriv(val.name))) elif isinstance(val, Property): if val.name in self.namespace: self.bytecodes.append(bytecode.getscopeobject(0)) self.bytecodes.append(bytecode.getproperty(val.property_name)) else: self.bytecodes.append(bytecode.getlex(val.property_name)) elif isinstance(val, Class): self.bytecodes.append(bytecode.getlex( val.cls.name)) elif val is None: self.bytecodes.append(bytecode.pushnull()) elif val is abc.Undefined(): self.bytecodes.append(bytecode.pushundefined()) else: raise NotImplementedError(val) def visit_string(self, node, void): if void: return self.bytecodes.append(bytecode.pushstring(node.value)) def visit_number(self, node, void): if void: return if isinstance(node.value, float): 
self.bytecodes.append(bytecode.pushdouble(node.value)) elif isinstance(node.value, int): if node.value < 128: self.bytecodes.append(bytecode.pushbyte(node.value)) elif node.value < 65536: self.bytecodes.append(bytecode.pushshort(node.value)) else: self.bytecodes.append(bytecode.pushint(node.value)) else: raise NotImplementedError(node) def _get_meta(self, node): if isinstance(node.expr, parser.Name): obj = self.find_name(node.expr.value, node.expr) if isinstance(obj, (Class, NewClass)): qname = self.qname(node.attribute.value) trait = obj.class_info.get_method_trait(qname, raw_trait=True) if hasattr(trait, 'metadata'): for m in trait.metadata: if m.name != 'pyzza': continue return m def visit_callattr(self, node, void): meta = self._get_meta(node) if meta and 'debuglevel' in meta.item_info and False: # TOFIX if not void: self.bytecodes.append(bytecode.pushint()) return self.push_value(node.expr) nargs = len(node.arguments) if meta and 'debuginfo' in meta.item_info: loginfo = meta.item_info['debuginfo'].split(',') for name in loginfo: if name == 'line': self.bytecodes.append(bytecode.pushint(node.lineno)) nargs += 1 elif name == 'file': self.bytecodes.append(bytecode.pushstring(self.filename)) nargs += 1 elif name == 'class': self.bytecodes.append(bytecode.pushstring( self.myclass.class_name.value if self.myclass else '')) nargs += 1 elif name == 'method': self.bytecodes.append(bytecode.pushstring( self.method_name.value)) nargs += 1 else: raise ValueError(name) for i in node.arguments: self.push_value(i) if void: self.bytecodes.append(bytecode.callpropvoid( self.qname(node.attribute.value), nargs)) else: self.bytecodes.append(bytecode.callproperty( self.qname(node.attribute.value), nargs)) def visit_getattr(self, node, void): if void: return self.push_value(node.expr) self.bytecodes.append(bytecode.getproperty(self.qname(node.name.value))) def visit_subscr(self, node, void): if void: return self.push_value(node.expr) self.push_value(node.index) 
self.bytecodes.append(bytecode.getproperty( abc.MultinameL(abc.NamespaceSetInfo(abc.NSPackage(''))))) def visit_super(self, node, void): if node.method.value == '__init__': assert void == True self.bytecodes.append(bytecode.getlocal_0()) for i in node.arguments: self.push_value(i) self.bytecodes.append(bytecode.constructsuper(len(node.arguments))) else: self.bytecodes.append(bytecode.getlocal_0()) for i in node.arguments: self.push_value(i) if void: self.bytecodes.append(bytecode.callsupervoid( self.qname(node.method.value), len(node.arguments))) else: self.bytecodes.append(bytecode.callsuper( self.qname(node.method.value), len(node.arguments))) def visit_list(self, node, void): if void: for i in node: self.execute(i) else: for i in node: self.push_value(i) self.bytecodes.append(bytecode.newarray(len(node))) def visit_dict(self, node, void): if void: for i in node: self.execute(i) else: assert len(node) % 2 == 0, node for i in node: self.push_value(i) self.bytecodes.append(bytecode.newobject(len(node)//2)) ##### Flow control ##### def visit_if(self, node, void): assert void == True endlabel = bytecode.Label() for (cond, suite) in node.ifs: self.push_value(cond) lab = bytecode.Label() self.bytecodes.append(bytecode.iffalse(lab)) self.exec_suite(suite) self.bytecodes.append(bytecode.jump(endlabel)) self.bytecodes.append(lab) if node.else_: self.exec_suite(node.else_) self.bytecodes.append(endlabel) def visit_inlineif(self, node, void): endlabel = bytecode.Label() self.push_value(node.cond) lab = bytecode.Label() self.bytecodes.append(bytecode.iffalse(lab)) self.execute(node.expr1, void) self.bytecodes.append(bytecode.coerce_a()) self.bytecodes.append(bytecode.jump(endlabel)) self.bytecodes.append(bytecode.pop()) self.bytecodes.append(lab) self.execute(node.expr2, void) self.bytecodes.append(bytecode.coerce_a()) self.bytecodes.append(endlabel) def visit_for(self, node, void): assert void == True if isinstance(node.expr, parser.Call) \ and isinstance(node.expr.expr, 
parser.Name): val = self.find_name(node.expr.expr.value, node.expr.expr) if isinstance(val, Builtin): getattr(self, 'loop_' + val.name)(node) else: raise NotImplementedError(node.expr.expr) else: raise NotImplementedError(node.expr) def visit_while(self, node, void): assert void == True endlabel = bytecode.Label() contlab = bytecode.label() elselab = bytecode.Label() self.bytecodes.append(contlab) self.push_value(node.condition) self.bytecodes.append(bytecode.iffalse(elselab)) self.loopstack.append((contlab, endlabel)) self.exec_suite(node.body) self.loopstack.pop() self.bytecodes.append(bytecode.jump(contlab)) self.bytecodes.append(elselab) if node.else_: self.exec_suite(node.else_) self.bytecodes.append(endlabel) def visit_try(self, node, void): assert void startlabel = bytecode.Label() endbodylabel = bytecode.Label() elselabel = bytecode.Label() endlabel = bytecode.Label() self.bytecodes.append(startlabel) self.exec_suite(node.body) self.bytecodes.append(endbodylabel) self.bytecodes.append(bytecode.jump(elselabel)) for exc in node.excepts + [node.except_]: if not exc: continue catchlabel = bytecode.Label() excinfo = abc.ExceptionInfo() excinfo.exc_from = startlabel excinfo.exc_to = endbodylabel excinfo.target = catchlabel variable = None if isinstance(exc, tuple): excinfo.exc_type = self.find_name(exc[0].value, exc[0]).class_info.name if exc[1] is not None: excinfo.var_name = self.qname(exc[1].value) variable = exc[1] else: excinfo.var_name = None excbody = exc[2] else: excinfo.exc_type = abc.AnyType() excinfo.var_name = None excbody = exc self.exceptions.append(excinfo) self.bytecodes.append(catchlabel) self.bytecodes.append(bytecode.getlocal_0()) self.bytecodes.append(bytecode.pushscope()) if hasattr(self, 'activation'): self.bytecodes.append(bytecode.getlocal(self.activation)) self.bytecodes.append(bytecode.pushscope()) self.bytecodes.append(bytecode.newcatch(excinfo)) self.bytecodes.append(bytecode.pop()) if variable is None: 
                self.bytecodes.append(bytecode.pop())
            else:
                # Bind the caught exception to the `as` variable.
                self.do_assign(variable)
            self.exec_suite(excbody)
            # TODO: kill variable
            self.bytecodes.append(bytecode.jump(endlabel))
        self.bytecodes.append(elselabel)
        if node.else_:
            self.exec_suite(node.else_)
        self.bytecodes.append(endlabel)

    def visit_return(self, node, void):
        # `return expr`: push the value, then the returnvalue opcode.
        assert void == True
        self.push_value(node.expr)
        self.bytecodes.append(bytecode.returnvalue())

    def visit_raise(self, node, void):
        # `raise expr`: push the exception object and throw it.
        assert void == True
        self.push_value(node.expr)
        self.bytecodes.append(bytecode.throw())

    def visit_break(self, node, void):
        # Jump to the innermost loop's break target (loopstack[-1][1]).
        assert void == True
        self.bytecodes.append(bytecode.jump(self.loopstack[-1][1]))

    def visit_continue(self, node, void):
        # Jump to the innermost loop's continue target (loopstack[-1][0]).
        assert void == True
        self.bytecodes.append(bytecode.jump(self.loopstack[-1][0]))

    ##### Boolean operations #####

    def visit_not(self, node, void):
        # In void context only the operand's side effects matter.
        if void:
            self.execute(node.expr)
        else:
            self.push_value(node.expr)
            self.bytecodes.append(bytecode.not_())

    def visit_or(self, node, void):
        # Short-circuit `or`: keep the left value when truthy,
        # otherwise replace it with the right value.
        endlabel = bytecode.Label()
        self.push_value(node.left)
        self.bytecodes.append(bytecode.coerce_a())
        self.bytecodes.append(bytecode.dup())
        self.bytecodes.append(bytecode.iftrue(endlabel))
        self.bytecodes.append(bytecode.pop())
        self.push_value(node.right)
        self.bytecodes.append(bytecode.coerce_a())
        self.bytecodes.append(endlabel)
        if void:
            self.bytecodes.append(bytecode.pop())

    def visit_and(self, node, void):
        # Short-circuit `and`: mirror image of visit_or.
        endlabel = bytecode.Label()
        self.push_value(node.left)
        self.bytecodes.append(bytecode.coerce_a())
        self.bytecodes.append(bytecode.dup())
        self.bytecodes.append(bytecode.iffalse(endlabel))
        self.bytecodes.append(bytecode.pop())
        self.push_value(node.right)
        self.bytecodes.append(bytecode.coerce_a())
        self.bytecodes.append(endlabel)
        if void:
            self.bytecodes.append(bytecode.pop())

    ##### Math #####

    def visit_negate(self, node, void):
        if void:
            self.execute(node.expr)
        else:
            self.push_value(node.expr)
            self.bytecodes.append(bytecode.negate())

    # NOTE(review): @binary is defined elsewhere in this file;
    # presumably it pushes both operands before the wrapped method
    # emits the operator opcode — confirm at its definition.

    @binary
    def visit_add(self, node):
        self.bytecodes.append(bytecode.add())

    @binary
    def visit_subtract(self, node):
        self.bytecodes.append(bytecode.subtract())

    @binary
    def visit_multiply(self, node):
        self.bytecodes.append(bytecode.multiply())

    @binary
    def visit_divide(self, node):
        self.bytecodes.append(bytecode.divide())

    @binary
    def visit_modulo(self, node):
        self.bytecodes.append(bytecode.modulo())

    ##### Comparison #####

    @binary
    def visit_equal(self, node):
        # Python `==` is compiled to AVM2 strict equality (===).
        self.bytecodes.append(bytecode.strictequals())

    @binary
    def visit_notequal(self, node):
        self.bytecodes.append(bytecode.strictequals())
        self.bytecodes.append(bytecode.not_())

    @binary
    def visit_greater(self, node):
        self.bytecodes.append(bytecode.greaterthan())

    @binary
    def visit_greatereq(self, node):
        self.bytecodes.append(bytecode.greaterequals())

    @binary
    def visit_less(self, node):
        self.bytecodes.append(bytecode.lessthan())

    @binary
    def visit_lesseq(self, node):
        self.bytecodes.append(bytecode.lessequals())

    @binary
    def visit_bitand(self, node):
        self.bytecodes.append(bytecode.bitand())

    @binary
    def visit_bitor(self, node):
        self.bytecodes.append(bytecode.bitor())

    @binary
    def visit_bitxor(self, node):
        self.bytecodes.append(bytecode.bitxor())

    @binary
    def visit_shl(self, node):
        self.bytecodes.append(bytecode.lshift())

    @binary
    def visit_shr(self, node):
        self.bytecodes.append(bytecode.rshift())

    ##### Built-in functions #####

    def call_abs(self, node, void):
        # abs(x): dup the value, compare against 0, negate when below.
        assert len(node.arguments) == 1
        endlabel = bytecode.Label()
        self.push_value(node.arguments[0])
        self.bytecodes.append(bytecode.coerce_a())
        self.bytecodes.append(bytecode.dup())
        self.bytecodes.append(bytecode.pushbyte(0))
        self.bytecodes.append(bytecode.ifge(endlabel))
        self.bytecodes.append(bytecode.negate())
        self.bytecodes.append(bytecode.coerce_a())
        self.bytecodes.append(endlabel)

    def call_min(self, node, void):
        # min(a, b): keep `a` on the stack when a <= b, otherwise load
        # `b` back from the scratch register.
        assert len(node.arguments) == 2
        endlabel = bytecode.Label()
        self.push_value(node.arguments[0])
        self.bytecodes.append(bytecode.coerce_a())
        self.bytecodes.append(bytecode.dup())
        with self.extra_reg('*') as reg:
            self.push_value(node.arguments[1])
            self.bytecodes.append(bytecode.dup())
            self.bytecodes.append(bytecode.coerce_a())
            self.bytecodes.append(bytecode.setlocal(reg))
            self.bytecodes.append(bytecode.ifle(endlabel))
            self.bytecodes.append(bytecode.pop())
            self.bytecodes.append(bytecode.getlocal(reg))
            self.bytecodes.append(endlabel)

    def call_max(self, node, void):
        # max(a, b): same shape as call_min with the comparison flipped.
        assert len(node.arguments) == 2
        endlabel = bytecode.Label()
        self.push_value(node.arguments[0])
        self.bytecodes.append(bytecode.coerce_a())
        self.bytecodes.append(bytecode.dup())
        with self.extra_reg('*') as reg:
            self.push_value(node.arguments[1])
            self.bytecodes.append(bytecode.dup())
            self.bytecodes.append(bytecode.coerce_a())
            self.bytecodes.append(bytecode.setlocal(reg))
            self.bytecodes.append(bytecode.ifge(endlabel))
            self.bytecodes.append(bytecode.pop())
            self.bytecodes.append(bytecode.getlocal(reg))
            self.bytecodes.append(endlabel)

    def call_isinstance(self, node, void):
        # isinstance(obj, Type) -> istypelate (type taken from the stack).
        # The following commented out code leads to segfault
        # at least on flashplayer 9.0.115.0 for linux
        # ... sorry
        #~ if isinstance(node.arguments[1], parser.Name):
        #~     name = self.find_name(node.arguments[1].value)
        #~     if isinstance(name, (Class, NewClass)):
        #~         self.push_value(node.arguments[0])
        #~         print(name.name)
        #~         self.bytecodes.append(bytecode.istype(name.name))
        #~         return
        self.push_value(node.arguments[0])
        self.push_value(node.arguments[1])
        self.bytecodes.append(bytecode.istypelate())

    ##### Built-in iterators #####

    def loop_objectiter(self, node, fun):
        # Compile `for ... in keys(obj)/values(obj)/items(obj)` with the
        # AVM2 hasnext2/nextname/nextvalue iteration protocol.  `fun`
        # selects which of name/value/both get bound each iteration.
        # NOTE(review): visit_for calls loop_<name>(node) with a single
        # argument — presumably the concrete loop_keys/loop_values/
        # loop_items entries are partial applications of this method;
        # confirm where they are defined.
        endlabel = bytecode.Label()
        elselabel = bytecode.Label()
        assert len(node.expr.arguments) == 1, node.expr
        with self.extra_reg('*') as obj, \
                self.extra_reg('int') as idx:
            contlabel = bytecode.Label()
            # lowercase label(): backward-branch target (see visit_while).
            bodylabel = bytecode.label()
            self.push_value(node.expr.arguments[0])
            self.bytecodes.append(bytecode.coerce_a())
            self.bytecodes.append(bytecode.setlocal(obj))
            self.bytecodes.append(bytecode.pushbyte(0))  # hasnext2 index starts at 0
            self.bytecodes.append(bytecode.setlocal(idx))
            self.bytecodes.append(bytecode.jump(contlabel))
            self.bytecodes.append(bodylabel)
            if fun == 'keys':
                # Single target gets the property name.
                var = node.var[0] if len(node.var) == 1 else node.var
                with self.assign(var):
                    self.bytecodes.append(bytecode.getlocal(obj))
                    self.bytecodes.append(bytecode.getlocal(idx))
                    self.bytecodes.append(bytecode.nextname())
            elif fun == 'values':
                var = node.var[0] if len(node.var) == 1 else node.var
                with self.assign(var):
                    self.bytecodes.append(bytecode.getlocal(obj))
                    self.bytecodes.append(bytecode.getlocal(idx))
                    self.bytecodes.append(bytecode.nextvalue())
            elif fun == 'items':
                # Two targets: name first, then value.
                assert len(node.var) == 2
                with self.assign(node.var[0]):
                    self.bytecodes.append(bytecode.getlocal(obj))
                    self.bytecodes.append(bytecode.getlocal(idx))
                    self.bytecodes.append(bytecode.nextname())
                with self.assign(node.var[1]):
                    self.bytecodes.append(bytecode.getlocal(obj))
                    self.bytecodes.append(bytecode.getlocal(idx))
                    self.bytecodes.append(bytecode.nextvalue())
            self.loopstack.append((contlabel, endlabel))
            self.exec_suite(node.body)
            self.loopstack.pop()
            self.bytecodes.append(contlabel)
            # hasnext2 advances idx in place and pushes a continue flag.
            self.bytecodes.append(bytecode.hasnext2(obj, idx))
from tkinter import *
from tkinter import ttk
import numpy as np
import io
import base64
from PIL import ImageTk, Image
import requests
import os
import matplotlib
from matplotlib import image as mpimg
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg

# Image-processing server; the localhost URL is kept for local testing.
url = "http://vcm-9111.vm.duke.edu:5000/"
# url = "http://127.0.0.1:5000/"


def editing_window():
    """Editing window

    Initial GUI window that lets user enter their information and their
    requested editing steps.

    Args:
        none

    Returns:
        none
    """
    def enter_data():
        """Collect inputted data

        Collects username, image paths, requested image types to send
        to server.

        Args:
            none

        Returns:
            none
        """
        success_label.grid_forget()
        # NOTE(review): this import appears unused here; dl_images does
        # its own `from client import download_images`.
        from client import download_images
        entered_user = user.get()
        print("User: {}".format(entered_user))
        entered_img_paths = img_path.get()
        upload_popup = Toplevel(root)
        if len(entered_img_paths) == 0:
            # Nothing to upload: tell the user and bail out early.
            success_msg = 'No image paths entered'
            popup_window(upload_popup, success_msg, screen_w, screen_h)
            return
        img_paths = process_img_paths(entered_img_paths)
        print("Image paths:")
        for i in img_paths:
            print("\t{}".format(i))
        # Collect the requested output type and processing checkboxes.
        entered_img_type = img_type.get()
        entered_1 = hist_eq.get()
        entered_2 = contr_stretch.get()
        entered_3 = log_comp.get()
        entered_4 = rev_vid.get()
        req_img_type, proc_steps = convert_inputs(entered_img_type,
                                                  entered_1, entered_2,
                                                  entered_3, entered_4)
        orig_images, filenames, success = get_img_data(img_paths)
        print("Filenames:")
        print("\t{}".format(filenames))
        upload_success = upload_to_server(entered_user, orig_images,
                                          filenames, success, proc_steps)
        # Optional on-screen display of the processed images.
        display_img = BooleanVar()
        display_check = ttk.Checkbutton(root, text='Display images',
                                        variable=display_img,
                                        onvalue=True, offvalue=False,
                                        command=lambda: display_images(
                                            display_img.get(), entered_user,
                                            req_img_type, img_paths,
                                            proc_steps, root, filenames,
                                            orig_images, success))
        display_check.grid(column=0, row=10, sticky=W)
        # First file list requests originals only; second the processed set.
        orig_file_list = get_file_list(filenames, req_img_type,
                                       [True, False, False, False, False],
                                       success)
        file_list = get_file_list(filenames, req_img_type, proc_steps,
                                  success)
        popup_window(upload_popup, upload_success, screen_w, screen_h)
        download_btn.config(state=NORMAL)
        download_btn.config(command=lambda: dl_images(root, entered_user,
                                                      orig_file_list, '',
                                                      screen_w, screen_h))
        download_btn2.config(state=NORMAL)
        download_btn2.config(command=lambda: dl_images(root, entered_user,
                                                       file_list, '',
                                                       screen_w, screen_h))
        zip_msg = ttk.Label(root, text='Multiple files saved as download.zip')
        # NOTE(review): the condition below was commented out, so the
        # zip message is always shown — confirm intent.
        # if success.count(True) > 1:
        zip_msg.grid(column=0, columnspan=2, row=13, sticky=N)
        return

    # Main window
    root = Tk()
    root.title("Image Editor")
    screen_w = root.winfo_screenwidth()
    screen_h = root.winfo_screenheight()
    root_w = 0.3*screen_w
    root_h = 0.3*screen_h
    root.config(height=root_h, width=root_w)
    x = 0.38*screen_w
    y = 0.35*screen_h
    root.geometry('+%d+%d' % (x, y))
    top_label = ttk.Label(root, text='Edit Your Image On Our Server!')
    top_label.grid(column=0, row=0, columnspan=2, sticky=N)
    # Enter username
    user_label = ttk.Label(root, text="Username:")
    user_label.grid(column=0, row=1, sticky=E)
    user = StringVar()
    user_entry = ttk.Entry(root, textvariable=user, width=25)
    user_entry.grid(column=1, row=1, sticky=W)
    instructions = "Separate pathnames with commas."
    instructions_label = ttk.Label(root, text=instructions)
    instructions_label.grid(column=0, row=2, columnspan=2, sticky=N)
    # Enter image paths
    img_label = ttk.Label(root, text="Image paths:")
    img_label.grid(column=0, row=3, sticky=E)
    img_path = StringVar()
    img_path_entry = ttk.Entry(root, textvariable=img_path, width=25)
    img_path_entry.grid(column=1, row=3, sticky=W)
    # Select download image type
    img_type = StringVar()
    type_label = ttk.Label(root, text="Download image type:")
    type_label.grid(column=0, row=4, sticky=E)
    type_dropdown = ttk.Combobox(root, textvariable=img_type)
    type_dropdown['values'] = ('JPEG', 'PNG', 'TIFF')
    type_dropdown.grid(column=1, row=4, sticky=W)
    type_dropdown.current(0)
    type_dropdown.config(state='readonly')
    # Check processing steps
    steps_label = ttk.Label(root, text="Processing steps:")
    steps_label.grid(column=0, row=5, sticky=E)
    hist_eq = BooleanVar()
    hist_eq.set(True)  # histogram equalization is on by default
    contr_stretch = BooleanVar()
    log_comp = BooleanVar()
    rev_vid = BooleanVar()
    hist_check = ttk.Checkbutton(root, text='Histogram Equalization',
                                 variable=hist_eq,
                                 onvalue=True, offvalue=False)
    hist_check.grid(column=1, row=5, sticky=W)
    contr_check = ttk.Checkbutton(root, text='Contrast Stretching',
                                  variable=contr_stretch,
                                  onvalue=True, offvalue=False)
    contr_check.grid(column=1, row=6, sticky=W)
    log_check = ttk.Checkbutton(root, text='Log Compression',
                                variable=log_comp,
                                onvalue=True, offvalue=False)
    log_check.grid(column=1, row=7, sticky=W)
    rev_check = ttk.Checkbutton(root, text='Reverse video',
                                variable=rev_vid,
                                onvalue=True, offvalue=False)
    rev_check.grid(column=1, row=8, sticky=W)
    upload_btn = ttk.Button(root, text='Upload file', command=enter_data,
                            width=10)
    upload_btn.grid(column=0, row=9, columnspan=2, sticky=N)
    success_label = ttk.Label(root, text='')
    # Download buttons stay disabled until a successful upload wires
    # their commands in enter_data().
    download_btn = Button(root, text='Download original image',
                          state=DISABLED)
    download_btn.grid(column=0, columnspan=2, row=11, sticky=N)
    download_btn2 = Button(root, text='Download processed image(s)',
                           state=DISABLED)
download_btn2.grid(column=0, columnspan=2, row=12, sticky=N) s = ttk.Style() s.configure('Button', foreground=[('disabled', 'black')]) # Show GUI window root.mainloop() return def convert_inputs(entered_img_type, entered_1, entered_2, entered_3, entered_4): """Converts GUI user input into function inputs Gets requested image type and processing steps in the desired format. Args: entered_img_type (str): selected image type from GUI dropdown entered_1 (bool): histogram equalization? entered_2 (bool): contrast stretching? entered_3 (bool): log compression? entered_4 (bool): reverse video? Returns: req_img_type (str): requested image type proc_steps (list): processing steps as a list of booleans """ four_steps = [entered_1, entered_2, entered_3, entered_4] if entered_img_type == 'JPEG': req_img_type = '.jpg' elif entered_img_type == 'PNG': req_img_type = '.png' elif entered_img_type == 'TIFF': req_img_type = '.tiff' if not any(four_steps): proc_steps = [True, False, False, False, False] else: proc_steps = [False, entered_1, entered_2, entered_3, entered_4] return req_img_type, proc_steps def process_img_paths(input): """Processes image paths If one path is entered, return list of length 1 containing location. If multiple (comma-separated) paths are entered, return list containing all paths. Args: input (str): string containing image path(s) Returns: paths (list): list containing image path, or separated image paths """ input = input.replace(" ", "") paths = input.split(",") if '' in paths: paths.remove("") return paths def get_img_data(img_paths): """Gets image data Upload: Extracts data from image paths to upload to server as b64 strings. Unzips images if necessary. Download: Unzips downloaded files. Args: img_paths (list): list of image paths to process Returns: images (list): list of numpy arrays containing image data filenames (list): list of filenames to use. If zip included and unzipping successful, this list contains the image filenames, not the zip filename. 
Otherwise, it just states the zip filename. success (list): list of booleans denoting successful processing for each of the entries in filenames """ from image import read_img_as_b64, unzip images = [] filenames = [] success = [] is_zip = [(re.search('.zip', i) or re.search('.ZIP', i)) for i in img_paths] for i in range(len(img_paths)): curr_path = img_paths[i] # Check if image exists exists = os.path.isfile(curr_path) if exists: # Append unzipped images one by one if is_zip[i]: unzipped_images, names, temp_success = unzip(curr_path) if temp_success: for j in range(len(unzipped_images)): images.append(unzipped_images[j]) filenames.append(names[j]) success.append(True) else: images.append('') success.append(False) # Append non-zipped images normally elif curr_path.lower().endswith(('.png', '.jpg', 'jpeg', '.tiff')): """ img_obj = Image.open(curr_path) img_np = np.array(img_obj) images.append(img_np) """ images.append(read_img_as_b64(curr_path)) success.append(True) curr_filename = re.split('/', curr_path)[-1] filenames.append(curr_filename) # img_obj.close() # Don't send data if file is not an image else: images.append('') success.append(False) # File not found else: images.append('') filenames.append(img_paths[i]) success.append(False) return images, filenames, success def get_file_list(filenames, file_ext, proc_steps, success): """Gets file list This function takes the inputted filenames, requested file ext, and requested processing steps. The output file list includes information from images that were successfully uploaded. 
Args: filenames (list): list of filenames file_ext (str): requested file extension proc_steps (list): list of processing steps success (list): list of upload success Returns: file_list (list): list of properly formatted image info for download_images """ file_list = [] for i in range(len(filenames)): if success[i]: file_list.append([filenames[i], file_ext, proc_steps]) return file_list def upload_to_server(user, images, filenames, success, proc_steps): """Posts image to server Posts b64 strings to server Args: user (str): inputted username images (list): list of b64 strings filenames (list): attached original filenames success (list): whether images were successfully obtained proc_steps (list): image processing steps to take Returns: upload_success (str): message to print below upload button """ from client import add_new_user, upload_images status_codes = [400] list_for_upload = [] for i in range(len(filenames)): list_for_upload.append([filenames[i], images[i], proc_steps]) add_new_user(user) status_codes = upload_images(user, list_for_upload) if isinstance(status_codes, list): if all([True if i == 200 else False for i in status_codes["code"]]): upload_success = "Successfully uploaded" else: upload_success = "Upload failed for one or more images." upload_success += "\nImage display can still be opened" elif isinstance(status_codes, dict): if all([True if i == 200 else False for i in status_codes["code"]]): upload_success = "Successfully uploaded" else: upload_success = "Upload failed for one or more images." upload_success += "\nImage display can still be opened" return upload_success def popup_window(window, message, screen_w, screen_h): """Popup window Message pops up after images are uploaded or downloaded. 
    Args:
        window (Toplevel): GUI popup window
        message (str): message string to display

    Returns:
        none
    """
    def ok_click(window):
        """Destroy window when OK button clicked

        Args:
            window (Toplevel): GUI popup window

        Returns:
            none
        """
        window.destroy()
        return

    msg_label = Label(window, text=message+'!')
    msg_label.grid(column=0, row=0, sticky=N, ipadx=0.01*screen_w)
    # Center-ish placement relative to the screen.
    window.geometry('+%d+%d' % (0.43*screen_w, 0.48*screen_h))
    popup_ok_btn = ttk.Button(window, text='OK',
                              command=lambda: ok_click(window))
    popup_ok_btn.grid(column=0, row=1, sticky=N)
    window.title("Message")
    return


def dl_images(root, username, file_list, path, screen_w, screen_h):
    """Downloads images

    Calls download function and makes popup window.

    Args:
        root (Tk): main window
        username (str): username
        file_list (list): file list for download
        path (str): download path
        screen_w (int): screen width
        screen_h (int): screen height

    Returns:
        success_msg (str): success message
    """
    from client import download_images
    _, status = download_images(username, file_list, path)
    success_msg = ''
    # status["code"] is a list of per-file HTTP status codes.
    if all([True if i == 200 else False for i in status["code"]]):
        success_msg = "All images downloaded successfully"
    else:
        success_msg = "One or more requested images not\ndownloaded"
    download_popup = Toplevel(root)
    popup_window(download_popup, success_msg, screen_w, screen_h)
    return success_msg


def display_images(run, user, img_type, img_paths, proc_steps, root,
                   filenames, orig_images, success):
    """Display images
Qualifier='string' ) :type FunctionName: string :param FunctionName: [REQUIRED]\nThe name of the Lambda function, version, or alias.\n\nName formats\n\nFunction name - my-function (name-only), my-function:v1 (with alias).\nFunction ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function .\nPartial ARN - 123456789012:function:my-function .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n :type Qualifier: string :param Qualifier: Specify a version or alias to get details about a published version of the function. :rtype: dict ReturnsResponse Syntax { 'FunctionName': 'string', 'FunctionArn': 'string', 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'nodejs10.x'|'nodejs12.x'|'java8'|'java11'|'python2.7'|'python3.6'|'python3.7'|'python3.8'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'dotnetcore3.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'ruby2.7'|'provided', 'Role': 'string', 'Handler': 'string', 'CodeSize': 123, 'Description': 'string', 'Timeout': 123, 'MemorySize': 123, 'LastModified': 'string', 'CodeSha256': 'string', 'Version': 'string', 'VpcConfig': { 'SubnetIds': [ 'string', ], 'SecurityGroupIds': [ 'string', ], 'VpcId': 'string' }, 'DeadLetterConfig': { 'TargetArn': 'string' }, 'Environment': { 'Variables': { 'string': 'string' }, 'Error': { 'ErrorCode': 'string', 'Message': 'string' } }, 'KMSKeyArn': 'string', 'TracingConfig': { 'Mode': 'Active'|'PassThrough' }, 'MasterArn': 'string', 'RevisionId': 'string', 'Layers': [ { 'Arn': 'string', 'CodeSize': 123 }, ], 'State': 'Pending'|'Active'|'Inactive'|'Failed', 'StateReason': 'string', 'StateReasonCode': 'Idle'|'Creating'|'Restoring'|'EniLimitExceeded'|'InsufficientRolePermissions'|'InvalidConfiguration'|'InternalError'|'SubnetOutOfIPAddresses'|'InvalidSubnet'|'InvalidSecurityGroup', 'LastUpdateStatus': 'Successful'|'Failed'|'InProgress', 
'LastUpdateStatusReason': 'string', 'LastUpdateStatusReasonCode': 'EniLimitExceeded'|'InsufficientRolePermissions'|'InvalidConfiguration'|'InternalError'|'SubnetOutOfIPAddresses'|'InvalidSubnet'|'InvalidSecurityGroup' } Response Structure (dict) -- Details about a function\'s configuration. FunctionName (string) -- The name of the function. FunctionArn (string) -- The function\'s Amazon Resource Name (ARN). Runtime (string) -- The runtime environment for the Lambda function. Role (string) -- The function\'s execution role. Handler (string) -- The function that Lambda calls to begin executing your function. CodeSize (integer) -- The size of the function\'s deployment package, in bytes. Description (string) -- The function\'s description. Timeout (integer) -- The amount of time in seconds that Lambda allows a function to run before stopping it. MemorySize (integer) -- The memory that\'s allocated to the function. LastModified (string) -- The date and time that the function was last updated, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD). CodeSha256 (string) -- The SHA256 hash of the function\'s deployment package. Version (string) -- The version of the Lambda function. VpcConfig (dict) -- The function\'s networking configuration. SubnetIds (list) -- A list of VPC subnet IDs. (string) -- SecurityGroupIds (list) -- A list of VPC security groups IDs. (string) -- VpcId (string) -- The ID of the VPC. DeadLetterConfig (dict) -- The function\'s dead letter queue. TargetArn (string) -- The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic. Environment (dict) -- The function\'s environment variables. Variables (dict) -- Environment variable key-value pairs. (string) -- (string) -- Error (dict) -- Error messages for environment variables that couldn\'t be applied. ErrorCode (string) -- The error code. Message (string) -- The error message. KMSKeyArn (string) -- The KMS key that\'s used to encrypt the function\'s environment variables. 
This key is only returned if you\'ve configured a customer managed CMK. TracingConfig (dict) -- The function\'s AWS X-Ray tracing configuration. Mode (string) -- The tracing mode. MasterArn (string) -- For Lambda@Edge functions, the ARN of the master function. RevisionId (string) -- The latest updated revision of the function or alias. Layers (list) -- The function\'s layers . (dict) -- An AWS Lambda layer . Arn (string) -- The Amazon Resource Name (ARN) of the function layer. CodeSize (integer) -- The size of the layer archive in bytes. State (string) -- The current state of the function. When the state is Inactive , you can reactivate the function by invoking it. StateReason (string) -- The reason for the function\'s current state. StateReasonCode (string) -- The reason code for the function\'s current state. When the code is Creating , you can\'t invoke or modify the function. LastUpdateStatus (string) -- The status of the last update that was performed on the function. This is first set to Successful after function creation completes. LastUpdateStatusReason (string) -- The reason for the last update that was performed on the function. LastUpdateStatusReasonCode (string) -- The reason code for the last update that was performed on the function. Exceptions Lambda.Client.exceptions.ServiceException Lambda.Client.exceptions.ResourceNotFoundException Lambda.Client.exceptions.TooManyRequestsException Lambda.Client.exceptions.InvalidParameterValueException Examples The following example returns and configuration details for version 1 of a function named my-function. 
response = client.get_function_configuration( FunctionName='my-function', Qualifier='1', ) print(response) Expected Output: { 'CodeSha256': 'YFgDgEKG3ugvF1+pX64gV6tu9qNuIYNUdgJm8nCxsm4=', 'CodeSize': 5797206, 'Description': 'Process image objects from Amazon S3.', 'Environment': { 'Variables': { 'BUCKET': 'my-bucket-1xpuxmplzrlbh', 'PREFIX': 'inbound', }, }, 'FunctionArn': 'arn:aws:lambda:us-west-2:123456789012:function:my-function', 'FunctionName': 'my-function', 'Handler': 'index.handler', 'KMSKeyArn': 'arn:aws:kms:us-west-2:123456789012:key/b0844d6c-xmpl-4463-97a4-d49f50839966', 'LastModified': '2020-04-10T19:06:32.563+0000', 'LastUpdateStatus': 'Successful', 'MemorySize': 256, 'RevisionId': 'b75dcd81-xmpl-48a8-a75a-93ba8b5b9727', 'Role': 'arn:aws:iam::123456789012:role/lambda-role', 'Runtime': 'nodejs12.x', 'State': 'Active', 'Timeout': 15, 'TracingConfig': { 'Mode': 'Active', }, 'Version': '$LATEST', 'ResponseMetadata': { '...': '...', }, } :return: { 'FunctionName': 'string', 'FunctionArn': 'string', 'Runtime': 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'nodejs10.x'|'nodejs12.x'|'java8'|'java11'|'python2.7'|'python3.6'|'python3.7'|'python3.8'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'dotnetcore3.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'ruby2.7'|'provided', 'Role': 'string', 'Handler': 'string', 'CodeSize': 123, 'Description': 'string', 'Timeout': 123, 'MemorySize': 123, 'LastModified': 'string', 'CodeSha256': 'string', 'Version': 'string', 'VpcConfig': { 'SubnetIds': [ 'string', ], 'SecurityGroupIds': [ 'string', ], 'VpcId': 'string' }, 'DeadLetterConfig': { 'TargetArn': 'string' }, 'Environment': { 'Variables': { 'string': 'string' }, 'Error': { 'ErrorCode': 'string', 'Message': 'string' } }, 'KMSKeyArn': 'string', 'TracingConfig': { 'Mode': 'Active'|'PassThrough' }, 'MasterArn': 'string', 'RevisionId': 'string', 'Layers': [ { 'Arn': 'string', 'CodeSize': 123 }, ], 'State': 'Pending'|'Active'|'Inactive'|'Failed', 'StateReason': 'string', 
'StateReasonCode': 'Idle'|'Creating'|'Restoring'|'EniLimitExceeded'|'InsufficientRolePermissions'|'InvalidConfiguration'|'InternalError'|'SubnetOutOfIPAddresses'|'InvalidSubnet'|'InvalidSecurityGroup', 'LastUpdateStatus': 'Successful'|'Failed'|'InProgress', 'LastUpdateStatusReason': 'string', 'LastUpdateStatusReasonCode': 'EniLimitExceeded'|'InsufficientRolePermissions'|'InvalidConfiguration'|'InternalError'|'SubnetOutOfIPAddresses'|'InvalidSubnet'|'InvalidSecurityGroup' } :returns: (string) -- """ pass def get_function_event_invoke_config(FunctionName=None, Qualifier=None): """ Retrieves the configuration for asynchronous invocation for a function, version, or alias. To configure options for asynchronous invocation, use PutFunctionEventInvokeConfig . See also: AWS API Documentation Exceptions Examples The following example returns the asynchronous invocation configuration for the BLUE alias of a function named my-function. Expected Output: :example: response = client.get_function_event_invoke_config( FunctionName='string', Qualifier='string' ) :type FunctionName: string :param FunctionName: [REQUIRED]\nThe name of the Lambda function, version, or alias.\n\nName formats\n\nFunction name - my-function (name-only), my-function:v1 (with alias).\nFunction ARN - arn:aws:lambda:us-west-2:123456789012:function:my-function .\nPartial ARN - 123456789012:function:my-function .\n\nYou can append a version number or alias to any of the formats. The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64 characters in length.\n :type Qualifier: string :param Qualifier: A version number or alias name. 
:rtype: dict ReturnsResponse Syntax { 'LastModified': datetime(2015, 1, 1), 'FunctionArn': 'string', 'MaximumRetryAttempts': 123, 'MaximumEventAgeInSeconds': 123, 'DestinationConfig': { 'OnSuccess': { 'Destination': 'string' }, 'OnFailure': { 'Destination': 'string' } } } Response Structure (dict) -- LastModified (datetime) -- The date and time that the configuration was last updated. FunctionArn (string) -- The Amazon Resource Name (ARN) of the function. MaximumRetryAttempts (integer) -- The maximum number of times to retry when the function returns an error. MaximumEventAgeInSeconds (integer) -- The maximum age of a request that Lambda sends to a function for processing. DestinationConfig (dict) -- A destination for events after they have been sent to a function for processing. Destinations Function - The Amazon Resource Name (ARN) of a Lambda function. Queue - The ARN of an SQS queue. Topic - The ARN of an SNS topic. Event Bus - The ARN of an Amazon EventBridge event bus. OnSuccess (dict) -- The destination configuration for successful invocations. Destination (string) -- The Amazon Resource Name (ARN) of the destination resource. OnFailure (dict) -- The destination configuration for failed invocations. Destination (string) -- The Amazon Resource Name (ARN) of the destination resource. Exceptions Lambda.Client.exceptions.ServiceException Lambda.Client.exceptions.ResourceNotFoundException Lambda.Client.exceptions.InvalidParameterValueException Lambda.Client.exceptions.TooManyRequestsException Examples The following example returns the asynchronous invocation configuration for the BLUE alias of a function named my-function. 
response = client.get_function_event_invoke_config( FunctionName='my-function', Qualifier='BLUE', ) print(response) Expected Output: { 'DestinationConfig': { 'OnFailure': { 'Destination': 'arn:aws:sqs:us-east-2:123456789012:failed-invocations', }, 'OnSuccess': { }, }, 'FunctionArn': 'arn:aws:lambda:us-east-2:123456789012:function:my-function:BLUE', 'LastModified': datetime(2016, 11, 21, 19, 49, 20, 0, 326, 0), 'MaximumEventAgeInSeconds': 3600, 'MaximumRetryAttempts': 0, 'ResponseMetadata': { '...': '...', }, } :return: { 'LastModified': datetime(2015, 1, 1), 'FunctionArn': 'string', 'MaximumRetryAttempts': 123, 'MaximumEventAgeInSeconds': 123, 'DestinationConfig': { 'OnSuccess': { 'Destination': 'string' }, 'OnFailure': { 'Destination': 'string' } } } :returns: Function - The Amazon Resource Name (ARN) of a Lambda function. Queue - The ARN of an SQS queue. Topic - The ARN of an SNS topic. Event Bus - The ARN of an Amazon EventBridge event bus. """ pass def get_layer_version(LayerName=None, VersionNumber=None): """ Returns information about a version of an AWS Lambda layer , with a link to download the layer archive that\'s valid for 10 minutes. See also: AWS API Documentation Exceptions Examples The following example returns information for version 1 of a layer named my-layer. 
Expected Output: :example: response = client.get_layer_version( LayerName='string', VersionNumber=123 ) :type LayerName: string :param LayerName: [REQUIRED]\nThe name or Amazon Resource Name (ARN) of the layer.\n :type VersionNumber: integer :param VersionNumber: [REQUIRED]\nThe version number.\n :rtype: dict ReturnsResponse Syntax { 'Content': { 'Location': 'string', 'CodeSha256': 'string', 'CodeSize': 123 }, 'LayerArn': 'string', 'LayerVersionArn': 'string', 'Description': 'string', 'CreatedDate': 'string', 'Version': 123, 'CompatibleRuntimes': [ 'nodejs'|'nodejs4.3'|'nodejs6.10'|'nodejs8.10'|'nodejs10.x'|'nodejs12.x'|'java8'|'java11'|'python2.7'|'python3.6'|'python3.7'|'python3.8'|'dotnetcore1.0'|'dotnetcore2.0'|'dotnetcore2.1'|'dotnetcore3.1'|'nodejs4.3-edge'|'go1.x'|'ruby2.5'|'ruby2.7'|'provided', ], 'LicenseInfo': 'string' } Response Structure (dict) -- Content (dict) -- Details about the layer version. Location (string) -- A link
from django.shortcuts import render, get_object_or_404
from blog.models import Post, Comment
from users.models import Follow, Profile
import sys
from django.contrib.auth.models import User
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Count
from .forms import NewCommentForm

# NOTE(review): the original file contained this entire module pasted several
# times over (each repetition silently re-defining every name, the last one cut
# off mid-import).  The duplicates have been removed and one canonical copy kept.


def is_users(post_user, logged_user):
    """Return True when *post_user* is the same user as *logged_user*."""
    return post_user == logged_user


PAGINATION_COUNT = 3  # posts per page for every paginated view below


class PostListView(LoginRequiredMixin, ListView):
    """Home feed: posts authored by the logged-in user or anyone they follow."""

    model = Post
    template_name = 'blog/home.html'
    context_object_name = 'posts'
    ordering = ['-date_posted']
    paginate_by = PAGINATION_COUNT

    def get_context_data(self, **kwargs):
        """Add the six most prolific authors (by post count) for the sidebar."""
        data = super().get_context_data(**kwargs)
        data_counter = (Post.objects.values('author')
                        .annotate(author_count=Count('author'))
                        .order_by('-author_count')[:6])
        data['all_users'] = [
            User.objects.filter(pk=aux['author']).first() for aux in data_counter
        ]
        # (leftover debug print to stderr removed)
        return data

    def get_queryset(self):
        """Posts by the logged-in user and every user they follow, newest first."""
        user = self.request.user
        follows = [user]
        for relation in Follow.objects.filter(user=user):
            follows.append(relation.follow_user)
        return Post.objects.filter(author__in=follows).order_by('-date_posted')


class UserPostListView(LoginRequiredMixin, ListView):
    """All posts of one user, with follow/unfollow button handling."""

    model = Post
    template_name = 'blog/user_posts.html'
    context_object_name = 'posts'
    paginate_by = PAGINATION_COUNT

    def visible_user(self):
        """The profile owner named in the URL, or 404."""
        return get_object_or_404(User, username=self.kwargs.get('username'))

    def get_context_data(self, **kwargs):
        visible_user = self.visible_user()
        logged_user = self.request.user
        # Bug fix: test for None *before* dereferencing .username; the original
        # read logged_user.username first, which would raise AttributeError if
        # logged_user were ever None.
        if logged_user is None or logged_user.username == '':
            can_follow = False
        else:
            can_follow = (Follow.objects.filter(user=logged_user,
                                                follow_user=visible_user).count() == 0)
        data = super().get_context_data(**kwargs)
        data['user_profile'] = visible_user
        data['can_follow'] = can_follow
        return data

    def get_queryset(self):
        return Post.objects.filter(author=self.visible_user()).order_by('-date_posted')

    def post(self, request, *args, **kwargs):
        """Handle the follow/unfollow form buttons, then re-render the page."""
        if request.user.id is not None:
            follows_between = Follow.objects.filter(user=request.user,
                                                    follow_user=self.visible_user())
            if 'follow' in request.POST:
                if follows_between.count() == 0:
                    Follow(user=request.user, follow_user=self.visible_user()).save()
            elif 'unfollow' in request.POST:
                if follows_between.count() > 0:
                    follows_between.delete()
        # Bug fix: the original called self.get(self, request, ...), which
        # passed the view instance where the request argument belongs.
        return self.get(request, *args, **kwargs)


class PostDetailView(DetailView):
    """A single post together with its comments and a new-comment form."""

    model = Post
    template_name = 'blog/post_detail.html'
    context_object_name = 'post'

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        data['comments'] = (Comment.objects
                            .filter(post_connected=self.get_object())
                            .order_by('-date_posted'))
        # NOTE(review): passing a User as the 'instance' of a comment form looks
        # suspicious -- confirm NewCommentForm really expects a User here.
        data['form'] = NewCommentForm(instance=self.request.user)
        return data

    def post(self, request, *args, **kwargs):
        """Create a comment from the submitted form, then re-render the page."""
        new_comment = Comment(content=request.POST.get('content'),
                              author=self.request.user,
                              post_connected=self.get_object())
        new_comment.save()
        # Bug fix: drop the duplicated self argument (was self.get(self, ...)).
        return self.get(request, *args, **kwargs)


class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a post; only its author passes the test."""

    model = Post
    template_name = 'blog/post_delete.html'
    context_object_name = 'post'
    success_url = '/'

    def test_func(self):
        return is_users(self.get_object().author, self.request.user)


class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a new post authored by the logged-in user."""

    model = Post
    fields = ['content']
    template_name = 'blog/post_new.html'
    success_url = '/'

    def form_valid(self, form):
        # The author is never taken from the form; it is always the requester.
        form.instance.author = self.request.user
        return super().form_valid(form)

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        data['tag_line'] = 'Add a new post'
        return data


class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit an existing post; only its author passes the test."""

    model = Post
    fields = ['content']
    template_name = 'blog/post_new.html'
    success_url = '/'

    def form_valid(self, form):
        form.instance.author = self.request.user
        return super().form_valid(form)

    def test_func(self):
        return is_users(self.get_object().author, self.request.user)

    def get_context_data(self, **kwargs):
        data = super().get_context_data(**kwargs)
        data['tag_line'] = 'Edit a post'
        return data


class FollowsListView(ListView):
    """Users that the named user follows."""

    model = Follow
    template_name = 'blog/follow.html'
    context_object_name = 'follows'

    def visible_user(self):
        return get_object_or_404(User, username=self.kwargs.get('username'))

    def get_queryset(self):
        return Follow.objects.filter(user=self.visible_user()).order_by('-date')

    def get_context_data(self, *, object_list=None, **kwargs):
        data = super().get_context_data(**kwargs)
        data['follow'] = 'follows'
        return data


class FollowersListView(ListView):
    """Users that follow the named user."""

    model = Follow
    template_name = 'blog/follow.html'
    context_object_name = 'follows'

    def visible_user(self):
        return get_object_or_404(User, username=self.kwargs.get('username'))

    def get_queryset(self):
        return Follow.objects.filter(follow_user=self.visible_user()).order_by('-date')

    def get_context_data(self, *, object_list=None, **kwargs):
        data = super().get_context_data(**kwargs)
        data['follow'] = 'followers'
        return data
<filename>apps/content/views.py from django.shortcuts import render from rest_framework import status from rest_framework.generics import ( ListAPIView, ListCreateAPIView, ListAPIView, RetrieveUpdateAPIView,) from rest_framework.response import Response from rest_framework.permissions import IsAuthenticated from rest_framework.decorators import permission_classes from apps.configuration.models import Book from apps.hardspot.models import HardSpot from .models import Content,ContentContributors from .serializers import ( ContentListSerializer, BookNestedSerializer, BookListSerializer, ContentStatusListSerializer, SectionKeywordSerializer, SubSectionKeywordSerializer, SectionKeywordsSerializer, ChapterKeywordsSerializer, SubSectionKeywordsSerializer, KeywordSerializer, ContentContributorSerializer, ApprovedContentSerializer, ContentStatusSerializer, HardSpotCreateSerializer, ContentContributorsSerializer, SubSubSectionKeywordsSerializer, ContentStatusSerializerFileFormat, ) from django.utils.decorators import method_decorator from django.contrib.auth.decorators import permission_required from rest_framework.parsers import MultiPartParser from apps.dataupload.models import (Chapter, Section, SubSection, ChapterKeyword, SectionKeyword, SubSectionKeyword, SubSubSectionKeyword, ) import json import pandas as pd from evolve import settings from evolve import settings from azure.storage.blob import ( BlockBlobService, ContainerPermissions ) from datetime import datetime, timedelta import os import itertools from django.db.models import Q import threading account_name = settings.AZURE_ACCOUNT_NAME account_key = settings.AZURE_ACCOUNT_KEY CONTAINER_NAME= settings.AZURE_CONTAINER block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key) class ContentList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer parser_classes = (MultiPartParser,) def get(self, request): try: queryset = self.get_queryset() 
serializer = ContentStatusListSerializer(queryset, many=True) context = {"success": True, "message": "Chapter List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Chapter list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def post(self, request,format=None): try: serializer = ContentListSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {"success": True, "message": "Created Successful", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {"success": False, "message": "Invalid Input Data to create content"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success': "false", 'message': 'Failed to create content.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentRetrieveUpdate(RetrieveUpdateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: queryset = self.get_object() serializer = ContentListSerializer(queryset, many=True) context = {"success": True, "message": "Chapter List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) def put(self, request, pk, format=None): try: try: content_list = self.get_object() except Exception as error: context = {'success': "false", 'message': 'content Id does not exist.'} return Response(context, status=status.HTTP_404_NOT_FOUND) serializer = ContentListSerializer(content_list, data=request.data, context={"user":request.user}, partial=True) if serializer.is_valid(): serializer.save() context = {"success": True, "message": "Updation 
Successful","data": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {"success": False, "message": "Updation Failed"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success': "false", 'message': 'Failed To Update content Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookNestedList(ListAPIView): queryset = Book.objects.all() serializer_class = BookNestedSerializer def get(self, request): try: subject = request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject, content_only=True) else: queryset = self.get_queryset().filter(content_only=True) serializer = BookNestedSerializer(queryset, many=True) context = {"success": True, "message": "Conetent List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class BookListView(ListAPIView): queryset = Book.objects.all() serializer_class = BookListSerializer def get(self, request): try: subject = request.query_params.get('subject', None) if subject is not None: queryset=self.get_queryset().filter(subject__id=subject) else: queryset = self.get_queryset() serializer = BookListSerializer(queryset, many=True) context = {"success": True, "message": "Content List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Conetent list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentApprovedList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) 
sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=True) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=True) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=True) elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=True) else: queryset = self.get_queryset().filter(approved=True) serializer = KeywordSerializer(queryset, many=True) context = {"success": True, "message": "Content Approved List", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content Approved list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentPendingList(ListAPIView): queryset = Content.objects.all() serializer_class = KeywordSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False, approved_by=None) elif section_id is not None: queryset = self.get_queryset().filter(section__id=section_id, approved=False, approved_by=None) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False, approved_by=None) elif sub_sub_section_id is not None: queryset = self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id,approved=False,approved_by=None) else: queryset = self.get_queryset().filter(approved=False, 
approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {"success": True, "message": "Content Pending List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content Pending list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusList(ListCreateAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: if request.query_params.get('chapter', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('chapter', None)) elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('section', None)) elif request.query_params.get('section', None) is not None: queryset=self.get_queryset().filter(chapter_id=request.query_params.get('sub_section', None)) else: queryset = self.get_queryset() serializer = ContentListSerializer(queryset, many=True) context = {"success": True, "message": "Content Status List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content Status list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentRejectedList(ListAPIView): queryset = Content.objects.all() serializer_class = ContentListSerializer def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section',None) if chapter_id is not None: queryset=self.get_queryset().filter(chapter__id=chapter_id, approved=False).exclude(approved_by=None) elif section_id is not None: queryset = 
self.get_queryset().filter(section__id=section_id, approved=False).exclude(approved_by=None) elif sub_section_id is not None: queryset = self.get_queryset().filter(sub_section__id=sub_section_id, approved=False).exclude(approved_by=None) elif sub_sub_section_id is not None: queryset =self.get_queryset().filter(sub_sub_section__id = sub_sub_section_id , approved = False).exclude(approved_by=None) else: queryset = self.get_queryset().filter(approved=False).exclude(approved_by=None) serializer = KeywordSerializer(queryset, many=True) context = {"success": True, "message": "Content Rejected List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content Rejected list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class Keywords(ListAPIView): queryset = Content.objects.all() def get(self, request): try: chapter_id = request.query_params.get('chapter', None) section_id = request.query_params.get('section', None) sub_section_id = request.query_params.get('sub_section', None) sub_sub_section_id = request.query_params.get('sub_sub_section', None) if chapter_id is not None: queryset=ChapterKeyword.objects.filter(chapter__id = chapter_id) serializer = ChapterKeywordsSerializer(queryset, many=True) elif section_id is not None: queryset = SectionKeyword.objects.filter(section__id = section_id) serializer = SectionKeywordsSerializer(queryset, many=True) elif sub_section_id is not None: queryset = SubSectionKeyword.objects.filter(sub_section__id = sub_section_id) serializer = SubSectionKeywordsSerializer(queryset, many=True) elif sub_sub_section_id is not None: queryset = SubSubSectionKeyword.objects.filter(sub_sub_section__id = sub_sub_section_id) serializer = SubSubSectionKeywordsSerializer(queryset, many=True) else: queryset = self.get_queryset() serializer = KeywordSerializer(queryset, many=True) context = {"success": True, "message": 
"Content List","data": serializer.data} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Content list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentContributorCreateView(ListCreateAPIView): queryset = ContentContributors.objects.all() serializer_class = ContentContributorSerializer def post(self, request): try: queryset = ContentContributors.objects.filter(first_name__iexact=request.data['first_name'].strip(),last_name__iexact=request.data['last_name'].strip(), mobile=request.data['mobile'].strip()).first() if queryset is not None: if str(queryset.email) == "" and request.data['email'] is not None: ContentContributors.objects.filter(id=queryset.id).update(email=request.data['email']) queryset.refresh_from_db() serializer = ContentContributorSerializer(queryset) context = {"success": True, "message": "Successful", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) else: serializer = ContentContributorSerializer(data=request.data) if serializer.is_valid(): serializer.save() context = {"success": True, "message": "Successful", "data": serializer.data} return Response(context, status=status.HTTP_200_OK) context = {"success": False, "message": "Invalid Input Data to create Pesonal details"} return Response(context, status=status.HTTP_400_BAD_REQUEST) except Exception as error: context = {'success': "false", 'message': 'Failed to Personal Details.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ApprovedContentDownloadView(ListAPIView): queryset = Book.objects.all() def get(self, request): try: final_list = [] import os from shutil import copyfile book = request.query_params.get('book', None) chapters=Chapter.objects.filter(book_id=book).order_by('id') serializer = ApprovedContentSerializer(chapters, many=True) for data in serializer.data: for d in 
data['chapter']: final_list.append(d) repeat_list=['Content Name','Content Link/Video Link','Content Rating (By Reviewer)','Comment (By Reviewer)', 'linked_keywords'] data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium', 'Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'Keywords',]+(list(itertools.chain.from_iterable(itertools.repeat(repeat_list, 5))))) exists = os.path.isfile('ApprovedContent.csv') path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('ApprovedContent.csv') data_frame.to_csv(path + 'ApprovedContent.csv', encoding="utf-8-sig", index=False) context = {"success": True, "message": "Activity List", "data": 'media/files/ApprovedContent.csv'} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) class ContentStatusDownloadView(RetrieveUpdateAPIView): queryset = HardSpot.objects.all() serializer_class = HardSpotCreateSerializer def get(self, request): try: final_list = [] import os from shutil import copyfile book_id = request.query_params.get('book', None) book_name="" if book_id is not None: book_name=Book.objects.get(id=book_id) chapters=Chapter.objects.filter(book__id=book_id).order_by('id') serializer = ContentStatusSerializer(chapters, many=True) for data in serializer.data: for d in data['chapter']: final_list.append(d) data_frame = pd.DataFrame(final_list , columns=['Board', 'Medium','Grade', 'Subject', 'Textbook Name', 'Level 1 Textbook Unit', 'Level 2 Textbook Unit', 'Level 3 Textbook Unit','Level 4 Textbook Unit', 'total', 'approved_contents', 'rejected_contents', 'pending_contents', 'hard_spots']) exists = os.path.isfile('{}_contentstatus.csv'.format(book_name)) path = settings.MEDIA_ROOT + '/files/' if exists: os.remove('{}_contentstatus.csv'.format(book_name)) # 
data_frame.to_excel(path + 'contentstatus.xlsx') data_frame.to_csv(path + str(book_name)+'_contentstatus.csv', encoding="utf-8-sig", index=False) context = {"success": True, "message": "Activity List","data": 'media/files/{}_contentstatus.csv'.format(book_name)} return Response(context, status=status.HTTP_200_OK) except Exception as error: context = {'success': "false", 'message': 'Failed to get Activity list.'} return Response(context, status=status.HTTP_500_INTERNAL_SERVER_ERROR) @permission_classes((IsAuthenticated,)) class ContentContributorsDownloadView(RetrieveUpdateAPIView):
# NOTE(review): this chunk was whitespace-mangled; the code below is the same
# logic restored to conventional formatting.  The stray "1" that opened the
# chunk was the `return 1` tail of a method truncated by the chunk boundary.

def notify_gen_regvar_def(self, canon, user, cmt):
    """Generate a register-variable definition line.

    Args:
        canon: canonical register name (case-insensitive)
        user: user-defined register name
        cmt: comment to appear near the definition
    Returns: 0-ok
    """
    return 1

def notify_setsgr(self, start_ea, end_ea, regnum, value, old_value, tag):
    """The kernel has changed a segment register value.

    Args:
        start_ea, end_ea: affected range
        regnum, value, old_value: register and values
        tag: uchar (SR_... values)
    Returns: 1-ok, 0-error
    """
    return 1

def notify_set_compiler(self):
    """The kernel has changed the compiler information."""
    pass

def notify_is_basic_block_end(self, call_insn_stops_block):
    """Is the current instruction (in 'cmd') the end of a basic block?

    Should be defined for processors with delayed jump slots.
    Returns: 1-unknown, 0-no, 2-yes
    """
    return 1

def notify_make_code(self, ea, size):
    """An instruction is being created.  Returns: 1-ok, <=0-stop."""
    return 1

def notify_make_data(self, ea, flags, tid, size):
    """A data item is being created.  Returns: 1-ok, <=0-stop."""
    return 1

def notify_moving_segm(self, start_ea, segm_name, segm_class, to_ea, flags):
    """May the kernel move the segment?

    Args:
        start_ea, segm_name, segm_class: segment to move
        to_ea: new segment start address
        flags: combination of MSF_... bits
    Returns: 1-yes, <=0-the kernel should stop
    """
    return 1

def notify_move_segm(self, from_ea, start_ea, segm_name, segm_class):
    """A segment was moved; fix processor-dependent, address-sensitive info.

    Args:
        from_ea: old segment address
        start_ea, segm_name, segm_class: moved segment
    Returns: nothing
    """
    pass

def notify_verify_noreturn(self, func_start_ea):
    """The kernel wants to set the 'noreturn' flag for a function.

    Returns: 1-ok, any other value-do not set the flag
    """
    return 1

def notify_verify_sp(self, func_start_ea):
    """All function instructions analyzed; verify the stack pointer.

    Returns: 1-ok, 0-bad stack pointer
    """
    return 1

def notify_renamed(self, ea, new_name, is_local_name):
    """The kernel has renamed a byte.  See also the 'rename' event."""
    pass

def notify_set_func_start(self, pfn, new_ea):
    """Function chunk start address will change.  Returns: 1-ok, <=0-deny."""
    return 1

def notify_set_func_end(self, pfn, new_end_ea):
    """Function chunk end address will change.  Returns: 1-ok, <=0-deny."""
    return 1

def notify_treat_hindering_item(self, hindering_item_ea, new_item_flags,
                                new_item_ea, new_item_length):
    """An item hinders creation of another item.

    Returns: 1-no reaction, <=0-the kernel may delete the hindering item
    """
    return 1

def notify_get_operand_string(self, opnum):
    """Request a text string for an operand (cli, java, ...).

    Args:
        opnum: operand number; -1 means any string operand
        (cmd structure must contain info for the desired insn)
    Returns: the requested string
    """
    return ""

def notify_coagulate_dref(self, from_ea, to_ea, may_define, code_ea):
    """A data reference is being analyzed.

    The plugin may correct code_ea (e.g. clear the low bit for thumb refs).
    Returns: new code_ea, or -1 to cancel dref analysis
    """
    return 0

def simplify(self):
    """Track immediate values loaded into registers via self.mHelper and add
    data references once a tracked address is fully resolved.

    NOTE(review): long commented-out debug blocks (init_output_buffer /
    MakeRptCmt experiments) from the original were removed as dead code.
    """
    itype = self.cmd.itype
    if itype == self.ifind("MI"):
        # Move-immediate: remember the value now held by the register.
        imm = self.cmd.Operands[1].value
        reg = self.cmd.Operands[0].reg
        self.mHelper[reg] = imm
        if not isHead(GetFlags(self.mHelper[reg])):
            do_unknown(ItemHead(self.mHelper[reg]), 0)
        ua_add_dref(0, self.mHelper[reg], dr_O)
    elif itype == self.ifind("ML"):
        imm = self.cmd.Operands[1].value
        reg = self.cmd.Operands[0].reg
        self.mHelper[reg] = imm
    elif itype == self.ifind("MH"):
        try:
            imm = self.cmd.Operands[1].value
            reg = self.cmd.Operands[0].reg
            # Combine the high part with the previously-tracked low 10 bits.
            self.mHelper[reg] = (imm << 10) | (self.mHelper[reg] & 0x3ff)
            if not isHead(GetFlags(self.mHelper[reg])):
                do_unknown(ItemHead(self.mHelper[reg]), 0)
            ua_add_dref(0, self.mHelper[reg], dr_O)
        except Exception:
            # Tracked value may be None / register untracked; ignore.
            pass
    elif itype == self.ifind("LDT") or itype == self.ifind("STT"):
        try:
            # Values currently unused (debug code removed); kept for parity
            # with the original control flow.
            off = self.cmd.Operands[1].addr
            base = self.cmd.Operands[1].phrase
        except Exception:
            pass
    elif itype == self.ifind("LDS") or itype == self.ifind("STS"):
        try:
            off = self.cmd.Operands[1].addr
            base = self.cmd.Operands[1].phrase
        except Exception:
            pass
    elif itype == self.ifind("LDW") or itype == self.ifind("STW"):
        try:
            off = self.cmd.Operands[1].addr
            base = self.cmd.Operands[1].phrase
        except Exception:
            pass
    elif itype == self.ifind("XR"):
        try:
            ra = self.cmd.Operands[0].reg
            rb = self.cmd.Operands[1].reg
            rc = self.cmd.Operands[2].reg
            self.mHelper[ra] = self.mHelper[rc] ^ self.mHelper[rb]
        except Exception:
            pass
    else:
        # Any other instruction that writes operand 0 invalidates tracking.
        entry = self.instruc[self.cmd.itype]
        if entry['feature'] & CF_CHG1:
            ra = self.cmd.Operands[0].reg
            self.mHelper[ra] = None
    return

# ----------------------------------------------------------------------
# The following callbacks are mandatory
#
def emu(self):
    """Emulate the instruction in 'cmd': create cross-references and plan
    analysis of subsequent instructions.

    Returns: 1 (returning 0 would make the kernel delete the instruction).

    NOTE(review): commented-out stack-variable / SP-tracing experiments from
    the original were removed as dead code; the duplicate
    cmd.get_canon_feature() call for the CF_STOP test now reuses `features`.
    """
    cmd = self.cmd
    features = cmd.get_canon_feature()
    addr = cmd.Operands[0].addr
    if (features & CF_CALL) and cmd.Operands[0].type == o_near:
        ua_add_cref(0, addr, fl_CN)
    if (features & CF_JUMP) and cmd.Operands[0].type == o_near:
        ua_add_cref(0, addr, fl_JN)
    if (features & CF_STOP) == 0:
        # Execution flows into the next instruction.
        ua_add_cref(0, cmd.ea + cmd.size, fl_F)
    self.simplify()
    return 1

perms = ["N", "R", "RW", "RX"]

def outop(self, op):
    """Generate the text representation of an instruction operand.

    Must not change the database; output goes to the buffer prepared with
    init_output_buffer().  Returns True (operands are never hidden here).
    """
    if op.type == o_reg:
        out_register(self.regNames[op.reg])
        if op.specval > 1:
            # Register range, e.g. r0-r3.
            OutChar('-')
            out_register(self.regNames[op.reg + op.specval - 1])
    elif op.type == o_displ:
        out_symbol('[')
        out_register(self.regNames[op.phrase])
        if op.addr:
            OutValue(op, OOFW_32 | OOF_NUMBER | OOF_SIGNED | OOFS_NEEDSIGN)
        out_symbol(']')
    elif op.type == o_near:
        out_name_expr(op, op.addr, BADADDR)
    elif op.type == o_imm:
        if op.specflag2 == 1:
            out_keyword(self.perms[op.value])
        elif op.specflag3 == 1:
            if not out_name_expr(op, op.addr, BADADDR):
                OutValue(op, OOF_ADDR)
        else:
            if op.value & 0x4000000:
                # Sign-extend the 27-bit immediate to a negative 32-bit int.
                se = op.value | 0xf8000000
                op.value = struct.unpack("<i", struct.pack("<I", se))[0]
            OutValue(op, OOFW_32 | OOF_NUMBER | OOF_SIGNED)
    return True

suffixes = ["", "I", "D", "."]

def out(self):
    """Generate the text representation of the instruction in 'cmd'.

    Must not change the database; only u_emu() may do that.
    """
    buf = init_output_buffer(1024)
    cmd = self.cmd
    OutMnem(7, self.suffixes[cmd.auxpref])
    if cmd.itype != 3:
        # NOTE(review): mangled source made the guard's extent ambiguous;
        # both the first operand and the remaining-operand loop are guarded
        # here — confirm against the original repository.
        out_one_operand(0)
        for i in range(1, 3):
            op = cmd[i]
            if op.type == o_void:
                break
            out_symbol(',')
            OutChar(' ')
            out_one_operand(i)
    term_output_buffer()
    cvar.gl_comm = 1
    MakeLine(buf)

conditions = ["N", "E", "L", "LE", "G", "GE", "NO", "O",
              "NS", "S", "SL", "SLE", "SG", "SGE", "???", ""]

def dc_gen_2(self, cmd, mcode, mnem):
    """Decode a two-register instruction form (ra, rb)."""
    cmd.itype = self.ifind(mnem)
    cmd.Operands[0].type = o_reg
    cmd.Operands[0].reg = (mcode >> (27 - 14)) & 0x1f
    cmd.Operands[0].dtyp = dt_3byte
    cmd.Operands[1].type = o_reg
    cmd.Operands[1].reg = (mcode >> (27 - 19)) & 0x1f
    cmd.Operands[1].dtyp = dt_3byte

def dc_gen_3(self, cmd, mcode, mnem):
    """Decode a three-register instruction form (ra, rb, rc)."""
    cmd.itype = self.ifind(mnem)
    cmd.Operands[0].type = o_reg
    cmd.Operands[0].reg = get_ra_27(mcode)
    cmd.Operands[0].dtyp = dt_3byte
    cmd.Operands[1].type = o_reg
    cmd.Operands[1].reg = get_rb_27(mcode)
    cmd.Operands[1].dtyp = dt_3byte
    cmd.Operands[2].type = o_reg
    cmd.Operands[2].reg = get_rc_27(mcode)
    cmd.Operands[2].dtyp = dt_3byte

def dc_gen_3_imm(self, cmd, mcode, mnem):
    """Decode a two-register + 7-bit-immediate instruction form."""
    cmd.itype = self.ifind(mnem)
    cmd.Operands[0].type = o_reg
    cmd.Operands[0].reg = get_ra_27(mcode)
    cmd.Operands[1].type = o_reg
    cmd.Operands[1].reg = get_rb_27(mcode)
    cmd.Operands[2].type = o_imm
    cmd.Operands[2].value = (mcode >> 3) & 0x7f

# NOTE(review): `def dc_ad(self,` is truncated by the chunk boundary; its
# signature and body are not visible here and cannot be reconstructed.
""" with pytest.raises(vtq.QueryInitError) as excinfo: query = vtq.Query( coords=SkyCoord('00h42.5m', '+41d12m'), source_names=['test', 'test2'], ) assert str(excinfo.value).startswith( "The number of entered source names" ) def test_init_failure_invalid_planet(self) -> None: """ Tests the initialisation failure of a Query object. Specifically when an invalid planet is entered. Returns: None """ with pytest.raises(ValueError) as excinfo: query = vtq.Query( planets=['Earth'] ) assert str(excinfo.value) == "Invalid planet object provided!" def test_init_failure_base_folder(self, mocker) -> None: """ Tests the initialisation failure of a Query object. Specifically when the base folder directory has not been specified, or set in the environment. Args: mocker: The pytest-mock mocker object. Returns: None """ mocker_getenv = mocker.patch( 'os.getenv', return_value=None ) with pytest.raises(vtq.QueryInitError) as excinfo: query = vtq.Query( planets=['Mars'] ) assert str(excinfo.value).startswith( "The base folder directory could not be determined!" ) def test_init_failure_base_folder_not_found(self) -> None: """ Tests the initialisation failure of a Query object. Specifically when no specified base folder is not found. Returns: None """ with pytest.raises(vtq.QueryInitError) as excinfo: test_dir = '/testing/folder' query = vtq.Query( planets=['Mars'], base_folder=test_dir ) assert str(excinfo.value) == f"Base folder {test_dir} not found!" def test_init_failure_stokes_v_tiles(self, mocker) -> None: """ Tests the initialisation failure of a Query object. Specifically when Stokes V is requested on tile images. Args: mocker: The pytest-mock mocker object. 
Returns: None """ with pytest.raises(vtq.QueryInitError) as excinfo: isdir_mocker = mocker.patch( 'vasttools.query.os.path.isdir', return_value=True ) test_dir = '/testing/folder' query = vtq.Query( epochs='all-vast', planets=['Mars'], base_folder=test_dir, stokes='v', use_tiles=True ) assert str(excinfo.value).startswith( "Problems found in query settings!" ) def test_init_failure_no_sources_in_footprint( self, pilot_moc_mocker: MOC, mocker ) -> None: """ Tests the initialisation failure of a Query object. Specifically when none of the entered coordinates are in the footprint. Args: pilot_moc_mocker: The direct loaded epoch 1 MOC (for some reason this does not load correctly if not done like this in the test environment). mocker: The pytest-mock mocker object. Returns: None """ with pytest.raises(vtq.QueryInitError) as excinfo: isdir_mocker = mocker.patch( 'vasttools.query.os.path.isdir', return_value=True ) mocker_data_available = mocker.patch( 'vasttools.query.Query._check_data_availability', return_value=True ) mocker_moc_open = mocker.patch( 'mocpy.MOC.from_fits', return_value=pilot_moc_mocker ) test_dir = '/testing/folder' test_coords = SkyCoord( ['00h42.5m', '17h42.5m'], ['+41d12m', '-82d12m'] ) query = vtq.Query( epochs='all-vast', coords=test_coords, base_folder=test_dir, ) assert str(excinfo.value) == ( 'No sources remaining. None of the entered coordinates' ' are found in the VAST Pilot survey footprint!' ) def test_init_settings(self, mocker) -> None: """ Tests the initialisation of a Query object. Tests that all provided settings are set correctly in the final Query object. Args: mocker: The pytest-mock mocker object. Returns: None """ isdir_mocker = mocker.patch( 'vasttools.query.os.path.isdir', return_value=True ) mocker_data_available = mocker.patch( 'vasttools.query.Query._check_data_availability', return_value=True ) test_dir = '/testing/folder' epochs = '1,2,3x' stokes = 'I' crossmatch_radius = 1. use_tiles = True max_sep = 3. 
islands = False no_rms = True matches_only = True sort_output = True forced_fits = True forced_allow_nan = True forced_cluster_threshold = 7.5 output_dir = '/output/here' incl_observed = False expected_settings = { 'epochs': ["1", "2", "3x"], 'stokes': stokes, 'crossmatch_radius': Angle(crossmatch_radius * u.arcsec), 'max_sep': max_sep, 'islands': islands, 'no_rms': no_rms, 'matches_only': matches_only, 'sort_output': sort_output, 'forced_fits': forced_fits, 'forced_allow_nan': forced_allow_nan, 'forced_cluster_threshold': forced_cluster_threshold, 'output_dir': output_dir, 'search_around': False, 'tiles': use_tiles, 'incl_observed': False } query = vtq.Query( planets=['Mars'], base_folder=test_dir, stokes=stokes, epochs=epochs, crossmatch_radius=crossmatch_radius, max_sep=max_sep, use_islands=islands, use_tiles=use_tiles, no_rms=no_rms, matches_only=matches_only, sort_output=sort_output, forced_fits=forced_fits, forced_allow_nan=forced_allow_nan, forced_cluster_threshold=forced_cluster_threshold, output_dir=output_dir, incl_observed=incl_observed ) assert query.settings == expected_settings @pytest.mark.parametrize( "epoch_exists,data_dir_exists,images_exist," "cats_exist,rmsmaps_exist,no_rms,all_available", [ (True, True, True, True, True, False, True), (True, True, True, True, True, True, True), (False, True, True, True, True, False, False), (True, False, True, True, True, False, False), (True, True, False, True, True, False, False), (True, True, True, False, True, False, False), (True, True, True, True, False, False, False), (True, True, True, True, False, True, True), ], ids=('all-available', 'all-available-no-rms', 'no-epoch', 'no-data-dir', 'no-image-dir', 'no-selavy-dir', 'no-rms-dir-rms', 'no-rms-dir-no-rms' ) ) def test__check_data_availability(self, epoch_exists: bool, data_dir_exists: bool, images_exist: bool, cats_exist: bool, rmsmaps_exist: bool, no_rms: bool, all_available: bool, tmp_path ) -> None: """ Test the data availability check Args: 
epoch_exists: The epoch directory exists. data_dir_exists: The data directory (i.e. COMBINED/TILES) exists. images_exist: The image directory (e.g. STOKESI_IMAGES) exists. cats_exist: The selavy directory (e.g. STOKESI_SELAVY) exists. rmsmaps_exist: The RMS map directory (e.g. STOKESI_RMSMAPS) exists. no_rms: The `no_rms` Query option has been selected. all_available: The expected result from _check_data_availability(). tmp_path: Pathlib temporary directory path. Returns: None. """ stokes = "I" epoch = "10x" data_type = "COMBINED" base_dir = tmp_path epoch_dir = base_dir / f"EPOCH{epoch}" data_dir = epoch_dir / data_type image_dir = data_dir / f"STOKES{stokes}_IMAGES" selavy_dir = data_dir / f"STOKES{stokes}_SELAVY" rms_dir = data_dir / f"STOKES{stokes}_RMSMAPS" if epoch_exists: epoch_dir.mkdir() if data_dir_exists: data_dir.mkdir() if images_exist: image_dir.mkdir() if cats_exist: selavy_dir.mkdir() if rmsmaps_exist: rms_dir.mkdir() if all_available: expectation = does_not_raise() message = 'None' else: expectation = pytest.raises(vtq.QueryInitError) message = ("Not all requested data is available! " "Please address and try again.") with expectation as e: query = vtq.Query( epochs=epoch, planets=['Mars'], base_folder=base_dir, stokes=stokes, no_rms=no_rms ) assert str(e) == message def test__field_matching( self, vast_query_psrj2129: vtq.Query, vast_fields_object_dummy: pd.DataFrame, field_centres_dummy: pd.DataFrame, mocker ) -> None: """ Tests the field matching method of the query object. Checks that the matching fields are correctly identified from the dummy input data. Args: vast_query_psrj2129: The dummy Query instance that includes a search for PSR J2129-04. vast_fields_object_dummy: The dummy fields available to perform the search against. field_centres_dummy: The dummy field centres file. mocker: The pytest-mock mocker object. 
Returns: None """ field_sc = SkyCoord( vast_fields_object_dummy["RA_HMS"], vast_fields_object_dummy["DEC_DMS"], unit=(u.hourangle, u.deg) ) field_centres_sc = SkyCoord( field_centres_dummy["centre-ra"], field_centres_dummy["centre-dec"], unit=(u.deg, u.deg) ) field_centre_names = field_centres_dummy.field row = vast_query_psrj2129.query_df.iloc[0] results = vast_query_psrj2129._field_matching( row, field_sc, vast_fields_object_dummy.FIELD_NAME, field_centres_sc, field_centre_names ) assert np.all(results[0] == np.array( ['VAST_2118-06A', 'VAST_2143-06A'] )) assert results[1] == 'VAST_2118-06A' assert results[2] == ['1', '2'] def test_find_fields( self, vast_query_psrj2129: vtq.Query, fields_df_expected_result: pd.DataFrame, mocker ) -> None: """ Tests the front facing find fields method. The Dask called is mocked and the expected result is returned. This is ok as the function is previously tested. Args: vast_query_psrj2129: The dummy Query instance that includes a search for PSR J2129-04. fields_df_expected_result: The expected fields_df result of the search. mocker: The pytest-mock mocker object. 
Returns: None """ mocked_field_matching_results = ( [np.array(['VAST_2118-06A', 'VAST_2143-06A'], dtype='object')], 'VAST_2118-06A', [['1', '2']], [[ ['1', 'VAST_2118-06A', 9668, '2019-08-27 18:52:00.556', 887.5], ['2', 'VAST_2118-06A', 10342, '2019-10-30 10:11:56.913', 887.5] ]], [[9668, 10342]], [['2019-08-27 18:52:00.556', '2019-10-30 10:11:56.913']], [[887.5, 887.5]] ) dask_from_pandas_mocker = mocker.patch( 'vasttools.query.dd.from_pandas', ) ( dask_from_pandas_mocker .return_value .apply .return_value .compute .return_value ) = mocked_field_matching_results vast_query_psrj2129.find_fields() assert vast_query_psrj2129.fields_df.equals( fields_df_expected_result() ) @pytest.mark.parametrize("tiles, conv, islands, expected_file", [(True, False, None, 'selavy-image.i.VAST_2118-06A.SB9668.cont' '.taylor.0.restored.components.corrected.xml' ), (True, True, None, 'selavy-image.i.VAST_2118-06A.SB9668.cont' '.taylor.0.restored.conv.components.corrected' '.xml' ), (False, None, True, 'selavy-VAST_2118-06A.EPOCH01.I.conv' '.islands.xml' ), (False, None, False, 'selavy-VAST_2118-06A.EPOCH01.I.conv' '.components.xml' ) ], ids=('tiles-noconv', 'tiles-conv', 'comb-islands', 'comb-noislands' ) ) def test__get_selavy_path( self, vast_query_psrj2129_fields: vtq.Query, tiles: bool, conv: list, islands: bool, expected_file: str, mocker ) -> None: """ Tests adding the paths to the combined data in the query. Assumes the standard VAST Pilot directory and file structure. Args: vast_query_psrj2129_fields: The dummy Query instance that includes a search for PSR J2129-04 with the included found fields data. tiles: Whether to query the TILES or COMBINED data. conv: Whether `.conv` is present in the filename. This argument is only relevant if tiles is True islands: Whether to return the islands or components catalogue. This argument is only relevant if tiles is False. expected_file: The expected filename to be returned. mocker: The pytest-mock mocker object. 
Returns: None """ epoch_string = 'EPOCH01' test_query = vast_query_psrj2129_fields test_query.settings['tiles'] = tiles test_query.settings['islands'] = islands row = test_query.fields_df.loc[0] if conv is not None: mock_selavy_isfile = mocker.patch( 'vasttools.query.Path.is_file', return_value=conv ) path = test_query._get_selavy_path(epoch_string, row) assert os.path.split(path)[1] == expected_file def test__add_files_combined( self, vast_query_psrj2129_fields: vtq.Query, mocker ) -> None: """ Tests adding the paths to the combined data in the query. Assumes the standard VAST Pilot directory and file structure. Args: vast_query_psrj2129_fields: The dummy Query instance that includes a search for PSR J2129-04 with the included found fields data. mocker: The pytest-mock mocker object. Returns: None """ expected_results = ( '/testing/folder/EPOCH01/COMBINED/STOKESI_SELAVY' '/selavy-VAST_2118-06A.EPOCH01.I.conv.components.xml', '/testing/folder/EPOCH01/COMBINED/STOKESI_IMAGES' '/VAST_2118-06A.EPOCH01.I.conv.fits', '/testing/folder/EPOCH01/COMBINED/STOKESI_RMSMAPS' '/noiseMap.VAST_2118-06A.EPOCH01.I.conv.fits' ) test_query = vast_query_psrj2129_fields mock_selavy_path = mocker.patch( 'vasttools.query.Query._get_selavy_path', return_value=expected_results[0] ) results = test_query._add_files(test_query.fields_df.loc[0]) assert results == expected_results @pytest.mark.parametrize("corrected, stokes", [(True, "I"), (True, "V"), (False, "I"), (False, "V"), ],
| x-tcp:// | DB-dialect+driver://. " "For empty scheme, params are a file path with '-' meaning standard output. " "For x-tcp scheme, params are TCP host[:port=14380]. " "For DB, use SQLAlchemy engine URL. " "(default=sqlite:///<dagman-output-file>.stampede.db)", default=None, ) grp.add_option( "-e", "--encoding", action="store", dest="enc", metavar="FORMAT", help="How to encode log events: bson | json| bp (default=bp)", ) parser.add_option_group(grp) # Parse command-line options (options, args) = parser.parse_args() # Remaining argument is .dag.dagman.out file if len(args) != 1: parser.print_help() sys.exit(1) out = args[0] if not out.endswith(".dagman.out"): parser.print_help() sys.exit(1) # Turn into absolute filename out = os.path.abspath(out) # Infer run directory run = os.path.dirname(out) if not os.path.isdir(run): logger.critical("Run directory %s does not exist" % run) exit(1) os.chdir(run) # Get the location of the properties file from braindump top_level_wf_params = utils.slurp_braindb(run) top_level_prop_file = None if "properties" in top_level_wf_params: top_level_prop_file = top_level_wf_params["properties"] # Create the full path by using the submit_dir key from braindump if "submit_dir" in top_level_wf_params: top_level_prop_file = os.path.join( top_level_wf_params["submit_dir"], top_level_prop_file ) # Parse, and process properties props = properties.Properties() props.new(config_file=options.config_properties, rundir_propfile=top_level_prop_file) # PM-948 check to see if any addon command line # options are specified in properties addon_cmd_prop = props.property("pegasus.monitord.arguments") addon_cmd_options = [] if addon_cmd_prop is not None: addon_cmd_options = addon_cmd_prop.split(" ") cmd_options = sys.argv[1:] + addon_cmd_options # parse again with extra options that might # been specified in the properties file logger.info("Final Command line options are: %s" % cmd_options) (options, args) = parser.parse_args(cmd_options) # Set logging 
level if options.vb <= 0: lvl = logging.INFO elif options.vb == 1: lvl = logging.DEBUG else: lvl = logging.TRACE root_logger.setLevel(lvl) # Resolve command-line options conflicts if options.event_dest is not None and options.no_events is not None: logger.critical( "the --no-events and --dest options conflict, please use only one of them" ) sys.exit(1) # Check if user wants to override pid checking if options.skip_pid_check is not None: skip_pid_check = True # Make sure no other pegasus-monitord instances are running... pid_filename = os.path.join(run, "monitord.pid") if not skip_pid_check and utils.pid_running(pid_filename): logger.critical( "it appears that pegasus-monitord is still running on this workflow... exiting" ) sys.exit(43) utils.write_pid_file(pid_filename) atexit.register(delete_pid_file) # Parse notification-related properties if int(props.property("pegasus.monitord.notifications.timeout") or -1) >= 0: notifications_timeout = int( props.property("pegasus.monitord.notifications.timeout") ) if int(props.property("pegasus.monitord.notifications.max") or -1) >= 0: max_parallel_notifications = int( props.property("pegasus.monitord.notifications.max") ) if max_parallel_notifications == 0: logger.warning( "maximum parallel notifications set to 0, disabling notifications..." 
) do_notifications = False if not utils.make_boolean(props.property("pegasus.monitord.notifications") or "true"): do_notifications = False # Parse stdout/stderr disable parsing property if utils.make_boolean( props.property("pegasus.monitord.stdout.disable.parsing") or "false" ): store_stdout_stderr = False if options.adjustment is not None: adjustment = options.adjustment condor_daemon = options.condor_daemon if options.jsd is not None: jsd = options.jsd if options.millisleep is not None: millisleep = options.millisleep if options.replay_mode is not None: replay_mode = options.replay_mode # Replay mode always runs in foreground condor_daemon = False # No notifications in replay mode do_notifications = False if options.no_notify is not None: do_notifications = False if options.notifications_max is not None: max_parallel_notifications = options.notifications_max if max_parallel_notifications == 0: do_notifications = False if max_parallel_notifications < 0: logger.critical("notifications-max must be integer >= 0") sys.exit(1) if options.notifications_timeout is not None: notifications_timeout = options.notifications_timeout if notifications_timeout < 0: logger.critical("notifications-timeout must be integer >= 0") sys.exit(1) if notifications_timeout > 0 and notifications_timeout < 5: logger.warning( "notifications-timeout set too low... notification scripts may not have enough time to complete... continuing anyway..." ) if options.disable_subworkflows is not None: follow_subworkflows = False if options.db_stats is not None: db_stats = options.db_stats if options.keep_state is not None: keep_state = options.keep_state if options.skip_stdout is not None: store_stdout_stderr = False if options.output_dir is not None: output_dir = options.output_dir try: if not os.path.exists(output_dir): os.makedirs(output_dir) except OSError: logger.critical("cannot create directory %s. exiting..." 
# NOTE(review): chunk boundary — the fragment below completed a
# logger.critical("cannot create directory %s. exiting..." % (output_dir))
# call followed by sys.exit(1); its head lies outside this chunk, so it is
# preserved here as a comment rather than guessed at:
#     % (output_dir))
#     sys.exit(1)

if options.event_dest is None:
    if options.no_events is not None:
        # Turn off event generation.
        no_events = True
    elif props.property("pegasus.monitord.events") is not None:
        # Set event generation according to properties (default is True).
        no_events = not utils.make_boolean(
            props.property("pegasus.monitord.events")
        )
    else:
        # Default is to generate events.
        no_events = False
    event_dest = connection.url_by_properties(
        options.config_properties,
        connection.DBType.WORKFLOW,
        run,
        rundir_properties=top_level_prop_file,
    )
else:
    # Use the command-line option.
    event_dest = options.event_dest

# Fast-start mode: command-line default may be overridden by properties.
if options.fast_start is False:
    fast_start_mode = False
fast_start_property = props.property("pegasus.monitord.fast_start")
if fast_start_property is not None:
    fast_start_mode = utils.make_boolean(fast_start_property)

dashboard_event_dest = connection.url_by_properties(
    options.config_properties,
    connection.DBType.MASTER,
    rundir_properties=top_level_prop_file,
)

# Encoding precedence: command line, then property, then DEFAULT_ENCODING.
if options.enc is not None:
    encoding = options.enc
elif props.property("pegasus.monitord.encoding") is not None:
    encoding = props.property("pegasus.monitord.encoding")
if encoding is None:
    encoding = DEFAULT_ENCODING

# An absolute jsd path applies to every workflow, so recursive
# (sub-workflow) tracking must be disabled.
if jsd is not None and os.path.isabs(jsd):
    follow_subworkflows = False
    logger.warning(
        "jsd file is an absolute filename, disabling sub-workflow tracking"
    )

#
# --- functions ---------------------------------------------------------------
#

def add(wf, jobid, event, sched_id=None, status=None, reason=None):
    """Process a job state-change event for workflow `wf`.

    Creates a new job record when needed and calls wf.update_job_state(),
    which emits output both to jobstate.log and to the configured event
    backend.

    Args:
        wf: the workflow object for this operation
        jobid: the job's name
        event: the state associated with this event (SUBMIT, EXECUTE, ...)
        sched_id: the scheduler's id for this particular job instance
        status: the job's exit code
        reason: textual reason for the state change
    Returns:
        The job_submit_seq for jobid, or None if it cannot be determined.
    """
    my_site = None
    my_time = None
    my_job_submit_seq = None

    # Remove existing site/walltime info during replanning.
    if event in unsubmitted_events:
        if jobid in wf._job_site:
            del wf._job_site[jobid]
        if jobid in wf._walltime:
            del wf._walltime[jobid]

    # Values previously captured from the submit file, if any.
    if jobid in wf._job_site:
        my_site = wf._job_site[jobid]
    if jobid in wf._walltime:
        my_time = wf._walltime[jobid]

    if event == "PRE_SCRIPT_STARTED":
        # A PRE_SCRIPT_START event always means a new job.
        my_job_submit_seq = wf.add_job(jobid, event)
        # PM-1390: parse the condor submit file when the prescript starts, as
        # the composite event must still be sent if the prescript fails.
        my_time, my_site = wf.parse_job_sub_file(jobid, my_job_submit_seq)
        if my_site == "!!SITE!!":
            my_site = None
        if my_site is not None:
            logger.debug("job {} is planned for site {}".format(jobid, my_site))
            wf._job_site[jobid] = my_site
        else:
            logger.debug("job %s does not have a site information!" % (jobid))

    if event == "DAGMAN_SUBMIT":
        # DAGMAN_SUBMIT requires a new job unless PRE_SCRIPT_STARTED already
        # created one (add_job figures that out).  It corresponds to
        # dagman.out lines matching "Submitting Condor Node *...".
        wf._last_submitted_job = jobid
        my_job_submit_seq = wf.add_job(jobid, event)
        # PM-1068: parse the submit file on DAGMAN_SUBMIT instead of on
        # SUBMIT / SUBMIT_FAILED.
        my_time, my_site = wf.parse_job_sub_file(jobid, my_job_submit_seq)
        if my_site == "!!SITE!!":
            my_site = None
        if my_time is not None:
            # Requested walltime is in minutes; convert to seconds.
            my_time = my_time * 60
            logger.debug("job %s requests %d s walltime" % (jobid, my_time))
            wf._walltime[jobid] = my_time
        else:
            logger.debug("job %s does not request a walltime" % (jobid))
        if my_site is not None:
            logger.debug("job {} is planned for site {}".format(jobid, my_site))
            wf._job_site[jobid] = my_site
        else:
            logger.debug("job %s does not have a site information!" % (jobid))
        # Nothing else to do for DAGMAN_SUBMIT.
        return my_job_submit_seq

    if event == "SUBMIT" or event == "SUBMIT_FAILED":
        # SUBMIT brings sched-id and job-type information (and may itself be
        # a new job when there is no PRE_SCRIPT); add_job updates sched_id in
        # both cases.
        my_job_submit_seq = wf.add_job(jobid, event, sched_id=sched_id)

    # Look up job_submit_seq if we still don't have it.
    if my_job_submit_seq is None:
        my_job_submit_seq = wf.find_jobid(jobid)
        if my_job_submit_seq is None:
            logger.warning("cannot find job_submit_seq for job: %s" % (jobid))
            return None

    # Propagate the updated state.
    wf.update_job_state(
        jobid, sched_id, my_job_submit_seq, event, status, my_time, reason
    )
    return my_job_submit_seq

# NOTE(review): `def process_dagman_out(wf, log_line)` is truncated by the
# chunk boundary; only its signature and part of its docstring are visible
# here, so it cannot be reconstructed.
with negative, (n)empty, not empty", "expected_options": ["empty,not empty"], "expected_negatives": ["empty"], "expected_lca_count": 1, }, ) @ddt.unpack def test_negative_lca_create(self, lca_to_import, expected_options, expected_negatives, expected_lca_count): """Test LCA with negative options is created when create tmpl via import. Test that local Custom Attribute with negative options is created when creating new Assessment Template via import. """ audit = ggrc_factories.AuditFactory() audit_id = audit.id asmt_tmpl_data = collections.OrderedDict([ ("object_type", "Assessment Template"), ("Code*", ""), ("Audit*", audit.slug), ("Title*", "AssessmentTemplate Title"), ("Default Assessment Type*", "Control"), ("Default Assignees*", "Auditors"), ("Custom Attributes", lca_to_import), ]) self._login() response = self.import_data(asmt_tmpl_data) self._check_csv_response(response, {}) tmpl = self._get_query_by_audit_for( all_models.AssessmentTemplate, audit_id).one() self._assert_negative_options( cads=self._get_asmt_tmpl_lcas(tmpl), expected_cad_count=expected_lca_count, expected_options=expected_options, expected_negatives=expected_negatives, ) @ddt.ddt class TestExportWithSOX302(query_helper.WithQueryApi, BaseTestWithSOX302): """Test export of of `WithSOX302Flow` objects.""" def _assert_sox_302_enabled_flag_expored(self, exported_data, obj_name, expected_value): # type: (dict, str, str) -> None # pylint: disable=invalid-name """Assert that `sox_302_enabled` has expected value in exported data. For this assertion to pass, following conditions should be met: - Given object name should be present in exported data; - There should be only one object of provided type in exported data. - Value of "SOX 302 assessment workflow" in exported data should match with passed `expected_value`. Args: exported_data (dict): Dict representing exported object. Keys are field names as they should be named in resulting .CSV file. obj_name (str): Capitalized human readable object name. 
expected_value (str): Expected value of "SOX 302 assessment workflow" column in exported data. """ self.assertIn(obj_name, exported_data) self.assertEqual(1, len(exported_data[obj_name])) exported_obj_data = exported_data[obj_name][0] self.assertEqual( exported_obj_data["SOX 302 assessment workflow"], expected_value, ) @ddt.data( {"obj_value": True, "exp_value": "yes"}, {"obj_value": False, "exp_value": "no"}, ) @ddt.unpack def test_sox_302_tmpl_export(self, obj_value, exp_value): """Test `SOX 302 assessment workflow` is exported correctly for tmpl.""" tmpl = ggrc_factories.AssessmentTemplateFactory(sox_302_enabled=obj_value) tmpl_id = tmpl.id self._login() exported_data = self.export_parsed_csv([ self._make_query_dict( "AssessmentTemplate", expression=["id", "=", tmpl_id], ) ]) self._assert_sox_302_enabled_flag_expored( exported_data, "Assessment Template", exp_value, ) @ddt.data( {"obj_value": True, "exp_value": "yes"}, {"obj_value": False, "exp_value": "no"}, ) @ddt.unpack def test_sox_302_asmt_export(self, obj_value, exp_value): """Test `SOX 302 assessment workflow` is exported correctly for asmt.""" asmt = ggrc_factories.AssessmentFactory(sox_302_enabled=obj_value) asmt_id = asmt.id self._login() exported_data = self.export_parsed_csv([ self._make_query_dict( "Assessment", expression=["id", "=", asmt_id], ) ]) self._assert_sox_302_enabled_flag_expored( exported_data, "Assessment", exp_value, ) @ddt.ddt class TestApiWithSOX302(BaseTestWithSOX302): """Test REST API functonality of `WithSOX302Flow` objects.""" def setUp(self): """Set up for SOX 302 REST API test case.""" super(TestApiWithSOX302, self).setUp() self.api = api_helper.Api() @ddt.data( {"sent_value": True, "exp_value": True}, {"sent_value": False, "exp_value": False}, ) @ddt.unpack def test_sox_302_tmpl_create(self, sent_value, exp_value): """Test SOX302 enabled={exp_value} when create asmt tmpl via API.""" audit = ggrc_factories.AuditFactory() audit_id = audit.id response = self.api.post( 
all_models.AssessmentTemplate, { "assessment_template": { "audit": {"id": audit.id}, "context": {"id": audit.context.id}, "default_people": { "assignees": "Admin", "verifiers": "Admin", }, "title": "AssessmentTemplate Title", "sox_302_enabled": sent_value, }, }, ) self.assert201(response) tmpl = self._get_query_by_audit_for( all_models.AssessmentTemplate, audit_id).one() self._assert_sox_302_enabled_flag(tmpl, exp_value) @ddt.data( {"init_value": True, "sent_value": True, "exp_value": True}, {"init_value": True, "sent_value": False, "exp_value": False}, {"init_value": False, "sent_value": True, "exp_value": True}, {"init_value": False, "sent_value": False, "exp_value": False}, ) @ddt.unpack def test_sox_302_tmpl_update(self, init_value, sent_value, exp_value): """Test SOX302 enabled={exp_value} when update asmt tmpl via API.""" tmpl = ggrc_factories.AssessmentTemplateFactory( sox_302_enabled=init_value) tmpl_id = tmpl.id response = self.api.put( tmpl, { "sox_302_enabled": sent_value, }, ) self.assert200(response) tmpl = self._refresh_object(tmpl.__class__, tmpl_id) self._assert_sox_302_enabled_flag(tmpl, exp_value) @ddt.data( {"orig_value": True, "exp_value": True}, {"orig_value": False, "exp_value": False}, ) @ddt.unpack def test_sox_302_tmpl_clone(self, orig_value, exp_value): """Test AssessmentTemplate SOX 302 enabled={0} when clone via API.""" tmpl = ggrc_factories.AssessmentTemplateFactory( sox_302_enabled=orig_value) audit_id = tmpl.audit.id tmpl_id = tmpl.id response = self.api.send_request( self.api.client.post, api_link="/api/assessment_template/clone", data=[{ "sourceObjectIds": [tmpl.id], "destination": { "type": "Audit", "id": tmpl.audit.id, }, "mappedObjects": [] }], ) self.assert200(response) tmpl_q = self._get_query_by_audit_for( all_models.AssessmentTemplate, audit_id) tmpl_clone = tmpl_q.filter( all_models.AssessmentTemplate.id != tmpl_id).one() self._assert_sox_302_enabled_flag(tmpl_clone, exp_value) @ddt.data( {"sent_value": True, "exp_value": 
False}, {"sent_value": False, "exp_value": False}, ) @ddt.unpack def test_sox_302_immut_asmt_create(self, sent_value, exp_value): """Test SOX 302 enabled is immutable when create asmt via API. Test `sox_302_enabled` on Assessment could not be set via API if there isn't any AssessmentTemplate provided in request data. SOX 302 enabled flag is read only on Assessment and could be set only from template. """ audit = ggrc_factories.AuditFactory() audit_id = audit.id response = self.api.post( all_models.Assessment, { "assessment": { "audit": {"id": audit.id}, "title": "Assessment Title", "sox_302_enabled": sent_value, }, }, ) self.assert201(response) asmt = self._get_query_by_audit_for(all_models.Assessment, audit_id).one() self._assert_sox_302_enabled_flag(asmt, exp_value) @ddt.data( {"tmpl_value": True, "sent_value": True, "exp_value": True}, {"tmpl_value": False, "sent_value": True, "exp_value": False}, {"tmpl_value": True, "sent_value": False, "exp_value": True}, {"tmpl_value": False, "sent_value": False, "exp_value": False}, ) @ddt.unpack def test_sox_302_asmt_with_tmpl_create(self, tmpl_value, sent_value, exp_value): # pylint: disable=invalid-name """Test SOX 302 enabled is mutable when create asmt with tmpl via API. Test `sox_302_enabled` on Assessment could be set via API if there is an AssessmentTemplate provided in request data. SOX 302 enabled flag is read only on Assessment and could be set only from template. 
""" with ggrc_factories.single_commit(): audit = ggrc_factories.AuditFactory() asmt_tmpl = ggrc_factories.AssessmentTemplateFactory( audit=audit, sox_302_enabled=tmpl_value, ) audit_id = audit.id response = self.api.post( all_models.Assessment, { "assessment": { "audit": {"id": audit.id}, "template": {"id": asmt_tmpl.id}, "title": "Assessment Title", "sox_302_enabled": sent_value, }, }, ) self.assert201(response) asmt = self._get_query_by_audit_for(all_models.Assessment, audit_id).one() self._assert_sox_302_enabled_flag(asmt, exp_value) @ddt.data( {"init_value": True, "sent_value": True, "exp_value": True}, {"init_value": True, "sent_value": False, "exp_value": True}, {"init_value": False, "sent_value": True, "exp_value": False}, {"init_value": False, "sent_value": False, "exp_value": False}, ) @ddt.unpack def test_sox_302_immut_asmt_upd(self, init_value, sent_value, exp_value): """Test SOX 302 enabled is immutable when update asmt via API. Test `sox_302_enabled` on Assessment could not be updated via API. SOX 302 enabled flag is read only on Assessment and could be set only during creation with AssessmentTemplate. """ asmt = ggrc_factories.AssessmentFactory(sox_302_enabled=init_value) asmt_id = asmt.id response = self.api.put( asmt, { "sox_302_enabled": sent_value, }, ) self.assert200(response) asmt = self._refresh_object(asmt.__class__, asmt_id) self._assert_sox_302_enabled_flag(asmt, exp_value) @ddt.ddt class TestQueriesWithSOX302(query_helper.WithQueryApi, BaseTestWithSOX302): """Test query API filtering for `WithSOX302Flow` objects.""" def setUp(self): super(TestQueriesWithSOX302, self).setUp() self.api = api_helper.Api() def _assert_right_obj_found(self, query_result, expected_obj_type, expected_obj_id): # type: (dict, str, int) -> None """Assert that only expected object was found by query API. 
For this assertion to pass, following conditions should be met: - Given query result should contain only one object; - ID of object in the query result should match with the given expected object ID. Args: query_result (dict): Dict representing result of query API request. expected_obj_type (str): Expected type of found object. expected_obj_id (int): Expected ID of found object. """ response_asmt_tmpl = query_result[0][expected_obj_type] self.assertEqual(1, response_asmt_tmpl["count"]) self.assertEqual( response_asmt_tmpl["values"][0]["id"], expected_obj_id, ) @ddt.data( {"obj_value": True, "filter_by_value": "yes"}, {"obj_value": False, "filter_by_value": "no"}, ) @ddt.unpack def test_sox_302_enabled_filter_tmpl(self, obj_value, filter_by_value): # pylint: disable=invalid-name """Test tmpl could be filtered by sox_302_enabled field.""" with ggrc_factories.single_commit(): tmpl = ggrc_factories.AssessmentTemplateFactory( sox_302_enabled=obj_value) ggrc_factories.AssessmentTemplateFactory(sox_302_enabled=(not obj_value)) searched_tmpl_id = tmpl.id query_request_data = [ self._make_query_dict( "AssessmentTemplate", expression=["SOX 302 assessment workflow", "=", filter_by_value], ), ] response = self.api.send_request( self.api.client.post, data=query_request_data, api_link="/query" ) self.assert200(response) self._assert_right_obj_found( response.json, "AssessmentTemplate", searched_tmpl_id, ) @ddt.data( {"obj_value": True, "filter_by_value": "yes"}, {"obj_value": False, "filter_by_value": "no"}, ) @ddt.unpack def test_sox_302_enabled_filter(self, obj_value, filter_by_value): """Test asmt could be filtered by sox_302_enabled field.""" with ggrc_factories.single_commit(): asmt = ggrc_factories.AssessmentFactory(sox_302_enabled=obj_value) ggrc_factories.AssessmentFactory(sox_302_enabled=(not obj_value)) searched_amst_id = asmt.id query_request_data = [ self._make_query_dict( "Assessment", expression=["SOX 302 assessment workflow", "=", filter_by_value], ), ] response = 
self.api.send_request( self.api.client.post, data=query_request_data, api_link="/query" ) self.assert200(response) self._assert_right_obj_found( response.json, "Assessment", searched_amst_id, ) @ddt.ddt class TestStatusFlowWithSOX302(BaseTestWithSOX302): """Test status flow for `WithSOX302Flow` objects.""" def setUp(self): """Set up for SOX 302 status flow test case.""" super(TestStatusFlowWithSOX302, self).setUp() self.api = api_helper.Api() def _assert_status_field(self, obj, expected_value): # type: (db.Model, bool) -> None """Assert that `status` field has expected value on object. For this assertion to pass, following conditions should be met: - Given object should be derived from `Statusable` mixin; - Value of `status` field on object should match `expected_value`. Args: obj (db.Model): Instance of db.Model class on which value of `status` flag should be checked. expected_value (bool): Expected value of `status` field on the given object. """ self.assertTrue(isinstance(obj, statusable.Statusable)) self.assertIsNotNone(obj) self.assertEqual( obj.status, expected_value, ) @staticmethod def _setup_local_custom_attributes(obj, cad_cav_pairs): # type: (db.Model, list) -> None """Setup custom attribute definitions and values for the given object. Create custom attribute definitions and and values for them from the given `cad_cav_pairs` list of string representations and attach them to `obj`. Args: obj (db.Model): Object for which custom attributes should be created. cad_cav_pairs (list): List containing string representation of custom attributes and their values in form: [("<Type>; <Title>; <Option1,...>; <Negative1,...>", <Value>),...] """ flag_enum = all_models.CustomAttributeDefinition.MultiChoiceMandatoryFlags for cad, cav in cad_cav_pairs: cad_type, cad_title, cad_options,
# -*- coding: utf-8 -*-
"""Robot Vacuum Cleaner simulation: module-level geometry/grid helpers."""

from collections import deque
import random
import os

if os.sys.version_info.major > 2:
    xrange = range  # keep the py2-style name alive for the rest of the module
    import tkinter as tk
else:
    import Tkinter as tk


#### METHODS ####

def scale_vector(vector, velocity):
    """Scale *vector* to (integer-truncated) magnitude *velocity*.

    Normalizes the 2-D vector to unit length, multiplies each component by
    *velocity*, and truncates to int.  Returns (None, None) for the zero
    vector, where no direction is defined.
    """
    norm = (vector[0] ** 2 + vector[1] ** 2) ** 0.5
    if norm == 0:
        return None, None
    x = float(vector[0]) / norm
    y = float(vector[1]) / norm
    return int(x * velocity), int(y * velocity)


def get_random_velocity(velocity):
    """Return a uniformly random direction scaled to magnitude *velocity*.

    Retries until scale_vector yields a defined direction (the zero vector
    has probability ~0, so this loop effectively runs once).
    """
    vx = vy = None
    while vx is None and vy is None:
        direction = (random.random() * random.choice((-1, 1)),
                     random.random() * random.choice((-1, 1)))
        vx, vy = scale_vector(direction, velocity)
    return vx, vy


def make_grid(furniture, dimension):
    """Map pixel-space furniture rectangles onto a logical tile grid.

    Scale actual (x, y) positions down to a grid (dictionary) with keys
    (Nx, Ny) where Nx and Ny range from 1 to dimension[0] and 1 to
    dimension[1] respectively.  Each key maps to a boolean: True when that
    tile is occupied by furniture, False otherwise.

    furniture: list of pixel rectangles, each element (x, y, x+dx, y+dy);
        must be non-empty (tile size is read from the first rectangle).
    dimension: tuple of tile counts (x, y).
    returns: dict such as {(1, 1): False, (2, 1): True, ...}
    """
    # dx, dy: width and height of one tile, taken from the first rectangle.
    dx = furniture[0][2] - furniture[0][0]
    dy = furniture[0][3] - furniture[0][1]
    w, h = dx * dimension[0], dy * dimension[1]

    grid = {}
    for gy in range(1, dimension[1] + 1):
        for gx in range(1, dimension[0] + 1):
            grid[(gx, gy)] = False

    # Sample the centre of every tile and mark the tile occupied when the
    # centre falls inside any furniture rectangle (bounds are inclusive).
    y_grid = 0
    for y in range(dy // 2, h, dy):
        y_grid += 1
        x_grid = 0
        for x in range(dx // 2, w, dx):
            x_grid += 1
            for rect in furniture:
                if rect[0] <= x <= rect[2] and rect[1] <= y <= rect[3]:
                    grid[(x_grid, y_grid)] = True
                    break
    return grid


def get_neighbors(position):
    """Generator.

    Yield the four axis-aligned neighbours of *position*:
    right, left, above, below (in that order).
    """
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        yield position[0] + dx, position[1] + dy


def find_accessable_tiles(grid, position):
    """Breadth-first search for every unoccupied tile reachable from *position*.

    Assumes *position* itself is a valid, unoccupied tile.

    grid: dict mapping tile coordinates to occupied-booleans
        (as produced by make_grid).
    position: (x, y) starting tile.
    returns: set of all reachable tile coordinates (including *position*).
    """
    accessable = {position}
    tile_queue = deque((position,))
    while tile_queue:
        current = tile_queue.popleft()
        for n in get_neighbors(current):
            if n in grid and n not in accessable and not grid[n]:
                accessable.add(n)
                tile_queue.append(n)
    return accessable


def is_furniture_valid(furniture, dimension):
    """Check that every furniture-free tile can be reached from tile (1, 1).

    furniture: list of pixel rectangles (x, y, x+dx, y+dy).
    dimension: tuple of tile counts (x, y).
    returns: True when the layout leaves no unreachable free tile.
    """
    if not furniture:  # rooms with no furniture are always valid
        return True
    grid = make_grid(furniture, dimension)
    # Start position is (1, 1).
    accessable_tiles = find_accessable_tiles(grid, (1, 1))
    # A free tile that is not accessible makes the layout invalid.
    for element in grid:
        if not grid[element] and element not in accessable_tiles:
            return False
    return True
#### OBJECT DEFINITIONS ####

class Rumba(object):
    """The on-screen Rumba robot: a red square with a black outline.

    canvas: tk.Canvas object (or any object with the same drawing API).
    position: (x, y) centre of the robot, in pixels.
    width: half-width of the square, in pixels.
    """

    def __init__(self, canvas, position, width):
        self.can = canvas
        self.width = width
        self.Draw(position)

    def _corners(self, position):
        # Corner coordinates (x1, y1, ..., x4, y4) of the square centred at
        # *position*, in the drawing order used by Draw/update_position.
        x, y = position
        w = self.width
        return (x + w, y + w, x + w, y - w, x - w, y - w, x - w, y + w)

    def Draw(self, position):
        """Create the body polygon and its four outline segments."""
        x1, y1, x2, y2, x3, y3, x4, y4 = self._corners(position)
        self.vacuum = self.can.create_polygon(x1, y1, x2, y2, x3, y3, x4, y4,
                                              fill="red")
        self.line1 = self.can.create_line(x1, y1, x2, y2, fill="black")
        self.line2 = self.can.create_line(x2, y2, x3, y3, fill="black")
        self.line3 = self.can.create_line(x3, y3, x4, y4, fill="black")
        self.line4 = self.can.create_line(x1, y1, x4, y4, fill="black")

    def update_position(self, new_position):
        """Move the polygon and outline segments to *new_position*."""
        x1, y1, x2, y2, x3, y3, x4, y4 = self._corners(new_position)
        self.can.coords(self.vacuum, x1, y1, x2, y2, x3, y3, x4, y4)
        self.can.coords(self.line1, x1, y1, x2, y2)
        self.can.coords(self.line2, x2, y2, x3, y3)
        self.can.coords(self.line3, x3, y3, x4, y4)
        self.can.coords(self.line4, x1, y1, x4, y4)
dx, dy = self.w//self.dimension[0], self.h//self.dimension[1] #adjust screen size for discrepincies in forcing int divition. self.w, self.h = self.dimension[0]*dx, self.dimension[1]*dy self.can.config(width=self.w, height=self.h) valid = False while not valid: tiles, furniture_tiles = [], [] for y in xrange(0, self.h, dy): for x in xrange(0, self.w, dx): #(0, 0) is always a non-furniture tile. if not furniture or random.random() <= 0.8 or (x, y) == (0, 0): tiles.append((x, y, x+dx, y+dy)) else: furniture_tiles.append((x, y, x+dx, y+dy)) valid = is_furniture_valid(furniture_tiles, self.dimension) self.draw_tiles(tiles, furniture_tiles) def draw_tiles(self, tiles, furniture_tiles): """ Draws a configuration of furniture and tiles. tiles: list of position tuples, (x, y, x+dx, y+dy). furniture_tiles: same as tiles but only for furniture. """ self.furniture = furniture_tiles for element in self.furniture: x, y = element[0], element[1] dx, dy = element[2] - x, element[3] - y self.can.create_rectangle(x, y, x+dx, y+dy, fill="green") self.tiles = {} for element in tiles: x, y = element[0], element[1] dx, dy = element[2] - x, element[3] - y self.tiles[element] = [4, self.can.create_rectangle(x, y, x+dx, y+dy, fill="black")] def get_tile(self, position): x, y = position for element in self.tiles: if (x >= element[0] and x <= element[2] and y >= element[1] and y <= element[3]): return element def clean_tile(self, position): """ Takes 4 times to clean a tile. Usually, vacuum will clean 2 at a time though. *** On some screens, 'dark grey' is lighter than 'grey'. 
*** """ tile = self.get_tile(position) self.tiles[tile][0] -= 1 if self.tiles[tile][0] == 0: self.can.itemconfig(self.tiles[tile][1], fill="white") elif self.tiles[tile][0] == 1: self.can.itemconfig(self.tiles[tile][1], fill="light grey") elif self.tiles[tile][0] == 2: self.can.itemconfig(self.tiles[tile][1], fill="grey") elif self.tiles[tile][0] == 3: self.can.itemconfig(self.tiles[tile][1], fill="dark grey") def is_grid_cleaned(self): for element in self.tiles.values(): if element[0] > 0: return False return True def get_dimension(self): return self.dimension def get_grid_size(self): return (self.w, self.h) def get_furniture(self): return self.furniture class Robot(object): """ Completes the numerical simulation. grid: a Grid object. canvas: a tk.Canvas object. v: int speed of robot. """ def __init__(self, grid, canvas, v): self.grid = grid self.w, self.h = self.grid.get_grid_size() self.furniture = self.grid.get_furniture() self.v = v self.set_random_velocity() average_size = sum(self.grid.get_grid_size())/2 average_dimension = sum(self.grid.get_dimension())/2 self.robot_width = int((average_size/average_dimension)*0.3) #initial position self.x, self.y = self.robot_width, self.robot_width self.rumba = Rumba(canvas, (self.x, self.y), self.robot_width) def is_valid_position(self, position): x, y = position if x + self.robot_width >= self.w or x - self.robot_width <= 0: return False elif y + self.robot_width >= self.h or y - self.robot_width <= 0: return False for element in self.furniture: #element is of the form (x, y, x+dx, y+dy) if x >= element[0] and x <= element[2]: if y >= element[1] and y <=
self.halfedges f = H[:,1] vi = H[:,0] vj = H[H[:,2],0] if order: i = self.face_ordered_halfedges() f = f[i] vi = vi[i] vj = vj[i] else: i = np.where(H[:,1] >= 0)[0] f = f[i] vi = vi[i] vj = vj[i] if sort: i = np.argsort(f) vi = vi[i] vj = vj[i] return f, vi, vj def face_vertices_iterators(self): H = self.halfedges i = self.face_ordered_halfedges() vi = H[i,0] f = H[i,1] return f, vi def face_edges_iterators(self): H = self.halfedges i = self.face_ordered_halfedges() ei = H[i,5] f = H[i,1] return f, ei def edge_vertices_iterators(self, sort=False): H = self.halfedges e = H[:,5] vi = H[:,0] if sort: i = np.argsort(H[:,5]) e = e[i] vi = vi[i] return e, vi def vertex_double_ring_vertices_iterators(self): #import time #t0 = time.time() v, vj = self.vertex_ring_vertices_iterators(sort=True) M = coo_matrix((vj, (v, vj)), shape=(self.V, self.V)) M = M.todense() ring = np.copy(M) while v.shape[0] > 0: vi, j = np.unique(v, True) ring[vi] += M[vj[j]] v = np.delete(v, j) vj = np.delete(vj, j) #t4 = time.time() #print(t4-t0) return ring.nonzero() # ------------------------------------------------------------------------- # Ring lists # ------------------------------------------------------------------------- def vertex_ring_vertices_list(self): ring_list = [[] for i in range(self.V)] v, vj = self.vertex_ring_vertices_iterators(order=True) for i in range(len(v)): ring_list[v[i]].append(vj[i]) return ring_list def vertex_double_ring_vertices_list(self): ring_list = [[] for i in range(self.V)] v, vj = self.vertex_double_ring_vertices_iterators() for i in range(len(v)): ring_list[v[i]].append(vj[i]) return ring_list def vertex_ring_edges_list(self): ring_list = [[] for i in range(self.V)] v, ej = self.vertex_ring_edges_iterators(order=True) for i in range(len(v)): ring_list[v[i]].append(ej[i]) return ring_list def vertex_ring_faces_list(self): ring_list = [[] for i in range(self.V)] v, fj = self.vertex_ring_faces_iterators(order=True) for i in range(len(v)): 
ring_list[v[i]].append(fj[i]) return ring_list #-------------------------------------------------------------------------- # Faces #-------------------------------------------------------------------------- def face_lengths(self): H = self.halfedges f = H[H[:,1] >= 0,1] f = f[np.argsort(f)] i = np.ones((f.shape), 'i') lengths = utilities.sum_repeated(i, f) return lengths def cell_arrays(self): H = self.halfedges i = self.face_ordered_halfedges() vi = H[i,0] f = H[i,1] i = np.ones((f.shape[0]), 'i') j = np.arange(f.shape[0]) _, k = np.unique(f, True) lengths = utilities.sum_repeated(i, f) index = j[k] cells = np.insert(vi, index, lengths) cell_types = lengths - 3 cell_types[np.where(cell_types[:] > 2)[0]] = 2 return cells, cell_types def faces_list(self): faces_list = [[] for i in range(self.F)] fi, vj = self.face_vertices_iterators() for i in range(len(fi)): faces_list[fi[i]].append(vj[i]) return faces_list def face_triangles(self): H = np.copy(self.halfedges) h = np.argsort(H[:,1]) h = h[np.where(H[h,1] >= 0)] f = H[h,1] f_i, j = np.unique(f, True) one = np.arange(j.shape[0]) f = np.delete(f, j) f = np.delete(f, j-one) f = np.delete(f, j-2*one) T = np.column_stack((H[j,0], H[H[j,2],0], H[H[H[j,2],2],0])) nex = H[H[H[j,2],2],2] face_index = f_i offset = 0 while len(f) > 0: f_i, j = np.unique(f, True) T_i = np.column_stack((T[offset+f_i,-1], H[nex[f_i],0], T[f_i,0])) f = np.delete(f, j) nex = H[nex,2] T = np.vstack((T, T_i)) face_index = np.hstack((face_index, f_i)) offset += len(f_i) T = np.array(T,dtype=int) # Hui add return T, face_index # ------------------------------------------------------------------------- # Edges # ------------------------------------------------------------------------- def edge_vertices(self): H = self.halfedges v = H[np.argsort(H[:,5]),0] v1 = v[0::2] v2 = v[1::2] return v1, v2 def edge_faces(self): H = self.halfedges f = H[np.argsort(H[:,5]),1] f1 = f[0::2] f2 = f[1::2] return f1, f2 def vertices_edge_map(self): H = self.halfedges v1 = 
H[:,0] v2 = H[H[:,4],0] e = H[:,5] edge_map = coo_matrix((e, (v1,v2)), shape=(self.V, self.V)) edge_map = edge_map.tocsc() return edge_map def vertices_edge_faces_maps(self): H = self.halfedges v1 = H[:,0] v2 = H[H[:,4],0] f1 = H[:,1] f2 = H[H[:,4],1] f1Map = coo_matrix((f1, (v1,v2)), shape=(self.V, self.V)) f2Map = coo_matrix((f2, (v1,v2)), shape=(self.V, self.V)) f1Map = f1Map.tocsc() f2Map = f2Map.tocsc() return f1Map, f2Map def edge_halfedges(self): H = self.halfedges e = np.argsort(H[:,5]) h1 = e[0::2] h2 = e[1::2] return h1, h2 # ------------------------------------------------------------------------- # Boundary # ------------------------------------------------------------------------- def boundary_vertices(self): H = self.halfedges b = np.where(H[:,1] == -1)[0] v = H[b,0] return v def inner_halfedges(self): H = self.halfedges h = np.where(H[:,1] != -1)[0] return h def boundary_halfedges(self): H = self.halfedges b = np.where(H[:,1] == -1)[0] return b def boundary_faces(self): H = self.halfedges b = self.boundary_halfedges() f = H[H[b,4],1] return f # Hui : change f from e def inner_vertices(self): b = self.boundary_vertices() v = np.arange(self.V) mask = np.invert(np.in1d(v, b)) v = v[mask] return v def boundary_curves(self, corner_split=False): H = self.halfedges boundaries = [] boundary_halfedges = [] for h in range(H.shape[0]): if H[h,1] == -1 and h not in boundary_halfedges: boundary = [] h_he = h boundary_halfedges.append(h_he) boundary.append(H[h_he,0]) h_he = H[h_he,2] while h_he != h: boundary_halfedges.append(h_he) boundary.append(H[h_he,0]) h_he = H[h_he,2] boundaries.append(np.array(boundary)) if corner_split: corner_boundaries = [] corners = self.mesh_corners() for boundary in boundaries: indices = np.arange(len(boundary)) c = indices[np.in1d(boundary, corners)] boundary = np.split(boundary, c) for i in range(len(boundary) - 1): a = boundary[i] boundary[i] = np.insert(a, a.shape ,boundary[i+1][0]) if len(boundary) > 1: boundary[0] = 
np.hstack((boundary[-1], boundary[0])) del boundary[-1] corner_boundaries.extend(boundary) boundaries = corner_boundaries return boundaries def boundary_curves_halfedges(self, corner_split=False): H = self.halfedges boundaries = [] visited = [] for h in range(H.shape[0]): if H[h,1] == -1 and h not in visited: boundary = [] h_he = h boundary.append(h_he) h_he = H[h_he,2] while h_he != h: boundary.append(h_he) h_he = H[h_he,2] boundaries.append(np.array(boundary)) visited.extend(boundary) if corner_split: corner_boundaries = [] corners = self.mesh_corners() for boundary in boundaries: indices = np.arange(len(boundary)) c = indices[np.in1d(H[boundary,0], corners)] boundary = np.split(boundary, c) if len(boundary) > 1: boundary[0] = np.hstack((boundary[-1], boundary[0])) del boundary[-1] corner_boundaries.extend(boundary) boundaries = corner_boundaries return boundaries def boundary_polylines(self): polylines = [] curves = self.boundary_curves(corner_split=False) for curve in curves: polyline = Polyline(self.vertices[curve,:], closed=True) polyline.corner_tolerance = self.corner_tolerance polylines.append(polyline) return polylines def mesh_corners(self): H = self.halfedges b = np.where(H[:,1] == -1)[0] v0 = H[b,0] vp = H[H[b,3],0] vn = H[H[b,2],0] Vp = self.vertices[v0,:] - self.vertices[vp,:] Vn = self.vertices[vn,:] - self.vertices[v0,:] Vp = Vp / np.linalg.norm(Vp, axis=1, keepdims=True) Vn = Vn / np.linalg.norm(Vn, axis=1, keepdims=True) C = np.einsum('ij,ij->i', Vp, Vn) corners = v0[np.where(C[:] < self.corner_tolerance)[0]] return corners def double_boundary_vertices(self): bf = self.are_boundary_faces() v, fj = self.vertex_ring_faces_iterators(sort=True) bf = bf[fj] bf = utilities.sum_repeated(bf, v) bf = np.where(bf > 0)[0] return bf def boundary_edges(self): H = self.halfedges ind = np.where(H[:,1] == -1) e = H[ind,5] e = np.unique(e) return e # ------------------------------------------------------------------------- # Global queries # 
------------------------------------------------------------------------- def are_boundary_edges(self): H = self.halfedges B = H[np.argsort(H[:,5])] B = B[:,1] == -1 bound = np.logical_or(B[0::2], B[1::2]) return bound def are_boundary_faces(self): H = self.halfedges f = np.where(H[:,1] != -1)[0] B = H[H[f,4],1] == -1 i = np.argsort(H[f,1]) bound = utilities.sum_repeated(B[i], H[f,1]) return bound # ------------------------------------------------------------------------- # Normals # ------------------------------------------------------------------------- def face_vector_areas(self): f, v1, v2 = self.face_edge_vertices_iterators(order=True) V1 = self.vertices[v1,:] V2 = self.vertices[v2,:] N = np.cross(V1,V2) normals = utilities.sum_repeated(N, f) return 0.5 * normals def face_normals(self): N = self.face_vector_areas() N = N / np.linalg.norm(N, axis=1, keepdims=True) return N def vertex_normals(self): "note: mean of neighbouring (not unit) face normals" N = self.face_vector_areas() v, fi = self.vertex_ring_faces_iterators(sort=True) N = N[fi,:] normals = utilities.sum_repeated(N, v) normals = normals / np.linalg.norm(normals, axis=1, keepdims=True) return normals def edge_normals(self): N = self.face_normals() N = np.insert(N, N.shape[0], 0, axis=0) f1, f2 = self.edge_faces() normals = N[f1] + N[f2] normals = normals / np.linalg.norm(normals, axis=1, keepdims=True) return normals def boundary_normals(self): H = self.halfedges b = np.where(H[:,1] == -1)[0] face_normals = self.face_normals() N1 = face_normals[H[H[b,4],1]] N2 = face_normals[H[H[H[b,3],4],1]] normals = np.zeros((self.V, 3)) E1 = self.vertices[H[H[b,2],0]] - self.vertices[H[b,0]] E2 = self.vertices[H[b,0]] - self.vertices[H[H[b,3],0]] N = np.cross(N1,E1) + np.cross(N2,E2) N = N / np.linalg.norm(N, axis=1, keepdims=True) normals[H[b,0],:] = N return normals def boundary_tangents(self, normalize=True): H = self.halfedges b = np.where(H[:,1] == -1)[0] V1 = self.vertices[H[H[b,3],0]] V2 = 
self.vertices[H[H[b,2],0]] T = (V2 - V1) if normalize: T = T / np.linalg.norm(T, keepdims=True) else: T = T/2 tangents = np.zeros((self.V, 3)) tangents[H[b,0],:] = T return tangents # ------------------------------------------------------------------------- # Area # ------------------------------------------------------------------------- def face_areas(self): N = self.face_vector_areas() A = np.linalg.norm(N, axis=1) return A def area(self): A = self.face_areas() A = np.sum(A) return A def vertex_ring_areas(self): L = self.face_lengths() A = self.face_areas() v, fi = self.vertex_ring_faces_iterators(sort=True) ring_area = A[fi]/L[fi] ring_area = utilities.sum_repeated(ring_area, v) return ring_area # ------------------------------------------------------------------------- # Closeness # ------------------------------------------------------------------------- def make_kdtree(self): kdtree = spatial.cKDTree(self.vertices) self._kdtree = kdtree def closest_vertices(self, points, make_tree=False): if self._kdtree is None: self.make_kdtree() elif make_tree: self.make_kdtree() closest = self._kdtree.query(points)[1] return closest # ------------------------------------------------------------------------- # Geometry # ------------------------------------------------------------------------- def edge_mid_points(self): v1,
""" The ``util`` module is used to contain utility functions and classes that have practical utility within the context of this application but which could conceivably be co-opted to serve in a different context for a different application if needed. As such, there are no hardcoded magic numbers or any such application-specific logic that might hinder their porting to a different package or program. """ __all__ = [ "JsonModelHTMLParser", "determine_system_language", "get_json_file", "has_rights", "is_fandom_wiki_base_url", "log_msg", "log_prompt", "pretty_print", "split_delimited_string_into_list" ] __author__ = "<NAME>" __version__ = "0.1" import ctypes import html.parser import json import locale import os import re import sys import urllib.parse class JsonModelHTMLParser(html.parser.HTMLParser): def __init__(self): """ The ``JsonModelHTMLParser`` class is a subclass extending the base ``html.parser`` ``HTMLParser`` class that is used for parsing HTML strings into ProseMirror-style ``jsonModel`` JSON objects used by Fandom's UCP Message Walls to represent user input. At present, the class only looks for ``<p>`` and ``<a>`` elements, though support is planned for bold, underline, and italic elements in future as this sort of text formatting is permissible in Message Wall thread content. """ html.parser.HTMLParser.__init__(self) # Temporary holding stacks for <p> and <a> HTML elements self.paragraphs_stack = [] self.links_stack = [] # Base jsonModel object self.json_model = { "type": "doc", "content": [] } def handle_starttag(self, tag, attrs): """ The ``handle_starttag`` function overrides its parent class's default implementation and accepts a pair of arguments, namely a string named ``tag`` representing the plaintext name of the HTML tag encountered (p, a, div, span, etc.) and a list named ``attrs`` containing key/value tuples that represent the element's attributes (href, title, etc.) The function checks for ``<p>`` and ``<a>`` elements. 
If the present tag is of the former, it adds a new paragraph node to the class instance's ``paragraphs_stack`` storage stack. If the present tag is a link, the function creates a new link node, populates its ``mark`` list with the link's ``href`` and ``title`` attributes, pushes the new node onto the class instance's ``links_stack`` storage stack, and likewise pushes the node onto the closest paragraph node's ``content`` list. :param tag: The plaintext string representation of the currently viewed tag (p, a, div, span, etc.) without the ``<>`` brackets :param attrs: A list of key/value tuples containing the attributes of the current HTML element :return: None """ if tag == "p": self.paragraphs_stack.append({ "type": "paragraph", "content": [] }) elif tag == "a": new_link = { "type": "text", "marks": [ { "type": "link", "attrs": {} } ] } # jsonModel only cares for href and title link attributes for attr in attrs: if attr[0] == "href": new_link["marks"][0]["attrs"]["href"] = attr[1] elif attr[0] == "title": new_link["marks"][0]["attrs"]["title"] = attr[1] self.links_stack.append(new_link) self.paragraphs_stack[-1]["content"].append(new_link) def handle_endtag(self, tag): """ The ``handle_endtag`` function overrides its parent class's default implementation and accepts a single argument, namely a string named ``tag`` representing the plaintext name of the HTML tag encountered (p, a, div, span, etc.). The function serves to indicate that the end of the current element has been reached. Within the context of the class and its intended use, this indicates that the "current" elements sitting at the end of the class's two helper stacks, ``paragraphs_stack`` and ``links_stack``, can be popped off. In the event of encountered ``</p>`` tags, the popped paragraph JSON object node is then added to and preserved int the master ``json_model`` object as part of the essential representational structure. 
:param tag: The plaintext string representation of the currently viewed tag (p, a, div, span, etc.) without the ``<>`` brackets :return: None """ if tag == "p": self.json_model["content"].append(self.paragraphs_stack.pop()) return elif tag == "a": self.links_stack.pop() def handle_data(self, data): """ The ``handle_data`` function overrides its parent class's default implementation and accepts a single argument, namely a string named ``data`` that represents the contents of the HTML element currently under inspection by the parser. If there is a link node on the appropriate ``links_stack``, the data belongs to that link, so it is added to that link object as the value of a key titled "text." However, if there are no extant link nodes and there is a paragraph node on the ``paragraphs_stack``, the data lies between ``<p>`` tags, so a new text object is created and added to current paragraph object's ``content`` list. :param data: A string constituting the plaintext contents of the currently inspected HTML tag :return: None """ if len(self.links_stack): self.links_stack[-1]["text"] = data elif len(self.paragraphs_stack): self.paragraphs_stack[-1]["content"].append({ "type": "text", "text": data }) def error(self, message): """ The ``handle_data`` function overrides its parent class's default implementation and accepts a single argument, namely a string named ``message`` describing the nature of the error encountered in the course of parsing the HTML. This method is not used in the class's usual use cases, and simply logs the parameter message in the console. :param message: A string describing the specific error encountered in the course of parsing the input HTML :return: None """ log_msg(message, True) def get_json_model_as_string(self): """ The ``get_json_model_as_string`` function is the only custom class method included in the ``JsonModelHTMLParser`` class. 
It serves simply to return the value of the class instance's ``json_model`` field, a JSON object representing the HTML input and layout, as a string. The purpose of this operation is related to the intended use of the string jsonModel within the context of the ``wikia.php`` endpoints as a required POST parameter. :return: The method returns the jsonModel existing as a member field of the class instance as a string for inclusion in POST requests made to the appropriate ``wikia.php`` endpoints """ return json.dumps(self.json_model, separators=(',', ':')) def determine_system_language(): """ The (admittedly janky) ``determine_system_language`` function is used to detect and determine the system language being used on the computer running the application. As this differs for Windows and UNIX-based operating systems, two approaches are used, though if the operating system is not "nt" (Windows) or "posix" (Linux/Mac OS), the language code "en" is returned by default for English. :return: A two-character string representing the abbreviation of the detected system language ("en" for "en_US" and "en_UK", etc.) """ if os.name == "nt": windll = ctypes.windll.kernel32.GetUserDefaultUILanguage() return locale.windows_locale[windll].split("_")[0] elif os.name == "posix": return locale.getdefaultlocale()[0].split("_")[0] else: return "en" def get_json_file(filename): """ The ``get_json_file`` function is a simple helper function that serves to open, load, and return the contents of the JSON file indicated in the input ``filename`` formal parameter. 
:param filename: A string indicating the location and name of the desired JSON file to open :return: The contents of the indicated JSON file are returned for subsequent usage """ with open(filename, "r") as jf: return json.load(jf) def has_rights(groups, permissible): """ The ``has_rights`` function is used to determine whether the user whose list of user rights is passed as the ``groups`` formal parameter possesses the required permissions necessary to undertake whatever restricted operation is intended. The list produced by ``re.findall`` is coerced into a boolean and returned as the returned status of the function. :param groups: A list of strings denoting the specific user rights groups to which the currently queried user belongs according to the MediaWiki API's ``list=users`` endpoint :param permissible: A list of strings denoting the target user rights against which to compare the user's usergroups for membership :return: A boolean denoting whether the user has the permissions necessary to undertake whatever operation is intended """ return bool(re.findall(rf'(?=({"|".join(permissible)}))', "|".join(groups))) def is_fandom_wiki_base_url(url): """ The ``is_fandom_wiki_base_url`` helper function is used to determine whether a given URL has a base URL address corresponding to one of the permissible Wikia/Fandom domains, namely, ``wikia.org`` and ``fandom.com``. The formal parameter, ``url``, is expected to be a base URL, and its subdomain (if any) is popped off prior to comparison. A boolean is returned as the return value indicating whether the domain of the parameter matches one of the Wikia/Fandom default domains. :param url: A string representing the desired URL for which the function will check its base address for compliance with a ``wikia.org`` or ``fandom.com`` domain. :return: A boolean representing whether the parameter url's base address
# MINLP written by GAMS Convert at 04/21/18 13:52:24 # # Equation counts # Total E G L N X C B # 1499 337 504 658 0 0 0 0 # # Variable counts # x b i s1s s2s sc si # Total cont binary integer sos1 sos2 scont sint # 1009 673 336 0 0 0 0 0 # FX 7 7 0 0 0 0 0 0 # # Nonzero counts # Total const NL DLL # 4423 3943 480 0 # # Reformulation has removed 1 variable and 1 equation from pyomo.environ import * model = m = ConcreteModel() m.b1 = Var(within=Binary,bounds=(0,1),initialize=0) m.b2 = Var(within=Binary,bounds=(0,1),initialize=0) m.b3 = Var(within=Binary,bounds=(0,1),initialize=0) m.b4 = Var(within=Binary,bounds=(0,1),initialize=0) m.b5 = Var(within=Binary,bounds=(0,1),initialize=0) m.b6 = Var(within=Binary,bounds=(0,1),initialize=0) m.b7 = Var(within=Binary,bounds=(0,1),initialize=0) m.b8 = Var(within=Binary,bounds=(0,1),initialize=0) m.b9 = Var(within=Binary,bounds=(0,1),initialize=0) m.b10 = Var(within=Binary,bounds=(0,1),initialize=0) m.b11 = Var(within=Binary,bounds=(0,1),initialize=0) m.b12 = Var(within=Binary,bounds=(0,1),initialize=0) m.b13 = Var(within=Binary,bounds=(0,1),initialize=0) m.b14 = Var(within=Binary,bounds=(0,1),initialize=0) m.b15 = Var(within=Binary,bounds=(0,1),initialize=0) m.b16 = Var(within=Binary,bounds=(0,1),initialize=0) m.b17 = Var(within=Binary,bounds=(0,1),initialize=0) m.b18 = Var(within=Binary,bounds=(0,1),initialize=0) m.b19 = Var(within=Binary,bounds=(0,1),initialize=0) m.b20 = Var(within=Binary,bounds=(0,1),initialize=0) m.b21 = Var(within=Binary,bounds=(0,1),initialize=0) m.b22 = Var(within=Binary,bounds=(0,1),initialize=0) m.b23 = Var(within=Binary,bounds=(0,1),initialize=0) m.b24 = Var(within=Binary,bounds=(0,1),initialize=0) m.b25 = Var(within=Binary,bounds=(0,1),initialize=0) m.b26 = Var(within=Binary,bounds=(0,1),initialize=0) m.b27 = Var(within=Binary,bounds=(0,1),initialize=0) m.b28 = Var(within=Binary,bounds=(0,1),initialize=0) m.b29 = Var(within=Binary,bounds=(0,1),initialize=0) m.b30 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b31 = Var(within=Binary,bounds=(0,1),initialize=0) m.b32 = Var(within=Binary,bounds=(0,1),initialize=0) m.b33 = Var(within=Binary,bounds=(0,1),initialize=0) m.b34 = Var(within=Binary,bounds=(0,1),initialize=0) m.b35 = Var(within=Binary,bounds=(0,1),initialize=0) m.b36 = Var(within=Binary,bounds=(0,1),initialize=0) m.b37 = Var(within=Binary,bounds=(0,1),initialize=0) m.b38 = Var(within=Binary,bounds=(0,1),initialize=0) m.b39 = Var(within=Binary,bounds=(0,1),initialize=0) m.b40 = Var(within=Binary,bounds=(0,1),initialize=0) m.b41 = Var(within=Binary,bounds=(0,1),initialize=0) m.b42 = Var(within=Binary,bounds=(0,1),initialize=0) m.b43 = Var(within=Binary,bounds=(0,1),initialize=0) m.b44 = Var(within=Binary,bounds=(0,1),initialize=0) m.b45 = Var(within=Binary,bounds=(0,1),initialize=0) m.b46 = Var(within=Binary,bounds=(0,1),initialize=0) m.b47 = Var(within=Binary,bounds=(0,1),initialize=0) m.b48 = Var(within=Binary,bounds=(0,1),initialize=0) m.b49 = Var(within=Binary,bounds=(0,1),initialize=0) m.b50 = Var(within=Binary,bounds=(0,1),initialize=0) m.b51 = Var(within=Binary,bounds=(0,1),initialize=0) m.b52 = Var(within=Binary,bounds=(0,1),initialize=0) m.b53 = Var(within=Binary,bounds=(0,1),initialize=0) m.b54 = Var(within=Binary,bounds=(0,1),initialize=0) m.b55 = Var(within=Binary,bounds=(0,1),initialize=0) m.b56 = Var(within=Binary,bounds=(0,1),initialize=0) m.b57 = Var(within=Binary,bounds=(0,1),initialize=0) m.b58 = Var(within=Binary,bounds=(0,1),initialize=0) m.b59 = Var(within=Binary,bounds=(0,1),initialize=0) m.b60 = Var(within=Binary,bounds=(0,1),initialize=0) m.b61 = Var(within=Binary,bounds=(0,1),initialize=0) m.b62 = Var(within=Binary,bounds=(0,1),initialize=0) m.b63 = Var(within=Binary,bounds=(0,1),initialize=0) m.b64 = Var(within=Binary,bounds=(0,1),initialize=0) m.b65 = Var(within=Binary,bounds=(0,1),initialize=0) m.b66 = Var(within=Binary,bounds=(0,1),initialize=0) m.b67 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b68 = Var(within=Binary,bounds=(0,1),initialize=0) m.b69 = Var(within=Binary,bounds=(0,1),initialize=0) m.b70 = Var(within=Binary,bounds=(0,1),initialize=0) m.b71 = Var(within=Binary,bounds=(0,1),initialize=0) m.b72 = Var(within=Binary,bounds=(0,1),initialize=0) m.b73 = Var(within=Binary,bounds=(0,1),initialize=0) m.b74 = Var(within=Binary,bounds=(0,1),initialize=0) m.b75 = Var(within=Binary,bounds=(0,1),initialize=0) m.b76 = Var(within=Binary,bounds=(0,1),initialize=0) m.b77 = Var(within=Binary,bounds=(0,1),initialize=0) m.b78 = Var(within=Binary,bounds=(0,1),initialize=0) m.b79 = Var(within=Binary,bounds=(0,1),initialize=0) m.b80 = Var(within=Binary,bounds=(0,1),initialize=0) m.b81 = Var(within=Binary,bounds=(0,1),initialize=0) m.b82 = Var(within=Binary,bounds=(0,1),initialize=0) m.b83 = Var(within=Binary,bounds=(0,1),initialize=0) m.b84 = Var(within=Binary,bounds=(0,1),initialize=0) m.b85 = Var(within=Binary,bounds=(0,1),initialize=0) m.b86 = Var(within=Binary,bounds=(0,1),initialize=0) m.b87 = Var(within=Binary,bounds=(0,1),initialize=0) m.b88 = Var(within=Binary,bounds=(0,1),initialize=0) m.b89 = Var(within=Binary,bounds=(0,1),initialize=0) m.b90 = Var(within=Binary,bounds=(0,1),initialize=0) m.b91 = Var(within=Binary,bounds=(0,1),initialize=0) m.b92 = Var(within=Binary,bounds=(0,1),initialize=0) m.b93 = Var(within=Binary,bounds=(0,1),initialize=0) m.b94 = Var(within=Binary,bounds=(0,1),initialize=0) m.b95 = Var(within=Binary,bounds=(0,1),initialize=0) m.b96 = Var(within=Binary,bounds=(0,1),initialize=0) m.b97 = Var(within=Binary,bounds=(0,1),initialize=0) m.b98 = Var(within=Binary,bounds=(0,1),initialize=0) m.b99 = Var(within=Binary,bounds=(0,1),initialize=0) m.b100 = Var(within=Binary,bounds=(0,1),initialize=0) m.b101 = Var(within=Binary,bounds=(0,1),initialize=0) m.b102 = Var(within=Binary,bounds=(0,1),initialize=0) m.b103 = Var(within=Binary,bounds=(0,1),initialize=0) m.b104 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b105 = Var(within=Binary,bounds=(0,1),initialize=0) m.b106 = Var(within=Binary,bounds=(0,1),initialize=0) m.b107 = Var(within=Binary,bounds=(0,1),initialize=0) m.b108 = Var(within=Binary,bounds=(0,1),initialize=0) m.b109 = Var(within=Binary,bounds=(0,1),initialize=0) m.b110 = Var(within=Binary,bounds=(0,1),initialize=0) m.b111 = Var(within=Binary,bounds=(0,1),initialize=0) m.b112 = Var(within=Binary,bounds=(0,1),initialize=0) m.b113 = Var(within=Binary,bounds=(0,1),initialize=0) m.b114 = Var(within=Binary,bounds=(0,1),initialize=0) m.b115 = Var(within=Binary,bounds=(0,1),initialize=0) m.b116 = Var(within=Binary,bounds=(0,1),initialize=0) m.b117 = Var(within=Binary,bounds=(0,1),initialize=0) m.b118 = Var(within=Binary,bounds=(0,1),initialize=0) m.b119 = Var(within=Binary,bounds=(0,1),initialize=0) m.b120 = Var(within=Binary,bounds=(0,1),initialize=0) m.b121 = Var(within=Binary,bounds=(0,1),initialize=0) m.b122 = Var(within=Binary,bounds=(0,1),initialize=0) m.b123 = Var(within=Binary,bounds=(0,1),initialize=0) m.b124 = Var(within=Binary,bounds=(0,1),initialize=0) m.b125 = Var(within=Binary,bounds=(0,1),initialize=0) m.b126 = Var(within=Binary,bounds=(0,1),initialize=0) m.b127 = Var(within=Binary,bounds=(0,1),initialize=0) m.b128 = Var(within=Binary,bounds=(0,1),initialize=0) m.b129 = Var(within=Binary,bounds=(0,1),initialize=0) m.b130 = Var(within=Binary,bounds=(0,1),initialize=0) m.b131 = Var(within=Binary,bounds=(0,1),initialize=0) m.b132 = Var(within=Binary,bounds=(0,1),initialize=0) m.b133 = Var(within=Binary,bounds=(0,1),initialize=0) m.b134 = Var(within=Binary,bounds=(0,1),initialize=0) m.b135 = Var(within=Binary,bounds=(0,1),initialize=0) m.b136 = Var(within=Binary,bounds=(0,1),initialize=0) m.b137 = Var(within=Binary,bounds=(0,1),initialize=0) m.b138 = Var(within=Binary,bounds=(0,1),initialize=0) m.b139 = Var(within=Binary,bounds=(0,1),initialize=0) m.b140 = Var(within=Binary,bounds=(0,1),initialize=0) m.b141 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b142 = Var(within=Binary,bounds=(0,1),initialize=0) m.b143 = Var(within=Binary,bounds=(0,1),initialize=0) m.b144 = Var(within=Binary,bounds=(0,1),initialize=0) m.b145 = Var(within=Binary,bounds=(0,1),initialize=0) m.b146 = Var(within=Binary,bounds=(0,1),initialize=0) m.b147 = Var(within=Binary,bounds=(0,1),initialize=0) m.b148 = Var(within=Binary,bounds=(0,1),initialize=0) m.b149 = Var(within=Binary,bounds=(0,1),initialize=0) m.b150 = Var(within=Binary,bounds=(0,1),initialize=0) m.b151 = Var(within=Binary,bounds=(0,1),initialize=0) m.b152 = Var(within=Binary,bounds=(0,1),initialize=0) m.b153 = Var(within=Binary,bounds=(0,1),initialize=0) m.b154 = Var(within=Binary,bounds=(0,1),initialize=0) m.b155 = Var(within=Binary,bounds=(0,1),initialize=0) m.b156 = Var(within=Binary,bounds=(0,1),initialize=0) m.b157 = Var(within=Binary,bounds=(0,1),initialize=0) m.b158 = Var(within=Binary,bounds=(0,1),initialize=0) m.b159 = Var(within=Binary,bounds=(0,1),initialize=0) m.b160 = Var(within=Binary,bounds=(0,1),initialize=0) m.b161 = Var(within=Binary,bounds=(0,1),initialize=0) m.b162 = Var(within=Binary,bounds=(0,1),initialize=0) m.b163 = Var(within=Binary,bounds=(0,1),initialize=0) m.b164 = Var(within=Binary,bounds=(0,1),initialize=0) m.b165 = Var(within=Binary,bounds=(0,1),initialize=0) m.b166 = Var(within=Binary,bounds=(0,1),initialize=0) m.b167 = Var(within=Binary,bounds=(0,1),initialize=0) m.b168 = Var(within=Binary,bounds=(0,1),initialize=0) m.b169 = Var(within=Binary,bounds=(0,1),initialize=0) m.b170 = Var(within=Binary,bounds=(0,1),initialize=0) m.b171 = Var(within=Binary,bounds=(0,1),initialize=0) m.b172 = Var(within=Binary,bounds=(0,1),initialize=0) m.b173 = Var(within=Binary,bounds=(0,1),initialize=0) m.b174 = Var(within=Binary,bounds=(0,1),initialize=0) m.b175 = Var(within=Binary,bounds=(0,1),initialize=0) m.b176 = Var(within=Binary,bounds=(0,1),initialize=0) m.b177 = Var(within=Binary,bounds=(0,1),initialize=0) m.b178 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b179 = Var(within=Binary,bounds=(0,1),initialize=0) m.b180 = Var(within=Binary,bounds=(0,1),initialize=0) m.b181 = Var(within=Binary,bounds=(0,1),initialize=0) m.b182 = Var(within=Binary,bounds=(0,1),initialize=0) m.b183 = Var(within=Binary,bounds=(0,1),initialize=0) m.b184 = Var(within=Binary,bounds=(0,1),initialize=0) m.b185 = Var(within=Binary,bounds=(0,1),initialize=0) m.b186 = Var(within=Binary,bounds=(0,1),initialize=0) m.b187 = Var(within=Binary,bounds=(0,1),initialize=0) m.b188 = Var(within=Binary,bounds=(0,1),initialize=0) m.b189 = Var(within=Binary,bounds=(0,1),initialize=0) m.b190 = Var(within=Binary,bounds=(0,1),initialize=0) m.b191 = Var(within=Binary,bounds=(0,1),initialize=0) m.b192 = Var(within=Binary,bounds=(0,1),initialize=0) m.b193 = Var(within=Binary,bounds=(0,1),initialize=0) m.b194 = Var(within=Binary,bounds=(0,1),initialize=0) m.b195 = Var(within=Binary,bounds=(0,1),initialize=0) m.b196 = Var(within=Binary,bounds=(0,1),initialize=0) m.b197 = Var(within=Binary,bounds=(0,1),initialize=0) m.b198 = Var(within=Binary,bounds=(0,1),initialize=0) m.b199 = Var(within=Binary,bounds=(0,1),initialize=0) m.b200 = Var(within=Binary,bounds=(0,1),initialize=0) m.b201 = Var(within=Binary,bounds=(0,1),initialize=0) m.b202 = Var(within=Binary,bounds=(0,1),initialize=0) m.b203 = Var(within=Binary,bounds=(0,1),initialize=0) m.b204 = Var(within=Binary,bounds=(0,1),initialize=0) m.b205 = Var(within=Binary,bounds=(0,1),initialize=0) m.b206 = Var(within=Binary,bounds=(0,1),initialize=0) m.b207 = Var(within=Binary,bounds=(0,1),initialize=0) m.b208 = Var(within=Binary,bounds=(0,1),initialize=0) m.b209 = Var(within=Binary,bounds=(0,1),initialize=0) m.b210 = Var(within=Binary,bounds=(0,1),initialize=0) m.b211 = Var(within=Binary,bounds=(0,1),initialize=0) m.b212 = Var(within=Binary,bounds=(0,1),initialize=0) m.b213 = Var(within=Binary,bounds=(0,1),initialize=0) m.b214 = Var(within=Binary,bounds=(0,1),initialize=0) m.b215 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b216 = Var(within=Binary,bounds=(0,1),initialize=0) m.b217 = Var(within=Binary,bounds=(0,1),initialize=0) m.b218 = Var(within=Binary,bounds=(0,1),initialize=0) m.b219 = Var(within=Binary,bounds=(0,1),initialize=0) m.b220 = Var(within=Binary,bounds=(0,1),initialize=0) m.b221 = Var(within=Binary,bounds=(0,1),initialize=0) m.b222 = Var(within=Binary,bounds=(0,1),initialize=0) m.b223 = Var(within=Binary,bounds=(0,1),initialize=0) m.b224 = Var(within=Binary,bounds=(0,1),initialize=0) m.b225 = Var(within=Binary,bounds=(0,1),initialize=0) m.b226 = Var(within=Binary,bounds=(0,1),initialize=0) m.b227 = Var(within=Binary,bounds=(0,1),initialize=0) m.b228 = Var(within=Binary,bounds=(0,1),initialize=0) m.b229 = Var(within=Binary,bounds=(0,1),initialize=0) m.b230 = Var(within=Binary,bounds=(0,1),initialize=0) m.b231 = Var(within=Binary,bounds=(0,1),initialize=0) m.b232 = Var(within=Binary,bounds=(0,1),initialize=0) m.b233 = Var(within=Binary,bounds=(0,1),initialize=0) m.b234 = Var(within=Binary,bounds=(0,1),initialize=0) m.b235 = Var(within=Binary,bounds=(0,1),initialize=0) m.b236 = Var(within=Binary,bounds=(0,1),initialize=0) m.b237 = Var(within=Binary,bounds=(0,1),initialize=0) m.b238 = Var(within=Binary,bounds=(0,1),initialize=0) m.b239 = Var(within=Binary,bounds=(0,1),initialize=0) m.b240 = Var(within=Binary,bounds=(0,1),initialize=0) m.b241 = Var(within=Binary,bounds=(0,1),initialize=0) m.b242 = Var(within=Binary,bounds=(0,1),initialize=0) m.b243 = Var(within=Binary,bounds=(0,1),initialize=0) m.b244 = Var(within=Binary,bounds=(0,1),initialize=0) m.b245 = Var(within=Binary,bounds=(0,1),initialize=0) m.b246 = Var(within=Binary,bounds=(0,1),initialize=0) m.b247 = Var(within=Binary,bounds=(0,1),initialize=0) m.b248 = Var(within=Binary,bounds=(0,1),initialize=0) m.b249 = Var(within=Binary,bounds=(0,1),initialize=0) m.b250 = Var(within=Binary,bounds=(0,1),initialize=0) m.b251 = Var(within=Binary,bounds=(0,1),initialize=0) m.b252 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b253 = Var(within=Binary,bounds=(0,1),initialize=0) m.b254 = Var(within=Binary,bounds=(0,1),initialize=0) m.b255 = Var(within=Binary,bounds=(0,1),initialize=0) m.b256 = Var(within=Binary,bounds=(0,1),initialize=0) m.b257 = Var(within=Binary,bounds=(0,1),initialize=0) m.b258 = Var(within=Binary,bounds=(0,1),initialize=0) m.b259 = Var(within=Binary,bounds=(0,1),initialize=0) m.b260 = Var(within=Binary,bounds=(0,1),initialize=0) m.b261 = Var(within=Binary,bounds=(0,1),initialize=0) m.b262 = Var(within=Binary,bounds=(0,1),initialize=0) m.b263 = Var(within=Binary,bounds=(0,1),initialize=0) m.b264 = Var(within=Binary,bounds=(0,1),initialize=0) m.b265 = Var(within=Binary,bounds=(0,1),initialize=0) m.b266 = Var(within=Binary,bounds=(0,1),initialize=0) m.b267 = Var(within=Binary,bounds=(0,1),initialize=0) m.b268 = Var(within=Binary,bounds=(0,1),initialize=0) m.b269 = Var(within=Binary,bounds=(0,1),initialize=0) m.b270 = Var(within=Binary,bounds=(0,1),initialize=0) m.b271 = Var(within=Binary,bounds=(0,1),initialize=0) m.b272 = Var(within=Binary,bounds=(0,1),initialize=0) m.b273 = Var(within=Binary,bounds=(0,1),initialize=0) m.b274 = Var(within=Binary,bounds=(0,1),initialize=0) m.b275 = Var(within=Binary,bounds=(0,1),initialize=0) m.b276 = Var(within=Binary,bounds=(0,1),initialize=0) m.b277 = Var(within=Binary,bounds=(0,1),initialize=0) m.b278 = Var(within=Binary,bounds=(0,1),initialize=0) m.b279 = Var(within=Binary,bounds=(0,1),initialize=0) m.b280 = Var(within=Binary,bounds=(0,1),initialize=0) m.b281 = Var(within=Binary,bounds=(0,1),initialize=0) m.b282 = Var(within=Binary,bounds=(0,1),initialize=0) m.b283 = Var(within=Binary,bounds=(0,1),initialize=0) m.b284 = Var(within=Binary,bounds=(0,1),initialize=0) m.b285 = Var(within=Binary,bounds=(0,1),initialize=0) m.b286 = Var(within=Binary,bounds=(0,1),initialize=0) m.b287 = Var(within=Binary,bounds=(0,1),initialize=0) m.b288 = Var(within=Binary,bounds=(0,1),initialize=0) m.b289 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b290 = Var(within=Binary,bounds=(0,1),initialize=0) m.b291 = Var(within=Binary,bounds=(0,1),initialize=0) m.b292 = Var(within=Binary,bounds=(0,1),initialize=0) m.b293 = Var(within=Binary,bounds=(0,1),initialize=0) m.b294 = Var(within=Binary,bounds=(0,1),initialize=0) m.b295 = Var(within=Binary,bounds=(0,1),initialize=0) m.b296 = Var(within=Binary,bounds=(0,1),initialize=0) m.b297 = Var(within=Binary,bounds=(0,1),initialize=0) m.b298 = Var(within=Binary,bounds=(0,1),initialize=0) m.b299 = Var(within=Binary,bounds=(0,1),initialize=0) m.b300 = Var(within=Binary,bounds=(0,1),initialize=0) m.b301 = Var(within=Binary,bounds=(0,1),initialize=0) m.b302 = Var(within=Binary,bounds=(0,1),initialize=0) m.b303 = Var(within=Binary,bounds=(0,1),initialize=0) m.b304 = Var(within=Binary,bounds=(0,1),initialize=0) m.b305 = Var(within=Binary,bounds=(0,1),initialize=0) m.b306 = Var(within=Binary,bounds=(0,1),initialize=0) m.b307 = Var(within=Binary,bounds=(0,1),initialize=0) m.b308 = Var(within=Binary,bounds=(0,1),initialize=0) m.b309 = Var(within=Binary,bounds=(0,1),initialize=0) m.b310 = Var(within=Binary,bounds=(0,1),initialize=0) m.b311 = Var(within=Binary,bounds=(0,1),initialize=0) m.b312 = Var(within=Binary,bounds=(0,1),initialize=0) m.b313 = Var(within=Binary,bounds=(0,1),initialize=0) m.b314 = Var(within=Binary,bounds=(0,1),initialize=0) m.b315 = Var(within=Binary,bounds=(0,1),initialize=0) m.b316 = Var(within=Binary,bounds=(0,1),initialize=0) m.b317 = Var(within=Binary,bounds=(0,1),initialize=0) m.b318 = Var(within=Binary,bounds=(0,1),initialize=0) m.b319 = Var(within=Binary,bounds=(0,1),initialize=0) m.b320 = Var(within=Binary,bounds=(0,1),initialize=0) m.b321 = Var(within=Binary,bounds=(0,1),initialize=0) m.b322 = Var(within=Binary,bounds=(0,1),initialize=0) m.b323 = Var(within=Binary,bounds=(0,1),initialize=0) m.b324 = Var(within=Binary,bounds=(0,1),initialize=0) m.b325 = Var(within=Binary,bounds=(0,1),initialize=0) m.b326 = 
Var(within=Binary,bounds=(0,1),initialize=0) m.b327 = Var(within=Binary,bounds=(0,1),initialize=0) m.b328 = Var(within=Binary,bounds=(0,1),initialize=0) m.b329 = Var(within=Binary,bounds=(0,1),initialize=0) m.b330 = Var(within=Binary,bounds=(0,1),initialize=0) m.b331 = Var(within=Binary,bounds=(0,1),initialize=0) m.b332 = Var(within=Binary,bounds=(0,1),initialize=0) m.b333 = Var(within=Binary,bounds=(0,1),initialize=0) m.b334 = Var(within=Binary,bounds=(0,1),initialize=0) m.b335 = Var(within=Binary,bounds=(0,1),initialize=0) m.b336 = Var(within=Binary,bounds=(0,1),initialize=0) m.x337 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x338 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x339 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x340 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x341 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x342 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x343 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x344 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x345 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x346 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x347 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x348 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x349 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x350 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x351 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x352 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x353 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x354 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x355 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x356 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x357 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x358 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x359 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x360 = Var(within=Reals,bounds=(0,188.08),initialize=0) m.x361 = 
Var(within=Reals,bounds=(0,237.14),initialize=0)  # completes the m.x361 assignment begun in the previous chunk

# Auto-generated Pyomo variable declarations for m.x362 .. m.x595, compressed
# into a data-driven loop.  Each tuple below gives the inclusive index range
# and the shared upper bound (None = unbounded above); every variable is a
# non-negative real initialized to 0, exactly as in the original expanded
# declarations.  setattr on the Pyomo model registers each component under
# the given name, so the attributes m.x362 .. m.x595 are created exactly as
# before and the constraint expressions elsewhere in this generated file
# continue to resolve unchanged.
for _first, _last, _upper in [
    (362, 384, 237.14),
    (385, 408, 60),
    (409, 432, 185.99),
    (433, 456, 201.02),
    (457, 480, 134.02),
    (481, 504, 117.01),
    (505, 595, None),
]:
    for _idx in range(_first, _last + 1):
        setattr(m, 'x%d' % _idx, Var(within=Reals, bounds=(0, _upper), initialize=0))

m.x596