code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def levenberg_marquardt(self, start_x=None, damping=1.0e-3, tolerance=1.0e-6):
    """Optimise the value of x using the Levenberg-Marquardt algorithm.

    :param start_x: starting point for the optimisation; when ``None`` an
        initial estimate is obtained from the analytical fitter.
    :param damping: initial damping factor.
        NOTE(review): currently unused -- it is not forwarded to
        ``optimise_levenberg_marquardt``; confirm whether it should be.
    :param tolerance: convergence tolerance passed to the optimiser.
    :return: whatever ``optimise_levenberg_marquardt`` returns.
    """
    if start_x is None:
        # No explicit start point: seed the optimiser from the analytical fit.
        start_x = self._analytical_fitter.fit(self._c)
    return optimise_levenberg_marquardt(start_x, self._a, self._c, tolerance)
Optimise value of x using levenberg marquardt
Below is the the instruction that describes the task: ### Input: Optimise value of x using levenberg marquardt ### Response: def levenberg_marquardt(self, start_x=None, damping=1.0e-3, tolerance=1.0e-6): """ Optimise value of x using levenberg marquardt """ if start_x is None: start_x = self._analytical_fitter.fit(self._c) return optimise_levenberg_marquardt(start_x, self._a, self._c, tolerance)
def write_name(self, name):
    """Write a domain name to the packet, using DNS name compression.

    The first time a name is written, its labels are emitted followed by a
    terminating zero byte, and the current packet offset is recorded. Any
    later write of the same name emits a two-byte compression pointer
    (top two bits set) back to that first occurrence.
    """
    if name not in self.names:
        # First occurrence: remember where this name starts so that
        # future writes can point back to it.
        self.names[name] = self.size
        labels = name.split('.')
        if labels[-1] == '':
            labels = labels[:-1]
        for label in labels:
            self.write_utf(label)
        self.write_byte(0)
    else:
        # Seen before: emit a compression pointer to the stored offset.
        offset = self.names[name]
        self.write_byte((offset >> 8) | 0xC0)
        self.write_byte(offset)
Writes a domain name to the packet
Below is the the instruction that describes the task: ### Input: Writes a domain name to the packet ### Response: def write_name(self, name): """Writes a domain name to the packet""" try: # Find existing instance of this name in packet # index = self.names[name] except KeyError: # No record of this name already, so write it # out as normal, recording the location of the name # for future pointers to it. # self.names[name] = self.size parts = name.split('.') if parts[-1] == '': parts = parts[:-1] for part in parts: self.write_utf(part) self.write_byte(0) return # An index was found, so write a pointer to it # self.write_byte((index >> 8) | 0xC0) self.write_byte(index)
def _check_with_label(self, selector, checked, locator=None, allow_label_click=None, visible=None, wait=None, **kwargs):
    """
    Args:
        selector (str): The selector for the type of element that should be
            checked/unchecked.
        checked (bool): Whether the element should be checked.
        locator (str, optional): Which element to check.
        allow_label_click (bool, optional): Attempt to click the label to
            toggle state if element is non-visible. Defaults to
            :data:`capybara.automatic_label_click`.
        visible (bool | str, optional): The desired element visibility.
            Defaults to :data:`capybara.ignore_hidden_elements`.
        wait (int | float, optional): The number of seconds to wait to check
            the element. Defaults to :data:`capybara.default_max_wait_time`.
        **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
    """
    if allow_label_click is None:
        allow_label_click = capybara.automatic_label_click

    # The decorated inner function is retried by `synchronize` until the
    # wait time elapses or it succeeds.
    @self.synchronize(wait=BaseQuery.normalize_wait(wait))
    def check_with_label():
        element = None
        try:
            # Happy path: find the element and set its checked state directly.
            element = self.find(selector, locator, visible=visible, **kwargs)
            element.set(checked)
        except Exception as e:
            # Only fall back to clicking the label for errors the driver
            # considers recoverable, and only when label-clicking is allowed.
            if not allow_label_click or not self._should_catch_error(e):
                raise
            try:
                # The element may have been invisible; re-find it ignoring
                # visibility so we can locate its label.
                if not element:
                    element = self.find(selector, locator, visible="all", **kwargs)
                label = self.find("label", field=element, visible=True)
                if element.checked != checked:
                    label.click()
            except Exception:
                # The label fallback failed too: surface the ORIGINAL error,
                # not the fallback's, as it describes the real problem.
                raise e

    check_with_label()
Args: selector (str): The selector for the type of element that should be checked/unchecked. checked (bool): Whether the element should be checked. locator (str, optional): Which element to check. allow_label_click (bool, optional): Attempt to click the label to toggle state if element is non-visible. Defaults to :data:`capybara.automatic_label_click`. visible (bool | str, optional): The desired element visibility. Defaults to :data:`capybara.ignore_hidden_elements`. wait (int | float, optional): The number of seconds to wait to check the element. Defaults to :data:`capybara.default_max_wait_time`. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Below is the the instruction that describes the task: ### Input: Args: selector (str): The selector for the type of element that should be checked/unchecked. checked (bool): Whether the element should be checked. locator (str, optional): Which element to check. allow_label_click (bool, optional): Attempt to click the label to toggle state if element is non-visible. Defaults to :data:`capybara.automatic_label_click`. visible (bool | str, optional): The desired element visibility. Defaults to :data:`capybara.ignore_hidden_elements`. wait (int | float, optional): The number of seconds to wait to check the element. Defaults to :data:`capybara.default_max_wait_time`. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. ### Response: def _check_with_label(self, selector, checked, locator=None, allow_label_click=None, visible=None, wait=None, **kwargs): """ Args: selector (str): The selector for the type of element that should be checked/unchecked. checked (bool): Whether the element should be checked. locator (str, optional): Which element to check. allow_label_click (bool, optional): Attempt to click the label to toggle state if element is non-visible. Defaults to :data:`capybara.automatic_label_click`. visible (bool | str, optional): The desired element visibility. Defaults to :data:`capybara.ignore_hidden_elements`. wait (int | float, optional): The number of seconds to wait to check the element. Defaults to :data:`capybara.default_max_wait_time`. **kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`. 
""" if allow_label_click is None: allow_label_click = capybara.automatic_label_click @self.synchronize(wait=BaseQuery.normalize_wait(wait)) def check_with_label(): element = None try: element = self.find(selector, locator, visible=visible, **kwargs) element.set(checked) except Exception as e: if not allow_label_click or not self._should_catch_error(e): raise try: if not element: element = self.find(selector, locator, visible="all", **kwargs) label = self.find("label", field=element, visible=True) if element.checked != checked: label.click() except Exception: raise e check_with_label()
def datetime(self):
    """[datetime.datetime] Timestamp of the current snapshot data.

    Falls back to ``datetime.datetime.min`` when the tick dict has no
    usable ``'datetime'`` entry. Integer timestamps are converted:
    very large integers are interpreted as millisecond encodings.
    """
    try:
        raw = self._tick_dict['datetime']
    except (KeyError, ValueError):
        # Missing or unreadable entry: use the sentinel minimum timestamp.
        return datetime.datetime.min
    if isinstance(raw, datetime.datetime):
        return raw
    # Integer encodings: values above this threshold carry milliseconds.
    if raw > 10000000000000000:
        return convert_ms_int_to_datetime(raw)
    return convert_int_to_datetime(raw)
[datetime.datetime] 当前快照数据的时间戳
Below is the the instruction that describes the task: ### Input: [datetime.datetime] 当前快照数据的时间戳 ### Response: def datetime(self): """ [datetime.datetime] 当前快照数据的时间戳 """ try: dt = self._tick_dict['datetime'] except (KeyError, ValueError): return datetime.datetime.min else: if not isinstance(dt, datetime.datetime): if dt > 10000000000000000: # ms return convert_ms_int_to_datetime(dt) else: return convert_int_to_datetime(dt) return dt
def dispatch(self, request):
    """Takes a request and dispatches its data to a jsonrpc method.

    :param request: a werkzeug request with json data
    :type request: werkzeug.wrappers.Request
    :return: json output of the corresponding method
    :rtype: str

    .. versionadded:: 0.1.0
    """
    def _wrapped():
        # A request may contain one message or a batch of messages.
        messages = self._get_request_messages(request)
        results = [self._dispatch_and_handle_errors(message) for message in messages]
        # Notifications produce None results and must not be answered.
        non_notification_results = [x for x in results if x is not None]
        if len(non_notification_results) == 0:
            # Nothing to answer (all notifications).
            return None
        elif len(messages) == 1:
            # Single (non-batch) request: respond with a single object.
            return non_notification_results[0]
        else:
            # Batch request: respond with the list of results.
            return non_notification_results
    result, _ = self._handle_exceptions(_wrapped)
    # Implicitly returns None (no body) when there is nothing to encode.
    if result is not None:
        return self._encode_complete_result(result)
Takes a request and dispatches its data to a jsonrpc method. :param request: a werkzeug request with json data :type request: werkzeug.wrappers.Request :return: json output of the corresponding method :rtype: str .. versionadded:: 0.1.0
Below is the the instruction that describes the task: ### Input: Takes a request and dispatches its data to a jsonrpc method. :param request: a werkzeug request with json data :type request: werkzeug.wrappers.Request :return: json output of the corresponding method :rtype: str .. versionadded:: 0.1.0 ### Response: def dispatch(self, request): """Takes a request and dispatches its data to a jsonrpc method. :param request: a werkzeug request with json data :type request: werkzeug.wrappers.Request :return: json output of the corresponding method :rtype: str .. versionadded:: 0.1.0 """ def _wrapped(): messages = self._get_request_messages(request) results = [self._dispatch_and_handle_errors(message) for message in messages] non_notification_results = [x for x in results if x is not None] if len(non_notification_results) == 0: return None elif len(messages) == 1: return non_notification_results[0] else: return non_notification_results result, _ = self._handle_exceptions(_wrapped) if result is not None: return self._encode_complete_result(result)
def inv_std_norm_cdf(x):
    """Approximate inverse CDF (quantile) of the standard Gaussian.

    Uses Winitzki's (2008) closed-form approximation of the inverse
    error function, scaled by sqrt(2) to map erf^-1 to the normal
    quantile function.
    """
    z = 2.0 * x - 1.0
    log_term = np.log(1.0 - z ** 2)
    # Winitzki's constant a ~ 0.1400 controls the approximation accuracy.
    a_const = 8.0 * (np.pi - 3.0) / (3.0 * np.pi * (4.0 - np.pi))
    half_term = 2.0 / (np.pi * a_const) + log_term / 2.0
    erfinv = np.sign(z) * np.sqrt(np.sqrt(half_term ** 2 - log_term / a_const) - half_term)
    return np.sqrt(2.0) * erfinv
Inverse cumulative standard Gaussian distribution Based on Winitzki, S. (2008)
Below is the the instruction that describes the task: ### Input: Inverse cumulative standard Gaussian distribution Based on Winitzki, S. (2008) ### Response: def inv_std_norm_cdf(x): """ Inverse cumulative standard Gaussian distribution Based on Winitzki, S. (2008) """ z = 2*x -1 ln1z2 = np.log(1-z**2) a = 8*(np.pi -3)/(3*np.pi*(4-np.pi)) b = 2/(np.pi * a) + ln1z2/2 inv_erf = np.sign(z) * np.sqrt( np.sqrt(b**2 - ln1z2/a) - b ) return np.sqrt(2) * inv_erf
def _wait_new_conf(self):
    """Drop the daemon's configuration and wait for a new one.

    This overrides the default method from GenericInterface.

    :return: None
    """
    app = self.app
    with app.conf_lock:
        # An empty cur_conf signals the daemon it must wait for a fresh
        # configuration from its master Arbiter.
        logger.warning("My master Arbiter wants me to wait for a new configuration.")
        app.cur_conf = {}
Ask the daemon to drop its configuration and wait for a new one This overrides the default method from GenericInterface :return: None
Below is the the instruction that describes the task: ### Input: Ask the daemon to drop its configuration and wait for a new one This overrides the default method from GenericInterface :return: None ### Response: def _wait_new_conf(self): """Ask the daemon to drop its configuration and wait for a new one This overrides the default method from GenericInterface :return: None """ with self.app.conf_lock: logger.warning("My master Arbiter wants me to wait for a new configuration.") self.app.cur_conf = {}
def plotsignal(self, fig=None, saveplot=True, folder=None, figformat='png', **kwargs):
    """Plot the TransitSignal, optionally saving the figure.

    Calls :func:`TransitSignal.plot` and saves the result to *folder*.

    :param fig: (optional) argument for :func:`plotutils.setfig`.
    :param saveplot: (optional) whether to save the figure.
    :param folder: (optional) folder to which to save the plot; defaults
        to ``self.folder``.
    :param figformat: (optional) desired format for the figure.
    :param **kwargs: additional keyword arguments passed to
        :func:`TransitSignal.plot`.
    """
    target_folder = self.folder if folder is None else folder
    self.trsig.plot(plot_trap=True, fig=fig, **kwargs)
    if saveplot:
        plt.savefig('%s/signal.%s' % (target_folder, figformat))
        plt.close()
Plots TransitSignal Calls :func:`TransitSignal.plot`, saves to provided folder. :param fig: (optional) Argument for :func:`plotutils.setfig`. :param saveplot: (optional) Whether to save figure. :param folder: (optional) Folder to which to save plot :param figformat: (optional) Desired format for figure. :param **kwargs: Additional keyword arguments passed to :func:`TransitSignal.plot`.
Below is the the instruction that describes the task: ### Input: Plots TransitSignal Calls :func:`TransitSignal.plot`, saves to provided folder. :param fig: (optional) Argument for :func:`plotutils.setfig`. :param saveplot: (optional) Whether to save figure. :param folder: (optional) Folder to which to save plot :param figformat: (optional) Desired format for figure. :param **kwargs: Additional keyword arguments passed to :func:`TransitSignal.plot`. ### Response: def plotsignal(self,fig=None,saveplot=True,folder=None,figformat='png',**kwargs): """ Plots TransitSignal Calls :func:`TransitSignal.plot`, saves to provided folder. :param fig: (optional) Argument for :func:`plotutils.setfig`. :param saveplot: (optional) Whether to save figure. :param folder: (optional) Folder to which to save plot :param figformat: (optional) Desired format for figure. :param **kwargs: Additional keyword arguments passed to :func:`TransitSignal.plot`. """ if folder is None: folder = self.folder self.trsig.plot(plot_trap=True,fig=fig,**kwargs) if saveplot: plt.savefig('%s/signal.%s' % (folder,figformat)) plt.close()
def generate_static(self, path):
    """Build a versioned URL into the public folder of the running project.

    Absolute paths (starting with ``/``) are used as-is; relative paths
    are prefixed with the configured static root. In both cases the
    project version is appended as a ``v`` query parameter. An empty
    path yields an empty string.
    """
    if not path:
        return ""
    prefix = "" if path.startswith('/') else "%s/" % self.static
    return "%s%s?v=%s" % (prefix, path, self.version)
This method generates a valid path to the public folder of the running project
Below is the instruction that describes the task: ### Input: This method generates a valid path to the public folder of the running project ### Response: def generate_static(self, path): """ This method generates a valid path to the public folder of the running project """ if not path: return "" if path[0] == '/': return "%s?v=%s" % (path, self.version) return "%s/%s?v=%s" % (self.static, path, self.version)
def detect_sv(align_bam, genome_build, dirs, config):
    """Detect structural variation from discordant aligned pairs."""
    work_dir = utils.safe_makedir(os.path.join(dirs["work"], "structural"))
    pair_stats = shared.calc_paired_insert_stats(align_bam)
    fix_bam = remove_nopairs(align_bam, work_dir, config)
    tier2_align = tiered_alignment(fix_bam, "2", True, [],
                                   genome_build, pair_stats,
                                   work_dir, dirs, config)
    if not tier2_align:
        return
    tier3_align = tiered_alignment(tier2_align, "3", "Ex 1100", ["-t", "300"],
                                   genome_build, pair_stats,
                                   work_dir, dirs, config)
    if not tier3_align:
        return
    # NOTE(review): the breakpoint result is computed but never returned;
    # presumably hydra_breakpoints writes files as a side effect -- confirm.
    hydra_bps = hydra_breakpoints(tier3_align, pair_stats)
Detect structural variation from discordant aligned pairs.
Below is the the instruction that describes the task: ### Input: Detect structural variation from discordant aligned pairs. ### Response: def detect_sv(align_bam, genome_build, dirs, config): """Detect structural variation from discordant aligned pairs. """ work_dir = utils.safe_makedir(os.path.join(dirs["work"], "structural")) pair_stats = shared.calc_paired_insert_stats(align_bam) fix_bam = remove_nopairs(align_bam, work_dir, config) tier2_align = tiered_alignment(fix_bam, "2", True, [], genome_build, pair_stats, work_dir, dirs, config) if tier2_align: tier3_align = tiered_alignment(tier2_align, "3", "Ex 1100", ["-t", "300"], genome_build, pair_stats, work_dir, dirs, config) if tier3_align: hydra_bps = hydra_breakpoints(tier3_align, pair_stats)
def merge_data(lista_dfs_o_dict, keys_merge=None):
    """Merge a list of pandas DataFrames (or a list of {key: DataFrame} dicts).

    Takes the first frame and folds the rest into it one by one. This may
    not be the best option for fusing data from different days, but it
    tolerates overlapping samples well (better than concat(dataframes)).

    Even when the individual DataFrames are localized (tz-aware index),
    the merge sometimes comes out in UTC and needs a TZ conversion:
    ``df.set_index(df.index.tz_convert(tz), inplace=True)``.

    :param lista_dfs_o_dict: list of DataFrames, or list of dicts of DataFrames.
    :param keys_merge: (OPT) keys to merge when the input is a list of dicts.
    """
    def _merge_lista(lista_dfs):
        # Exactly two frames ending at the same timestamp: overwrite in place.
        if len(lista_dfs) == 2 and lista_dfs[0].index[-1] == lista_dfs[1].index[-1]:
            df0 = lista_dfs[0]
            df0.update(lista_dfs[1])
        # All single-row frames, or daily frequency over a closure of >2
        # inputs (note: closes over the OUTER lista_dfs_o_dict): concat.
        elif ((all([len(df_i) == 1 for df_i in lista_dfs]))
              or (type(lista_dfs[0].index.freq) is Day and len(lista_dfs_o_dict) > 2)):
            df0 = pd.DataFrame(pd.concat(lista_dfs))
            if lista_dfs[0].index.freq and df0.index.freq is None:
                df0.index.freq = lista_dfs[0].index.freq
        # General case: fold with combine_first (earlier frames win).
        else:
            df0 = lista_dfs[0]
            for df1 in lista_dfs[1:]:
                df0 = df0.combine_first(df1)
        return df0

    # Case 1: list of dicts -- merge per key.
    if len(lista_dfs_o_dict) > 0 and type(lista_dfs_o_dict[0]) is dict:
        if keys_merge is None:
            keys_merge = lista_dfs_o_dict[0].keys()
        dict_merge = dict()
        for k in keys_merge:
            # Drop missing entries and sort by first timestamp.
            lista_k = sorted([d[k] for d in lista_dfs_o_dict
                              if (d is not None) and (d[k] is not None)],
                             key=lambda item: item.index[0])
            try:
                dict_merge[k] = pdmerge_respeta_tz(_merge_lista, lista_k[0].index.tz, lista_k)
            except AttributeError:
                # Index without tz attribute: retry without timezone.
                print('ERROR!')
                dict_merge[k] = pdmerge_respeta_tz(_merge_lista, None, lista_k)
        return dict_merge
    # Case 2: plain list of DataFrames.
    elif len(lista_dfs_o_dict) > 0:
        try:
            lista_merge = sorted(lista_dfs_o_dict, key=lambda item: item.index[0])
            return pdmerge_respeta_tz(_merge_lista, lista_merge[0].index.tz, lista_merge)
        except AttributeError:
            # A None slipped in: filter and retry once.
            lista_dfs_o_dict = [l for l in lista_dfs_o_dict if l is not None]
            if len(lista_dfs_o_dict) > 0:
                lista_merge = sorted(lista_dfs_o_dict, key=lambda item: item.index[0])
                return pdmerge_respeta_tz(_merge_lista, lista_merge[0].index.tz, lista_merge)
        except TypeError as e:
            # Incomparable / malformed inputs: report and fall through to None.
            print(e, e.__class__)
            print(lista_dfs_o_dict)
    return None
Realiza y devuelve el merge de una lista de pandas DataFrame's (o bien de un diccionario de {key:pd.Dataframe}). Coge la primera y en ella va añadiendo el resto, de una en una. Seguramente no sea la mejor opción para realizar la fusión de los datos de distintos días, pero aguanta bien la superposición de muestras (mejor que hacer un concat(dataframes)). Aun cuando las DF's individuales estén localizadas (index con TZ), en ocasiones el merge aparece en UTC y requiere una operación de conversión a TZ: data_a_corregir.set_index(data_a_corregir.index.tz_convert(tz), inplace=True) :param lista_dfs_o_dict: :param keys_merge: (OPC)
Below is the the instruction that describes the task: ### Input: Realiza y devuelve el merge de una lista de pandas DataFrame's (o bien de un diccionario de {key:pd.Dataframe}). Coge la primera y en ella va añadiendo el resto, de una en una. Seguramente no sea la mejor opción para realizar la fusión de los datos de distintos días, pero aguanta bien la superposición de muestras (mejor que hacer un concat(dataframes)). Aun cuando las DF's individuales estén localizadas (index con TZ), en ocasiones el merge aparece en UTC y requiere una operación de conversión a TZ: data_a_corregir.set_index(data_a_corregir.index.tz_convert(tz), inplace=True) :param lista_dfs_o_dict: :param keys_merge: (OPC) ### Response: def merge_data(lista_dfs_o_dict, keys_merge=None): """ Realiza y devuelve el merge de una lista de pandas DataFrame's (o bien de un diccionario de {key:pd.Dataframe}). Coge la primera y en ella va añadiendo el resto, de una en una. Seguramente no sea la mejor opción para realizar la fusión de los datos de distintos días, pero aguanta bien la superposición de muestras (mejor que hacer un concat(dataframes)). 
Aun cuando las DF's individuales estén localizadas (index con TZ), en ocasiones el merge aparece en UTC y requiere una operación de conversión a TZ: data_a_corregir.set_index(data_a_corregir.index.tz_convert(tz), inplace=True) :param lista_dfs_o_dict: :param keys_merge: (OPC) """ def _merge_lista(lista_dfs): if len(lista_dfs) == 2 and lista_dfs[0].index[-1] == lista_dfs[1].index[-1]: df0 = lista_dfs[0] df0.update(lista_dfs[1]) elif ((all([len(df_i) == 1 for df_i in lista_dfs])) or (type(lista_dfs[0].index.freq) is Day and len(lista_dfs_o_dict) > 2)): df0 = pd.DataFrame(pd.concat(lista_dfs)) if lista_dfs[0].index.freq and df0.index.freq is None: df0.index.freq = lista_dfs[0].index.freq else: df0 = lista_dfs[0] for df1 in lista_dfs[1:]: df0 = df0.combine_first(df1) return df0 if len(lista_dfs_o_dict) > 0 and type(lista_dfs_o_dict[0]) is dict: if keys_merge is None: keys_merge = lista_dfs_o_dict[0].keys() dict_merge = dict() for k in keys_merge: lista_k = sorted([d[k] for d in lista_dfs_o_dict if (d is not None) and (d[k] is not None)], key=lambda item: item.index[0]) try: dict_merge[k] = pdmerge_respeta_tz(_merge_lista, lista_k[0].index.tz, lista_k) except AttributeError: print('ERROR!') dict_merge[k] = pdmerge_respeta_tz(_merge_lista, None, lista_k) return dict_merge elif len(lista_dfs_o_dict) > 0: try: lista_merge = sorted(lista_dfs_o_dict, key=lambda item: item.index[0]) return pdmerge_respeta_tz(_merge_lista, lista_merge[0].index.tz, lista_merge) except AttributeError: lista_dfs_o_dict = [l for l in lista_dfs_o_dict if l is not None] if len(lista_dfs_o_dict) > 0: lista_merge = sorted(lista_dfs_o_dict, key=lambda item: item.index[0]) return pdmerge_respeta_tz(_merge_lista, lista_merge[0].index.tz, lista_merge) except TypeError as e: print(e, e.__class__) print(lista_dfs_o_dict) return None
def _display_controls(self): """ Method to pretty print controls. """ def print_command(char, info): char += " " * (10 - len(char)) print("{}\t{}".format(char, info)) print("") print_command("Keys", "Command") print_command("q", "reset simulation") print_command("spacebar", "toggle gripper (open/close)") print_command("w-a-s-d", "move arm horizontally in x-y plane") print_command("r-f", "move arm vertically") print_command("z-x", "rotate arm about x-axis") print_command("t-g", "rotate arm about y-axis") print_command("c-v", "rotate arm about z-axis") print_command("ESC", "quit") print("")
Method to pretty print controls.
Below is the the instruction that describes the task: ### Input: Method to pretty print controls. ### Response: def _display_controls(self): """ Method to pretty print controls. """ def print_command(char, info): char += " " * (10 - len(char)) print("{}\t{}".format(char, info)) print("") print_command("Keys", "Command") print_command("q", "reset simulation") print_command("spacebar", "toggle gripper (open/close)") print_command("w-a-s-d", "move arm horizontally in x-y plane") print_command("r-f", "move arm vertically") print_command("z-x", "rotate arm about x-axis") print_command("t-g", "rotate arm about y-axis") print_command("c-v", "rotate arm about z-axis") print_command("ESC", "quit") print("")
def set_restart_power_failure(enabled):
    '''
    Set whether or not the computer will automatically restart after a
    power failure.

    :param bool enabled: True to enable, False to disable. "On" and "Off"
        are also acceptable values. Additionally you can pass 1 and 0 to
        represent True and False respectively

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' power.set_restart_power_failure True
    '''
    mac_utils = salt.utils.mac_utils
    # Normalise the many accepted truthy/falsy spellings to "on"/"off".
    state = mac_utils.validate_enabled(enabled)
    mac_utils.execute_return_success(
        'systemsetup -setrestartpowerfailure {0}'.format(state)
    )
    # Read the setting back to confirm it actually took effect.
    return mac_utils.confirm_updated(
        state,
        get_restart_power_failure,
    )
Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True
Below is the the instruction that describes the task: ### Input: Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ### Response: def set_restart_power_failure(enabled): ''' Set whether or not the computer will automatically restart after a power failure. :param bool enabled: True to enable, False to disable. "On" and "Off" are also acceptable values. Additionally you can pass 1 and 0 to represent True and False respectively :return: True if successful, False if not :rtype: bool CLI Example: .. code-block:: bash salt '*' power.set_restart_power_failure True ''' state = salt.utils.mac_utils.validate_enabled(enabled) cmd = 'systemsetup -setrestartpowerfailure {0}'.format(state) salt.utils.mac_utils.execute_return_success(cmd) return salt.utils.mac_utils.confirm_updated( state, get_restart_power_failure, )
def remove(self, task_name):
    """Remove an existing task directory.

    `task_name`
        Task name.

    Returns ``True`` if removal successful.
    """
    try:
        shutil.rmtree(self._get_task_dir(task_name))
    except OSError:
        # Directory missing or not removable: report failure, don't raise.
        return False
    return True
Removes an existing task directory. `task_name` Task name. Returns ``True`` if removal successful.
Below is the instruction that describes the task: ### Input: Removes an existing task directory. `task_name` Task name. Returns ``True`` if removal successful. ### Response: def remove(self, task_name): """ Removes an existing task directory. `task_name` Task name. Returns ``True`` if removal successful. """ try: task_dir = self._get_task_dir(task_name) shutil.rmtree(task_dir) return True except OSError: return False
def populate(self, deserialized_txs=None, filename=None, retry=None):
    """Populates the batch with unsaved model instances
    from a generator of deserialized objects.

    NOTE(review): `retry` is accepted but never used here -- confirm
    whether callers rely on it.
    """
    if not deserialized_txs:
        raise BatchError("Failed to populate batch. There are no objects to add.")
    self.filename = filename
    if not self.filename:
        raise BatchError("Invalid filename. Got None")
    try:
        # Peek at only the FIRST item (e.g. for validation), then break.
        # Because deserialized_txs is a generator, the second loop resumes
        # from the second item -- the first is not consumed twice.
        for deserialized_tx in deserialized_txs:
            self.peek(deserialized_tx)
            self.objects.append(deserialized_tx.object)
            break
        for deserialized_tx in deserialized_txs:
            self.objects.append(deserialized_tx.object)
    except DeserializationError as e:
        raise BatchDeserializationError(e) from e
    except JSONFileError as e:
        raise BatchDeserializationError(e) from e
Populates the batch with unsaved model instances from a generator of deserialized objects.
Below is the the instruction that describes the task: ### Input: Populates the batch with unsaved model instances from a generator of deserialized objects. ### Response: def populate(self, deserialized_txs=None, filename=None, retry=None): """Populates the batch with unsaved model instances from a generator of deserialized objects. """ if not deserialized_txs: raise BatchError("Failed to populate batch. There are no objects to add.") self.filename = filename if not self.filename: raise BatchError("Invalid filename. Got None") try: for deserialized_tx in deserialized_txs: self.peek(deserialized_tx) self.objects.append(deserialized_tx.object) break for deserialized_tx in deserialized_txs: self.objects.append(deserialized_tx.object) except DeserializationError as e: raise BatchDeserializationError(e) from e except JSONFileError as e: raise BatchDeserializationError(e) from e
def compiler_version():
    """Return the version of the installed solc.

    Runs ``solc --version`` and extracts the version token (as bytes);
    returns None when the output does not match the expected format.
    """
    output = subprocess.check_output(['solc', '--version'])
    match = re.search(b'^Version: ([0-9a-z.-]+)/', output, re.MULTILINE)
    return match.group(1) if match else None
Return the version of the installed solc.
Below is the instruction that describes the task: ### Input: Return the version of the installed solc. ### Response: def compiler_version(): """ Return the version of the installed solc. """ version_info = subprocess.check_output(['solc', '--version']) match = re.search(b'^Version: ([0-9a-z.-]+)/', version_info, re.MULTILINE) if match: return match.group(1)
def add_output(self, key, value, variable_type):
    """Dynamically add output to the output_data dictionary, to be written to the DB later.

    This provides a more dynamic alternative to collecting all output up
    front: values can be stored inline as they are generated and then
    written once before the App completes (via ``write_output()``).

    Scalar variable types replace any existing entry for the same
    key/type pair. Array types accumulate: a list value extends the
    stored list, a single value is appended. A ``None`` value only
    reserves the slot without storing anything.

    Args:
        key (string): The variable name to write to storage.
        value (any): The value to write to storage.
        variable_type (string): The variable type being written.
    """
    index = '{}-{}'.format(key, variable_type)
    self.output_data.setdefault(index, {})
    if value is None:
        # Reserve the slot but store nothing.
        return
    scalar_types = ['String', 'Binary', 'KeyValue', 'TCEntity', 'TCEnhancedEntity']
    array_types = [
        'StringArray',
        'BinaryArray',
        'KeyValueArray',
        'TCEntityArray',
        'TCEnhancedEntityArray',
    ]
    if variable_type in scalar_types:
        self.output_data[index] = {'key': key, 'type': variable_type, 'value': value}
    elif variable_type in array_types:
        entry = self.output_data[index]
        entry.setdefault('key', key)
        entry.setdefault('type', variable_type)
        values = entry.setdefault('value', [])
        if isinstance(value, list):
            values.extend(value)
        else:
            values.append(value)
Dynamically add output to output_data dictionary to be written to DB later. This method provides an alternative and more dynamic way to create output variables in an App. Instead of storing the output data manually and writing all at once the data can be stored inline, when it is generated and then written before the App completes. .. code-block:: python :linenos: :lineno-start: 1 for color in ['blue', 'red', 'yellow']: tcex.playbook.add_output('app.colors', color, 'StringArray') tcex.playbook.write_output() # writes the output stored in output_data .. code-block:: json :linenos: :lineno-start: 1 { "my_color-String": { "key": "my_color", "type": "String", "value": "blue" }, "my_numbers-String": { "key": "my_numbers", "type": "String", "value": "seven" }, "my_numbers-StringArray": { "key": "my_numbers", "type": "StringArray", "value": ["seven", "five"] } } Args: key (string): The variable name to write to storage. value (any): The value to write to storage. variable_type (string): The variable type being written.
Below is the the instruction that describes the task: ### Input: Dynamically add output to output_data dictionary to be written to DB later. This method provides an alternative and more dynamic way to create output variables in an App. Instead of storing the output data manually and writing all at once the data can be stored inline, when it is generated and then written before the App completes. .. code-block:: python :linenos: :lineno-start: 1 for color in ['blue', 'red', 'yellow']: tcex.playbook.add_output('app.colors', color, 'StringArray') tcex.playbook.write_output() # writes the output stored in output_data .. code-block:: json :linenos: :lineno-start: 1 { "my_color-String": { "key": "my_color", "type": "String", "value": "blue" }, "my_numbers-String": { "key": "my_numbers", "type": "String", "value": "seven" }, "my_numbers-StringArray": { "key": "my_numbers", "type": "StringArray", "value": ["seven", "five"] } } Args: key (string): The variable name to write to storage. value (any): The value to write to storage. variable_type (string): The variable type being written. ### Response: def add_output(self, key, value, variable_type): """Dynamically add output to output_data dictionary to be written to DB later. This method provides an alternative and more dynamic way to create output variables in an App. Instead of storing the output data manually and writing all at once the data can be stored inline, when it is generated and then written before the App completes. .. code-block:: python :linenos: :lineno-start: 1 for color in ['blue', 'red', 'yellow']: tcex.playbook.add_output('app.colors', color, 'StringArray') tcex.playbook.write_output() # writes the output stored in output_data .. 
code-block:: json :linenos: :lineno-start: 1 { "my_color-String": { "key": "my_color", "type": "String", "value": "blue" }, "my_numbers-String": { "key": "my_numbers", "type": "String", "value": "seven" }, "my_numbers-StringArray": { "key": "my_numbers", "type": "StringArray", "value": ["seven", "five"] } } Args: key (string): The variable name to write to storage. value (any): The value to write to storage. variable_type (string): The variable type being written. """ index = '{}-{}'.format(key, variable_type) self.output_data.setdefault(index, {}) if value is None: return if variable_type in ['String', 'Binary', 'KeyValue', 'TCEntity', 'TCEnhancedEntity']: self.output_data[index] = {'key': key, 'type': variable_type, 'value': value} elif variable_type in [ 'StringArray', 'BinaryArray', 'KeyValueArray', 'TCEntityArray', 'TCEnhancedEntityArray', ]: self.output_data[index].setdefault('key', key) self.output_data[index].setdefault('type', variable_type) if isinstance(value, list): self.output_data[index].setdefault('value', []).extend(value) else: self.output_data[index].setdefault('value', []).append(value)
def deprecated_function(func, warning=DEPRECATED_FUNCTION_WARNING): """Adds a `DeprecationWarning` to a function Parameters ---------- func : `callable` the function to decorate with a `DeprecationWarning` warning : `str`, optional the warning to present Notes ----- The final warning message is formatted as ``warning.format(func)`` so you can use attribute references to the function itself. See the default message as an example. """ @wraps(func) def wrapped_func(*args, **kwargs): warnings.warn( DEPRECATED_FUNCTION_WARNING.format(func), category=DeprecationWarning, stacklevel=2, ) return func(*args, **kwargs) return wrapped_func
Adds a `DeprecationWarning` to a function Parameters ---------- func : `callable` the function to decorate with a `DeprecationWarning` warning : `str`, optional the warning to present Notes ----- The final warning message is formatted as ``warning.format(func)`` so you can use attribute references to the function itself. See the default message as an example.
Below is the the instruction that describes the task: ### Input: Adds a `DeprecationWarning` to a function Parameters ---------- func : `callable` the function to decorate with a `DeprecationWarning` warning : `str`, optional the warning to present Notes ----- The final warning message is formatted as ``warning.format(func)`` so you can use attribute references to the function itself. See the default message as an example. ### Response: def deprecated_function(func, warning=DEPRECATED_FUNCTION_WARNING): """Adds a `DeprecationWarning` to a function Parameters ---------- func : `callable` the function to decorate with a `DeprecationWarning` warning : `str`, optional the warning to present Notes ----- The final warning message is formatted as ``warning.format(func)`` so you can use attribute references to the function itself. See the default message as an example. """ @wraps(func) def wrapped_func(*args, **kwargs): warnings.warn( DEPRECATED_FUNCTION_WARNING.format(func), category=DeprecationWarning, stacklevel=2, ) return func(*args, **kwargs) return wrapped_func
def enable_global_annotations_decorator(flag = True, retrospective = True): """Enables or disables global annotation mode via decorators. See flag global_annotations_decorator. In contrast to setting the flag directly, this function provides a retrospective option. If retrospective is true, this will also affect already imported modules, not only future imports. """ global global_annotations_decorator global_annotations_decorator = flag if import_hook_enabled: _install_import_hook() if global_annotations_decorator and retrospective: _catch_up_global_annotations_decorator() return global_annotations_decorator
Enables or disables global annotation mode via decorators. See flag global_annotations_decorator. In contrast to setting the flag directly, this function provides a retrospective option. If retrospective is true, this will also affect already imported modules, not only future imports.
Below is the the instruction that describes the task: ### Input: Enables or disables global annotation mode via decorators. See flag global_annotations_decorator. In contrast to setting the flag directly, this function provides a retrospective option. If retrospective is true, this will also affect already imported modules, not only future imports. ### Response: def enable_global_annotations_decorator(flag = True, retrospective = True): """Enables or disables global annotation mode via decorators. See flag global_annotations_decorator. In contrast to setting the flag directly, this function provides a retrospective option. If retrospective is true, this will also affect already imported modules, not only future imports. """ global global_annotations_decorator global_annotations_decorator = flag if import_hook_enabled: _install_import_hook() if global_annotations_decorator and retrospective: _catch_up_global_annotations_decorator() return global_annotations_decorator
def get_mol_filename(self): '''Returns mol filename''' mol_filename = parsers.get_mol_filename(self.__chebi_id) if mol_filename is None: mol_filename = parsers.get_mol_filename(self.get_parent_id()) if mol_filename is None: for parent_or_child_id in self.__get_all_ids(): mol_filename = \ parsers.get_mol_filename(parent_or_child_id) if mol_filename is not None: break return mol_filename
Returns mol filename
Below is the the instruction that describes the task: ### Input: Returns mol filename ### Response: def get_mol_filename(self): '''Returns mol filename''' mol_filename = parsers.get_mol_filename(self.__chebi_id) if mol_filename is None: mol_filename = parsers.get_mol_filename(self.get_parent_id()) if mol_filename is None: for parent_or_child_id in self.__get_all_ids(): mol_filename = \ parsers.get_mol_filename(parent_or_child_id) if mol_filename is not None: break return mol_filename
def create_metadata_response(self, uri, http_method='GET', body=None, headers=None): """Create metadata response """ headers = { 'Content-Type': 'application/json' } return headers, json.dumps(self.claims), 200
Create metadata response
Below is the the instruction that describes the task: ### Input: Create metadata response ### Response: def create_metadata_response(self, uri, http_method='GET', body=None, headers=None): """Create metadata response """ headers = { 'Content-Type': 'application/json' } return headers, json.dumps(self.claims), 200
def logexception(self, logger=None): ''' calls exception method on a loger and prints the log to stdout logger is set in the cfg #Todo more details here on the cfg etc :param logger: :return: ''' traceback.print_exc() if logger: logger.exception('Unexpected runtime Error...') else: self.logger.exception('Unexpected runtime Error...')
calls exception method on a loger and prints the log to stdout logger is set in the cfg #Todo more details here on the cfg etc :param logger: :return:
Below is the the instruction that describes the task: ### Input: calls exception method on a loger and prints the log to stdout logger is set in the cfg #Todo more details here on the cfg etc :param logger: :return: ### Response: def logexception(self, logger=None): ''' calls exception method on a loger and prints the log to stdout logger is set in the cfg #Todo more details here on the cfg etc :param logger: :return: ''' traceback.print_exc() if logger: logger.exception('Unexpected runtime Error...') else: self.logger.exception('Unexpected runtime Error...')
def _handle_post(self): """ An OCSP POST request contains the DER encoded OCSP request in the HTTP request body. """ der = request.body.read() ocsp_request = self._parse_ocsp_request(der) return self._build_http_response(ocsp_request)
An OCSP POST request contains the DER encoded OCSP request in the HTTP request body.
Below is the the instruction that describes the task: ### Input: An OCSP POST request contains the DER encoded OCSP request in the HTTP request body. ### Response: def _handle_post(self): """ An OCSP POST request contains the DER encoded OCSP request in the HTTP request body. """ der = request.body.read() ocsp_request = self._parse_ocsp_request(der) return self._build_http_response(ocsp_request)
def aggregate_data(data, T=[0, inf], P=[0, inf], data_ranges=None, merge="overlap", feature_type=None, impute=False, scale_center=True): """ Aggregates molecular data for model training Parameters ---------- data: list density, cpt, and/or viscosity T: array desired min and max of temperature distribution P: array desired min and max of pressure distribution data_ranges: array desired min and max of property distribution(s) merge: str overlap or union, defaults to overlap. Merge type of property sets feature_type: str desired feature set, defaults to RDKit's 2D descriptor set Returns ----------- devmodel: dev_model obj returns dev_model object containing scale/center information, data summary, and the data frame """ data_files = [] for i, string in enumerate(data): data_files.append(load_data("%s_premodel.csv" % string)) if i == 0: merged = data_files[0] if i == 1: merged = pd.merge(data_files[0], data_files[1], sort=False, how='outer') elif i > 1: merged = pd.merge(merged, data_files[-1], sort=False, how='outer') if merge == "overlap": merged.dropna(inplace=True) # select state variable and data ranges merged = merged.loc[merged["Temperature, K"] < T[1]] merged = merged.loc[merged["Temperature, K"] > T[0]] merged = merged.loc[merged["Pressure, kPa"] < P[1]] merged = merged.loc[merged["Pressure, kPa"] > P[0]] for i in range(1, len(data) + 1): merged = merged[merged.iloc[:, -i] != 0] # avoid log(0) error if data_ranges: merged = merged[merged.iloc[:, -i] < data_ranges[::-1][i - 1][1]] merged = merged[merged.iloc[:, -i] > data_ranges[::-1][i - 1][0]] merged.reset_index(drop=True, inplace=True) # Create summary of dataset unique_salts = merged["smiles-cation"] + merged["smiles-anion"] unique_cations = repr(merged["smiles-cation"].unique()) unique_anions = repr(merged["smiles-anion"].unique()) actual_data_ranges = [] for i in range(1, len(data) + 3): actual_data_ranges.append("{} - {}".format( str(merged.iloc[:, -i].min()), str(merged.iloc[:, -i].max()))) a = 
np.array([len(unique_salts.unique()), unique_cations, unique_anions, len(unique_salts)]) a = np.concatenate((a, actual_data_ranges)) cols1 = ["Unique salts", "Cations", "Anions", "Total datapoints"] cols2 = ["Temperature range (K)", "Pressure range (kPa)"] cols = cols1 + data[::-1] + cols2 data_summary = pd.DataFrame(a, cols) # scale and center metaDf = merged.select_dtypes(include=["object"]) dataDf = merged.select_dtypes(include=[np.number]) cols = dataDf.columns.tolist() if impute: imp = Imputer(missing_values='NaN', strategy="median", axis=0) X = imp.fit_transform(dataDf) dataDf = pd.DataFrame(X, columns=cols) instance = StandardScaler() if scale_center: for i in range(1, len(data) + 1): dataDf.is_copy = False dataDf.iloc[:, -i] = dataDf.iloc[:, -i].apply(lambda x: log(float(x))) scaled_data = pd.DataFrame(instance. fit_transform(dataDf.iloc[:, :-len(data)]), columns=cols[:-len(data)]) df = pd.concat([scaled_data, dataDf.iloc[:, -len(data):], metaDf], axis=1) mean_std_of_coeffs = pd.DataFrame([instance.mean_, instance.scale_], columns=cols[:-len(data)]) else: instance.fit(dataDf.iloc[:, :-len(data)]) df = pd.concat([dataDf, metaDf], axis=1) mean_std_of_coeffs = pd.DataFrame([instance.mean_, instance.scale_], columns=cols[:-len(data)]) devmodel = dev_model(mean_std_of_coeffs, data_summary, df) return devmodel
Aggregates molecular data for model training Parameters ---------- data: list density, cpt, and/or viscosity T: array desired min and max of temperature distribution P: array desired min and max of pressure distribution data_ranges: array desired min and max of property distribution(s) merge: str overlap or union, defaults to overlap. Merge type of property sets feature_type: str desired feature set, defaults to RDKit's 2D descriptor set Returns ----------- devmodel: dev_model obj returns dev_model object containing scale/center information, data summary, and the data frame
Below is the the instruction that describes the task: ### Input: Aggregates molecular data for model training Parameters ---------- data: list density, cpt, and/or viscosity T: array desired min and max of temperature distribution P: array desired min and max of pressure distribution data_ranges: array desired min and max of property distribution(s) merge: str overlap or union, defaults to overlap. Merge type of property sets feature_type: str desired feature set, defaults to RDKit's 2D descriptor set Returns ----------- devmodel: dev_model obj returns dev_model object containing scale/center information, data summary, and the data frame ### Response: def aggregate_data(data, T=[0, inf], P=[0, inf], data_ranges=None, merge="overlap", feature_type=None, impute=False, scale_center=True): """ Aggregates molecular data for model training Parameters ---------- data: list density, cpt, and/or viscosity T: array desired min and max of temperature distribution P: array desired min and max of pressure distribution data_ranges: array desired min and max of property distribution(s) merge: str overlap or union, defaults to overlap. 
Merge type of property sets feature_type: str desired feature set, defaults to RDKit's 2D descriptor set Returns ----------- devmodel: dev_model obj returns dev_model object containing scale/center information, data summary, and the data frame """ data_files = [] for i, string in enumerate(data): data_files.append(load_data("%s_premodel.csv" % string)) if i == 0: merged = data_files[0] if i == 1: merged = pd.merge(data_files[0], data_files[1], sort=False, how='outer') elif i > 1: merged = pd.merge(merged, data_files[-1], sort=False, how='outer') if merge == "overlap": merged.dropna(inplace=True) # select state variable and data ranges merged = merged.loc[merged["Temperature, K"] < T[1]] merged = merged.loc[merged["Temperature, K"] > T[0]] merged = merged.loc[merged["Pressure, kPa"] < P[1]] merged = merged.loc[merged["Pressure, kPa"] > P[0]] for i in range(1, len(data) + 1): merged = merged[merged.iloc[:, -i] != 0] # avoid log(0) error if data_ranges: merged = merged[merged.iloc[:, -i] < data_ranges[::-1][i - 1][1]] merged = merged[merged.iloc[:, -i] > data_ranges[::-1][i - 1][0]] merged.reset_index(drop=True, inplace=True) # Create summary of dataset unique_salts = merged["smiles-cation"] + merged["smiles-anion"] unique_cations = repr(merged["smiles-cation"].unique()) unique_anions = repr(merged["smiles-anion"].unique()) actual_data_ranges = [] for i in range(1, len(data) + 3): actual_data_ranges.append("{} - {}".format( str(merged.iloc[:, -i].min()), str(merged.iloc[:, -i].max()))) a = np.array([len(unique_salts.unique()), unique_cations, unique_anions, len(unique_salts)]) a = np.concatenate((a, actual_data_ranges)) cols1 = ["Unique salts", "Cations", "Anions", "Total datapoints"] cols2 = ["Temperature range (K)", "Pressure range (kPa)"] cols = cols1 + data[::-1] + cols2 data_summary = pd.DataFrame(a, cols) # scale and center metaDf = merged.select_dtypes(include=["object"]) dataDf = merged.select_dtypes(include=[np.number]) cols = dataDf.columns.tolist() if 
impute: imp = Imputer(missing_values='NaN', strategy="median", axis=0) X = imp.fit_transform(dataDf) dataDf = pd.DataFrame(X, columns=cols) instance = StandardScaler() if scale_center: for i in range(1, len(data) + 1): dataDf.is_copy = False dataDf.iloc[:, -i] = dataDf.iloc[:, -i].apply(lambda x: log(float(x))) scaled_data = pd.DataFrame(instance. fit_transform(dataDf.iloc[:, :-len(data)]), columns=cols[:-len(data)]) df = pd.concat([scaled_data, dataDf.iloc[:, -len(data):], metaDf], axis=1) mean_std_of_coeffs = pd.DataFrame([instance.mean_, instance.scale_], columns=cols[:-len(data)]) else: instance.fit(dataDf.iloc[:, :-len(data)]) df = pd.concat([dataDf, metaDf], axis=1) mean_std_of_coeffs = pd.DataFrame([instance.mean_, instance.scale_], columns=cols[:-len(data)]) devmodel = dev_model(mean_std_of_coeffs, data_summary, df) return devmodel
def get_vnetwork_dvpgs_input_vcenter(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs") config = get_vnetwork_dvpgs input = ET.SubElement(get_vnetwork_dvpgs, "input") vcenter = ET.SubElement(input, "vcenter") vcenter.text = kwargs.pop('vcenter') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
Below is the the instruction that describes the task: ### Input: Auto Generated Code ### Response: def get_vnetwork_dvpgs_input_vcenter(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_dvpgs = ET.Element("get_vnetwork_dvpgs") config = get_vnetwork_dvpgs input = ET.SubElement(get_vnetwork_dvpgs, "input") vcenter = ET.SubElement(input, "vcenter") vcenter.text = kwargs.pop('vcenter') callback = kwargs.pop('callback', self._callback) return callback(config)
def _augment_network(self, edge_dict): """Given a dictionary of edges (edge id -> feature list), add all of these to the CoNetwork object """ for (vertex0, vertex1), feature_list in edge_dict.items(): edge_obj = Edge(vertex0, vertex1, feature_list) self.edges[(vertex0, vertex1)] = edge_obj self._add_edge_to_vertex(vertex0, edge_obj) self._add_edge_to_vertex(vertex1, edge_obj)
Given a dictionary of edges (edge id -> feature list), add all of these to the CoNetwork object
Below is the the instruction that describes the task: ### Input: Given a dictionary of edges (edge id -> feature list), add all of these to the CoNetwork object ### Response: def _augment_network(self, edge_dict): """Given a dictionary of edges (edge id -> feature list), add all of these to the CoNetwork object """ for (vertex0, vertex1), feature_list in edge_dict.items(): edge_obj = Edge(vertex0, vertex1, feature_list) self.edges[(vertex0, vertex1)] = edge_obj self._add_edge_to_vertex(vertex0, edge_obj) self._add_edge_to_vertex(vertex1, edge_obj)
def convolve(data, h, res_g=None, sub_blocks=None): """ convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge. """ if not len(data.shape) in [1, 2, 3]: raise ValueError("dim = %s not supported" % (len(data.shape))) if len(data.shape) != len(h.shape): raise ValueError("dimemnsion of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape))) if isinstance(data, OCLArray) and isinstance(h, OCLArray): return _convolve_buf(data, h, res_g) elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray): if sub_blocks == (1,) * len(data.shape) or sub_blocks is None: return _convolve_np(data, h) else: # cut the image into tile and operate on every of them N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)] Npads = [int(s / 2) for s in h.shape] res = np.empty(data.shape, np.float32) for data_tile, data_s_src, data_s_dest \ in tile_iterator(data, blocksize=N_sub, padsize=Npads, mode="constant"): res_tile = _convolve_np(data_tile.copy(), h) res[data_s_src] = res_tile[data_s_dest] return res else: raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge.
Below is the the instruction that describes the task: ### Input: convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge. ### Response: def convolve(data, h, res_g=None, sub_blocks=None): """ convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge. """ if not len(data.shape) in [1, 2, 3]: raise ValueError("dim = %s not supported" % (len(data.shape))) if len(data.shape) != len(h.shape): raise ValueError("dimemnsion of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape))) if isinstance(data, OCLArray) and isinstance(h, OCLArray): return _convolve_buf(data, h, res_g) elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray): if sub_blocks == (1,) * len(data.shape) or sub_blocks is None: return _convolve_np(data, h) else: # cut the image into tile and operate on every of them N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)] Npads = [int(s / 2) for s in h.shape] res = np.empty(data.shape, np.float32) for data_tile, data_s_src, data_s_dest \ in tile_iterator(data, blocksize=N_sub, padsize=Npads, mode="constant"): res_tile = _convolve_np(data_tile.copy(), h) res[data_s_src] = res_tile[data_s_dest] return res else: raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
def append(self, data, size): """ Append user-supplied data to chunk, return resulting chunk size. If the data would exceeded the available space, it is truncated. If you want to grow the chunk to accommodate new data, use the zchunk_extend method. """ return lib.zchunk_append(self._as_parameter_, data, size)
Append user-supplied data to chunk, return resulting chunk size. If the data would exceeded the available space, it is truncated. If you want to grow the chunk to accommodate new data, use the zchunk_extend method.
Below is the the instruction that describes the task: ### Input: Append user-supplied data to chunk, return resulting chunk size. If the data would exceeded the available space, it is truncated. If you want to grow the chunk to accommodate new data, use the zchunk_extend method. ### Response: def append(self, data, size): """ Append user-supplied data to chunk, return resulting chunk size. If the data would exceeded the available space, it is truncated. If you want to grow the chunk to accommodate new data, use the zchunk_extend method. """ return lib.zchunk_append(self._as_parameter_, data, size)
def run(self): """Required by flake8 Will be called after add_options and parse_options. Yields: tuple: (int, int, str, type) the tuple used by flake8 to construct a violation """ if len(self.filename_checks) == 0: message = "N401 no configuration found for {}, " \ "please provide filename configuration in a flake8 config".format(self.name) yield (0, 0, message, type(self)) rule_funcs = [rules.rule_n5xx] for rule_func in rule_funcs: for rule_name, configured_rule in self.filename_checks.items(): for err in rule_func(self.filename, rule_name, configured_rule, type(self)): yield err
Required by flake8 Will be called after add_options and parse_options. Yields: tuple: (int, int, str, type) the tuple used by flake8 to construct a violation
Below is the the instruction that describes the task: ### Input: Required by flake8 Will be called after add_options and parse_options. Yields: tuple: (int, int, str, type) the tuple used by flake8 to construct a violation ### Response: def run(self): """Required by flake8 Will be called after add_options and parse_options. Yields: tuple: (int, int, str, type) the tuple used by flake8 to construct a violation """ if len(self.filename_checks) == 0: message = "N401 no configuration found for {}, " \ "please provide filename configuration in a flake8 config".format(self.name) yield (0, 0, message, type(self)) rule_funcs = [rules.rule_n5xx] for rule_func in rule_funcs: for rule_name, configured_rule in self.filename_checks.items(): for err in rule_func(self.filename, rule_name, configured_rule, type(self)): yield err
def mf_aBl(self): """ These are the expected log likelihoods (node potentials) as seen from the discrete states. """ mf_aBl = self._mf_aBl = np.zeros((self.T, self.num_states)) ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, \ self.emission_distns for idx, (d1, d2, d3) in enumerate(zip(ids, dds, eds)): mf_aBl[0,idx] = d1.expected_log_likelihood( stats=self.E_init_stats) mf_aBl[:-1,idx] += d2.expected_log_likelihood( stats=self.E_dynamics_stats) mf_aBl[:,idx] += d3.expected_log_likelihood( stats=self.E_emission_stats) mf_aBl[np.isnan(mf_aBl).any(1)] = 0. return mf_aBl
These are the expected log likelihoods (node potentials) as seen from the discrete states.
Below is the the instruction that describes the task: ### Input: These are the expected log likelihoods (node potentials) as seen from the discrete states. ### Response: def mf_aBl(self): """ These are the expected log likelihoods (node potentials) as seen from the discrete states. """ mf_aBl = self._mf_aBl = np.zeros((self.T, self.num_states)) ids, dds, eds = self.init_dynamics_distns, self.dynamics_distns, \ self.emission_distns for idx, (d1, d2, d3) in enumerate(zip(ids, dds, eds)): mf_aBl[0,idx] = d1.expected_log_likelihood( stats=self.E_init_stats) mf_aBl[:-1,idx] += d2.expected_log_likelihood( stats=self.E_dynamics_stats) mf_aBl[:,idx] += d3.expected_log_likelihood( stats=self.E_emission_stats) mf_aBl[np.isnan(mf_aBl).any(1)] = 0. return mf_aBl
def delete_panel(self, panel_obj): """Delete a panel by '_id'. Args: panel_obj(dict) Returns: res(pymongo.DeleteResult) """ res = self.panel_collection.delete_one({'_id': panel_obj['_id']}) LOG.warning("Deleting panel %s, version %s" % (panel_obj['panel_name'], panel_obj['version'])) return res
Delete a panel by '_id'. Args: panel_obj(dict) Returns: res(pymongo.DeleteResult)
Below is the the instruction that describes the task: ### Input: Delete a panel by '_id'. Args: panel_obj(dict) Returns: res(pymongo.DeleteResult) ### Response: def delete_panel(self, panel_obj): """Delete a panel by '_id'. Args: panel_obj(dict) Returns: res(pymongo.DeleteResult) """ res = self.panel_collection.delete_one({'_id': panel_obj['_id']}) LOG.warning("Deleting panel %s, version %s" % (panel_obj['panel_name'], panel_obj['version'])) return res
def get_response_content_type(self): """Figure out what content type will be used in the response.""" if self._best_response_match is None: settings = get_settings(self.application, force_instance=True) acceptable = headers.parse_accept( self.request.headers.get( 'Accept', settings.default_content_type if settings.default_content_type else '*/*')) try: selected, _ = algorithms.select_content_type( acceptable, settings.available_content_types) self._best_response_match = '/'.join( [selected.content_type, selected.content_subtype]) if selected.content_suffix is not None: self._best_response_match = '+'.join( [self._best_response_match, selected.content_suffix]) except errors.NoMatch: self._best_response_match = settings.default_content_type return self._best_response_match
Figure out what content type will be used in the response.
Below is the the instruction that describes the task: ### Input: Figure out what content type will be used in the response. ### Response: def get_response_content_type(self): """Figure out what content type will be used in the response.""" if self._best_response_match is None: settings = get_settings(self.application, force_instance=True) acceptable = headers.parse_accept( self.request.headers.get( 'Accept', settings.default_content_type if settings.default_content_type else '*/*')) try: selected, _ = algorithms.select_content_type( acceptable, settings.available_content_types) self._best_response_match = '/'.join( [selected.content_type, selected.content_subtype]) if selected.content_suffix is not None: self._best_response_match = '+'.join( [self._best_response_match, selected.content_suffix]) except errors.NoMatch: self._best_response_match = settings.default_content_type return self._best_response_match
def is_winding_consistent(self): """ Does the mesh have consistent winding or not. A mesh with consistent winding has each shared edge going in an opposite direction from the other in the pair. Returns -------- consistent : bool Is winding is consistent or not """ if self.is_empty: return False # consistent winding check is populated into the cache by is_watertight populate = self.is_watertight return self._cache['is_winding_consistent']
Does the mesh have consistent winding or not. A mesh with consistent winding has each shared edge going in an opposite direction from the other in the pair. Returns -------- consistent : bool Is winding is consistent or not
Below is the the instruction that describes the task: ### Input: Does the mesh have consistent winding or not. A mesh with consistent winding has each shared edge going in an opposite direction from the other in the pair. Returns -------- consistent : bool Is winding is consistent or not ### Response: def is_winding_consistent(self): """ Does the mesh have consistent winding or not. A mesh with consistent winding has each shared edge going in an opposite direction from the other in the pair. Returns -------- consistent : bool Is winding is consistent or not """ if self.is_empty: return False # consistent winding check is populated into the cache by is_watertight populate = self.is_watertight return self._cache['is_winding_consistent']
def update(cls, request_response_id, monetary_account_id=None, amount_responded=None, status=None, address_shipping=None, address_billing=None, custom_headers=None): """ Update the status to accept or reject the RequestResponse. :type user_id: int :type monetary_account_id: int :type request_response_id: int :param amount_responded: The Amount the user decides to pay. :type amount_responded: object_.Amount :param status: The responding status of the RequestResponse. Can be ACCEPTED or REJECTED. :type status: str :param address_shipping: The shipping Address to return to the user who created the RequestInquiry. Should only be provided if 'require_address' is set to SHIPPING, BILLING_SHIPPING or OPTIONAL. :type address_shipping: object_.Address :param address_billing: The billing Address to return to the user who created the RequestInquiry. Should only be provided if 'require_address' is set to BILLING, BILLING_SHIPPING or OPTIONAL. :type address_billing: object_.Address :type custom_headers: dict[str, str]|None :rtype: BunqResponseRequestResponse """ if custom_headers is None: custom_headers = {} api_client = client.ApiClient(cls._get_api_context()) request_map = { cls.FIELD_AMOUNT_RESPONDED: amount_responded, cls.FIELD_STATUS: status, cls.FIELD_ADDRESS_SHIPPING: address_shipping, cls.FIELD_ADDRESS_BILLING: address_billing } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id), request_response_id) response_raw = api_client.put(endpoint_url, request_bytes, custom_headers) return BunqResponseRequestResponse.cast_from_bunq_response( cls._from_json(response_raw, cls._OBJECT_TYPE_PUT) )
Update the status to accept or reject the RequestResponse. :type user_id: int :type monetary_account_id: int :type request_response_id: int :param amount_responded: The Amount the user decides to pay. :type amount_responded: object_.Amount :param status: The responding status of the RequestResponse. Can be ACCEPTED or REJECTED. :type status: str :param address_shipping: The shipping Address to return to the user who created the RequestInquiry. Should only be provided if 'require_address' is set to SHIPPING, BILLING_SHIPPING or OPTIONAL. :type address_shipping: object_.Address :param address_billing: The billing Address to return to the user who created the RequestInquiry. Should only be provided if 'require_address' is set to BILLING, BILLING_SHIPPING or OPTIONAL. :type address_billing: object_.Address :type custom_headers: dict[str, str]|None :rtype: BunqResponseRequestResponse
Below is the the instruction that describes the task: ### Input: Update the status to accept or reject the RequestResponse. :type user_id: int :type monetary_account_id: int :type request_response_id: int :param amount_responded: The Amount the user decides to pay. :type amount_responded: object_.Amount :param status: The responding status of the RequestResponse. Can be ACCEPTED or REJECTED. :type status: str :param address_shipping: The shipping Address to return to the user who created the RequestInquiry. Should only be provided if 'require_address' is set to SHIPPING, BILLING_SHIPPING or OPTIONAL. :type address_shipping: object_.Address :param address_billing: The billing Address to return to the user who created the RequestInquiry. Should only be provided if 'require_address' is set to BILLING, BILLING_SHIPPING or OPTIONAL. :type address_billing: object_.Address :type custom_headers: dict[str, str]|None :rtype: BunqResponseRequestResponse ### Response: def update(cls, request_response_id, monetary_account_id=None, amount_responded=None, status=None, address_shipping=None, address_billing=None, custom_headers=None): """ Update the status to accept or reject the RequestResponse. :type user_id: int :type monetary_account_id: int :type request_response_id: int :param amount_responded: The Amount the user decides to pay. :type amount_responded: object_.Amount :param status: The responding status of the RequestResponse. Can be ACCEPTED or REJECTED. :type status: str :param address_shipping: The shipping Address to return to the user who created the RequestInquiry. Should only be provided if 'require_address' is set to SHIPPING, BILLING_SHIPPING or OPTIONAL. :type address_shipping: object_.Address :param address_billing: The billing Address to return to the user who created the RequestInquiry. Should only be provided if 'require_address' is set to BILLING, BILLING_SHIPPING or OPTIONAL. 
:type address_billing: object_.Address :type custom_headers: dict[str, str]|None :rtype: BunqResponseRequestResponse """ if custom_headers is None: custom_headers = {} api_client = client.ApiClient(cls._get_api_context()) request_map = { cls.FIELD_AMOUNT_RESPONDED: amount_responded, cls.FIELD_STATUS: status, cls.FIELD_ADDRESS_SHIPPING: address_shipping, cls.FIELD_ADDRESS_BILLING: address_billing } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) request_bytes = request_map_string.encode() endpoint_url = cls._ENDPOINT_URL_UPDATE.format(cls._determine_user_id(), cls._determine_monetary_account_id( monetary_account_id), request_response_id) response_raw = api_client.put(endpoint_url, request_bytes, custom_headers) return BunqResponseRequestResponse.cast_from_bunq_response( cls._from_json(response_raw, cls._OBJECT_TYPE_PUT) )
def network_sketch(ax, highlight=None, labels=True, yscaling=1.): ''' highlight : None or string if string, then only the label of this population is set and the box is highlighted ''' name_to_id_mapping={'L6E':(0,0), 'L6I':(0,1), 'L5E':(1,0), 'L5I':(1,1), 'L4E':(2,0), 'L4I':(2,1), 'L23E':(3,0), 'L23I':(3,1) } showgrid=False ## switch on/off grid ## sketch parameters layer_x=0.1 ## x position of left boundary of cortex layers layer6_y=0.2*yscaling ## y position of lower boundary of layer 6 layer_width=0.65 ## width of cortex layers layer_height=0.21*yscaling ## height of cortex layers layer_colors=['0.9','0.8','0.9','0.8'] ## layer colors c_pop_size=0.15 ## cortex population size c_pop_dist=0.17 ## distance between cortex populations t_pop_size=0.15 ## thalamus population size t_pop_y=0.0 ## y position of lower thalamus boundary axon_cell_sep=0.04 ## distance between axons and popualations cc_input_y=0.6*yscaling ## y position of cortico-cortical synapses (relative to cortex population) tc_input_y=0.4*yscaling ## y position of thalamo-cortical synapses (relative to cortex population) exc_clr = 'k' if analysis_params.bw else analysis_params.colorE ## color of excitatory axons/synapses inh_clr = 'gray' if analysis_params.bw else analysis_params.colorI ## color of inhibitory axons/synapses lw_pop=0.5 ## linewidth for populations lw_axons=0.4 ## linewidth for axons arrow_size=0.013 ## arrow size conn_radius=0.005 ## radius of connector marker legend_length=0.07 ## length of legend arrows colors = phlp.get_colors(8)[::-1] ## colors of each population fontdict1={'fontsize': 6, ## population name 'weight':'normal', 'horizontalalignment':'center', 'verticalalignment':'center'} fontdict2={'fontsize': 6, ## cortico-cortical input 'weight':'normal', 'horizontalalignment':'center', 'verticalalignment':'center'} fontdict3={'fontsize': 6, ## legend 'weight':'normal', 'horizontalalignment':'left', 'verticalalignment':'center'} 
###################################################################################### def draw_box(ax,pos,lw=1.,ls='solid',eclr='k',fclr='w',zorder=0, clip_on=False, boxstyle=patches.BoxStyle("Round", pad=0.0), padadjust=0.): '''Draws a rectangle.''' rect = patches.FancyBboxPatch((pos[0]+padadjust, pos[1]+padadjust), pos[2]-2*padadjust, pos[3]-2*padadjust, ec=eclr, fc=fclr, lw=lw, ls=ls, zorder=zorder, clip_on=clip_on, boxstyle=boxstyle) ax.add_patch(rect) def draw_circle(ax,xy,radius,lw=1.,ls='solid',eclr='k',fclr='w',zorder=0): '''Draws a circle.''' circ = plt.Circle((xy[0],xy[1]),radius=radius, ec=eclr,fc=fclr,lw=lw,ls=ls,zorder=zorder) ax.add_patch(circ) def put_text(ax,xy,txt,clr,fontdict,zorder=10): '''Puts text to a specific position.''' ax.text(xy[0],xy[1],txt,fontdict=fontdict,color=clr,zorder=zorder) def draw_line(ax,path,lw=1.,ls='solid',lclr='k',zorder=0): '''Draws a path.''' #pth = path.Path(np.array(path)) pth = Path(np.array(path)) patch = patches.PathPatch(pth, fill=False, lw=lw,ls=ls,ec=lclr,fc=lclr,zorder=zorder) ax.add_patch(patch) def draw_arrow(ax,path,lw=1.0,ls='solid',lclr='k',arrow_size=0.025,zorder=0): '''Draws a path with an arrow at the end. 
''' x=path[-2][0] y=path[-2][1] dx=path[-1][0]-path[-2][0] dy=path[-1][1]-path[-2][1] D=np.array([dx,dy]) D=D/np.sqrt(D[0]**2+D[1]**2) path2=np.array(path).copy() path2[-1,:]=path2[-1,:]-arrow_size*D pth = Path(np.array(path2)) patch = patches.PathPatch(pth, fill=False, lw=lw,ls=ls,ec=lclr,fc=lclr,zorder=zorder) ax.add_patch(patch) arr=patches.FancyArrow(\ x,y,dx,dy,\ length_includes_head=True,width=0.0,head_width=arrow_size,\ overhang=0.2,ec=lclr,fc=lclr,linewidth=0) ax.add_patch(arr) ################################################## ## populations ## cortex layer_pos=[] c_pop_pos=[] for i in xrange(4): ## cortex layers layer_pos+=[[layer_x,layer6_y+i*layer_height*yscaling,layer_width,layer_height]] ## layer positions draw_box(ax,layer_pos[i],lw=0.,fclr=layer_colors[i],zorder=0) ## cortex populations l_margin=(layer_width-2.*c_pop_size-c_pop_dist)/2. b_margin=(layer_height-c_pop_size)/2. ## positions of cortex populations c_pop_pos+=[[ [layer_pos[i][0] + l_margin, layer_pos[i][1] + b_margin, c_pop_size, c_pop_size], ## E [layer_pos[i][0] + l_margin + c_pop_size + c_pop_dist, layer_pos[i][1] + b_margin, c_pop_size, c_pop_size] ]] ## I #draw_box(ax,c_pop_pos[i][0],lw=lw_pop,eclr='k',fclr='w',zorder=2) ## E #draw_box(ax,c_pop_pos[i][1],lw=lw_pop,eclr='k',fclr='w',zorder=2) ## I draw_box(ax,c_pop_pos[i][0],lw=lw_pop,eclr='k',fclr=colors[i*2+1],zorder=2, boxstyle=patches.BoxStyle("Round", pad=0.02), padadjust=0.02) ## E draw_box(ax,c_pop_pos[i][1],lw=lw_pop,eclr='k',fclr=colors[i*2],zorder=2, boxstyle=patches.BoxStyle("Round", pad=0.02), padadjust=0.02) ## I ## thalamus c_center_x=layer_x+layer_width/2. 
## x position of cortex center t_pos=[c_center_x-t_pop_size/2.,t_pop_y*yscaling,t_pop_size,t_pop_size] ## thalamus position #draw_box(ax,t_pos,lw=lw_pop,eclr='k',fclr='w',zorder=2) ## Th draw_box(ax,t_pos,lw=lw_pop,eclr='k',fclr='k',zorder=2, boxstyle=patches.BoxStyle("Round", pad=0.02), padadjust=0.02) ## Th ################################################## ## intracortical axons axon_x_dist=(c_pop_dist-2.*axon_cell_sep)/7. assert(axon_x_dist>0.) axon_y_dist=c_pop_size/9.#*yscaling c_axon_x=[] c_axon_y=[] # x positions of vertical intracortical axons for i in xrange(4): # pre layer exc=c_pop_pos[i][0][0]+c_pop_size+axon_cell_sep+i*axon_x_dist ## E inh=exc+4.*axon_x_dist ## I c_axon_x+=[[exc,inh]] # y positions of horizontal intracortical axons for i in xrange(4): ## post layer c_axon_y+=[[]] for j in xrange(4): ## pre layer exc=c_pop_pos[i][0][1]+(j+1.)*axon_y_dist ## E inh=c_pop_pos[i][0][1]+c_pop_size-(j+1.)*axon_y_dist ## I c_axon_y[i]+=[[exc,inh]] ## vertical intracortical axons for i in xrange(4): draw_line(ax,[[c_axon_x[i][0],c_axon_y[0][i][0]],[c_axon_x[i][0],c_axon_y[-1][i][0]]],lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) draw_line(ax,[[c_axon_x[i][1],c_axon_y[0][i][1]],[c_axon_x[i][1],c_axon_y[-1][i][1]]],lw=lw_axons,ls='solid',lclr=inh_clr,zorder=0) ## horizontal intracortical axons for i in xrange(4): ## post layer for j in xrange(4): ## pre layer path=[[c_axon_x[j][0],c_axon_y[i][j][0]],[c_pop_pos[i][0][0]+c_pop_size,c_axon_y[i][j][0]]] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) path=[[c_axon_x[j][0],c_axon_y[i][j][0]],[c_pop_pos[i][1][0],c_axon_y[i][j][0]]] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) path=[[c_axon_x[j][1],c_axon_y[i][j][1]],[c_pop_pos[i][1][0],c_axon_y[i][j][1]]] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=inh_clr,arrow_size=arrow_size,zorder=0) path=[[c_axon_x[j][1],c_axon_y[i][j][1]],[c_pop_pos[i][0][0]+c_pop_size,c_axon_y[i][j][1]]] 
draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=inh_clr,arrow_size=arrow_size,zorder=0) ## connector markers draw_circle(ax,[c_axon_x[j][0],c_axon_y[i][j][0]],conn_radius,lw=0,fclr=exc_clr,zorder=0) draw_circle(ax,[c_axon_x[j][1],c_axon_y[i][j][1]],conn_radius,lw=0,fclr=inh_clr,zorder=0) ## cell outputs for i in xrange(4): path=[[c_pop_pos[i][0][0]+c_pop_size/2.,c_pop_pos[i][0][1]], [c_pop_pos[i][0][0]+c_pop_size/2.,c_pop_pos[i][0][1]-axon_y_dist], [c_axon_x[i][0],c_pop_pos[i][0][1]-axon_y_dist]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## excitatory draw_circle(ax,path[-1],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[i][1][0]+c_pop_size/2.,c_pop_pos[i][1][1]], [c_pop_pos[i][1][0]+c_pop_size/2.,c_pop_pos[i][1][1]-axon_y_dist], [c_axon_x[-1-i][1],c_pop_pos[i][1][1]-axon_y_dist]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=inh_clr,zorder=1) ## inhibitory draw_circle(ax,path[-1],conn_radius,lw=0,fclr=inh_clr,zorder=0) ## connector ## remaining first segments for L6 path=[[c_axon_x[0][0],c_pop_pos[0][0][1]-axon_y_dist],[c_axon_x[0][0],c_axon_y[0][0][0]]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=0) path=[[c_axon_x[-1][1],c_pop_pos[0][1][1]-axon_y_dist],[c_axon_x[-1][1],c_axon_y[0][0][1]]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=inh_clr,zorder=0) ################################################## ## cortico-cortical axons ## horizontal branch in L1 path=[[0.,c_pop_pos[-1][0][1]+c_pop_size+axon_cell_sep], [c_pop_pos[-1][1][0]+c_pop_size+axon_cell_sep,c_pop_pos[-1][0][1]+c_pop_size+axon_cell_sep]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## vertical branches path=[[c_pop_pos[-1][0][0]-axon_cell_sep,c_pop_pos[-1][0][1]+c_pop_size+axon_cell_sep], [c_pop_pos[-1][0][0]-axon_cell_sep,c_pop_pos[0][0][1]+cc_input_y*c_pop_size]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## cc input to exc pop draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## 
connector path=[[c_pop_pos[-1][1][0]+c_pop_size+axon_cell_sep,c_pop_pos[-1][0][1]+c_pop_size+axon_cell_sep], [c_pop_pos[-1][1][0]+c_pop_size+axon_cell_sep,c_pop_pos[0][0][1]+cc_input_y*c_pop_size]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## cc input to inh pop draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector ## horizontal branches (arrows) for i in xrange(4): ## cc input to excitatory populations path=[[c_pop_pos[-1][0][0]-axon_cell_sep,c_pop_pos[i][0][1]+cc_input_y*c_pop_size], [c_pop_pos[-1][0][0],c_pop_pos[i][0][1]+cc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=0) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector ## cc input to inhibitory populations path=[[c_pop_pos[-1][1][0]+c_pop_size+axon_cell_sep,c_pop_pos[i][0][1]+cc_input_y*c_pop_size], [c_pop_pos[-1][1][0]+c_pop_size,c_pop_pos[i][0][1]+cc_input_y*c_pop_size]] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=0) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector ################################################## ## thalamo-cortical axons path=[[t_pos[0]+t_pop_size/2.,t_pos[1]+t_pop_size], [t_pos[0]+t_pop_size/2.,t_pos[1]+t_pop_size+axon_y_dist]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## thalamic output draw_circle(ax,path[-1],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),t_pos[1]+t_pop_size+axon_y_dist], [c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),t_pos[1]+t_pop_size+axon_y_dist]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## horizontal branch path=[[c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),t_pos[1]+t_pop_size+axon_y_dist], [c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),c_pop_pos[2][0][1]+tc_input_y*c_pop_size]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## left 
vertical branch path=[[c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),t_pos[1]+t_pop_size+axon_y_dist], [c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),c_pop_pos[2][0][1]+tc_input_y*c_pop_size]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## right vertical branch path=[[c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),c_pop_pos[2][0][1]+tc_input_y*c_pop_size], [c_pop_pos[0][0][0],c_pop_pos[2][0][1]+tc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) ## Th -> L4E synapses (arrows) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),c_pop_pos[0][0][1]+tc_input_y*c_pop_size], [c_pop_pos[0][0][0],c_pop_pos[0][0][1]+tc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) ## Th -> L6E synapses (arrows) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),c_pop_pos[2][0][1]+tc_input_y*c_pop_size], [c_pop_pos[0][1][0]+c_pop_size,c_pop_pos[2][0][1]+tc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) ## Th -> L4I synapses (arrows) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),c_pop_pos[0][0][1]+tc_input_y*c_pop_size], [c_pop_pos[0][1][0]+c_pop_size,c_pop_pos[0][0][1]+tc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) ## Th -> L6I synapses (arrows) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector if labels: ################################################## ## legend legend_x=[t_pos[0]+t_pop_size+axon_cell_sep,t_pos[0]+t_pop_size+axon_cell_sep+legend_length] legend_y=[t_pos[1],(t_pos[1]+2*t_pop_size/3)] 
draw_arrow(ax,[[legend_x[0],legend_y[1]],[legend_x[1],legend_y[1]]],lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) draw_arrow(ax,[[legend_x[0],legend_y[0]],[legend_x[1],legend_y[0]]],lw=lw_axons,ls='solid',lclr=inh_clr,arrow_size=arrow_size,zorder=1) ################################################## ## population names put_text(ax,[t_pos[0]+t_pop_size/2.,(t_pos[1]+t_pop_size/2.)],r'TC','w',fontdict1) put_text(ax,[c_pop_pos[0][0][0]+c_pop_size/2.,c_pop_pos[0][0][1]+c_pop_size/2.],r'L6E','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[0][1][0]+c_pop_size/2.,c_pop_pos[0][1][1]+c_pop_size/2.],r'L6I','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[1][0][0]+c_pop_size/2.,c_pop_pos[1][0][1]+c_pop_size/2.],r'L5E','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[1][1][0]+c_pop_size/2.,c_pop_pos[1][1][1]+c_pop_size/2.],r'L5I','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[2][0][0]+c_pop_size/2.,c_pop_pos[2][0][1]+c_pop_size/2.],r'L4E','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[2][1][0]+c_pop_size/2.,c_pop_pos[2][1][1]+c_pop_size/2.],r'L4I','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[3][0][0]+c_pop_size/2.,c_pop_pos[3][0][1]+c_pop_size/2.],r'L23E','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[3][1][0]+c_pop_size/2.,c_pop_pos[3][1][1]+c_pop_size/2.],r'L23I','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[-1][0][0], c_pop_pos[-1][0][1]+c_pop_size+1.7*axon_cell_sep + 0.01], r'cortico-cortical input','k',fontdict2) put_text(ax,[legend_x[1]+axon_y_dist,legend_y[1]],r'excitatory','k',fontdict3) put_text(ax,[legend_x[1]+axon_y_dist,legend_y[0]],r'inhibitory','k',fontdict3) ################################################## ## layer names put_text(ax,[0.2*c_pop_pos[0][0][0],c_pop_pos[0][1][1]+c_pop_size/2.],r'L6','k',fontdict1) 
put_text(ax,[0.2*c_pop_pos[1][0][0],c_pop_pos[1][1][1]+c_pop_size/2.],r'L5','k',fontdict1) put_text(ax,[0.2*c_pop_pos[2][0][0],c_pop_pos[2][1][1]+c_pop_size/2.],r'L4','k',fontdict1) put_text(ax,[0.2*c_pop_pos[3][0][0],c_pop_pos[3][1][1]+c_pop_size/2.],r'L2/3','k',fontdict1) if highlight is not None: ids = name_to_id_mapping[highlight] fontdict1['fontsize']=4 put_text(ax,[c_pop_pos[ids[0]][ids[1]][0]+c_pop_size/2.,c_pop_pos[ids[0]][ids[1]][1]+c_pop_size/2.],highlight,'k',fontdict1) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) ax.axis(ax.axis('equal')) return ax
highlight : None or string if string, then only the label of this population is set and the box is highlighted
Below is the the instruction that describes the task: ### Input: highlight : None or string if string, then only the label of this population is set and the box is highlighted ### Response: def network_sketch(ax, highlight=None, labels=True, yscaling=1.): ''' highlight : None or string if string, then only the label of this population is set and the box is highlighted ''' name_to_id_mapping={'L6E':(0,0), 'L6I':(0,1), 'L5E':(1,0), 'L5I':(1,1), 'L4E':(2,0), 'L4I':(2,1), 'L23E':(3,0), 'L23I':(3,1) } showgrid=False ## switch on/off grid ## sketch parameters layer_x=0.1 ## x position of left boundary of cortex layers layer6_y=0.2*yscaling ## y position of lower boundary of layer 6 layer_width=0.65 ## width of cortex layers layer_height=0.21*yscaling ## height of cortex layers layer_colors=['0.9','0.8','0.9','0.8'] ## layer colors c_pop_size=0.15 ## cortex population size c_pop_dist=0.17 ## distance between cortex populations t_pop_size=0.15 ## thalamus population size t_pop_y=0.0 ## y position of lower thalamus boundary axon_cell_sep=0.04 ## distance between axons and popualations cc_input_y=0.6*yscaling ## y position of cortico-cortical synapses (relative to cortex population) tc_input_y=0.4*yscaling ## y position of thalamo-cortical synapses (relative to cortex population) exc_clr = 'k' if analysis_params.bw else analysis_params.colorE ## color of excitatory axons/synapses inh_clr = 'gray' if analysis_params.bw else analysis_params.colorI ## color of inhibitory axons/synapses lw_pop=0.5 ## linewidth for populations lw_axons=0.4 ## linewidth for axons arrow_size=0.013 ## arrow size conn_radius=0.005 ## radius of connector marker legend_length=0.07 ## length of legend arrows colors = phlp.get_colors(8)[::-1] ## colors of each population fontdict1={'fontsize': 6, ## population name 'weight':'normal', 'horizontalalignment':'center', 'verticalalignment':'center'} fontdict2={'fontsize': 6, ## cortico-cortical input 'weight':'normal', 'horizontalalignment':'center', 
'verticalalignment':'center'} fontdict3={'fontsize': 6, ## legend 'weight':'normal', 'horizontalalignment':'left', 'verticalalignment':'center'} ###################################################################################### def draw_box(ax,pos,lw=1.,ls='solid',eclr='k',fclr='w',zorder=0, clip_on=False, boxstyle=patches.BoxStyle("Round", pad=0.0), padadjust=0.): '''Draws a rectangle.''' rect = patches.FancyBboxPatch((pos[0]+padadjust, pos[1]+padadjust), pos[2]-2*padadjust, pos[3]-2*padadjust, ec=eclr, fc=fclr, lw=lw, ls=ls, zorder=zorder, clip_on=clip_on, boxstyle=boxstyle) ax.add_patch(rect) def draw_circle(ax,xy,radius,lw=1.,ls='solid',eclr='k',fclr='w',zorder=0): '''Draws a circle.''' circ = plt.Circle((xy[0],xy[1]),radius=radius, ec=eclr,fc=fclr,lw=lw,ls=ls,zorder=zorder) ax.add_patch(circ) def put_text(ax,xy,txt,clr,fontdict,zorder=10): '''Puts text to a specific position.''' ax.text(xy[0],xy[1],txt,fontdict=fontdict,color=clr,zorder=zorder) def draw_line(ax,path,lw=1.,ls='solid',lclr='k',zorder=0): '''Draws a path.''' #pth = path.Path(np.array(path)) pth = Path(np.array(path)) patch = patches.PathPatch(pth, fill=False, lw=lw,ls=ls,ec=lclr,fc=lclr,zorder=zorder) ax.add_patch(patch) def draw_arrow(ax,path,lw=1.0,ls='solid',lclr='k',arrow_size=0.025,zorder=0): '''Draws a path with an arrow at the end. 
''' x=path[-2][0] y=path[-2][1] dx=path[-1][0]-path[-2][0] dy=path[-1][1]-path[-2][1] D=np.array([dx,dy]) D=D/np.sqrt(D[0]**2+D[1]**2) path2=np.array(path).copy() path2[-1,:]=path2[-1,:]-arrow_size*D pth = Path(np.array(path2)) patch = patches.PathPatch(pth, fill=False, lw=lw,ls=ls,ec=lclr,fc=lclr,zorder=zorder) ax.add_patch(patch) arr=patches.FancyArrow(\ x,y,dx,dy,\ length_includes_head=True,width=0.0,head_width=arrow_size,\ overhang=0.2,ec=lclr,fc=lclr,linewidth=0) ax.add_patch(arr) ################################################## ## populations ## cortex layer_pos=[] c_pop_pos=[] for i in xrange(4): ## cortex layers layer_pos+=[[layer_x,layer6_y+i*layer_height*yscaling,layer_width,layer_height]] ## layer positions draw_box(ax,layer_pos[i],lw=0.,fclr=layer_colors[i],zorder=0) ## cortex populations l_margin=(layer_width-2.*c_pop_size-c_pop_dist)/2. b_margin=(layer_height-c_pop_size)/2. ## positions of cortex populations c_pop_pos+=[[ [layer_pos[i][0] + l_margin, layer_pos[i][1] + b_margin, c_pop_size, c_pop_size], ## E [layer_pos[i][0] + l_margin + c_pop_size + c_pop_dist, layer_pos[i][1] + b_margin, c_pop_size, c_pop_size] ]] ## I #draw_box(ax,c_pop_pos[i][0],lw=lw_pop,eclr='k',fclr='w',zorder=2) ## E #draw_box(ax,c_pop_pos[i][1],lw=lw_pop,eclr='k',fclr='w',zorder=2) ## I draw_box(ax,c_pop_pos[i][0],lw=lw_pop,eclr='k',fclr=colors[i*2+1],zorder=2, boxstyle=patches.BoxStyle("Round", pad=0.02), padadjust=0.02) ## E draw_box(ax,c_pop_pos[i][1],lw=lw_pop,eclr='k',fclr=colors[i*2],zorder=2, boxstyle=patches.BoxStyle("Round", pad=0.02), padadjust=0.02) ## I ## thalamus c_center_x=layer_x+layer_width/2. 
## x position of cortex center t_pos=[c_center_x-t_pop_size/2.,t_pop_y*yscaling,t_pop_size,t_pop_size] ## thalamus position #draw_box(ax,t_pos,lw=lw_pop,eclr='k',fclr='w',zorder=2) ## Th draw_box(ax,t_pos,lw=lw_pop,eclr='k',fclr='k',zorder=2, boxstyle=patches.BoxStyle("Round", pad=0.02), padadjust=0.02) ## Th ################################################## ## intracortical axons axon_x_dist=(c_pop_dist-2.*axon_cell_sep)/7. assert(axon_x_dist>0.) axon_y_dist=c_pop_size/9.#*yscaling c_axon_x=[] c_axon_y=[] # x positions of vertical intracortical axons for i in xrange(4): # pre layer exc=c_pop_pos[i][0][0]+c_pop_size+axon_cell_sep+i*axon_x_dist ## E inh=exc+4.*axon_x_dist ## I c_axon_x+=[[exc,inh]] # y positions of horizontal intracortical axons for i in xrange(4): ## post layer c_axon_y+=[[]] for j in xrange(4): ## pre layer exc=c_pop_pos[i][0][1]+(j+1.)*axon_y_dist ## E inh=c_pop_pos[i][0][1]+c_pop_size-(j+1.)*axon_y_dist ## I c_axon_y[i]+=[[exc,inh]] ## vertical intracortical axons for i in xrange(4): draw_line(ax,[[c_axon_x[i][0],c_axon_y[0][i][0]],[c_axon_x[i][0],c_axon_y[-1][i][0]]],lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) draw_line(ax,[[c_axon_x[i][1],c_axon_y[0][i][1]],[c_axon_x[i][1],c_axon_y[-1][i][1]]],lw=lw_axons,ls='solid',lclr=inh_clr,zorder=0) ## horizontal intracortical axons for i in xrange(4): ## post layer for j in xrange(4): ## pre layer path=[[c_axon_x[j][0],c_axon_y[i][j][0]],[c_pop_pos[i][0][0]+c_pop_size,c_axon_y[i][j][0]]] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) path=[[c_axon_x[j][0],c_axon_y[i][j][0]],[c_pop_pos[i][1][0],c_axon_y[i][j][0]]] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) path=[[c_axon_x[j][1],c_axon_y[i][j][1]],[c_pop_pos[i][1][0],c_axon_y[i][j][1]]] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=inh_clr,arrow_size=arrow_size,zorder=0) path=[[c_axon_x[j][1],c_axon_y[i][j][1]],[c_pop_pos[i][0][0]+c_pop_size,c_axon_y[i][j][1]]] 
draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=inh_clr,arrow_size=arrow_size,zorder=0) ## connector markers draw_circle(ax,[c_axon_x[j][0],c_axon_y[i][j][0]],conn_radius,lw=0,fclr=exc_clr,zorder=0) draw_circle(ax,[c_axon_x[j][1],c_axon_y[i][j][1]],conn_radius,lw=0,fclr=inh_clr,zorder=0) ## cell outputs for i in xrange(4): path=[[c_pop_pos[i][0][0]+c_pop_size/2.,c_pop_pos[i][0][1]], [c_pop_pos[i][0][0]+c_pop_size/2.,c_pop_pos[i][0][1]-axon_y_dist], [c_axon_x[i][0],c_pop_pos[i][0][1]-axon_y_dist]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## excitatory draw_circle(ax,path[-1],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[i][1][0]+c_pop_size/2.,c_pop_pos[i][1][1]], [c_pop_pos[i][1][0]+c_pop_size/2.,c_pop_pos[i][1][1]-axon_y_dist], [c_axon_x[-1-i][1],c_pop_pos[i][1][1]-axon_y_dist]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=inh_clr,zorder=1) ## inhibitory draw_circle(ax,path[-1],conn_radius,lw=0,fclr=inh_clr,zorder=0) ## connector ## remaining first segments for L6 path=[[c_axon_x[0][0],c_pop_pos[0][0][1]-axon_y_dist],[c_axon_x[0][0],c_axon_y[0][0][0]]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=0) path=[[c_axon_x[-1][1],c_pop_pos[0][1][1]-axon_y_dist],[c_axon_x[-1][1],c_axon_y[0][0][1]]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=inh_clr,zorder=0) ################################################## ## cortico-cortical axons ## horizontal branch in L1 path=[[0.,c_pop_pos[-1][0][1]+c_pop_size+axon_cell_sep], [c_pop_pos[-1][1][0]+c_pop_size+axon_cell_sep,c_pop_pos[-1][0][1]+c_pop_size+axon_cell_sep]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## vertical branches path=[[c_pop_pos[-1][0][0]-axon_cell_sep,c_pop_pos[-1][0][1]+c_pop_size+axon_cell_sep], [c_pop_pos[-1][0][0]-axon_cell_sep,c_pop_pos[0][0][1]+cc_input_y*c_pop_size]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## cc input to exc pop draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## 
connector path=[[c_pop_pos[-1][1][0]+c_pop_size+axon_cell_sep,c_pop_pos[-1][0][1]+c_pop_size+axon_cell_sep], [c_pop_pos[-1][1][0]+c_pop_size+axon_cell_sep,c_pop_pos[0][0][1]+cc_input_y*c_pop_size]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## cc input to inh pop draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector ## horizontal branches (arrows) for i in xrange(4): ## cc input to excitatory populations path=[[c_pop_pos[-1][0][0]-axon_cell_sep,c_pop_pos[i][0][1]+cc_input_y*c_pop_size], [c_pop_pos[-1][0][0],c_pop_pos[i][0][1]+cc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=0) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector ## cc input to inhibitory populations path=[[c_pop_pos[-1][1][0]+c_pop_size+axon_cell_sep,c_pop_pos[i][0][1]+cc_input_y*c_pop_size], [c_pop_pos[-1][1][0]+c_pop_size,c_pop_pos[i][0][1]+cc_input_y*c_pop_size]] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=0) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector ################################################## ## thalamo-cortical axons path=[[t_pos[0]+t_pop_size/2.,t_pos[1]+t_pop_size], [t_pos[0]+t_pop_size/2.,t_pos[1]+t_pop_size+axon_y_dist]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## thalamic output draw_circle(ax,path[-1],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),t_pos[1]+t_pop_size+axon_y_dist], [c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),t_pos[1]+t_pop_size+axon_y_dist]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## horizontal branch path=[[c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),t_pos[1]+t_pop_size+axon_y_dist], [c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),c_pop_pos[2][0][1]+tc_input_y*c_pop_size]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## left 
vertical branch path=[[c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),t_pos[1]+t_pop_size+axon_y_dist], [c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),c_pop_pos[2][0][1]+tc_input_y*c_pop_size]] draw_line(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,zorder=1) ## right vertical branch path=[[c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),c_pop_pos[2][0][1]+tc_input_y*c_pop_size], [c_pop_pos[0][0][0],c_pop_pos[2][0][1]+tc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) ## Th -> L4E synapses (arrows) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[0][0][0]-(axon_cell_sep+axon_y_dist),c_pop_pos[0][0][1]+tc_input_y*c_pop_size], [c_pop_pos[0][0][0],c_pop_pos[0][0][1]+tc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) ## Th -> L6E synapses (arrows) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),c_pop_pos[2][0][1]+tc_input_y*c_pop_size], [c_pop_pos[0][1][0]+c_pop_size,c_pop_pos[2][0][1]+tc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) ## Th -> L4I synapses (arrows) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector path=[[c_pop_pos[0][1][0]+c_pop_size+(axon_cell_sep+axon_y_dist),c_pop_pos[0][0][1]+tc_input_y*c_pop_size], [c_pop_pos[0][1][0]+c_pop_size,c_pop_pos[0][0][1]+tc_input_y*c_pop_size],] draw_arrow(ax,path,lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) ## Th -> L6I synapses (arrows) draw_circle(ax,path[0],conn_radius,lw=0,fclr=exc_clr,zorder=0) ## connector if labels: ################################################## ## legend legend_x=[t_pos[0]+t_pop_size+axon_cell_sep,t_pos[0]+t_pop_size+axon_cell_sep+legend_length] legend_y=[t_pos[1],(t_pos[1]+2*t_pop_size/3)] 
draw_arrow(ax,[[legend_x[0],legend_y[1]],[legend_x[1],legend_y[1]]],lw=lw_axons,ls='solid',lclr=exc_clr,arrow_size=arrow_size,zorder=1) draw_arrow(ax,[[legend_x[0],legend_y[0]],[legend_x[1],legend_y[0]]],lw=lw_axons,ls='solid',lclr=inh_clr,arrow_size=arrow_size,zorder=1) ################################################## ## population names put_text(ax,[t_pos[0]+t_pop_size/2.,(t_pos[1]+t_pop_size/2.)],r'TC','w',fontdict1) put_text(ax,[c_pop_pos[0][0][0]+c_pop_size/2.,c_pop_pos[0][0][1]+c_pop_size/2.],r'L6E','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[0][1][0]+c_pop_size/2.,c_pop_pos[0][1][1]+c_pop_size/2.],r'L6I','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[1][0][0]+c_pop_size/2.,c_pop_pos[1][0][1]+c_pop_size/2.],r'L5E','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[1][1][0]+c_pop_size/2.,c_pop_pos[1][1][1]+c_pop_size/2.],r'L5I','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[2][0][0]+c_pop_size/2.,c_pop_pos[2][0][1]+c_pop_size/2.],r'L4E','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[2][1][0]+c_pop_size/2.,c_pop_pos[2][1][1]+c_pop_size/2.],r'L4I','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[3][0][0]+c_pop_size/2.,c_pop_pos[3][0][1]+c_pop_size/2.],r'L23E','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[3][1][0]+c_pop_size/2.,c_pop_pos[3][1][1]+c_pop_size/2.],r'L23I','w' if analysis_params.bw else 'k',fontdict1) put_text(ax,[c_pop_pos[-1][0][0], c_pop_pos[-1][0][1]+c_pop_size+1.7*axon_cell_sep + 0.01], r'cortico-cortical input','k',fontdict2) put_text(ax,[legend_x[1]+axon_y_dist,legend_y[1]],r'excitatory','k',fontdict3) put_text(ax,[legend_x[1]+axon_y_dist,legend_y[0]],r'inhibitory','k',fontdict3) ################################################## ## layer names put_text(ax,[0.2*c_pop_pos[0][0][0],c_pop_pos[0][1][1]+c_pop_size/2.],r'L6','k',fontdict1) 
put_text(ax,[0.2*c_pop_pos[1][0][0],c_pop_pos[1][1][1]+c_pop_size/2.],r'L5','k',fontdict1) put_text(ax,[0.2*c_pop_pos[2][0][0],c_pop_pos[2][1][1]+c_pop_size/2.],r'L4','k',fontdict1) put_text(ax,[0.2*c_pop_pos[3][0][0],c_pop_pos[3][1][1]+c_pop_size/2.],r'L2/3','k',fontdict1) if highlight is not None: ids = name_to_id_mapping[highlight] fontdict1['fontsize']=4 put_text(ax,[c_pop_pos[ids[0]][ids[1]][0]+c_pop_size/2.,c_pop_pos[ids[0]][ids[1]][1]+c_pop_size/2.],highlight,'k',fontdict1) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) ax.axis(ax.axis('equal')) return ax
def md5(self):
    """
    "Hash" of transforms.

    Combines the update counter with the base frame name into a
    single string that changes whenever either one does.

    Returns
    ----------
    md5 : str
        Approximate hash of transforms
    """
    # Concatenate the string forms of both state fields; any change in
    # either invalidates the "hash".
    return ''.join((str(self._updated), str(self.base_frame)))
"Hash" of transforms Returns ----------- md5 : str Approximate hash of transforms
Below is the the instruction that describes the task: ### Input: "Hash" of transforms Returns ----------- md5 : str Approximate hash of transforms ### Response: def md5(self): """ "Hash" of transforms Returns ----------- md5 : str Approximate hash of transforms """ result = str(self._updated) + str(self.base_frame) return result
def send(self, user_id, stock_id, op_user_id=None, device_info=None, out_trade_no=None): """ 发放代金券 :param user_id: 用户在公众号下的 openid :param stock_id: 代金券批次 ID :param op_user_id: 可选,操作员账号,默认为商户号 :param device_info: 可选,微信支付分配的终端设备号 :param out_trade_no: 可选,商户订单号,需保持唯一性,默认自动生成 :return: 返回的结果信息 """ if not out_trade_no: now = datetime.now() out_trade_no = '{0}{1}{2}'.format( self.mch_id, now.strftime('%Y%m%d%H%M%S'), random.randint(1000, 10000) ) data = { 'appid': self.appid, 'coupon_stock_id': stock_id, 'openid': user_id, 'openid_count': 1, 'partner_trade_no': out_trade_no, 'op_user_id': op_user_id, 'device_info': device_info, 'version': '1.0', 'type': 'XML', } return self._post('mmpaymkttransfers/send_coupon', data=data)
发放代金券 :param user_id: 用户在公众号下的 openid :param stock_id: 代金券批次 ID :param op_user_id: 可选,操作员账号,默认为商户号 :param device_info: 可选,微信支付分配的终端设备号 :param out_trade_no: 可选,商户订单号,需保持唯一性,默认自动生成 :return: 返回的结果信息
Below is the the instruction that describes the task: ### Input: 发放代金券 :param user_id: 用户在公众号下的 openid :param stock_id: 代金券批次 ID :param op_user_id: 可选,操作员账号,默认为商户号 :param device_info: 可选,微信支付分配的终端设备号 :param out_trade_no: 可选,商户订单号,需保持唯一性,默认自动生成 :return: 返回的结果信息 ### Response: def send(self, user_id, stock_id, op_user_id=None, device_info=None, out_trade_no=None): """ 发放代金券 :param user_id: 用户在公众号下的 openid :param stock_id: 代金券批次 ID :param op_user_id: 可选,操作员账号,默认为商户号 :param device_info: 可选,微信支付分配的终端设备号 :param out_trade_no: 可选,商户订单号,需保持唯一性,默认自动生成 :return: 返回的结果信息 """ if not out_trade_no: now = datetime.now() out_trade_no = '{0}{1}{2}'.format( self.mch_id, now.strftime('%Y%m%d%H%M%S'), random.randint(1000, 10000) ) data = { 'appid': self.appid, 'coupon_stock_id': stock_id, 'openid': user_id, 'openid_count': 1, 'partner_trade_no': out_trade_no, 'op_user_id': op_user_id, 'device_info': device_info, 'version': '1.0', 'type': 'XML', } return self._post('mmpaymkttransfers/send_coupon', data=data)
def readS8(self, register): """Read a signed byte from the specified register.""" result = self.readU8(register) if result > 127: result -= 256 return result
Read a signed byte from the specified register.
Below is the the instruction that describes the task: ### Input: Read a signed byte from the specified register. ### Response: def readS8(self, register): """Read a signed byte from the specified register.""" result = self.readU8(register) if result > 127: result -= 256 return result
def save_state(internal_request, state): """ Saves all necessary information needed by the UserIdHasher :type internal_request: satosa.internal_data.InternalRequest :param internal_request: The request :param state: The current state """ state_data = {"hash_type": internal_request.user_id_hash_type} state[UserIdHasher.STATE_KEY] = state_data
Saves all necessary information needed by the UserIdHasher :type internal_request: satosa.internal_data.InternalRequest :param internal_request: The request :param state: The current state
Below is the the instruction that describes the task: ### Input: Saves all necessary information needed by the UserIdHasher :type internal_request: satosa.internal_data.InternalRequest :param internal_request: The request :param state: The current state ### Response: def save_state(internal_request, state): """ Saves all necessary information needed by the UserIdHasher :type internal_request: satosa.internal_data.InternalRequest :param internal_request: The request :param state: The current state """ state_data = {"hash_type": internal_request.user_id_hash_type} state[UserIdHasher.STATE_KEY] = state_data
def get_reply_order_cache_data(comid): """ Prepare a representation of the comment ID given as parameter so that it is suitable for byte ordering in MySQL. """ return "%s%s%s%s" % (chr((comid >> 24) % 256), chr((comid >> 16) % 256), chr((comid >> 8) % 256), chr(comid % 256))
Prepare a representation of the comment ID given as parameter so that it is suitable for byte ordering in MySQL.
Below is the the instruction that describes the task: ### Input: Prepare a representation of the comment ID given as parameter so that it is suitable for byte ordering in MySQL. ### Response: def get_reply_order_cache_data(comid): """ Prepare a representation of the comment ID given as parameter so that it is suitable for byte ordering in MySQL. """ return "%s%s%s%s" % (chr((comid >> 24) % 256), chr((comid >> 16) % 256), chr((comid >> 8) % 256), chr(comid % 256))
def inet_aton(text): """Convert a text format IPv6 address into network format. @param text: the textual address @type text: string @rtype: string @raises dns.exception.SyntaxError: the text was not properly formatted """ # # Our aim here is not something fast; we just want something that works. # if text == '::': text = '0::' # # Get rid of the icky dot-quad syntax if we have it. # m = _v4_ending.match(text) if not m is None: text = "%s:%04x:%04x" % (m.group(1), int(m.group(2)) * 256 + int(m.group(3)), int(m.group(4)) * 256 + int(m.group(5))) # # Try to turn '::<whatever>' into ':<whatever>'; if no match try to # turn '<whatever>::' into '<whatever>:' # m = _colon_colon_start.match(text) if not m is None: text = text[1:] else: m = _colon_colon_end.match(text) if not m is None: text = text[:-1] # # Now canonicalize into 8 chunks of 4 hex digits each # chunks = text.split(':') l = len(chunks) if l > 8: raise dns.exception.SyntaxError seen_empty = False canonical = [] for c in chunks: if c == '': if seen_empty: raise dns.exception.SyntaxError seen_empty = True for i in xrange(0, 8 - l + 1): canonical.append('0000') else: lc = len(c) if lc > 4: raise dns.exception.SyntaxError if lc != 4: c = ('0' * (4 - lc)) + c canonical.append(c) if l < 8 and not seen_empty: raise dns.exception.SyntaxError text = ''.join(canonical) # # Finally we can go to binary. # try: return text.decode('hex_codec') except TypeError: raise dns.exception.SyntaxError
Convert a text format IPv6 address into network format. @param text: the textual address @type text: string @rtype: string @raises dns.exception.SyntaxError: the text was not properly formatted
Below is the the instruction that describes the task: ### Input: Convert a text format IPv6 address into network format. @param text: the textual address @type text: string @rtype: string @raises dns.exception.SyntaxError: the text was not properly formatted ### Response: def inet_aton(text): """Convert a text format IPv6 address into network format. @param text: the textual address @type text: string @rtype: string @raises dns.exception.SyntaxError: the text was not properly formatted """ # # Our aim here is not something fast; we just want something that works. # if text == '::': text = '0::' # # Get rid of the icky dot-quad syntax if we have it. # m = _v4_ending.match(text) if not m is None: text = "%s:%04x:%04x" % (m.group(1), int(m.group(2)) * 256 + int(m.group(3)), int(m.group(4)) * 256 + int(m.group(5))) # # Try to turn '::<whatever>' into ':<whatever>'; if no match try to # turn '<whatever>::' into '<whatever>:' # m = _colon_colon_start.match(text) if not m is None: text = text[1:] else: m = _colon_colon_end.match(text) if not m is None: text = text[:-1] # # Now canonicalize into 8 chunks of 4 hex digits each # chunks = text.split(':') l = len(chunks) if l > 8: raise dns.exception.SyntaxError seen_empty = False canonical = [] for c in chunks: if c == '': if seen_empty: raise dns.exception.SyntaxError seen_empty = True for i in xrange(0, 8 - l + 1): canonical.append('0000') else: lc = len(c) if lc > 4: raise dns.exception.SyntaxError if lc != 4: c = ('0' * (4 - lc)) + c canonical.append(c) if l < 8 and not seen_empty: raise dns.exception.SyntaxError text = ''.join(canonical) # # Finally we can go to binary. # try: return text.decode('hex_codec') except TypeError: raise dns.exception.SyntaxError
def find_files(sequencepath): """ Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files """ # Create a sorted list of all the FASTA files in the sequence path files = sorted(glob(os.path.join(sequencepath, '*.fa*'))) return files
Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files
Below is the the instruction that describes the task: ### Input: Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files ### Response: def find_files(sequencepath): """ Use glob to find all FASTA files in the provided sequence path. NOTE: FASTA files must have an extension such as .fasta, .fa, or .fas. Extensions of .fsa, .tfa, ect. are not currently supported :param sequencepath: path of folder containing FASTA genomes :return: list of FASTA files """ # Create a sorted list of all the FASTA files in the sequence path files = sorted(glob(os.path.join(sequencepath, '*.fa*'))) return files
def get_default_values(self): """ Make a crude estimation of the alignment using the center of mass and general C->N orientation. """ out = dict(dx=0, dy=0, dz=0, theta=0, phi=0, psi=0) dx, dy, dz, _ = np.mean(self.coord1 - self.coord2, axis=1) out['dx'] = dx out['dy'] = dy out['dz'] = dz # C->N vector vec1 = self.coord1[:-1, 1] - self.coord1[:-1, -1] vec2 = self.coord2[:-1, 1] - self.coord2[:-1, -1] vec1 /= np.linalg.norm(vec1) vec2 /= np.linalg.norm(vec2) # Find the rotation matrix that converts vec1 to vec2: # http://math.stackexchange.com/questions/180418/#476311 v = np.cross(vec1, vec2) s = np.linalg.norm(v) + np.finfo(DTYPE).eps c = vec1.dot(vec2) vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]], dtype=DTYPE) rotation_matrix = np.eye(3) + vx + vx.dot(vx) * (1 - c) / (s * s) # Recover the angles from the matrix as seen here: # http://nghiaho.com/?page_id=846 out['theta'] = math.atan2(rotation_matrix[2, 1], rotation_matrix[2, 2]) out['phi'] = math.atan2(-rotation_matrix[2, 0], math.hypot(rotation_matrix[2, 1], rotation_matrix[2, 2])) out['psi'] = math.atan2(rotation_matrix[1, 0], rotation_matrix[0, 0]) return out
Make a crude estimation of the alignment using the center of mass and general C->N orientation.
Below is the the instruction that describes the task: ### Input: Make a crude estimation of the alignment using the center of mass and general C->N orientation. ### Response: def get_default_values(self): """ Make a crude estimation of the alignment using the center of mass and general C->N orientation. """ out = dict(dx=0, dy=0, dz=0, theta=0, phi=0, psi=0) dx, dy, dz, _ = np.mean(self.coord1 - self.coord2, axis=1) out['dx'] = dx out['dy'] = dy out['dz'] = dz # C->N vector vec1 = self.coord1[:-1, 1] - self.coord1[:-1, -1] vec2 = self.coord2[:-1, 1] - self.coord2[:-1, -1] vec1 /= np.linalg.norm(vec1) vec2 /= np.linalg.norm(vec2) # Find the rotation matrix that converts vec1 to vec2: # http://math.stackexchange.com/questions/180418/#476311 v = np.cross(vec1, vec2) s = np.linalg.norm(v) + np.finfo(DTYPE).eps c = vec1.dot(vec2) vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]], dtype=DTYPE) rotation_matrix = np.eye(3) + vx + vx.dot(vx) * (1 - c) / (s * s) # Recover the angles from the matrix as seen here: # http://nghiaho.com/?page_id=846 out['theta'] = math.atan2(rotation_matrix[2, 1], rotation_matrix[2, 2]) out['phi'] = math.atan2(-rotation_matrix[2, 0], math.hypot(rotation_matrix[2, 1], rotation_matrix[2, 2])) out['psi'] = math.atan2(rotation_matrix[1, 0], rotation_matrix[0, 0]) return out
def save(self): """Convert the scanner to JSON. Returns ------- `dict` JSON data. """ data = super().save() data['expr'] = self.expr.pattern data['default_end'] = self.default_end return data
Convert the scanner to JSON. Returns ------- `dict` JSON data.
Below is the the instruction that describes the task: ### Input: Convert the scanner to JSON. Returns ------- `dict` JSON data. ### Response: def save(self): """Convert the scanner to JSON. Returns ------- `dict` JSON data. """ data = super().save() data['expr'] = self.expr.pattern data['default_end'] = self.default_end return data
def _number_type_helper(national_number, metadata): """Return the type of the given number against the metadata""" if not _is_number_matching_desc(national_number, metadata.general_desc): return PhoneNumberType.UNKNOWN if _is_number_matching_desc(national_number, metadata.premium_rate): return PhoneNumberType.PREMIUM_RATE if _is_number_matching_desc(national_number, metadata.toll_free): return PhoneNumberType.TOLL_FREE if _is_number_matching_desc(national_number, metadata.shared_cost): return PhoneNumberType.SHARED_COST if _is_number_matching_desc(national_number, metadata.voip): return PhoneNumberType.VOIP if _is_number_matching_desc(national_number, metadata.personal_number): return PhoneNumberType.PERSONAL_NUMBER if _is_number_matching_desc(national_number, metadata.pager): return PhoneNumberType.PAGER if _is_number_matching_desc(national_number, metadata.uan): return PhoneNumberType.UAN if _is_number_matching_desc(national_number, metadata.voicemail): return PhoneNumberType.VOICEMAIL if _is_number_matching_desc(national_number, metadata.fixed_line): if metadata.same_mobile_and_fixed_line_pattern: return PhoneNumberType.FIXED_LINE_OR_MOBILE elif _is_number_matching_desc(national_number, metadata.mobile): return PhoneNumberType.FIXED_LINE_OR_MOBILE return PhoneNumberType.FIXED_LINE # Otherwise, test to see if the number is mobile. Only do this if certain # that the patterns for mobile and fixed line aren't the same. if (not metadata.same_mobile_and_fixed_line_pattern and _is_number_matching_desc(national_number, metadata.mobile)): return PhoneNumberType.MOBILE return PhoneNumberType.UNKNOWN
Return the type of the given number against the metadata
Below is the the instruction that describes the task: ### Input: Return the type of the given number against the metadata ### Response: def _number_type_helper(national_number, metadata): """Return the type of the given number against the metadata""" if not _is_number_matching_desc(national_number, metadata.general_desc): return PhoneNumberType.UNKNOWN if _is_number_matching_desc(national_number, metadata.premium_rate): return PhoneNumberType.PREMIUM_RATE if _is_number_matching_desc(national_number, metadata.toll_free): return PhoneNumberType.TOLL_FREE if _is_number_matching_desc(national_number, metadata.shared_cost): return PhoneNumberType.SHARED_COST if _is_number_matching_desc(national_number, metadata.voip): return PhoneNumberType.VOIP if _is_number_matching_desc(national_number, metadata.personal_number): return PhoneNumberType.PERSONAL_NUMBER if _is_number_matching_desc(national_number, metadata.pager): return PhoneNumberType.PAGER if _is_number_matching_desc(national_number, metadata.uan): return PhoneNumberType.UAN if _is_number_matching_desc(national_number, metadata.voicemail): return PhoneNumberType.VOICEMAIL if _is_number_matching_desc(national_number, metadata.fixed_line): if metadata.same_mobile_and_fixed_line_pattern: return PhoneNumberType.FIXED_LINE_OR_MOBILE elif _is_number_matching_desc(national_number, metadata.mobile): return PhoneNumberType.FIXED_LINE_OR_MOBILE return PhoneNumberType.FIXED_LINE # Otherwise, test to see if the number is mobile. Only do this if certain # that the patterns for mobile and fixed line aren't the same. if (not metadata.same_mobile_and_fixed_line_pattern and _is_number_matching_desc(national_number, metadata.mobile)): return PhoneNumberType.MOBILE return PhoneNumberType.UNKNOWN
def _calculate_page_index(index, data): """Determine the location of a given index in paged data. Arguments: index (:py:class:`int`): The overall index. data: (:py:class:`dict`) The first page of data. Returns: :py:class:`tuple`: The location of that index, in the format ``(page, index_in_page)``. """ if index > data['total_results']: raise ValueError('index not in paged data') page_length = len(data['results']) return (index // page_length) + 1, (index % page_length) - 1
Determine the location of a given index in paged data. Arguments: index (:py:class:`int`): The overall index. data: (:py:class:`dict`) The first page of data. Returns: :py:class:`tuple`: The location of that index, in the format ``(page, index_in_page)``.
Below is the the instruction that describes the task: ### Input: Determine the location of a given index in paged data. Arguments: index (:py:class:`int`): The overall index. data: (:py:class:`dict`) The first page of data. Returns: :py:class:`tuple`: The location of that index, in the format ``(page, index_in_page)``. ### Response: def _calculate_page_index(index, data): """Determine the location of a given index in paged data. Arguments: index (:py:class:`int`): The overall index. data: (:py:class:`dict`) The first page of data. Returns: :py:class:`tuple`: The location of that index, in the format ``(page, index_in_page)``. """ if index > data['total_results']: raise ValueError('index not in paged data') page_length = len(data['results']) return (index // page_length) + 1, (index % page_length) - 1
def make_hop_info_from_url(url, verify_reachability=None): """Build HopInfo object from url. It allows only telnet and ssh as a valid protocols. Args: url (str): The url string describing the node. i.e. telnet://username@1.1.1.1. The protocol, username and address portion of url is mandatory. Port and password is optional. If port is missing the standard protocol -> port mapping is done. The password is optional i.e. for TS access directly to console ports. The path part is treated as additional password required for some systems, i.e. enable password for IOS devices.: telnet://<username>:<password>@<host>:<port>/<enable_password> <enable_password> is optional verify_reachability: This is optional callable returning boolean if node is reachable. It can be used to verify reachability of the node before making a connection. It can speedup the connection process when node not reachable especially with telnet having long timeout. Returns: HopInfo object or None if url is invalid or protocol not supported """ parsed = urlparse(url) username = None if parsed.username is None else unquote(parsed.username) # It's None if not exists password = None if parsed.password is None else unquote(parsed.password) # It's None if not exists try: enable_password = parse_qs(parsed.query)["enable_password"][0] except KeyError: enable_password = None hop_info = HopInfo( parsed.scheme, parsed.hostname, username, password, parsed.port, enable_password, verify_reachability=verify_reachability ) if hop_info.is_valid(): return hop_info raise InvalidHopInfoError
Build HopInfo object from url. It allows only telnet and ssh as a valid protocols. Args: url (str): The url string describing the node. i.e. telnet://username@1.1.1.1. The protocol, username and address portion of url is mandatory. Port and password is optional. If port is missing the standard protocol -> port mapping is done. The password is optional i.e. for TS access directly to console ports. The path part is treated as additional password required for some systems, i.e. enable password for IOS devices.: telnet://<username>:<password>@<host>:<port>/<enable_password> <enable_password> is optional verify_reachability: This is optional callable returning boolean if node is reachable. It can be used to verify reachability of the node before making a connection. It can speedup the connection process when node not reachable especially with telnet having long timeout. Returns: HopInfo object or None if url is invalid or protocol not supported
Below is the the instruction that describes the task: ### Input: Build HopInfo object from url. It allows only telnet and ssh as a valid protocols. Args: url (str): The url string describing the node. i.e. telnet://username@1.1.1.1. The protocol, username and address portion of url is mandatory. Port and password is optional. If port is missing the standard protocol -> port mapping is done. The password is optional i.e. for TS access directly to console ports. The path part is treated as additional password required for some systems, i.e. enable password for IOS devices.: telnet://<username>:<password>@<host>:<port>/<enable_password> <enable_password> is optional verify_reachability: This is optional callable returning boolean if node is reachable. It can be used to verify reachability of the node before making a connection. It can speedup the connection process when node not reachable especially with telnet having long timeout. Returns: HopInfo object or None if url is invalid or protocol not supported ### Response: def make_hop_info_from_url(url, verify_reachability=None): """Build HopInfo object from url. It allows only telnet and ssh as a valid protocols. Args: url (str): The url string describing the node. i.e. telnet://username@1.1.1.1. The protocol, username and address portion of url is mandatory. Port and password is optional. If port is missing the standard protocol -> port mapping is done. The password is optional i.e. for TS access directly to console ports. The path part is treated as additional password required for some systems, i.e. enable password for IOS devices.: telnet://<username>:<password>@<host>:<port>/<enable_password> <enable_password> is optional verify_reachability: This is optional callable returning boolean if node is reachable. It can be used to verify reachability of the node before making a connection. It can speedup the connection process when node not reachable especially with telnet having long timeout. 
Returns: HopInfo object or None if url is invalid or protocol not supported """ parsed = urlparse(url) username = None if parsed.username is None else unquote(parsed.username) # It's None if not exists password = None if parsed.password is None else unquote(parsed.password) # It's None if not exists try: enable_password = parse_qs(parsed.query)["enable_password"][0] except KeyError: enable_password = None hop_info = HopInfo( parsed.scheme, parsed.hostname, username, password, parsed.port, enable_password, verify_reachability=verify_reachability ) if hop_info.is_valid(): return hop_info raise InvalidHopInfoError
def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs): """Parses an Messages row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) event_data = HangoutsMessageData() event_data.sender = self._GetRowValue(query_hash, row, 'full_name') event_data.body = self._GetRowValue(query_hash, row, 'text') event_data.offset = self._GetRowValue(query_hash, row, '_id') event_data.query = query event_data.message_status = self._GetRowValue(query_hash, row, 'status') event_data.message_type = self._GetRowValue(query_hash, row, 'type') timestamp = self._GetRowValue(query_hash, row, 'timestamp') date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an Messages row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
Below is the the instruction that describes the task: ### Input: Parses an Messages row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. ### Response: def ParseMessagesRow(self, parser_mediator, query, row, **unused_kwargs): """Parses an Messages row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row. """ query_hash = hash(query) event_data = HangoutsMessageData() event_data.sender = self._GetRowValue(query_hash, row, 'full_name') event_data.body = self._GetRowValue(query_hash, row, 'text') event_data.offset = self._GetRowValue(query_hash, row, '_id') event_data.query = query event_data.message_status = self._GetRowValue(query_hash, row, 'status') event_data.message_type = self._GetRowValue(query_hash, row, 'type') timestamp = self._GetRowValue(query_hash, row, 'timestamp') date_time = dfdatetime_posix_time.PosixTimeInMicroseconds( timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_CREATION) parser_mediator.ProduceEventWithEventData(event, event_data)
def bounding_primitive(self): """ The minimum volume primitive (box, sphere, or cylinder) that bounds the mesh. Returns --------- bounding_primitive : trimesh.primitives.Sphere trimesh.primitives.Box trimesh.primitives.Cylinder Primitive which bounds the mesh with the smallest volume """ options = [self.bounding_box_oriented, self.bounding_sphere, self.bounding_cylinder] volume_min = np.argmin([i.volume for i in options]) bounding_primitive = options[volume_min] return bounding_primitive
The minimum volume primitive (box, sphere, or cylinder) that bounds the mesh. Returns --------- bounding_primitive : trimesh.primitives.Sphere trimesh.primitives.Box trimesh.primitives.Cylinder Primitive which bounds the mesh with the smallest volume
Below is the the instruction that describes the task: ### Input: The minimum volume primitive (box, sphere, or cylinder) that bounds the mesh. Returns --------- bounding_primitive : trimesh.primitives.Sphere trimesh.primitives.Box trimesh.primitives.Cylinder Primitive which bounds the mesh with the smallest volume ### Response: def bounding_primitive(self): """ The minimum volume primitive (box, sphere, or cylinder) that bounds the mesh. Returns --------- bounding_primitive : trimesh.primitives.Sphere trimesh.primitives.Box trimesh.primitives.Cylinder Primitive which bounds the mesh with the smallest volume """ options = [self.bounding_box_oriented, self.bounding_sphere, self.bounding_cylinder] volume_min = np.argmin([i.volume for i in options]) bounding_primitive = options[volume_min] return bounding_primitive
def _setse(self, i): """Initialise bitstring with signed exponential-Golomb code for integer i.""" if i > 0: u = (i * 2) - 1 else: u = -2 * i self._setue(u)
Initialise bitstring with signed exponential-Golomb code for integer i.
Below is the the instruction that describes the task: ### Input: Initialise bitstring with signed exponential-Golomb code for integer i. ### Response: def _setse(self, i): """Initialise bitstring with signed exponential-Golomb code for integer i.""" if i > 0: u = (i * 2) - 1 else: u = -2 * i self._setue(u)
def list_closed_workflow_executions(domain=None, startTimeFilter=None, closeTimeFilter=None, executionFilter=None, closeStatusFilter=None, typeFilter=None, tagFilter=None, nextPageToken=None, maximumPageSize=None, reverseOrder=None): """ Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows . See also: AWS API Documentation :example: response = client.list_closed_workflow_executions( domain='string', startTimeFilter={ 'oldestDate': datetime(2015, 1, 1), 'latestDate': datetime(2015, 1, 1) }, closeTimeFilter={ 'oldestDate': datetime(2015, 1, 1), 'latestDate': datetime(2015, 1, 1) }, executionFilter={ 'workflowId': 'string' }, closeStatusFilter={ 'status': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT' }, typeFilter={ 'name': 'string', 'version': 'string' }, tagFilter={ 'tag': 'string' }, nextPageToken='string', maximumPageSize=123, reverseOrder=True|False ) :type domain: string :param domain: [REQUIRED] The name of the domain that contains the workflow executions to list. :type startTimeFilter: dict :param startTimeFilter: If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times. 
Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. :type closeTimeFilter: dict :param closeTimeFilter: If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. :type executionFilter: dict :param executionFilter: If specified, only workflow executions matching the workflow ID specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. workflowId (string) -- [REQUIRED]The workflowId to pass of match the criteria of this filter. :type closeStatusFilter: dict :param closeStatusFilter: If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter. :type typeFilter: dict :param typeFilter: If specified, only executions of the type specified in the filter are returned. 
Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. name (string) -- [REQUIRED]Required. Name of the workflow type. version (string) --Version of the workflow type. :type tagFilter: dict :param tagFilter: If specified, only executions that have the matching tag are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria. :type nextPageToken: string :param nextPageToken: If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. :type maximumPageSize: integer :param maximumPageSize: The maximum number of results that will be returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. :type reverseOrder: boolean :param reverseOrder: When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions. 
:rtype: dict :return: { 'executionInfos': [ { 'execution': { 'workflowId': 'string', 'runId': 'string' }, 'workflowType': { 'name': 'string', 'version': 'string' }, 'startTimestamp': datetime(2015, 1, 1), 'closeTimestamp': datetime(2015, 1, 1), 'executionStatus': 'OPEN'|'CLOSED', 'closeStatus': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT', 'parent': { 'workflowId': 'string', 'runId': 'string' }, 'tagList': [ 'string', ], 'cancelRequested': True|False }, ], 'nextPageToken': 'string' } :returns: domain (string) -- [REQUIRED] The name of the domain that contains the workflow executions to list. startTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. closeTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. executionFilter (dict) -- If specified, only workflow executions matching the workflow ID specified in the filter are returned. 
Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. workflowId (string) -- [REQUIRED]The workflowId to pass of match the criteria of this filter. closeStatusFilter (dict) -- If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter. typeFilter (dict) -- If specified, only executions of the type specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. name (string) -- [REQUIRED]Required. Name of the workflow type. version (string) --Version of the workflow type. tagFilter (dict) -- If specified, only executions that have the matching tag are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria. nextPageToken (string) -- If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. maximumPageSize (integer) -- The maximum number of results that will be returned per call. nextPageToken can be used to obtain futher pages of results. 
The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. reverseOrder (boolean) -- When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions. """ pass
Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows . See also: AWS API Documentation :example: response = client.list_closed_workflow_executions( domain='string', startTimeFilter={ 'oldestDate': datetime(2015, 1, 1), 'latestDate': datetime(2015, 1, 1) }, closeTimeFilter={ 'oldestDate': datetime(2015, 1, 1), 'latestDate': datetime(2015, 1, 1) }, executionFilter={ 'workflowId': 'string' }, closeStatusFilter={ 'status': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT' }, typeFilter={ 'name': 'string', 'version': 'string' }, tagFilter={ 'tag': 'string' }, nextPageToken='string', maximumPageSize=123, reverseOrder=True|False ) :type domain: string :param domain: [REQUIRED] The name of the domain that contains the workflow executions to list. :type startTimeFilter: dict :param startTimeFilter: If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. 
latestDate (datetime) --Specifies the latest start or close date and time to return. :type closeTimeFilter: dict :param closeTimeFilter: If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. :type executionFilter: dict :param executionFilter: If specified, only workflow executions matching the workflow ID specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. workflowId (string) -- [REQUIRED]The workflowId to pass of match the criteria of this filter. :type closeStatusFilter: dict :param closeStatusFilter: If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter. :type typeFilter: dict :param typeFilter: If specified, only executions of the type specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. name (string) -- [REQUIRED]Required. Name of the workflow type. version (string) --Version of the workflow type. 
:type tagFilter: dict :param tagFilter: If specified, only executions that have the matching tag are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria. :type nextPageToken: string :param nextPageToken: If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. :type maximumPageSize: integer :param maximumPageSize: The maximum number of results that will be returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. :type reverseOrder: boolean :param reverseOrder: When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions. 
:rtype: dict :return: { 'executionInfos': [ { 'execution': { 'workflowId': 'string', 'runId': 'string' }, 'workflowType': { 'name': 'string', 'version': 'string' }, 'startTimestamp': datetime(2015, 1, 1), 'closeTimestamp': datetime(2015, 1, 1), 'executionStatus': 'OPEN'|'CLOSED', 'closeStatus': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT', 'parent': { 'workflowId': 'string', 'runId': 'string' }, 'tagList': [ 'string', ], 'cancelRequested': True|False }, ], 'nextPageToken': 'string' } :returns: domain (string) -- [REQUIRED] The name of the domain that contains the workflow executions to list. startTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. closeTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. executionFilter (dict) -- If specified, only workflow executions matching the workflow ID specified in the filter are returned. 
Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. workflowId (string) -- [REQUIRED]The workflowId to pass of match the criteria of this filter. closeStatusFilter (dict) -- If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter. typeFilter (dict) -- If specified, only executions of the type specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. name (string) -- [REQUIRED]Required. Name of the workflow type. version (string) --Version of the workflow type. tagFilter (dict) -- If specified, only executions that have the matching tag are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria. nextPageToken (string) -- If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. maximumPageSize (integer) -- The maximum number of results that will be returned per call. nextPageToken can be used to obtain futher pages of results. 
The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. reverseOrder (boolean) -- When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions.
Below is the the instruction that describes the task: ### Input: Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows . See also: AWS API Documentation :example: response = client.list_closed_workflow_executions( domain='string', startTimeFilter={ 'oldestDate': datetime(2015, 1, 1), 'latestDate': datetime(2015, 1, 1) }, closeTimeFilter={ 'oldestDate': datetime(2015, 1, 1), 'latestDate': datetime(2015, 1, 1) }, executionFilter={ 'workflowId': 'string' }, closeStatusFilter={ 'status': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT' }, typeFilter={ 'name': 'string', 'version': 'string' }, tagFilter={ 'tag': 'string' }, nextPageToken='string', maximumPageSize=123, reverseOrder=True|False ) :type domain: string :param domain: [REQUIRED] The name of the domain that contains the workflow executions to list. :type startTimeFilter: dict :param startTimeFilter: If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. 
oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. :type closeTimeFilter: dict :param closeTimeFilter: If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. :type executionFilter: dict :param executionFilter: If specified, only workflow executions matching the workflow ID specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. workflowId (string) -- [REQUIRED]The workflowId to pass of match the criteria of this filter. :type closeStatusFilter: dict :param closeStatusFilter: If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter. :type typeFilter: dict :param typeFilter: If specified, only executions of the type specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. 
name (string) -- [REQUIRED]Required. Name of the workflow type. version (string) --Version of the workflow type. :type tagFilter: dict :param tagFilter: If specified, only executions that have the matching tag are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria. :type nextPageToken: string :param nextPageToken: If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. :type maximumPageSize: integer :param maximumPageSize: The maximum number of results that will be returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. :type reverseOrder: boolean :param reverseOrder: When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions. 
:rtype: dict :return: { 'executionInfos': [ { 'execution': { 'workflowId': 'string', 'runId': 'string' }, 'workflowType': { 'name': 'string', 'version': 'string' }, 'startTimestamp': datetime(2015, 1, 1), 'closeTimestamp': datetime(2015, 1, 1), 'executionStatus': 'OPEN'|'CLOSED', 'closeStatus': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT', 'parent': { 'workflowId': 'string', 'runId': 'string' }, 'tagList': [ 'string', ], 'cancelRequested': True|False }, ], 'nextPageToken': 'string' } :returns: domain (string) -- [REQUIRED] The name of the domain that contains the workflow executions to list. startTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. closeTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. executionFilter (dict) -- If specified, only workflow executions matching the workflow ID specified in the filter are returned. 
Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. workflowId (string) -- [REQUIRED]The workflowId to pass of match the criteria of this filter. closeStatusFilter (dict) -- If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter. typeFilter (dict) -- If specified, only executions of the type specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. name (string) -- [REQUIRED]Required. Name of the workflow type. version (string) --Version of the workflow type. tagFilter (dict) -- If specified, only executions that have the matching tag are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria. nextPageToken (string) -- If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. maximumPageSize (integer) -- The maximum number of results that will be returned per call. nextPageToken can be used to obtain futher pages of results. 
The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. reverseOrder (boolean) -- When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions. ### Response: def list_closed_workflow_executions(domain=None, startTimeFilter=None, closeTimeFilter=None, executionFilter=None, closeStatusFilter=None, typeFilter=None, tagFilter=None, nextPageToken=None, maximumPageSize=None, reverseOrder=None): """ Returns a list of closed workflow executions in the specified domain that meet the filtering criteria. The results may be split into multiple pages. To retrieve subsequent pages, make the call again using the nextPageToken returned by the initial call. Access Control You can use IAM policies to control this action's access to Amazon SWF resources as follows: If the caller does not have sufficient permissions to invoke the action, or the parameter values fall outside the specified constraints, the action fails. The associated event attribute's cause parameter will be set to OPERATION_NOT_PERMITTED. For details and example IAM policies, see Using IAM to Manage Access to Amazon SWF Workflows . 
See also: AWS API Documentation :example: response = client.list_closed_workflow_executions( domain='string', startTimeFilter={ 'oldestDate': datetime(2015, 1, 1), 'latestDate': datetime(2015, 1, 1) }, closeTimeFilter={ 'oldestDate': datetime(2015, 1, 1), 'latestDate': datetime(2015, 1, 1) }, executionFilter={ 'workflowId': 'string' }, closeStatusFilter={ 'status': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT' }, typeFilter={ 'name': 'string', 'version': 'string' }, tagFilter={ 'tag': 'string' }, nextPageToken='string', maximumPageSize=123, reverseOrder=True|False ) :type domain: string :param domain: [REQUIRED] The name of the domain that contains the workflow executions to list. :type startTimeFilter: dict :param startTimeFilter: If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. :type closeTimeFilter: dict :param closeTimeFilter: If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. 
:type executionFilter: dict :param executionFilter: If specified, only workflow executions matching the workflow ID specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. workflowId (string) -- [REQUIRED]The workflowId to pass of match the criteria of this filter. :type closeStatusFilter: dict :param closeStatusFilter: If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter. :type typeFilter: dict :param typeFilter: If specified, only executions of the type specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. name (string) -- [REQUIRED]Required. Name of the workflow type. version (string) --Version of the workflow type. :type tagFilter: dict :param tagFilter: If specified, only executions that have the matching tag are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria. :type nextPageToken: string :param nextPageToken: If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. 
The configured maximumPageSize determines how many results can be returned in a single call. :type maximumPageSize: integer :param maximumPageSize: The maximum number of results that will be returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. :type reverseOrder: boolean :param reverseOrder: When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions. :rtype: dict :return: { 'executionInfos': [ { 'execution': { 'workflowId': 'string', 'runId': 'string' }, 'workflowType': { 'name': 'string', 'version': 'string' }, 'startTimestamp': datetime(2015, 1, 1), 'closeTimestamp': datetime(2015, 1, 1), 'executionStatus': 'OPEN'|'CLOSED', 'closeStatus': 'COMPLETED'|'FAILED'|'CANCELED'|'TERMINATED'|'CONTINUED_AS_NEW'|'TIMED_OUT', 'parent': { 'workflowId': 'string', 'runId': 'string' }, 'tagList': [ 'string', ], 'cancelRequested': True|False }, ], 'nextPageToken': 'string' } :returns: domain (string) -- [REQUIRED] The name of the domain that contains the workflow executions to list. startTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their start times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their start times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. 
closeTimeFilter (dict) -- If specified, the workflow executions are included in the returned results based on whether their close times are within the range specified by this filter. Also, if this parameter is specified, the returned results are ordered by their close times. Note startTimeFilter and closeTimeFilter are mutually exclusive. You must specify one of these in a request but not both. oldestDate (datetime) -- [REQUIRED]Specifies the oldest start or close date and time to return. latestDate (datetime) --Specifies the latest start or close date and time to return. executionFilter (dict) -- If specified, only workflow executions matching the workflow ID specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. workflowId (string) -- [REQUIRED]The workflowId to pass of match the criteria of this filter. closeStatusFilter (dict) -- If specified, only workflow executions that match this close status are listed. For example, if TERMINATED is specified, then only TERMINATED workflow executions are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. status (string) -- [REQUIRED]Required. The close status that must match the close status of an execution for it to meet the criteria of this filter. typeFilter (dict) -- If specified, only executions of the type specified in the filter are returned. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. You can specify at most one of these in a request. name (string) -- [REQUIRED]Required. Name of the workflow type. version (string) --Version of the workflow type. tagFilter (dict) -- If specified, only executions that have the matching tag are listed. Note closeStatusFilter , executionFilter , typeFilter and tagFilter are mutually exclusive. 
You can specify at most one of these in a request. tag (string) -- [REQUIRED]Required. Specifies the tag that must be associated with the execution for it to meet the filter criteria. nextPageToken (string) -- If a NextPageToken was returned by a previous call, there are more results available. To retrieve the next page of results, make the call again using the returned token in nextPageToken . Keep all other arguments unchanged. The configured maximumPageSize determines how many results can be returned in a single call. maximumPageSize (integer) -- The maximum number of results that will be returned per call. nextPageToken can be used to obtain futher pages of results. The default is 1000, which is the maximum allowed page size. You can, however, specify a page size smaller than the maximum. This is an upper limit only; the actual number of results returned per call may be fewer than the specified maximum. reverseOrder (boolean) -- When set to true , returns the results in reverse order. By default the results are returned in descending order of the start or the close time of the executions. """ pass
def send(channel, message, **kwargs):
    """
    Site: https://slack.com
    API: https://api.slack.com
    Desc: real-time messaging

    Post ``message`` to a Slack channel through an incoming-webhook URL.

    Keyword overrides: ``username``, ``hook_url``, ``emoji`` and ``data``
    (a dict merged into the payload). Raises :class:`SlackError` when
    Slack answers with a non-200 status or a body other than ``"ok"``.
    Returns ``True`` on success.
    """
    # Resolve overridable settings, falling back to project defaults.
    username = from_unicode(kwargs.pop("username", settings.SLACK_USERNAME))
    hook_url = from_unicode(kwargs.pop("hook_url", settings.SLACK_HOOCK_URL))
    emoji = from_unicode(kwargs.pop("emoji", ""))

    payload = {
        "channel": from_unicode(channel or settings.SLACK_CHANNEL),
        "username": username,
        "text": from_unicode(message),
        "icon_emoji": emoji,
    }

    # Callers may supply extra payload fields (e.g. attachments).
    extra = kwargs.pop('data', None)
    if extra is not None:
        payload.update(extra)

    parsed = urlparse(hook_url)
    connection = HTTPSConnection(parsed.netloc)
    connection.request(
        "POST", parsed.path,
        headers={
            "Content-type": "application/x-www-form-urlencoded",
            "User-Agent": "DBMail/%s" % get_version(),
        },
        body=urlencode({"payload": dumps(payload)}))

    response = connection.getresponse()
    if response.status != 200:
        raise SlackError(response.reason)

    body = response.read()
    if body != "ok":
        raise SlackError(repr(body))
    return True
Site: https://slack.com API: https://api.slack.com Desc: real-time messaging
Below is the the instruction that describes the task: ### Input: Site: https://slack.com API: https://api.slack.com Desc: real-time messaging ### Response: def send(channel, message, **kwargs): """ Site: https://slack.com API: https://api.slack.com Desc: real-time messaging """ headers = { "Content-type": "application/x-www-form-urlencoded", "User-Agent": "DBMail/%s" % get_version(), } username = from_unicode(kwargs.pop("username", settings.SLACK_USERNAME)) hook_url = from_unicode(kwargs.pop("hook_url", settings.SLACK_HOOCK_URL)) channel = from_unicode(channel or settings.SLACK_CHANNEL) emoji = from_unicode(kwargs.pop("emoji", "")) message = from_unicode(message) data = { "channel": channel, "username": username, "text": message, "icon_emoji": emoji, } _data = kwargs.pop('data', None) if _data is not None: data.update(_data) up = urlparse(hook_url) http = HTTPSConnection(up.netloc) http.request( "POST", up.path, headers=headers, body=urlencode({"payload": dumps(data)})) response = http.getresponse() if response.status != 200: raise SlackError(response.reason) body = response.read() if body != "ok": raise SlackError(repr(body)) return True
def tail(fpath, n=2, trailing=True):
    """ Alias for path_ndir_split

    Forwards all arguments unchanged to ``path_ndir_split`` — presumably
    returning the last ``n`` path components of ``fpath``, keeping a
    trailing separator when ``trailing`` is True (confirm against that
    function's definition).
    """
    return path_ndir_split(fpath, n=n, trailing=trailing)
Alias for path_ndir_split
Below is the the instruction that describes the task: ### Input: Alias for path_ndir_split ### Response: def tail(fpath, n=2, trailing=True): """ Alias for path_ndir_split """ return path_ndir_split(fpath, n=n, trailing=trailing)
def search(self, query, threshold=None):
    """Find indexed items whose key is at least ``threshold`` similar
    to the query string.

    :param query: returned items will have at least `threshold` \
    similarity to the query string.

    :return: list of pairs of (item, similarity) by decreasing similarity.

    >>> from ngram import NGram
    >>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1])
    >>> sorted(n.search("SPA"))
    [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)]
    >>> n.search("M")
    [((0, 'SPAM'), 0.125)]
    >>> n.search("EG")
    [((2, 'EG'), 1.0)]
    """
    if threshold is None:
        threshold = self.threshold
    # The padded query length is the same for every candidate, so
    # compute it once outside the loop.
    padded_len = len(self.pad(query))
    hits = []
    # Only items sharing at least one n-gram with the query can score
    # above zero, so they are the only candidates considered.
    for item, shared in self.items_sharing_ngrams(query).items():
        total = (padded_len + self.length[item]
                 - (2 * self.N) - shared + 2)
        score = self.ngram_similarity(shared, total, self.warp)
        if score >= threshold:
            hits.append((item, score))
    # Most similar results first.
    hits.sort(key=lambda pair: pair[1], reverse=True)
    return hits
Search the index for items whose key exceeds threshold similarity to the query string. :param query: returned items will have at least `threshold` \ similarity to the query string. :return: list of pairs of (item, similarity) by decreasing similarity. >>> from ngram import NGram >>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1]) >>> sorted(n.search("SPA")) [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)] >>> n.search("M") [((0, 'SPAM'), 0.125)] >>> n.search("EG") [((2, 'EG'), 1.0)]
Below is the the instruction that describes the task: ### Input: Search the index for items whose key exceeds threshold similarity to the query string. :param query: returned items will have at least `threshold` \ similarity to the query string. :return: list of pairs of (item, similarity) by decreasing similarity. >>> from ngram import NGram >>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1]) >>> sorted(n.search("SPA")) [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)] >>> n.search("M") [((0, 'SPAM'), 0.125)] >>> n.search("EG") [((2, 'EG'), 1.0)] ### Response: def search(self, query, threshold=None): """Search the index for items whose key exceeds threshold similarity to the query string. :param query: returned items will have at least `threshold` \ similarity to the query string. :return: list of pairs of (item, similarity) by decreasing similarity. >>> from ngram import NGram >>> n = NGram([(0, "SPAM"), (1, "SPAN"), (2, "EG")], key=lambda x:x[1]) >>> sorted(n.search("SPA")) [((0, 'SPAM'), 0.375), ((1, 'SPAN'), 0.375)] >>> n.search("M") [((0, 'SPAM'), 0.125)] >>> n.search("EG") [((2, 'EG'), 1.0)] """ threshold = threshold if threshold is not None else self.threshold results = [] # Identify possible results for match, samegrams in self.items_sharing_ngrams(query).items(): allgrams = (len(self.pad(query)) + self.length[match] - (2 * self.N) - samegrams + 2) similarity = self.ngram_similarity(samegrams, allgrams, self.warp) if similarity >= threshold: results.append((match, similarity)) # Sort results by decreasing similarity results.sort(key=lambda x: x[1], reverse=True) return results
def connect(self, peer_address):
    """Client-side UDP connection establishment

    This method connects this object's underlying socket. It subsequently
    performs a handshake if do_handshake_on_connect was set during
    initialization.

    Arguments:
    peer_address - address tuple of server peer
    """
    self._sock.connect(peer_address)
    # Re-read the peer address from the connected socket: connect() may
    # have substituted a resolved numeric addrinfo for a hostname, and
    # the datagram BIO needs the actual address.
    peer_address = self._sock.getpeername()  # substituted host addrinfo
    # Inform OpenSSL's datagram write-BIO of its single fixed peer.
    BIO_dgram_set_connected(self._wbio.value, peer_address)
    # A connected client is expected to use one BIO for both directions,
    # so setting the peer on _wbio covers reads as well.
    assert self._wbio is self._rbio
    if self._do_handshake_on_connect:
        self.do_handshake()
Client-side UDP connection establishment This method connects this object's underlying socket. It subsequently performs a handshake if do_handshake_on_connect was set during initialization. Arguments: peer_address - address tuple of server peer
Below is the the instruction that describes the task: ### Input: Client-side UDP connection establishment This method connects this object's underlying socket. It subsequently performs a handshake if do_handshake_on_connect was set during initialization. Arguments: peer_address - address tuple of server peer ### Response: def connect(self, peer_address): """Client-side UDP connection establishment This method connects this object's underlying socket. It subsequently performs a handshake if do_handshake_on_connect was set during initialization. Arguments: peer_address - address tuple of server peer """ self._sock.connect(peer_address) peer_address = self._sock.getpeername() # substituted host addrinfo BIO_dgram_set_connected(self._wbio.value, peer_address) assert self._wbio is self._rbio if self._do_handshake_on_connect: self.do_handshake()
def makedirs(self, dir_name, mode=PERM_DEF, exist_ok=False):
    """Create a leaf Fake directory and create any non-existent
    parent dirs.

    Args:
        dir_name: (str) Name of directory to create.
        mode: (int) Mode to create directory (and any necessary parent
            directories) with. This argument defaults to 0o777.
            The umask is applied to this mode.
        exist_ok: (boolean) If exist_ok is False (the default), an
            OSError is raised if the target directory already exists.
            New in Python 3.2.

    Raises:
        OSError: if the directory already exists and exist_ok=False,
            or as per :py:meth:`create_dir`.
    """
    # Remember whether the caller wrote a trailing separator before
    # normalisation strips it; it matters for the macOS symlink case.
    ends_with_sep = self.ends_with_path_separator(dir_name)
    dir_name = self.absnormpath(dir_name)
    # macOS: "link/" resolves through the link, so a symlink whose
    # target does not exist (exists as a link, but not as a path)
    # would otherwise raise EEXIST below.
    if (ends_with_sep and self.is_macos and
            self.exists(dir_name, check_link=True) and
            not self.exists(dir_name)):
        # to avoid EEXIST exception, remove the link
        self.remove_object(dir_name)
    path_components = self._path_components(dir_name)

    # Raise a permission denied error if the first existing directory
    # is not writeable.
    current_dir = self.root
    for component in path_components:
        # Walk the fake tree until the first missing component or a
        # node whose contents are not a directory mapping.
        if (component not in current_dir.contents or
                not isinstance(current_dir.contents, dict)):
            break
        else:
            current_dir = current_dir.contents[component]
    try:
        # Apply the umask to the requested mode, as real makedirs does.
        self.create_dir(dir_name, mode & ~self.umask)
    except (IOError, OSError) as e:
        # With exist_ok=True an already-existing *directory* is fine;
        # any other failure is translated and re-raised.
        if (not exist_ok or
                not isinstance(self.resolve(dir_name), FakeDirectory)):
            if self.is_windows_fs and e.errno == errno.ENOTDIR:
                e.errno = errno.ENOENT
            self.raise_os_error(e.errno, e.filename)
def active_status(self, request):
    """Report worker liveness purely through HTTP status codes.

    Only available when django-celery is installed (501 otherwise).
    Assumes the WorkerState table contains only "active workers",
    which requires uniquely named workers, e.g.
    "-n worker1@localhost:8000".
    http://docs.celeryproject.org/en/latest/userguide/workers.html#starting-the-worker

    Returns 200 when every known worker responds, 404 otherwise.
    """
    # Without django-celery there is no WorkerState model to compare
    # against, so the endpoint is simply not implemented.
    if "djcelery" not in settings.INSTALLED_APPS:
        return Response(status=status.HTTP_501_NOT_IMPLEMENTED)

    from djcelery.models import WorkerState

    expected = WorkerState.objects.all().count()
    # inspect.active() returns None when no broker/workers answer.
    active = self.inspect.active()
    if active is not None and len(active) == expected:
        return Response(status=status.HTTP_200_OK)
    return Response(status=status.HTTP_404_NOT_FOUND)
This will only work if you have django-celery installed (for now). In case you only need to work with status codes to find out if the workers are up or not. This will only work if we assume our db only contains "active workers". To use this feature, you must ensure you use only named workers, For example: "-n worker1@localhost:8000". http://docs.celeryproject.org/en/latest/userguide/workers.html#starting-the-worker
Below is the the instruction that describes the task: ### Input: This will only work if you have django-celery installed (for now). In case you only need to work with status codes to find out if the workers are up or not. This will only work if we assume our db only contains "active workers". To use this feature, you must ensure you use only named workers, For example: "-n worker1@localhost:8000". http://docs.celeryproject.org/en/latest/userguide/workers.html#starting-the-worker ### Response: def active_status(self, request): """ This will only work if you have django-celery installed (for now). In case you only need to work with status codes to find out if the workers are up or not. This will only work if we assume our db only contains "active workers". To use this feature, you must ensure you use only named workers, For example: "-n worker1@localhost:8000". http://docs.celeryproject.org/en/latest/userguide/workers.html#starting-the-worker """ app_installed = "djcelery" in settings.INSTALLED_APPS if not app_installed: return Response(status=status.HTTP_501_NOT_IMPLEMENTED) from djcelery.models import WorkerState count_workers = WorkerState.objects.all().count() result = self.inspect.active() if result is not None and count_workers == len(result): return Response(status=status.HTTP_200_OK) return Response(status=status.HTTP_404_NOT_FOUND)
def weight(self):
    """
    Current weight of the Node (with respect to the parent).
    """
    # If the tree is flagged stale, refresh it from the root so the
    # cached ``_weight`` reflects the latest state before returning.
    if self.root.stale:
        self.root.update(self.root.now, None)
    return self._weight
Current weight of the Node (with respect to the parent).
Below is the the instruction that describes the task: ### Input: Current weight of the Node (with respect to the parent). ### Response: def weight(self): """ Current weight of the Node (with respect to the parent). """ if self.root.stale: self.root.update(self.root.now, None) return self._weight
def for_point(cls, point, zoom):
    """Build the tile containing ``point`` at the requested ``zoom``."""
    lat, lng = point.latitude_longitude
    return cls.for_latitude_longitude(latitude=lat, longitude=lng, zoom=zoom)
Creates a tile for given point
Below is the the instruction that describes the task: ### Input: Creates a tile for given point ### Response: def for_point(cls, point, zoom): """Creates a tile for given point""" latitude, longitude = point.latitude_longitude return cls.for_latitude_longitude(latitude=latitude, longitude=longitude, zoom=zoom)
def get_page_as_pdf(self, page_id):
    """
    Export a page through Confluence's standard PDF exporter.
    :param page_id: Page ID
    :return: PDF File
    """
    export_url = 'spaces/flyingpdf/pdfpageexport.action?pageId={pageId}'.format(pageId=page_id)
    # The exporter returns raw PDF bytes, so the response must not be
    # parsed as JSON; the form-token headers authorise the export.
    return self.get(export_url,
                    headers=self.form_token_headers,
                    not_json_response=True)
Export page as standard pdf exporter :param page_id: Page ID :return: PDF File
Below is the the instruction that describes the task: ### Input: Export page as standard pdf exporter :param page_id: Page ID :return: PDF File ### Response: def get_page_as_pdf(self, page_id): """ Export page as standard pdf exporter :param page_id: Page ID :return: PDF File """ headers = self.form_token_headers url = 'spaces/flyingpdf/pdfpageexport.action?pageId={pageId}'.format(pageId=page_id) return self.get(url, headers=headers, not_json_response=True)
def get_device_type(device_type=0):
    """Return the device type from a device_type list.

    :param device_type: integer type code; 0 = Unknown, 1 = Classic,
        2 = Low Energy, 3 = Dual Mode. Any other value (including
        unhashable input) maps to "Unknown".
    :return: descriptive device-type string.
    """
    device_types = {
        0: "Unknown",
        1: "Classic - BR/EDR devices",
        2: "Low Energy - LE-only",
        3: "Dual Mode - BR/EDR/LE"
    }
    try:
        # Single lookup with a fallback replaces the original LBYL
        # membership test against a hand-duplicated list of the keys.
        return device_types.get(device_type, device_types[0])
    except TypeError:
        # Unhashable input (e.g. a list) cannot be a valid type code;
        # the original list-membership test also mapped it to Unknown.
        return device_types[0]
Return the device type from a device_type list.
Below is the the instruction that describes the task: ### Input: Return the device type from a device_type list. ### Response: def get_device_type(device_type=0): """Return the device type from a device_type list.""" device_types = { 0: "Unknown", 1: "Classic - BR/EDR devices", 2: "Low Energy - LE-only", 3: "Dual Mode - BR/EDR/LE" } if device_type in [0, 1, 2, 3]: return_value = device_types[device_type] else: return_value = device_types[0] return return_value
def groupby(self, *args, **kwargs):
    """Takes the same parameters as groupby on DataFrame. Like with
    groupby on DataFrame disabling sorting will result in an even larger
    performance improvement. This returns a Sparkling Pandas L{GroupBy}
    object which supports many of the same operations as regular GroupBy
    but not all."""
    # NOTE(review): import is local — presumably to avoid a circular
    # dependency with sparklingpandas.groupby; confirm before moving.
    from sparklingpandas.groupby import GroupBy
    # Wrap this frame's underlying RDD, forwarding all grouping args.
    return GroupBy(self._rdd, *args, **kwargs)
Takes the same parameters as groupby on DataFrame. Like with groupby on DataFrame disabling sorting will result in an even larger performance improvement. This returns a Sparkling Pandas L{GroupBy} object which supports many of the same operations as regular GroupBy but not all.
Below is the the instruction that describes the task: ### Input: Takes the same parameters as groupby on DataFrame. Like with groupby on DataFrame disabling sorting will result in an even larger performance improvement. This returns a Sparkling Pandas L{GroupBy} object which supports many of the same operations as regular GroupBy but not all. ### Response: def groupby(self, *args, **kwargs): """Takes the same parameters as groupby on DataFrame. Like with groupby on DataFrame disabling sorting will result in an even larger performance improvement. This returns a Sparkling Pandas L{GroupBy} object which supports many of the same operations as regular GroupBy but not all.""" from sparklingpandas.groupby import GroupBy return GroupBy(self._rdd, *args, **kwargs)
def plot(x, y, z, ax=None, **kwargs):
    r"""
    Plot iso-probability mass function, converted to sigmas.

    Parameters
    ----------
    x, y, z : numpy arrays
        Same as arguments to :func:`matplotlib.pyplot.contour`

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the
        contours onto. If unsupplied, then :func:`matplotlib.pyplot.gca()`
        is used to get the last axis used, or create a new one.

    colors: color scheme, optional
        :class:`matplotlib.colors.LinearSegmentedColormap`
        Color scheme to plot with. Recommend plotting in reverse
        (Default: :class:`matplotlib.pyplot.cm.Reds_r`)

    smooth: float, optional
        Percentage by which to smooth the contours.
        (Default: no smoothing)

    contour_line_levels: List[float], optional
        Contour lines to be plotted. (Default: [1, 2, 3])

    linewidths: float, optional
        Thickness of contour lines. (Default: 0.3)

    contour_color_levels: List[float], optional
        Contour color levels.
        (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`)

    fineness: float, optional
        Spacing of contour color levels. (Default: 0.5)

    lines: bool, optional
        (Default: True)

    rasterize_contours: bool, optional
        Rasterize the contours while keeping the lines, text etc in vector
        format. Useful for reducing file size bloat and making printing
        easier when you have dense contours.
        (Default: False)

    Returns
    -------
    cbar: color bar
        :class:`matplotlib.contour.QuadContourSet`
        Colors to create a global colour bar
    """
    if ax is None:
        ax = matplotlib.pyplot.gca()

    # Get inputs (defaults here are the source of truth; the docstring
    # previously disagreed with them and has been corrected to match).
    colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r)
    smooth = kwargs.pop('smooth', False)
    linewidths = kwargs.pop('linewidths', 0.3)
    contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3])
    fineness = kwargs.pop('fineness', 0.5)
    default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1,
                                        fineness)
    contour_color_levels = kwargs.pop('contour_color_levels',
                                      default_color_levels)
    rasterize_contours = kwargs.pop('rasterize_contours', False)
    lines = kwargs.pop('lines', True)
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Convert probability mass to sigma values via the inverse error
    # function.
    z = numpy.sqrt(2) * scipy.special.erfinv(1 - z)

    # Gaussian filter if desired the sigmas by a factor of smooth%
    if smooth:
        sigma = smooth * numpy.array(z.shape) / 100.0
        z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0)

    # Plot the filled contours onto the axis ax
    cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels)

    # Rasterize contours (the rest of the figure stays in vector format)
    if rasterize_contours:
        for c in cbar.collections:
            c.set_rasterized(True)

    # Remove those annoying white lines between filled levels
    for c in cbar.collections:
        c.set_edgecolor("face")

    # Plot some sigma-based contour lines
    if lines:
        ax.contour(x, y, z, colors='k', linewidths=linewidths,
                   levels=contour_line_levels)

    # Return the contours for use as a colourbar later
    return cbar
r""" Plot iso-probability mass function, converted to sigmas. Parameters ---------- x, y, z : numpy arrays Same as arguments to :func:`matplotlib.pyplot.contour` ax: axes object, optional :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to get the last axis used, or create a new one. colors: color scheme, optional :class:`matplotlib.colors.LinearSegmentedColormap` Color scheme to plot with. Recommend plotting in reverse (Default: :class:`matplotlib.pyplot.cm.Reds_r`) smooth: float, optional Percentage by which to smooth the contours. (Default: no smoothing) contour_line_levels: List[float], optional Contour lines to be plotted. (Default: [1,2]) linewidths: float, optional Thickness of contour lines. (Default: 0.3) contour_color_levels: List[float], optional Contour color levels. (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`) fineness: float, optional Spacing of contour color levels. (Default: 0.1) lines: bool, optional (Default: True) rasterize_contours: bool, optional Rasterize the contours while keeping the lines, text etc in vector format. Useful for reducing file size bloat and making printing easier when you have dense contours. (Default: False) Returns ------- cbar: color bar :class:`matplotlib.contour.QuadContourSet` Colors to create a global colour bar
Below is the the instruction that describes the task: ### Input: r""" Plot iso-probability mass function, converted to sigmas. Parameters ---------- x, y, z : numpy arrays Same as arguments to :func:`matplotlib.pyplot.contour` ax: axes object, optional :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to get the last axis used, or create a new one. colors: color scheme, optional :class:`matplotlib.colors.LinearSegmentedColormap` Color scheme to plot with. Recommend plotting in reverse (Default: :class:`matplotlib.pyplot.cm.Reds_r`) smooth: float, optional Percentage by which to smooth the contours. (Default: no smoothing) contour_line_levels: List[float], optional Contour lines to be plotted. (Default: [1,2]) linewidths: float, optional Thickness of contour lines. (Default: 0.3) contour_color_levels: List[float], optional Contour color levels. (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`) fineness: float, optional Spacing of contour color levels. (Default: 0.1) lines: bool, optional (Default: True) rasterize_contours: bool, optional Rasterize the contours while keeping the lines, text etc in vector format. Useful for reducing file size bloat and making printing easier when you have dense contours. (Default: False) Returns ------- cbar: color bar :class:`matplotlib.contour.QuadContourSet` Colors to create a global colour bar ### Response: def plot(x, y, z, ax=None, **kwargs): r""" Plot iso-probability mass function, converted to sigmas. Parameters ---------- x, y, z : numpy arrays Same as arguments to :func:`matplotlib.pyplot.contour` ax: axes object, optional :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to get the last axis used, or create a new one. colors: color scheme, optional :class:`matplotlib.colors.LinearSegmentedColormap` Color scheme to plot with. 
Recommend plotting in reverse (Default: :class:`matplotlib.pyplot.cm.Reds_r`) smooth: float, optional Percentage by which to smooth the contours. (Default: no smoothing) contour_line_levels: List[float], optional Contour lines to be plotted. (Default: [1,2]) linewidths: float, optional Thickness of contour lines. (Default: 0.3) contour_color_levels: List[float], optional Contour color levels. (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`) fineness: float, optional Spacing of contour color levels. (Default: 0.1) lines: bool, optional (Default: True) rasterize_contours: bool, optional Rasterize the contours while keeping the lines, text etc in vector format. Useful for reducing file size bloat and making printing easier when you have dense contours. (Default: False) Returns ------- cbar: color bar :class:`matplotlib.contour.QuadContourSet` Colors to create a global colour bar """ if ax is None: ax = matplotlib.pyplot.gca() # Get inputs colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r) smooth = kwargs.pop('smooth', False) linewidths = kwargs.pop('linewidths', 0.3) contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3]) fineness = kwargs.pop('fineness', 0.5) default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1, fineness) contour_color_levels = kwargs.pop('contour_color_levels', default_color_levels) rasterize_contours = kwargs.pop('rasterize_contours', False) lines = kwargs.pop('lines', True) if kwargs: raise TypeError('Unexpected **kwargs: %r' % kwargs) # Convert to sigmas z = numpy.sqrt(2) * scipy.special.erfinv(1 - z) # Gaussian filter if desired the sigmas by a factor of smooth% if smooth: sigma = smooth*numpy.array(z.shape)/100.0 z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0) # Plot the filled contours onto the axis ax cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels) # Rasterize contours (the rest of the figure stays in vector format) if rasterize_contours: for c in 
cbar.collections: c.set_rasterized(True) # Remove those annoying white lines for c in cbar.collections: c.set_edgecolor("face") # Plot some sigma-based contour lines if lines: ax.contour(x, y, z, colors='k', linewidths=linewidths, levels=contour_line_levels) # Return the contours for use as a colourbar later return cbar
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING): """ Sets the cpu affinity for the supplied processes. Requires the optional psutil module. :param int n: affinity :param list process_ids: a list of pids :param bool actual: Test workaround for Travis not supporting cpu affinity """ # check if we have the psutil module if not psutil: logger.warning('Skipping cpu affinity because psutil was not found.') return # check if the platform supports cpu_affinity if actual and not hasattr(psutil.Process(process_ids[0]), 'cpu_affinity'): logger.warning('Faking cpu affinity because it is not supported on this platform') actual = False # get the available processors cpu_list = list(range(psutil.cpu_count())) # affinities of 0 or gte cpu_count, equals to no affinity if not n or n >= len(cpu_list): return # spread the workers over the available processors. index = 0 for pid in process_ids: affinity = [] for k in range(n): if index == len(cpu_list): index = 0 affinity.append(cpu_list[index]) index += 1 if psutil.pid_exists(pid): p = psutil.Process(pid) if actual: p.cpu_affinity(affinity) logger.info(_('{} will use cpu {}').format(pid, affinity))
Sets the cpu affinity for the supplied processes. Requires the optional psutil module. :param int n: affinity :param list process_ids: a list of pids :param bool actual: Test workaround for Travis not supporting cpu affinity
Below is the the instruction that describes the task: ### Input: Sets the cpu affinity for the supplied processes. Requires the optional psutil module. :param int n: affinity :param list process_ids: a list of pids :param bool actual: Test workaround for Travis not supporting cpu affinity ### Response: def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING): """ Sets the cpu affinity for the supplied processes. Requires the optional psutil module. :param int n: affinity :param list process_ids: a list of pids :param bool actual: Test workaround for Travis not supporting cpu affinity """ # check if we have the psutil module if not psutil: logger.warning('Skipping cpu affinity because psutil was not found.') return # check if the platform supports cpu_affinity if actual and not hasattr(psutil.Process(process_ids[0]), 'cpu_affinity'): logger.warning('Faking cpu affinity because it is not supported on this platform') actual = False # get the available processors cpu_list = list(range(psutil.cpu_count())) # affinities of 0 or gte cpu_count, equals to no affinity if not n or n >= len(cpu_list): return # spread the workers over the available processors. index = 0 for pid in process_ids: affinity = [] for k in range(n): if index == len(cpu_list): index = 0 affinity.append(cpu_list[index]) index += 1 if psutil.pid_exists(pid): p = psutil.Process(pid) if actual: p.cpu_affinity(affinity) logger.info(_('{} will use cpu {}').format(pid, affinity))
def retrieve(self, request, project, pk=None): """ Returns a job_log_url object given its ID """ log = JobLog.objects.get(id=pk) return Response(self._log_as_dict(log))
Returns a job_log_url object given its ID
Below is the the instruction that describes the task: ### Input: Returns a job_log_url object given its ID ### Response: def retrieve(self, request, project, pk=None): """ Returns a job_log_url object given its ID """ log = JobLog.objects.get(id=pk) return Response(self._log_as_dict(log))
def get_object_query_dict(self): """returns objects keys according to self.object_query_code which can be json encoded queryset filter dict or key=value set in the following format: ```"key=val, key2 = val2 , key3= value with spaces"``` Returns: (dict): Queryset filtering dicqt """ if isinstance(self.object_query_code, dict): # _DATE_ _DATETIME_ return self.object_query_code else: # comma separated, key=value pairs. wrapping spaces will be ignored # eg: "key=val, key2 = val2 , key3= value with spaces" return dict(pair.split('=') for pair in self.object_query_code.split(','))
returns objects keys according to self.object_query_code which can be json encoded queryset filter dict or key=value set in the following format: ```"key=val, key2 = val2 , key3= value with spaces"``` Returns: (dict): Queryset filtering dicqt
Below is the the instruction that describes the task: ### Input: returns objects keys according to self.object_query_code which can be json encoded queryset filter dict or key=value set in the following format: ```"key=val, key2 = val2 , key3= value with spaces"``` Returns: (dict): Queryset filtering dicqt ### Response: def get_object_query_dict(self): """returns objects keys according to self.object_query_code which can be json encoded queryset filter dict or key=value set in the following format: ```"key=val, key2 = val2 , key3= value with spaces"``` Returns: (dict): Queryset filtering dicqt """ if isinstance(self.object_query_code, dict): # _DATE_ _DATETIME_ return self.object_query_code else: # comma separated, key=value pairs. wrapping spaces will be ignored # eg: "key=val, key2 = val2 , key3= value with spaces" return dict(pair.split('=') for pair in self.object_query_code.split(','))
def requeue(self, queue=None, retry_count=0): """ Requeues the current job. Doesn't interrupt it """ if not queue: if not self.data or not self.data.get("queue"): self.fetch(full_data={"_id": 0, "queue": 1, "path": 1}) queue = self.data["queue"] self._save_status("queued", updates={ "queue": queue, "datequeued": datetime.datetime.utcnow(), "retry_count": retry_count })
Requeues the current job. Doesn't interrupt it
Below is the the instruction that describes the task: ### Input: Requeues the current job. Doesn't interrupt it ### Response: def requeue(self, queue=None, retry_count=0): """ Requeues the current job. Doesn't interrupt it """ if not queue: if not self.data or not self.data.get("queue"): self.fetch(full_data={"_id": 0, "queue": 1, "path": 1}) queue = self.data["queue"] self._save_status("queued", updates={ "queue": queue, "datequeued": datetime.datetime.utcnow(), "retry_count": retry_count })
def collect_github_config(): """ Try load Github configuration such as usernames from the local or global git config """ github_config = {} for field in ["user", "token"]: try: github_config[field] = subprocess.check_output(["git", "config", "github.{}".format(field)]).decode('utf-8').strip() except (OSError, subprocess.CalledProcessError): pass return github_config
Try load Github configuration such as usernames from the local or global git config
Below is the the instruction that describes the task: ### Input: Try load Github configuration such as usernames from the local or global git config ### Response: def collect_github_config(): """ Try load Github configuration such as usernames from the local or global git config """ github_config = {} for field in ["user", "token"]: try: github_config[field] = subprocess.check_output(["git", "config", "github.{}".format(field)]).decode('utf-8').strip() except (OSError, subprocess.CalledProcessError): pass return github_config
def __get_slice(data, slice_number, axis=0, flipH=False, flipV=False): """ :param data: :param slice_number: :param axis: :param flipV: vertical flip :param flipH: horizontal flip :return: """ if axis == 0: data2d = data[slice_number, :, :] elif axis == 1: data2d = data[:, slice_number, :] elif axis == 2: data2d = data[:, :, slice_number] else: logger.error("axis number error") print("axis number error") return None if flipV: if data2d is not None: data2d = data2d[-1:0:-1,:] if flipH: if data2d is not None: data2d = data2d[:, -1:0:-1] return data2d
:param data: :param slice_number: :param axis: :param flipV: vertical flip :param flipH: horizontal flip :return:
Below is the the instruction that describes the task: ### Input: :param data: :param slice_number: :param axis: :param flipV: vertical flip :param flipH: horizontal flip :return: ### Response: def __get_slice(data, slice_number, axis=0, flipH=False, flipV=False): """ :param data: :param slice_number: :param axis: :param flipV: vertical flip :param flipH: horizontal flip :return: """ if axis == 0: data2d = data[slice_number, :, :] elif axis == 1: data2d = data[:, slice_number, :] elif axis == 2: data2d = data[:, :, slice_number] else: logger.error("axis number error") print("axis number error") return None if flipV: if data2d is not None: data2d = data2d[-1:0:-1,:] if flipH: if data2d is not None: data2d = data2d[:, -1:0:-1] return data2d
def _return_wrapper(fits, return_all, start, trace): """If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all. """ # make sure it's an iterable if not is_iterable(fits): fits = [fits] # whether to print the final runtime if trace: print('Total fit time: %.3f seconds' % (time.time() - start)) # which to return? if not all, then first index (assume sorted) if not return_all: return fits[0] return fits
If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all.
Below is the the instruction that describes the task: ### Input: If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all. ### Response: def _return_wrapper(fits, return_all, start, trace): """If the user wants to get all of the models back, this will return a list of the ARIMA models, otherwise it will just return the model. If this is called from the end of the function, ``fits`` will already be a list. We *know* that if a function call makes it here, ``fits`` is NOT None or it would have thrown an exception in :func:`_post_ppc_arima`. Parameters ---------- fits : iterable or ARIMA The ARIMA(s) return_all : bool Whether to return all. """ # make sure it's an iterable if not is_iterable(fits): fits = [fits] # whether to print the final runtime if trace: print('Total fit time: %.3f seconds' % (time.time() - start)) # which to return? if not all, then first index (assume sorted) if not return_all: return fits[0] return fits
def get_pipeline_inputs(job, input_flag, input_file): """ Get the input file from s3 or disk, untargz if necessary and then write to file job store. :param job: job :param str input_flag: The name of the flag :param str input_file: The value passed in the config file :return: The jobstore ID for the file """ work_dir = job.fileStore.getLocalTempDir() job.fileStore.logToMaster('Obtaining file (%s) to the file job store' % os.path.basename( input_file)) if input_file.startswith('http'): assert input_file.startswith('https://s3'), input_file + ' is not an S3 file' input_file = get_file_from_s3(job, input_file, write_to_jobstore=False) elif input_file.startswith('S3'): input_file = get_file_from_s3(job, input_file, write_to_jobstore=False) else: assert os.path.exists(input_file), 'Bogus Input : ' + input_file # If the file isn't a tarball, then it is a single file that is tar.gzipped for the # sake of maximum compression instead of enveloping a folder. Thus it should be # decompressed before writing to job store. Also, this is cool but they will by # default also be dumped into the cache! if 'tar' not in input_flag: input_file = untargz(input_file, work_dir) return job.fileStore.writeGlobalFile(input_file)
Get the input file from s3 or disk, untargz if necessary and then write to file job store. :param job: job :param str input_flag: The name of the flag :param str input_file: The value passed in the config file :return: The jobstore ID for the file
Below is the the instruction that describes the task: ### Input: Get the input file from s3 or disk, untargz if necessary and then write to file job store. :param job: job :param str input_flag: The name of the flag :param str input_file: The value passed in the config file :return: The jobstore ID for the file ### Response: def get_pipeline_inputs(job, input_flag, input_file): """ Get the input file from s3 or disk, untargz if necessary and then write to file job store. :param job: job :param str input_flag: The name of the flag :param str input_file: The value passed in the config file :return: The jobstore ID for the file """ work_dir = job.fileStore.getLocalTempDir() job.fileStore.logToMaster('Obtaining file (%s) to the file job store' % os.path.basename( input_file)) if input_file.startswith('http'): assert input_file.startswith('https://s3'), input_file + ' is not an S3 file' input_file = get_file_from_s3(job, input_file, write_to_jobstore=False) elif input_file.startswith('S3'): input_file = get_file_from_s3(job, input_file, write_to_jobstore=False) else: assert os.path.exists(input_file), 'Bogus Input : ' + input_file # If the file isn't a tarball, then it is a single file that is tar.gzipped for the # sake of maximum compression instead of enveloping a folder. Thus it should be # decompressed before writing to job store. Also, this is cool but they will by # default also be dumped into the cache! if 'tar' not in input_flag: input_file = untargz(input_file, work_dir) return job.fileStore.writeGlobalFile(input_file)
def _get_or_create_stack(name): """Returns a thread local stack uniquified by the given name.""" stack = getattr(_LOCAL_STACKS, name, None) if stack is None: stack = [] setattr(_LOCAL_STACKS, name, stack) return stack
Returns a thread local stack uniquified by the given name.
Below is the the instruction that describes the task: ### Input: Returns a thread local stack uniquified by the given name. ### Response: def _get_or_create_stack(name): """Returns a thread local stack uniquified by the given name.""" stack = getattr(_LOCAL_STACKS, name, None) if stack is None: stack = [] setattr(_LOCAL_STACKS, name, stack) return stack
def md5(value): """ Create MD5 :param value: :return: """ m = hashlib.md5() m.update(value) return str(m.hexdigest())
Create MD5 :param value: :return:
Below is the the instruction that describes the task: ### Input: Create MD5 :param value: :return: ### Response: def md5(value): """ Create MD5 :param value: :return: """ m = hashlib.md5() m.update(value) return str(m.hexdigest())
def register_instance(self, instance, allow_dotted_names=False): """Registers an instance to respond to XML-RPC requests. Only one instance can be installed at a time. If the registered instance has a _dispatch method then that method will be called with the name of the XML-RPC method and its parameters as a tuple e.g. instance._dispatch('add',(2,3)) If the registered instance does not have a _dispatch method then the instance will be searched to find a matching method and, if found, will be called. Methods beginning with an '_' are considered private and will not be called by SimpleXMLRPCServer. If a registered function matches a XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the instance does not have a _dispatch method, method names containing dots are supported and resolved, as long as none of the name segments start with an '_'. *** SECURITY WARNING: *** Enabling the allow_dotted_names options allows intruders to access your module's global variables and may allow intruders to execute arbitrary code on your machine. Only use this option on a secure, closed network. """ self.instance = instance self.allow_dotted_names = allow_dotted_names
Registers an instance to respond to XML-RPC requests. Only one instance can be installed at a time. If the registered instance has a _dispatch method then that method will be called with the name of the XML-RPC method and its parameters as a tuple e.g. instance._dispatch('add',(2,3)) If the registered instance does not have a _dispatch method then the instance will be searched to find a matching method and, if found, will be called. Methods beginning with an '_' are considered private and will not be called by SimpleXMLRPCServer. If a registered function matches a XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the instance does not have a _dispatch method, method names containing dots are supported and resolved, as long as none of the name segments start with an '_'. *** SECURITY WARNING: *** Enabling the allow_dotted_names options allows intruders to access your module's global variables and may allow intruders to execute arbitrary code on your machine. Only use this option on a secure, closed network.
Below is the the instruction that describes the task: ### Input: Registers an instance to respond to XML-RPC requests. Only one instance can be installed at a time. If the registered instance has a _dispatch method then that method will be called with the name of the XML-RPC method and its parameters as a tuple e.g. instance._dispatch('add',(2,3)) If the registered instance does not have a _dispatch method then the instance will be searched to find a matching method and, if found, will be called. Methods beginning with an '_' are considered private and will not be called by SimpleXMLRPCServer. If a registered function matches a XML-RPC request, then it will be called instead of the registered instance. If the optional allow_dotted_names argument is true and the instance does not have a _dispatch method, method names containing dots are supported and resolved, as long as none of the name segments start with an '_'. *** SECURITY WARNING: *** Enabling the allow_dotted_names options allows intruders to access your module's global variables and may allow intruders to execute arbitrary code on your machine. Only use this option on a secure, closed network. ### Response: def register_instance(self, instance, allow_dotted_names=False): """Registers an instance to respond to XML-RPC requests. Only one instance can be installed at a time. If the registered instance has a _dispatch method then that method will be called with the name of the XML-RPC method and its parameters as a tuple e.g. instance._dispatch('add',(2,3)) If the registered instance does not have a _dispatch method then the instance will be searched to find a matching method and, if found, will be called. Methods beginning with an '_' are considered private and will not be called by SimpleXMLRPCServer. If a registered function matches a XML-RPC request, then it will be called instead of the registered instance. 
If the optional allow_dotted_names argument is true and the instance does not have a _dispatch method, method names containing dots are supported and resolved, as long as none of the name segments start with an '_'. *** SECURITY WARNING: *** Enabling the allow_dotted_names options allows intruders to access your module's global variables and may allow intruders to execute arbitrary code on your machine. Only use this option on a secure, closed network. """ self.instance = instance self.allow_dotted_names = allow_dotted_names
def get_snapshots(self): """ Returns a list of all completed snapshots for this volume ID. """ ec2 = self.get_ec2_connection() rs = ec2.get_all_snapshots() all_vols = [self.volume_id] + self.past_volume_ids snaps = [] for snapshot in rs: if snapshot.volume_id in all_vols: if snapshot.progress == '100%': snapshot.date = boto.utils.parse_ts(snapshot.start_time) snapshot.keep = True snaps.append(snapshot) snaps.sort(cmp=lambda x,y: cmp(x.date, y.date)) return snaps
Returns a list of all completed snapshots for this volume ID.
Below is the the instruction that describes the task: ### Input: Returns a list of all completed snapshots for this volume ID. ### Response: def get_snapshots(self): """ Returns a list of all completed snapshots for this volume ID. """ ec2 = self.get_ec2_connection() rs = ec2.get_all_snapshots() all_vols = [self.volume_id] + self.past_volume_ids snaps = [] for snapshot in rs: if snapshot.volume_id in all_vols: if snapshot.progress == '100%': snapshot.date = boto.utils.parse_ts(snapshot.start_time) snapshot.keep = True snaps.append(snapshot) snaps.sort(cmp=lambda x,y: cmp(x.date, y.date)) return snaps
def stats(self): """The current status of the positions. Returns ------- stats : PositionStats The current stats position stats. Notes ----- This is cached, repeated access will not recompute the stats until the stats may have changed. """ if self._dirty_stats: calculate_position_tracker_stats(self.positions, self._stats) self._dirty_stats = False return self._stats
The current status of the positions. Returns ------- stats : PositionStats The current stats position stats. Notes ----- This is cached, repeated access will not recompute the stats until the stats may have changed.
Below is the the instruction that describes the task: ### Input: The current status of the positions. Returns ------- stats : PositionStats The current stats position stats. Notes ----- This is cached, repeated access will not recompute the stats until the stats may have changed. ### Response: def stats(self): """The current status of the positions. Returns ------- stats : PositionStats The current stats position stats. Notes ----- This is cached, repeated access will not recompute the stats until the stats may have changed. """ if self._dirty_stats: calculate_position_tracker_stats(self.positions, self._stats) self._dirty_stats = False return self._stats
def parse_station_table(root): """Parse station list XML file.""" stations = [parse_xml_station(elem) for elem in root.findall('station')] return {st.id: st for st in stations}
Parse station list XML file.
Below is the the instruction that describes the task: ### Input: Parse station list XML file. ### Response: def parse_station_table(root): """Parse station list XML file.""" stations = [parse_xml_station(elem) for elem in root.findall('station')] return {st.id: st for st in stations}
def populate_data_sharing_consent(apps, schema_editor): """ Populates the ``DataSharingConsent`` model with the ``enterprise`` application's consent data. Consent data from the ``enterprise`` application come from the ``EnterpriseCourseEnrollment`` model. """ DataSharingConsent = apps.get_model('consent', 'DataSharingConsent') EnterpriseCourseEnrollment = apps.get_model('enterprise', 'EnterpriseCourseEnrollment') User = apps.get_model('auth', 'User') for enrollment in EnterpriseCourseEnrollment.objects.all(): user = User.objects.get(pk=enrollment.enterprise_customer_user.user_id) data_sharing_consent, __ = DataSharingConsent.objects.get_or_create( username=user.username, enterprise_customer=enrollment.enterprise_customer_user.enterprise_customer, course_id=enrollment.course_id, ) if enrollment.consent_granted is not None: data_sharing_consent.granted = enrollment.consent_granted else: # Check UDSCA instead. consent_state = enrollment.enterprise_customer_user.data_sharing_consent.first() if consent_state is not None: data_sharing_consent.granted = consent_state.state in ['enabled', 'external'] else: data_sharing_consent.granted = False data_sharing_consent.save()
Populates the ``DataSharingConsent`` model with the ``enterprise`` application's consent data. Consent data from the ``enterprise`` application come from the ``EnterpriseCourseEnrollment`` model.
Below is the the instruction that describes the task: ### Input: Populates the ``DataSharingConsent`` model with the ``enterprise`` application's consent data. Consent data from the ``enterprise`` application come from the ``EnterpriseCourseEnrollment`` model. ### Response: def populate_data_sharing_consent(apps, schema_editor): """ Populates the ``DataSharingConsent`` model with the ``enterprise`` application's consent data. Consent data from the ``enterprise`` application come from the ``EnterpriseCourseEnrollment`` model. """ DataSharingConsent = apps.get_model('consent', 'DataSharingConsent') EnterpriseCourseEnrollment = apps.get_model('enterprise', 'EnterpriseCourseEnrollment') User = apps.get_model('auth', 'User') for enrollment in EnterpriseCourseEnrollment.objects.all(): user = User.objects.get(pk=enrollment.enterprise_customer_user.user_id) data_sharing_consent, __ = DataSharingConsent.objects.get_or_create( username=user.username, enterprise_customer=enrollment.enterprise_customer_user.enterprise_customer, course_id=enrollment.course_id, ) if enrollment.consent_granted is not None: data_sharing_consent.granted = enrollment.consent_granted else: # Check UDSCA instead. consent_state = enrollment.enterprise_customer_user.data_sharing_consent.first() if consent_state is not None: data_sharing_consent.granted = consent_state.state in ['enabled', 'external'] else: data_sharing_consent.granted = False data_sharing_consent.save()
def deregister(cls, attr_name): """ De-register an extended property that has been registered with register() """ try: field = cls.get_field_by_fieldname(attr_name) except InvalidField: raise ValueError("'%s' is not registered" % attr_name) if not isinstance(field, ExtendedPropertyField): raise ValueError("'%s' is not registered as an ExtendedProperty" % attr_name) cls.remove_field(field)
De-register an extended property that has been registered with register()
Below is the the instruction that describes the task: ### Input: De-register an extended property that has been registered with register() ### Response: def deregister(cls, attr_name): """ De-register an extended property that has been registered with register() """ try: field = cls.get_field_by_fieldname(attr_name) except InvalidField: raise ValueError("'%s' is not registered" % attr_name) if not isinstance(field, ExtendedPropertyField): raise ValueError("'%s' is not registered as an ExtendedProperty" % attr_name) cls.remove_field(field)
def _assemble_and_send_request(self): """ Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED. """ client = self.client # Fire off the query. return client.service.track( WebAuthenticationDetail=self.WebAuthenticationDetail, ClientDetail=self.ClientDetail, TransactionDetail=self.TransactionDetail, Version=self.VersionId, SelectionDetails=self.SelectionDetails, ProcessingOptions=self.ProcessingOptions)
Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED.
Below is the the instruction that describes the task: ### Input: Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED. ### Response: def _assemble_and_send_request(self): """ Fires off the Fedex request. @warning: NEVER CALL THIS METHOD DIRECTLY. CALL send_request(), WHICH RESIDES ON FedexBaseService AND IS INHERITED. """ client = self.client # Fire off the query. return client.service.track( WebAuthenticationDetail=self.WebAuthenticationDetail, ClientDetail=self.ClientDetail, TransactionDetail=self.TransactionDetail, Version=self.VersionId, SelectionDetails=self.SelectionDetails, ProcessingOptions=self.ProcessingOptions)
def temp_update(self, params, values): """ Context manager to temporarily perform a parameter update (by using the stack structure). To use: with state.temp_update(params, values): # measure the cost or something state.error """ self.push_update(params, values) yield self.pop_update()
Context manager to temporarily perform a parameter update (by using the stack structure). To use: with state.temp_update(params, values): # measure the cost or something state.error
Below is the the instruction that describes the task: ### Input: Context manager to temporarily perform a parameter update (by using the stack structure). To use: with state.temp_update(params, values): # measure the cost or something state.error ### Response: def temp_update(self, params, values): """ Context manager to temporarily perform a parameter update (by using the stack structure). To use: with state.temp_update(params, values): # measure the cost or something state.error """ self.push_update(params, values) yield self.pop_update()
def taxTree(taxdict): """Return taxonomic Newick tree""" # the taxonomic dictionary holds the lineage of each ident in # the same order as the taxonomy # use hierarchy to construct a taxonomic tree for rank in taxdict.taxonomy: current_level = float(taxdict.taxonomy.index(rank)) # get clades at this rank in hierarchy clades = taxdict.hierarchy[rank] # merge those that are in the same clade into a cladestring for clade in clades: # unpack the identities in this clade and its clade name cladeidents, cladename = clade # Remove '' TaxRefs -- in cladestring already cladeidents = [e for e in cladeidents if e.ident] # only create cladestring if more than one ident in clade if len(cladeidents) < 2: continue # label node by 'clade'_'rank' cladename = '{0}_{1}'.format(cladename, rank) cladestring = stringClade(cladeidents, cladename, current_level) # replace first TaxRef in cladeidents with cladestring cladeidents[0].change(ident=cladestring, rank=rank) # replace all other TaxRefs with '' for e in cladeidents[1:]: e.change(ident='', rank=rank) # join any remaining strands into tree if len(taxdict.hierarchy[taxdict.taxonomy[-1]]) > 1: # unlist first clade = [e[0] for e in taxdict.hierarchy[taxdict.taxonomy[-1]]] cladeidents = sum(clade, []) cladeidents = [e for e in cladeidents if e.ident] cladestring = stringClade(cladeidents, 'life', current_level+1) return cladestring + ';'
Return taxonomic Newick tree
Below is the the instruction that describes the task: ### Input: Return taxonomic Newick tree ### Response: def taxTree(taxdict): """Return taxonomic Newick tree""" # the taxonomic dictionary holds the lineage of each ident in # the same order as the taxonomy # use hierarchy to construct a taxonomic tree for rank in taxdict.taxonomy: current_level = float(taxdict.taxonomy.index(rank)) # get clades at this rank in hierarchy clades = taxdict.hierarchy[rank] # merge those that are in the same clade into a cladestring for clade in clades: # unpack the identities in this clade and its clade name cladeidents, cladename = clade # Remove '' TaxRefs -- in cladestring already cladeidents = [e for e in cladeidents if e.ident] # only create cladestring if more than one ident in clade if len(cladeidents) < 2: continue # label node by 'clade'_'rank' cladename = '{0}_{1}'.format(cladename, rank) cladestring = stringClade(cladeidents, cladename, current_level) # replace first TaxRef in cladeidents with cladestring cladeidents[0].change(ident=cladestring, rank=rank) # replace all other TaxRefs with '' for e in cladeidents[1:]: e.change(ident='', rank=rank) # join any remaining strands into tree if len(taxdict.hierarchy[taxdict.taxonomy[-1]]) > 1: # unlist first clade = [e[0] for e in taxdict.hierarchy[taxdict.taxonomy[-1]]] cladeidents = sum(clade, []) cladeidents = [e for e in cladeidents if e.ident] cladestring = stringClade(cladeidents, 'life', current_level+1) return cladestring + ';'
def query_status(self): '''Query the hub for the status of this command''' try: data = self.api_iface._api_get(self.link) self._update_details(data) except APIError as e: print("API error: ") for key,value in e.data.iteritems: print(str(key) + ": " + str(value))
Query the hub for the status of this command
Below is the the instruction that describes the task: ### Input: Query the hub for the status of this command ### Response: def query_status(self): '''Query the hub for the status of this command''' try: data = self.api_iface._api_get(self.link) self._update_details(data) except APIError as e: print("API error: ") for key,value in e.data.iteritems: print(str(key) + ": " + str(value))
def can_create_hierarchy_with_record_types(self, hierarchy_record_types): """Tests if this user can create a single ``Hierarchy`` using the desired record types. While ``HierarchyManager.getHierarchyRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Hierarchy``. Providing an empty array tests if a ``Hierarchy`` can be created with no records. arg: hierarchy_record_types (osid.type.Type[]): array of hierarchy record types return: (boolean) - ``true`` if ``Hierarchy`` creation using the specified ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``hierarchy_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.can_create_bin_with_record_types # NOTE: It is expected that real authentication hints will be # handled in a service adapter above the pay grade of this impl. if self._catalog_session is not None: return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=hierarchy_record_types) return True
Tests if this user can create a single ``Hierarchy`` using the desired record types. While ``HierarchyManager.getHierarchyRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Hierarchy``. Providing an empty array tests if a ``Hierarchy`` can be created with no records. arg: hierarchy_record_types (osid.type.Type[]): array of hierarchy record types return: (boolean) - ``true`` if ``Hierarchy`` creation using the specified ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``hierarchy_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Tests if this user can create a single ``Hierarchy`` using the desired record types. While ``HierarchyManager.getHierarchyRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Hierarchy``. Providing an empty array tests if a ``Hierarchy`` can be created with no records. arg: hierarchy_record_types (osid.type.Type[]): array of hierarchy record types return: (boolean) - ``true`` if ``Hierarchy`` creation using the specified ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``hierarchy_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.* ### Response: def can_create_hierarchy_with_record_types(self, hierarchy_record_types): """Tests if this user can create a single ``Hierarchy`` using the desired record types. While ``HierarchyManager.getHierarchyRecordTypes()`` can be used to examine which records are supported, this method tests which record(s) are required for creating a specific ``Hierarchy``. Providing an empty array tests if a ``Hierarchy`` can be created with no records. arg: hierarchy_record_types (osid.type.Type[]): array of hierarchy record types return: (boolean) - ``true`` if ``Hierarchy`` creation using the specified ``Types`` is supported, ``false`` otherwise raise: NullArgument - ``hierarchy_record_types`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinAdminSession.can_create_bin_with_record_types # NOTE: It is expected that real authentication hints will be # handled in a service adapter above the pay grade of this impl. if self._catalog_session is not None: return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=hierarchy_record_types) return True
def paint( self, painter, option, widget ): """ Overloads the paint method from QGraphicsPathItem to \ handle custom drawing of the path using this items \ pens and polygons. :param painter <QPainter> :param option <QGraphicsItemStyleOption> :param widget <QWidget> """ super(XGanttDepItem, self).paint(painter, option, widget) # redraw the poly to force-fill it if ( self._polygon ): painter.setRenderHint(painter.Antialiasing) painter.setBrush(self.pen().color()) painter.drawPolygon(self._polygon)
Overloads the paint method from QGraphicsPathItem to \ handle custom drawing of the path using this items \ pens and polygons. :param painter <QPainter> :param option <QGraphicsItemStyleOption> :param widget <QWidget>
Below is the the instruction that describes the task: ### Input: Overloads the paint method from QGraphicsPathItem to \ handle custom drawing of the path using this items \ pens and polygons. :param painter <QPainter> :param option <QGraphicsItemStyleOption> :param widget <QWidget> ### Response: def paint( self, painter, option, widget ): """ Overloads the paint method from QGraphicsPathItem to \ handle custom drawing of the path using this items \ pens and polygons. :param painter <QPainter> :param option <QGraphicsItemStyleOption> :param widget <QWidget> """ super(XGanttDepItem, self).paint(painter, option, widget) # redraw the poly to force-fill it if ( self._polygon ): painter.setRenderHint(painter.Antialiasing) painter.setBrush(self.pen().color()) painter.drawPolygon(self._polygon)
def explain_template_loading_attempts(app, template, attempts): """ This should help developers understand what failed. Mostly the same as :func:`flask.debughelpers.explain_template_loading_attempts`, except here we've extended it to support showing what :class:`UnchainedJinjaLoader` is doing. """ from flask import Flask, Blueprint from flask.debughelpers import _dump_loader_info from flask.globals import _request_ctx_stack template, expected_priors = parse_template(template) info = [f'Locating {pretty_num(expected_priors + 1)} template "{template}":'] total_found = 0 blueprint = None reqctx = _request_ctx_stack.top if reqctx is not None and reqctx.request.blueprint is not None: blueprint = reqctx.request.blueprint for idx, (loader, srcobj, triple) in enumerate(attempts): if isinstance(srcobj, Flask): src_info = 'application "%s"' % srcobj.import_name elif isinstance(srcobj, Blueprint): src_info = 'blueprint "%s" (%s)' % (srcobj.name, srcobj.import_name) else: src_info = repr(srcobj) info.append('% 5d: trying loader of %s' % ( idx + 1, src_info)) for line in _dump_loader_info(loader): info.append(' %s' % line) if triple is None: detail = 'no match' else: if total_found < expected_priors: action = 'skipping' elif total_found == expected_priors: action = 'using' else: action = 'ignoring' detail = '%s (%r)' % (action, triple[1] or '<string>') total_found += 1 info.append(' -> %s' % detail) seems_fishy = False if total_found < expected_priors: info.append('Error: the template could not be found.') seems_fishy = True if blueprint is not None and seems_fishy: info.append(' The template was looked up from an endpoint that ' 'belongs to the blueprint "%s".' % blueprint) info.append(' Maybe you did not place a template in the right folder?') info.append(' See http://flask.pocoo.org/docs/blueprints/#templates') app.logger.info('\n'.join(info))
This should help developers understand what failed. Mostly the same as :func:`flask.debughelpers.explain_template_loading_attempts`, except here we've extended it to support showing what :class:`UnchainedJinjaLoader` is doing.
Below is the the instruction that describes the task: ### Input: This should help developers understand what failed. Mostly the same as :func:`flask.debughelpers.explain_template_loading_attempts`, except here we've extended it to support showing what :class:`UnchainedJinjaLoader` is doing. ### Response: def explain_template_loading_attempts(app, template, attempts): """ This should help developers understand what failed. Mostly the same as :func:`flask.debughelpers.explain_template_loading_attempts`, except here we've extended it to support showing what :class:`UnchainedJinjaLoader` is doing. """ from flask import Flask, Blueprint from flask.debughelpers import _dump_loader_info from flask.globals import _request_ctx_stack template, expected_priors = parse_template(template) info = [f'Locating {pretty_num(expected_priors + 1)} template "{template}":'] total_found = 0 blueprint = None reqctx = _request_ctx_stack.top if reqctx is not None and reqctx.request.blueprint is not None: blueprint = reqctx.request.blueprint for idx, (loader, srcobj, triple) in enumerate(attempts): if isinstance(srcobj, Flask): src_info = 'application "%s"' % srcobj.import_name elif isinstance(srcobj, Blueprint): src_info = 'blueprint "%s" (%s)' % (srcobj.name, srcobj.import_name) else: src_info = repr(srcobj) info.append('% 5d: trying loader of %s' % ( idx + 1, src_info)) for line in _dump_loader_info(loader): info.append(' %s' % line) if triple is None: detail = 'no match' else: if total_found < expected_priors: action = 'skipping' elif total_found == expected_priors: action = 'using' else: action = 'ignoring' detail = '%s (%r)' % (action, triple[1] or '<string>') total_found += 1 info.append(' -> %s' % detail) seems_fishy = False if total_found < expected_priors: info.append('Error: the template could not be found.') seems_fishy = True if blueprint is not None and seems_fishy: info.append(' The template was looked up from an endpoint that ' 'belongs to the blueprint "%s".' 
% blueprint) info.append(' Maybe you did not place a template in the right folder?') info.append(' See http://flask.pocoo.org/docs/blueprints/#templates') app.logger.info('\n'.join(info))
def eigenvectors_left_samples(self): r""" Samples of the left eigenvectors of the hidden transition matrix """ res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :, :] = self._sampled_hmms[i].eigenvectors_left return res
r""" Samples of the left eigenvectors of the hidden transition matrix
Below is the the instruction that describes the task: ### Input: r""" Samples of the left eigenvectors of the hidden transition matrix ### Response: def eigenvectors_left_samples(self): r""" Samples of the left eigenvectors of the hidden transition matrix """ res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype) for i in range(self.nsamples): res[i, :, :] = self._sampled_hmms[i].eigenvectors_left return res
def name(self): # pylint: disable=no-self-use """ Name returns user's name or user's email or user_id :return: best guess of name to use to greet user """ if 'lis_person_sourcedid' in self.session: return self.session['lis_person_sourcedid'] elif 'lis_person_contact_email_primary' in self.session: return self.session['lis_person_contact_email_primary'] elif 'user_id' in self.session: return self.session['user_id'] else: return ''
Name returns user's name or user's email or user_id :return: best guess of name to use to greet user
Below is the the instruction that describes the task: ### Input: Name returns user's name or user's email or user_id :return: best guess of name to use to greet user ### Response: def name(self): # pylint: disable=no-self-use """ Name returns user's name or user's email or user_id :return: best guess of name to use to greet user """ if 'lis_person_sourcedid' in self.session: return self.session['lis_person_sourcedid'] elif 'lis_person_contact_email_primary' in self.session: return self.session['lis_person_contact_email_primary'] elif 'user_id' in self.session: return self.session['user_id'] else: return ''
def os_info(): """Returns os data. """ return { 'uname': dict(platform.uname()._asdict()), 'path': os.environ.get('PATH', '').split(':'), 'shell': os.environ.get('SHELL', '/bin/sh'), }
Returns os data.
Below is the the instruction that describes the task: ### Input: Returns os data. ### Response: def os_info(): """Returns os data. """ return { 'uname': dict(platform.uname()._asdict()), 'path': os.environ.get('PATH', '').split(':'), 'shell': os.environ.get('SHELL', '/bin/sh'), }
def safe_request(fct): ''' Return json messages instead of raising errors ''' def inner(*args, **kwargs): ''' decorator ''' try: _data = fct(*args, **kwargs) except requests.exceptions.ConnectionError as error: return {'error': str(error), 'status': 404} if _data.ok: if _data.content: safe_data = _data.json() else: safe_data = {'success': True} else: safe_data = {'error': _data.reason, 'status': _data.status_code} return safe_data return inner
Return json messages instead of raising errors
Below is the the instruction that describes the task: ### Input: Return json messages instead of raising errors ### Response: def safe_request(fct): ''' Return json messages instead of raising errors ''' def inner(*args, **kwargs): ''' decorator ''' try: _data = fct(*args, **kwargs) except requests.exceptions.ConnectionError as error: return {'error': str(error), 'status': 404} if _data.ok: if _data.content: safe_data = _data.json() else: safe_data = {'success': True} else: safe_data = {'error': _data.reason, 'status': _data.status_code} return safe_data return inner
def resize_dimension(self, dimension, size): """ Resize a dimension to a certain size. It will pad with the underlying HDF5 data sets' fill values (usually zero) where necessary. """ if self.dimensions[dimension] is not None: raise ValueError("Dimension '%s' is not unlimited and thus " "cannot be resized." % dimension) # Resize the dimension. self._current_dim_sizes[dimension] = size for var in self.variables.values(): new_shape = list(var.shape) for i, d in enumerate(var.dimensions): if d == dimension: new_shape[i] = size new_shape = tuple(new_shape) if new_shape != var.shape: var._h5ds.resize(new_shape) # Recurse as dimensions are visible to this group and all child groups. for i in self.groups.values(): i.resize_dimension(dimension, size)
Resize a dimension to a certain size. It will pad with the underlying HDF5 data sets' fill values (usually zero) where necessary.
Below is the the instruction that describes the task: ### Input: Resize a dimension to a certain size. It will pad with the underlying HDF5 data sets' fill values (usually zero) where necessary. ### Response: def resize_dimension(self, dimension, size): """ Resize a dimension to a certain size. It will pad with the underlying HDF5 data sets' fill values (usually zero) where necessary. """ if self.dimensions[dimension] is not None: raise ValueError("Dimension '%s' is not unlimited and thus " "cannot be resized." % dimension) # Resize the dimension. self._current_dim_sizes[dimension] = size for var in self.variables.values(): new_shape = list(var.shape) for i, d in enumerate(var.dimensions): if d == dimension: new_shape[i] = size new_shape = tuple(new_shape) if new_shape != var.shape: var._h5ds.resize(new_shape) # Recurse as dimensions are visible to this group and all child groups. for i in self.groups.values(): i.resize_dimension(dimension, size)
def render_to_response(self, context, indent=None): "Returns a JSON response containing 'context' as payload" return self.get_json_response(self.convert_context_to_json(context, indent=indent))
Returns a JSON response containing 'context' as payload
Below is the the instruction that describes the task: ### Input: Returns a JSON response containing 'context' as payload ### Response: def render_to_response(self, context, indent=None): "Returns a JSON response containing 'context' as payload" return self.get_json_response(self.convert_context_to_json(context, indent=indent))
def _undouble(self, word): """Undouble endings -kk, -dd, and -tt. Parameters ---------- word : str The word to stem Returns ------- str The word with doubled endings undoubled """ if ( len(word) > 1 and word[-1] == word[-2] and word[-1] in {'d', 'k', 't'} ): return word[:-1] return word
Undouble endings -kk, -dd, and -tt. Parameters ---------- word : str The word to stem Returns ------- str The word with doubled endings undoubled
Below is the the instruction that describes the task: ### Input: Undouble endings -kk, -dd, and -tt. Parameters ---------- word : str The word to stem Returns ------- str The word with doubled endings undoubled ### Response: def _undouble(self, word): """Undouble endings -kk, -dd, and -tt. Parameters ---------- word : str The word to stem Returns ------- str The word with doubled endings undoubled """ if ( len(word) > 1 and word[-1] == word[-2] and word[-1] in {'d', 'k', 't'} ): return word[:-1] return word
def samples(self, gp, Y_metadata=None): """ Returns a set of samples of observations based on a given value of the latent variable. :param gp: latent variable """ orig_shape = gp.shape gp = gp.flatten() #orig_shape = gp.shape gp = gp.flatten() Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp]) return Ysim.reshape(orig_shape)
Returns a set of samples of observations based on a given value of the latent variable. :param gp: latent variable
Below is the the instruction that describes the task: ### Input: Returns a set of samples of observations based on a given value of the latent variable. :param gp: latent variable ### Response: def samples(self, gp, Y_metadata=None): """ Returns a set of samples of observations based on a given value of the latent variable. :param gp: latent variable """ orig_shape = gp.shape gp = gp.flatten() #orig_shape = gp.shape gp = gp.flatten() Ysim = np.array([np.random.normal(self.gp_link.transf(gpj), scale=np.sqrt(self.variance), size=1) for gpj in gp]) return Ysim.reshape(orig_shape)
def range_by_score(self, min, max, num=None, withscores=False): """Return all the elements with score >= min and score <= max (a range query) from the sorted set.""" return self.client.zrangebyscore(self.name, min, max, num=num, withscores=withscores)
Return all the elements with score >= min and score <= max (a range query) from the sorted set.
Below is the the instruction that describes the task: ### Input: Return all the elements with score >= min and score <= max (a range query) from the sorted set. ### Response: def range_by_score(self, min, max, num=None, withscores=False): """Return all the elements with score >= min and score <= max (a range query) from the sorted set.""" return self.client.zrangebyscore(self.name, min, max, num=num, withscores=withscores)
def __get_attr(what, type_attr, value_attr, **kwargs): """ get the value of a parm :param what: string parm :param type_attr: type of parm :param value_attr: :param kwargs: :return: value of the parm """ if what in kwargs: value = int(kwargs[what]) if type_attr == 'int' else kwargs[what] if value in value_attr: return value
get the value of a parm :param what: string parm :param type_attr: type of parm :param value_attr: :param kwargs: :return: value of the parm
Below is the the instruction that describes the task: ### Input: get the value of a parm :param what: string parm :param type_attr: type of parm :param value_attr: :param kwargs: :return: value of the parm ### Response: def __get_attr(what, type_attr, value_attr, **kwargs): """ get the value of a parm :param what: string parm :param type_attr: type of parm :param value_attr: :param kwargs: :return: value of the parm """ if what in kwargs: value = int(kwargs[what]) if type_attr == 'int' else kwargs[what] if value in value_attr: return value
def message(self): ''' Override this to provide failure message''' name = self.__class__.__name__ return "{0} {1}".format(humanize(name), pp(*self.expectedArgs, **self.expectedKwArgs))
Override this to provide failure message
Below is the the instruction that describes the task: ### Input: Override this to provide failure message ### Response: def message(self): ''' Override this to provide failure message''' name = self.__class__.__name__ return "{0} {1}".format(humanize(name), pp(*self.expectedArgs, **self.expectedKwArgs))
def _convert_to_spmatrix(operators): """Convert an array-like object of operators to a sparse matrix.""" # Lazy import to improve `import odl` time import scipy.sparse # Convert ops to sparse representation. This is not trivial because # operators can be indexable themselves and give the wrong impression # of an extra dimension. So we have to infer the shape manually # first and extract the indices of nonzero positions. nrows = len(operators) ncols = None irow, icol, data = [], [], [] for i, row in enumerate(operators): try: iter(row) except TypeError: raise ValueError( '`operators` must be a matrix of `Operator` objects, `0` ' 'or `None`, got {!r} (row {} = {!r} is not iterable)' ''.format(operators, i, row)) if isinstance(row, Operator): raise ValueError( '`operators` must be a matrix of `Operator` objects, `0` ' 'or `None`, but row {} is an `Operator` {!r}' ''.format(i, row)) if ncols is None: ncols = len(row) elif len(row) != ncols: raise ValueError( 'all rows in `operators` must have the same length, but ' 'length {} of row {} differs from previous common length ' '{}'.format(len(row), i, ncols)) for j, col in enumerate(row): if col is None or col is 0: pass elif isinstance(col, Operator): irow.append(i) icol.append(j) data.append(col) else: raise ValueError( '`operators` must be a matrix of `Operator` objects, ' '`0` or `None`, got entry {!r} at ({}, {})' ''.format(col, i, j)) # Create object array explicitly, threby avoiding erroneous conversion # in `coo_matrix.__init__` data_arr = np.empty(len(data), dtype=object) data_arr[:] = data return scipy.sparse.coo_matrix((data_arr, (irow, icol)), shape=(nrows, ncols))
Convert an array-like object of operators to a sparse matrix.
Below is the the instruction that describes the task: ### Input: Convert an array-like object of operators to a sparse matrix. ### Response: def _convert_to_spmatrix(operators): """Convert an array-like object of operators to a sparse matrix.""" # Lazy import to improve `import odl` time import scipy.sparse # Convert ops to sparse representation. This is not trivial because # operators can be indexable themselves and give the wrong impression # of an extra dimension. So we have to infer the shape manually # first and extract the indices of nonzero positions. nrows = len(operators) ncols = None irow, icol, data = [], [], [] for i, row in enumerate(operators): try: iter(row) except TypeError: raise ValueError( '`operators` must be a matrix of `Operator` objects, `0` ' 'or `None`, got {!r} (row {} = {!r} is not iterable)' ''.format(operators, i, row)) if isinstance(row, Operator): raise ValueError( '`operators` must be a matrix of `Operator` objects, `0` ' 'or `None`, but row {} is an `Operator` {!r}' ''.format(i, row)) if ncols is None: ncols = len(row) elif len(row) != ncols: raise ValueError( 'all rows in `operators` must have the same length, but ' 'length {} of row {} differs from previous common length ' '{}'.format(len(row), i, ncols)) for j, col in enumerate(row): if col is None or col is 0: pass elif isinstance(col, Operator): irow.append(i) icol.append(j) data.append(col) else: raise ValueError( '`operators` must be a matrix of `Operator` objects, ' '`0` or `None`, got entry {!r} at ({}, {})' ''.format(col, i, j)) # Create object array explicitly, threby avoiding erroneous conversion # in `coo_matrix.__init__` data_arr = np.empty(len(data), dtype=object) data_arr[:] = data return scipy.sparse.coo_matrix((data_arr, (irow, icol)), shape=(nrows, ncols))
def HWProcess(cls, proc, ctx): """ Serialize HWProcess instance :param scope: name scope to prevent name collisions """ body = proc.statements childCtx = ctx.withIndent() statemets = [cls.asHdl(s, childCtx) for s in body] proc.name = ctx.scope.checkedName(proc.name, proc) return cls.methodTmpl.render( indent=getIndent(ctx.indent), name=proc.name, statements=statemets )
Serialize HWProcess instance :param scope: name scope to prevent name collisions
Below is the the instruction that describes the task: ### Input: Serialize HWProcess instance :param scope: name scope to prevent name collisions ### Response: def HWProcess(cls, proc, ctx): """ Serialize HWProcess instance :param scope: name scope to prevent name collisions """ body = proc.statements childCtx = ctx.withIndent() statemets = [cls.asHdl(s, childCtx) for s in body] proc.name = ctx.scope.checkedName(proc.name, proc) return cls.methodTmpl.render( indent=getIndent(ctx.indent), name=proc.name, statements=statemets )
def get_account_from_name(self, name): """ Returns the account with the given name. :type name: string :param name: The name of the account. """ for account in self.accounts: if account.get_name() == name: return account return None
Returns the account with the given name. :type name: string :param name: The name of the account.
Below is the the instruction that describes the task: ### Input: Returns the account with the given name. :type name: string :param name: The name of the account. ### Response: def get_account_from_name(self, name): """ Returns the account with the given name. :type name: string :param name: The name of the account. """ for account in self.accounts: if account.get_name() == name: return account return None