Dataset columns and value lengths: code (string, 75 to 104k), docstring (string, 1 to 46.9k), text (string, 164 to 112k).
def bot_intent(self) -> "IntentAPI":
    """
    Get the intent API for the appservice bot.

    Returns:
        The IntentAPI for the appservice bot.
    """
    if not self._bot_intent:
        self._bot_intent = IntentAPI(self.bot_mxid, self, state_store=self.state_store,
                                     log=self.intent_log)
    return self._bot_intent
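The method lazily constructs the IntentAPI on first access and caches it, so every later call returns the same instance. A minimal usage sketch, assuming `appservice` is an instance of the class defining the method above:

intent = appservice.bot_intent()
assert intent is appservice.bot_intent()  # cached: the same IntentAPI comes back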
def get_posterior_mean_ratio_scores_vs_background(self):
    '''
    Returns
    -------
    pd.DataFrame of posterior mean scores vs background
    '''
    df = self.get_term_and_background_counts()
    df['Log Posterior Mean Ratio'] = self._get_posterior_mean_ratio_from_counts(
        df['corpus'], df['background'])
    return df.sort_values('Log Posterior Mean Ratio', ascending=False)
def get(self, request, response):
    """Processes a `GET` request."""
    # Ensure we're allowed to read the resource.
    self.assert_operations('read')

    # Delegate to `read` to retrieve the items.
    items = self.read()

    # if self.slug is not None and not items:
    #     # Requested a specific resource but nothing is returned.
    #     # Attempt to resolve by changing what we understand as
    #     # a slug to a path.
    #     self.path = self.path + self.slug if self.path else self.slug
    #     self.slug = None
    #
    #     # Attempt to retrieve the resource again.
    #     items = self.read()

    # Ensure that if we have a slug and still no items that a 404
    # is raised appropriately.
    if not items:
        raise http.exceptions.NotFound()

    if (isinstance(items, Iterable)
            and not isinstance(items, six.string_types)) and items:
        # Paginate over the collection.
        items = pagination.paginate(self.request, self.response, items)

    # Build the response object.
    self.make_response(items)
def add(self, action):
    """Add an action to the execution queue."""
    self._state_machine.transition_to_add()
    self._actions.append(action)
def logprint(logname, category, level='INFO', backupCount=15):
    """
    Print logs by datetime.

    logname      string, file name
    category     string, category path of logs file in log directory
    level        string, restrict whether logs to be printed or not
    backupCount  int, how many backups can be reserved
    """
    path = os.path.join(CFG.dir, category.strip('/'), logname.strip('/') + '.log')
    print "log path:", path
    if not os.path.exists(path[:path.rindex('/')]):
        os.makedirs(path[:path.rindex('/')])

    # Initialize logger
    logger = logging.getLogger(logname)
    frt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    hdr = logging.StreamHandler(sys.stdout)
    hdr.setFormatter(frt)
    hdr._name = '##_sh_##'
    if hdr._name not in CFG.exist:
        logger.addHandler(hdr)
        CFG.exist.append(hdr._name)

    hdr = TimedRotatingFileHandler(path, 'D', 1, backupCount)
    hdr.setFormatter(frt)
    hdr._name = '##_rfh_##'
    if hdr._name not in CFG.exist:
        logger.addHandler(hdr)
        CFG.exist.append(hdr._name)

    # Map the level name to a logging constant. The original used `==`
    # (comparison) instead of `=` (assignment) here, which left `level` as
    # a string; fixed below. 'NOTEST' in the original looks like a typo
    # for 'NOTSET', so both spellings are accepted.
    if level.upper() in ('NOTSET', 'NOTEST'):
        level = logging.NOTSET
    elif level.upper() == 'DEBUG':
        level = logging.DEBUG
    elif level.upper() == 'WARNING':
        level = logging.WARNING
    elif level.upper() == 'ERROR':
        level = logging.ERROR
    elif level.upper() == 'CRITICAL':
        level = logging.CRITICAL
    else:
        level = logging.INFO
    logger.setLevel(level)

    def _wraper(*args, **kwargs):
        if not CFG.debug:
            return
        if not args:
            return
        for hdr in CFG.handlers:
            if hdr._name not in CFG.exist:
                logger.addHandler(hdr)
                CFG.exist.append(hdr._name)
        encoding = 'utf8' if os.name == 'posix' else 'gbk'
        args = [_cu(a, encoding) for a in args]
        # Pop 'printlevel' so it is not forwarded to the logging call
        # (the original left it in kwargs, which made every call raise
        # and fall into the except branch). The five duplicated
        # try/except blocks are consolidated into one dispatch.
        pl = kwargs.pop('printlevel', 'info').upper()
        log_func = {'DEBUG': logger.debug,
                    'WARNING': logger.warning,
                    'ERROR': logger.error,
                    'CRITICAL': logger.critical}.get(pl, logger.info)
        try:
            log_func(*args, **kwargs)
        except:
            t, v, b = sys.exc_info()
            err_messages = traceback.format_exception(t, v, b)
            print 'Error: %s' % ','.join(err_messages)

    return _wraper, logger
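A hedged usage sketch, assuming the module-level CFG object is configured with the dir, debug, exist, and handlers attributes the function requires (the log name and category below are illustrative):

printer, logger = logprint('app', 'services/web', level='DEBUG')
printer('worker started')                                  # routed through logger.info
printer('cache miss for key %s', 'abc', printlevel='debug')  # routed through logger.debug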
def create(self, attributes=values.unset, twilio_address=values.unset,
           date_created=values.unset, date_updated=values.unset,
           identity=values.unset, user_address=values.unset):
    """
    Create a new ParticipantInstance

    :param unicode attributes: An optional string metadata field you can use to store any data you wish.
    :param unicode twilio_address: The address of the Twilio phone number that the participant is in contact with.
    :param datetime date_created: The date that this resource was created.
    :param datetime date_updated: The date that this resource was last updated.
    :param unicode identity: A unique string identifier for the session participant as Chat User.
    :param unicode user_address: The address of the participant's device.

    :returns: Newly created ParticipantInstance
    :rtype: twilio.rest.messaging.v1.session.participant.ParticipantInstance
    """
    data = values.of({
        'Identity': identity,
        'UserAddress': user_address,
        'Attributes': attributes,
        'TwilioAddress': twilio_address,
        'DateCreated': serialize.iso8601_datetime(date_created),
        'DateUpdated': serialize.iso8601_datetime(date_updated),
    })

    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )

    return ParticipantInstance(self._version, payload,
                               session_sid=self._solution['session_sid'])
def edit(self, entity, id, payload, sync=True):
    """
    Edit a document.
    """
    url = urljoin(self.host, entity.value + '/')
    url = urljoin(url, id + '/')
    params = {'sync': str(sync).lower()}
    url = Utils.add_url_parameters(url, params)
    r = requests.put(url, auth=self.auth, data=json.dumps(payload),
                     headers=self.headers)
    if r.status_code == 500:
        error_message = r.json()['error_message']
        raise CoredataError('Error! {error}'.format(error=error_message))
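A hedged usage sketch. The client instance, the entity enum member, and the document id below are hypothetical stand-ins; only the call shape comes from the method above:

# `client` and `Entity.DOCUMENTS` are illustrative names, not defined here.
client.edit(Entity.DOCUMENTS, 'a1b2c3', {'title': 'Revised title'}, sync=True)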
def QCapsulate(self, widget, name, blocking=False, nude=False):
    """Helper function that encapsulates QWidget into a QMainWindow
    """
    class QuickWindow(QtWidgets.QMainWindow):

        class Signals(QtCore.QObject):
            close = QtCore.Signal()
            show = QtCore.Signal()

        def __init__(self, blocking=False, parent=None, nude=False):
            super().__init__(parent)
            self.propagate = True  # send signals or not
            self.setStyleSheet(style.main_gui)
            if blocking:
                self.setWindowModality(QtCore.Qt.ApplicationModal)
            if nude:
                # http://doc.qt.io/qt-5/qt.html#WindowType-enum
                # TODO: create a widget for a proper splashscreen
                # (omitting X11 and centering manually)
                # self.setWindowFlags(QtCore.Qt.Popup)  # Qt 5.9+ : setFlags()
                # self.setWindowFlags(QtCore.Qt.SplashScreen | QtCore.Qt.WindowStaysOnTopHint)
                self.setWindowFlags(QtCore.Qt.Dialog)
            self.signals = self.Signals()

        def closeEvent(self, e):
            if self.propagate:
                self.signals.close.emit()
            e.accept()

        def showEvent(self, e):
            if self.propagate:
                self.signals.show.emit()
            e.accept()

        def setPropagate(self):
            self.propagate = True

        def unSetPropagate(self):
            self.propagate = False

    win = QuickWindow(blocking=blocking, nude=nude)
    win.setCentralWidget(widget)
    win.setLayout(QtWidgets.QHBoxLayout())
    win.setWindowTitle(name)
    return win
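A hedged usage sketch: it assumes a running QApplication and that `self` is an instance of the class defining QCapsulate, and shows how the wrapper's close signal can be observed:

win = self.QCapsulate(QtWidgets.QLabel("hello"), "Demo Window")
win.signals.close.connect(lambda: print("window closed"))  # react to closeEvent
win.show()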
def get_if_deleted(self, addresses):
    """Returns a list with one entry per address: the address itself if
    it has been deleted, or None if it has not.

    Args:
        addresses (list of str): The addresses to check if deleted.

    Returns:
        (list of str): The addresses, if deleted, or None.
    """
    with self._lock:
        results = []
        for add in addresses:
            results.append(self._get_if_deleted(add))
        return results
def _filter_schema(schema, schema_tables, exclude_table_columns):
    """Filters a schema to only include the tables named in the
    schema_tables parameter. This will also filter out any columns of
    included tables that reference tables not included in the
    schema_tables parameter.

    :param schema: Schema dict to be filtered
    :param schema_tables: List of table names to filter on.
        EX: ['Bridge', 'Controller', 'Interface']
        NOTE: This list is case sensitive.
    :param exclude_table_columns: Dict mapping table names to lists of
        column names to drop from those tables.
    :return: Schema dict: filtered if the schema_tables parameter contains
        table names, else the original schema dict
    """
    tables = {}
    for tbl_name, tbl_data in schema['tables'].items():
        if not schema_tables or tbl_name in schema_tables:
            columns = {}
            exclude_columns = exclude_table_columns.get(tbl_name, [])
            for col_name, col_data in tbl_data['columns'].items():
                if col_name in exclude_columns:
                    continue
                # NOTE(Alan Quillin) Drop columns whose key or value type
                # references a table that is not being configured.
                type_ = col_data.get('type')
                if type_ and isinstance(type_, dict):
                    key = type_.get('key')
                    if key and isinstance(key, dict):
                        ref_tbl = key.get('refTable')
                        if ref_tbl and isinstance(ref_tbl, six.string_types):
                            if ref_tbl not in schema_tables:
                                continue
                    value = type_.get('value')
                    if value and isinstance(value, dict):
                        ref_tbl = value.get('refTable')
                        if ref_tbl and isinstance(ref_tbl, six.string_types):
                            if ref_tbl not in schema_tables:
                                continue
                columns[col_name] = col_data
            tbl_data['columns'] = columns
            tables[tbl_name] = tbl_data
    schema['tables'] = tables
    return schema
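A small worked example of the reference filtering; the table and column names below are made up for illustration. The `ports` column is dropped because its key type references the excluded `Port` table (the function itself assumes `six` is imported):

schema = {'tables': {
    'Bridge': {'columns': {
        'name':  {'type': 'string'},
        'ports': {'type': {'key': {'refTable': 'Port'}}},
    }},
    'Port': {'columns': {'name': {'type': 'string'}}},
}}

filtered = _filter_schema(schema, ['Bridge'], {})
# Only 'Bridge' survives, and 'Bridge' loses 'ports' because its key
# type references 'Port', which is not in schema_tables.
assert list(filtered['tables']) == ['Bridge']
assert list(filtered['tables']['Bridge']['columns']) == ['name']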
def hydrate_spawned_files(self, exported_files_mapper, filename, data_id):
    """Pop the given file's map from the exported files mapping.

    :param exported_files_mapper: The dict of file mappings this
        process produced.
    :param filename: The filename to format and remove from the
        mapping.
    :param data_id: The id of the :meth:`~resolwe.flow.models.Data`
        object owning the mapping.
    :return: The formatted mapping between the filename and temporary
        file path.
    :rtype: dict
    """
    # JSON only has string dictionary keys, so the Data object id
    # needs to be stringified first.
    data_id = str(data_id)

    if filename not in exported_files_mapper[data_id]:
        raise KeyError("Use 're-export' to prepare the file for spawned "
                       "process: {}".format(filename))

    export_fn = exported_files_mapper[data_id].pop(filename)

    if exported_files_mapper[data_id] == {}:
        exported_files_mapper.pop(data_id)

    return {'file_temp': export_fn, 'file': filename}
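Since the method never touches `self` and only mutates the mapping passed in, its effect is easy to show in isolation (the path below is made up; `obj` is any instance of the defining class):

mapper = {'42': {'out.txt': '/tmp/export-abc123'}}
result = obj.hydrate_spawned_files(mapper, 'out.txt', 42)
# result == {'file_temp': '/tmp/export-abc123', 'file': 'out.txt'}
# mapper == {}  -- the entry and the now-empty '42' key were both popped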
def run_nested(self, maxiter=None, maxcall=None, dlogz=None,
               logl_max=np.inf, add_live=True, print_progress=True,
               print_func=None, save_bounds=True):
    """
    **A wrapper that executes the main nested sampling loop.**

    Iteratively replace the worst live point with a sample drawn
    uniformly from the prior until the provided stopping criteria
    are reached.

    Parameters
    ----------
    maxiter : int, optional
        Maximum number of iterations. Iteration may stop earlier if the
        termination condition is reached. Default is `sys.maxsize`
        (no limit).

    maxcall : int, optional
        Maximum number of likelihood evaluations. Iteration may stop
        earlier if termination condition is reached. Default is
        `sys.maxsize` (no limit).

    dlogz : float, optional
        Iteration will stop when the estimated contribution of the
        remaining prior volume to the total evidence falls below
        this threshold. Explicitly, the stopping criterion is
        `ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
        evidence from all saved samples and `z_est` is the estimated
        contribution from the remaining volume. If `add_live` is `True`,
        the default is `1e-3 * (nlive - 1) + 0.01`. Otherwise, the
        default is `0.01`.

    logl_max : float, optional
        Iteration will stop when the sampled ln(likelihood) exceeds the
        threshold set by `logl_max`. Default is no bound (`np.inf`).

    add_live : bool, optional
        Whether or not to add the remaining set of live points to
        the list of samples at the end of each run. Default is `True`.

    print_progress : bool, optional
        Whether or not to output a simple summary of the current run that
        updates with each iteration. Default is `True`.

    print_func : function, optional
        A function that prints out the current state of the sampler.
        If not provided, the default :meth:`results.print_fn` is used.

    save_bounds : bool, optional
        Whether or not to save past bounding distributions used to bound
        the live points internally. Default is *True*.

    """

    # Initialize quantities.
    if print_func is None:
        print_func = print_fn

    # Define our stopping criteria.
    if dlogz is None:
        if add_live:
            dlogz = 1e-3 * (self.nlive - 1.) + 0.01
        else:
            dlogz = 0.01

    # Run the main nested sampling loop.
    ncall = self.ncall
    for it, results in enumerate(self.sample(maxiter=maxiter,
                                             maxcall=maxcall,
                                             dlogz=dlogz,
                                             logl_max=logl_max,
                                             save_bounds=save_bounds,
                                             save_samples=True)):
        (worst, ustar, vstar, loglstar, logvol, logwt,
         logz, logzvar, h, nc, worst_it, boundidx, bounditer,
         eff, delta_logz) = results
        ncall += nc
        if delta_logz > 1e6:
            delta_logz = np.inf
        if logz <= -1e6:
            logz = -np.inf

        # Print progress.
        if print_progress:
            i = self.it - 1
            print_func(results, i, ncall, dlogz=dlogz, logl_max=logl_max)

    # Add remaining live points to samples.
    if add_live:
        it = self.it - 1
        for i, results in enumerate(self.add_live_points()):
            (worst, ustar, vstar, loglstar, logvol, logwt,
             logz, logzvar, h, nc, worst_it, boundidx, bounditer,
             eff, delta_logz) = results
            if delta_logz > 1e6:
                delta_logz = np.inf
            if logz <= -1e6:
                logz = -np.inf

            # Print progress.
            if print_progress:
                print_func(results, it, ncall, add_live_it=i + 1,
                           dlogz=dlogz, logl_max=logl_max)
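This loop matches dynesty's sampler interface; a hedged end-to-end sketch of driving it, assuming the dynesty package is installed (the toy likelihood and prior below are illustrative):

import numpy as np
import dynesty

def loglike(x):
    return -0.5 * np.sum(x ** 2)   # standard-normal log-likelihood

def prior_transform(u):
    return 10.0 * u - 5.0          # map the unit cube onto [-5, 5]^ndim

sampler = dynesty.NestedSampler(loglike, prior_transform, ndim=3, nlive=500)
sampler.run_nested(dlogz=0.01)     # executes the loop shown above
results = sampler.results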
def can_afford(self, item_id: Union[UnitTypeId, UpgradeId, AbilityId],
               check_supply_cost: bool = True) -> "CanAffordWrapper":
    """Tests if the player has enough resources to build a unit or cast an ability."""
    enough_supply = True
    if isinstance(item_id, UnitTypeId):
        unit = self._game_data.units[item_id.value]
        cost = self._game_data.calculate_ability_cost(unit.creation_ability)
        if check_supply_cost:
            enough_supply = self.can_feed(item_id)
    elif isinstance(item_id, UpgradeId):
        cost = self._game_data.upgrades[item_id.value].cost
    else:
        cost = self._game_data.calculate_ability_cost(item_id)

    return CanAffordWrapper(cost.minerals <= self.minerals,
                            cost.vespene <= self.vespene,
                            enough_supply)
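A hedged usage sketch inside a bot's step handler, assuming the burnysc2 fork's BotAI helpers (`structures`, `train`); older python-sc2 versions expose slightly different names:

# Inside an async on_step handler of a python-sc2 bot (sketch).
barracks = self.structures(UnitTypeId.BARRACKS).ready
if barracks and self.can_afford(UnitTypeId.MARINE):
    barracks.random.train(UnitTypeId.MARINE)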
def get_all(self, key, fallback=None):
    """returns all header values for given key"""
    if key in self.headers:
        value = self.headers[key]
    else:
        value = fallback or []
    return value
def format_job_instance(job):
    '''
    Format the job instance correctly
    '''
    ret = {'Function': job.get('fun', 'unknown-function'),
           'Arguments': list(job.get('arg', [])),
           # unlikely but safeguard from invalid returns
           'Target': job.get('tgt', 'unknown-target'),
           'Target-type': job.get('tgt_type', 'list'),
           'User': job.get('user', 'root')}

    if 'metadata' in job:
        ret['Metadata'] = job.get('metadata', {})
    else:
        if 'kwargs' in job:
            if 'metadata' in job['kwargs']:
                ret['Metadata'] = job['kwargs'].get('metadata', {})
    return ret
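The function is a pure dict transformation, so its behavior is easy to pin down with a Salt-style job record (the values below are illustrative):

job = {'fun': 'test.ping', 'arg': [], 'tgt': '*',
       'tgt_type': 'glob', 'user': 'admin',
       'kwargs': {'metadata': {'batch': 7}}}

print(format_job_instance(job))
# {'Function': 'test.ping', 'Arguments': [], 'Target': '*',
#  'Target-type': 'glob', 'User': 'admin', 'Metadata': {'batch': 7}}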
def read_gtfs(path: Path, dist_units: str) -> "Feed":
    """
    Create a Feed instance from the given path and given distance units.
    The path should be a directory containing GTFS text files or a
    zip file that unzips as a collection of GTFS text files
    (and not as a directory containing GTFS text files).
    The distance units given must lie in :const:`constants.dist_units`

    Notes
    -----
    - Ignore non-GTFS files
    - Automatically strip whitespace from the column names in GTFS files

    """
    path = Path(path)
    if not path.exists():
        raise ValueError(f"Path {path} does not exist")

    # Unzip path to temporary directory if necessary
    if path.is_file():
        zipped = True
        tmp_dir = tempfile.TemporaryDirectory()
        src_path = Path(tmp_dir.name)
        shutil.unpack_archive(str(path), tmp_dir.name, "zip")
    else:
        zipped = False
        src_path = path

    # Read files into feed dictionary of DataFrames
    feed_dict = {table: None for table in cs.GTFS_REF["table"]}
    for p in src_path.iterdir():
        table = p.stem
        # Skip empty files, irrelevant files, and files with no data
        if p.is_file() and p.stat().st_size and table in feed_dict:
            # utf-8-sig gets rid of the byte order mark (BOM); see
            # http://stackoverflow.com/questions/17912307/u-ufeff-in-python-string
            df = pd.read_csv(p, dtype=cs.DTYPE, encoding="utf-8-sig")
            if not df.empty:
                feed_dict[table] = cn.clean_column_names(df)

    feed_dict["dist_units"] = dist_units

    # Delete temporary directory
    if zipped:
        tmp_dir.cleanup()

    # Create feed
    return Feed(**feed_dict)
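A hedged usage sketch matching this gtfs_kit-style API (the file name is illustrative):

feed = read_gtfs('path/to/gtfs.zip', dist_units='km')
print(feed.dist_units)  # 'km'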
def update(self, **kwargs):
    '''Update the object, removing device group if inherited

    If inheritedDevicegroup is the string "true" we need to remove
    deviceGroup from the args before we update or we get the
    following error:

    The floating traffic-group: /Common/traffic-group-1
    can only be set on /testfolder if its device-group is
    inherited from the root folder
    '''
    inherit_device_group = self.__dict__.get('inheritedDevicegroup', False)
    if inherit_device_group == 'true':
        self.__dict__.pop('deviceGroup')
    return self._update(**kwargs)
def get_cts_metadata(self, key: str, lang: str = None) -> Literal:
    """ Retrieve a metadata value from the CTS namespace

    :param key: CTS property to retrieve
    :param lang: Language in which it should be
    :return: Literal value of the CTS graph property
    """
    return self.metadata.get_single(RDF_NAMESPACES.CTS.term(key), lang)
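A hedged usage sketch, assuming a MyCapytain-style text object exposing this method; the property name and language code are illustrative:

title = text.get_cts_metadata("title", lang="eng")
print(str(title))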
def _from_dict(cls, _dict):
    """Initialize a NluEnrichmentFeatures object from a json dictionary."""
    args = {}
    if 'keywords' in _dict:
        args['keywords'] = NluEnrichmentKeywords._from_dict(_dict.get('keywords'))
    if 'entities' in _dict:
        args['entities'] = NluEnrichmentEntities._from_dict(_dict.get('entities'))
    if 'sentiment' in _dict:
        args['sentiment'] = NluEnrichmentSentiment._from_dict(_dict.get('sentiment'))
    if 'emotion' in _dict:
        args['emotion'] = NluEnrichmentEmotion._from_dict(_dict.get('emotion'))
    if 'categories' in _dict:
        args['categories'] = NluEnrichmentCategories._from_dict(_dict.get('categories'))
    if 'semantic_roles' in _dict:
        args['semantic_roles'] = NluEnrichmentSemanticRoles._from_dict(
            _dict.get('semantic_roles'))
    if 'relations' in _dict:
        args['relations'] = NluEnrichmentRelations._from_dict(_dict.get('relations'))
    if 'concepts' in _dict:
        args['concepts'] = NluEnrichmentConcepts._from_dict(_dict.get('concepts'))
    return cls(**args)
def format_time(sec):
    '''
    Re-formats time duration in seconds (*sec*) into more easily readable
    form, where (days,) hours, minutes, and seconds are explicitly shown.

    Returns the new duration as a formatted string.
    '''
    import time
    # One day is 86400 seconds; the original compared against 864000
    # (ten days), which made durations between 1 and 10 days wrap
    # incorrectly through gmtime. Fixed to 86400 here.
    if sec < 86400:
        # Idea from: http://stackoverflow.com/a/1384565
        return time.strftime('%H:%M:%S', time.gmtime(sec))
    else:
        days = int(sec / 86400)
        secs = sec % 86400
        return str(days) + 'd, ' + time.strftime('%H:%M:%S', time.gmtime(secs))
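With the day constant corrected, the function behaves as the docstring describes:

print(format_time(3661))   # '01:01:01'
print(format_time(90061))  # '1d, 01:01:01'  (one day plus 1h 1m 1s)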
def resample(self, edges):
    """Resample data to a new grid with edges *edges*.

    This method creates a new grid with the data from the current
    grid resampled to a regular grid specified by *edges*. The
    order of the interpolation is set by
    :attr:`Grid.interpolation_spline_order`: change the value *before*
    calling :meth:`resample`.

    Parameters
    ----------
    edges : tuple of arrays or Grid
         edges of the new grid or a :class:`Grid` instance that
         provides :attr:`Grid.edges`

    Returns
    -------
    Grid
         a new :class:`Grid` with the data interpolated over the new
         grid cells

    Examples
    --------
    Providing *edges* (a tuple of three arrays, indicating the
    boundaries of each grid cell)::

      g = grid.resample(edges)

    As a convenience, one can also supply another :class:`Grid` as the
    argument for this method ::

      g = grid.resample(othergrid)

    and the edges are taken from :attr:`Grid.edges`.

    """
    try:
        edges = edges.edges  # can also supply another Grid
    except AttributeError:
        pass
    midpoints = self._midpoints(edges)
    # feed a meshgrid to generate all points
    coordinates = ndmeshgrid(*midpoints)
    newgrid = self.interpolated(*coordinates)
    return self.__class__(newgrid, edges)
def convert_permute(builder, layer, input_names, output_names, keras_layer):
    """Convert a permute layer from keras to coreml.

    Parameters
    ----------
    keras_layer: layer
        A keras layer object.
    builder: NeuralNetworkBuilder
        A neural network builder object.
    """
    input_name, output_name = (input_names[0], output_names[0])

    keras_dims = keras_layer.dims
    # Keras permute layer index begins at 1
    if len(keras_dims) == 3:
        # Keras input tensor interpret as (H,W,C)
        x = list(np.array(keras_dims))
        i1, i2, i3 = x.index(1), x.index(2), x.index(3)
        x[i1], x[i2], x[i3] = 2, 3, 1
        # add a sequence axis
        x = [0] + x
        dim = tuple(x)
    elif len(keras_dims) == 4:
        # Here we use Keras converter as a place holder for inserting
        # permutations - the values here are not valid Keras dim parameters
        # but parameters we need to use to convert to CoreML model
        dim = keras_dims
    else:
        raise NotImplementedError('Supports only 3d permutation.')

    builder.add_permute(name=layer, dim=dim,
                        input_name=input_name, output_name=output_name)
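The index remapping in the 3-d branch is subtle; this standalone snippet replays just that arithmetic for a Keras Permute((2, 1, 3)) (swap H and W), showing how it becomes a CoreML-order tuple with a leading sequence axis:

import numpy as np

keras_dims = (2, 1, 3)         # Keras Permute((2, 1, 3)): swap H and W
x = list(np.array(keras_dims))
i1, i2, i3 = x.index(1), x.index(2), x.index(3)
x[i1], x[i2], x[i3] = 2, 3, 1  # relabel H->2, W->3, C->1 (CoreML axis order)
dim = tuple([0] + x)           # prepend the sequence axis
print(dim)                     # (0, 3, 2, 1)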
def to_xarray(self, diagnostics=False):
    """ Convert process variables to ``xarray.Dataset`` format.

    With ``diagnostics=True``, both state and diagnostic variables are included.
    Otherwise just the state variables are included.

    Returns an ``xarray.Dataset`` object with all spatial axes,
    including 'bounds' axes indicating cell boundaries in each
    spatial dimension.

    :Example:

        Create a single column radiation model and view as ``xarray`` object::

            >>> import climlab
            >>> state = climlab.column_state(num_lev=20)
            >>> model = climlab.radiation.RRTMG(state=state)

            >>> # display model state as xarray:
            >>> model.to_xarray()
            <xarray.Dataset>
            Dimensions:       (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21)
            Coordinates:
              * depth         (depth) float64 0.5
              * depth_bounds  (depth_bounds) float64 0.0 1.0
              * lev           (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ...
              * lev_bounds    (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ...
            Data variables:
                Ts            (depth) float64 288.0
                Tatm          (lev) float64 200.0 204.1 208.2 212.3 216.4 220.5 224.6 ...

            >>> # take a single timestep to populate the diagnostic variables
            >>> model.step_forward()

            >>> # Now look at the full output in xarray format
            >>> model.to_xarray(diagnostics=True)
            <xarray.Dataset>
            Dimensions:           (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21)
            Coordinates:
              * depth             (depth) float64 0.5
              * depth_bounds      (depth_bounds) float64 0.0 1.0
              * lev               (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ...
              * lev_bounds        (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ...
            Data variables:
                Ts                (depth) float64 288.7
                Tatm              (lev) float64 201.3 204.0 208.0 212.0 216.1 220.2 ...
                ASR               (depth) float64 240.0
                ASRcld            (depth) float64 0.0
                ASRclr            (depth) float64 240.0
                LW_flux_down      (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ...
                LW_flux_down_clr  (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ...
                LW_flux_net       (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ...
                LW_flux_net_clr   (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ...
                LW_flux_up        (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ...
                LW_flux_up_clr    (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ...
                LW_sfc            (depth) float64 128.9
                LW_sfc_clr        (depth) float64 128.9
                OLR               (depth) float64 240.1
                OLRcld            (depth) float64 0.0
                OLRclr            (depth) float64 240.1
                SW_flux_down      (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ...
                SW_flux_down_clr  (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ...
                SW_flux_net       (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ...
                SW_flux_net_clr   (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ...
                SW_flux_up        (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ...
                SW_flux_up_clr    (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ...
                SW_sfc            (depth) float64 163.8
                SW_sfc_clr        (depth) float64 163.8
                TdotLW            (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ...
                TdotLW_clr        (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ...
                TdotSW            (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ...
                TdotSW_clr        (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ...

    """
    if diagnostics:
        dic = self.state.copy()
        dic.update(self.diagnostics)
        return state_to_xarray(dic)
    else:
        return state_to_xarray(self.state)
Convert process variables to ``xarray.Dataset`` format. With ``diagnostics=True``, both state and diagnostic variables are included. Otherwise just the state variables are included. Returns an ``xarray.Dataset`` object with all spatial axes, including 'bounds' axes indicating cell boundaries in each spatial dimension. :Example: Create a single column radiation model and view as ``xarray`` object:: >>> import climlab >>> state = climlab.column_state(num_lev=20) >>> model = climlab.radiation.RRTMG(state=state) >>> # display model state as xarray: >>> model.to_xarray() <xarray.Dataset> Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21) Coordinates: * depth (depth) float64 0.5 * depth_bounds (depth_bounds) float64 0.0 1.0 * lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ... * lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ... Data variables: Ts (depth) float64 288.0 Tatm (lev) float64 200.0 204.1 208.2 212.3 216.4 220.5 224.6 ... >>> # take a single timestep to populate the diagnostic variables >>> model.step_forward() >>> # Now look at the full output in xarray format >>> model.to_xarray(diagnostics=True) <xarray.Dataset> Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21) Coordinates: * depth (depth) float64 0.5 * depth_bounds (depth_bounds) float64 0.0 1.0 * lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ... * lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ... Data variables: Ts (depth) float64 288.7 Tatm (lev) float64 201.3 204.0 208.0 212.0 216.1 220.2 ... ASR (depth) float64 240.0 ASRcld (depth) float64 0.0 ASRclr (depth) float64 240.0 LW_flux_down (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ... LW_flux_down_clr (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ... LW_flux_net (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ... LW_flux_net_clr (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ... LW_flux_up (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ... LW_flux_up_clr (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ... LW_sfc (depth) float64 128.9 LW_sfc_clr (depth) float64 128.9 OLR (depth) float64 240.1 OLRcld (depth) float64 0.0 OLRclr (depth) float64 240.1 SW_flux_down (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ... SW_flux_down_clr (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ... SW_flux_net (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ... SW_flux_net_clr (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ... SW_flux_up (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ... SW_flux_up_clr (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ... SW_sfc (depth) float64 163.8 SW_sfc_clr (depth) float64 163.8 TdotLW (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ... TdotLW_clr (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ... TdotSW (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ... TdotSW_clr (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ...
Below is the instruction that describes the task: ### Input: Convert process variables to ``xarray.Dataset`` format. With ``diagnostics=True``, both state and diagnostic variables are included. Otherwise just the state variables are included. Returns an ``xarray.Dataset`` object with all spatial axes, including 'bounds' axes indicating cell boundaries in each spatial dimension. :Example: Create a single column radiation model and view as ``xarray`` object:: >>> import climlab >>> state = climlab.column_state(num_lev=20) >>> model = climlab.radiation.RRTMG(state=state) >>> # display model state as xarray: >>> model.to_xarray() <xarray.Dataset> Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21) Coordinates: * depth (depth) float64 0.5 * depth_bounds (depth_bounds) float64 0.0 1.0 * lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ... * lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ... Data variables: Ts (depth) float64 288.0 Tatm (lev) float64 200.0 204.1 208.2 212.3 216.4 220.5 224.6 ... >>> # take a single timestep to populate the diagnostic variables >>> model.step_forward() >>> # Now look at the full output in xarray format >>> model.to_xarray(diagnostics=True) <xarray.Dataset> Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21) Coordinates: * depth (depth) float64 0.5 * depth_bounds (depth_bounds) float64 0.0 1.0 * lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ... * lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ... Data variables: Ts (depth) float64 288.7 Tatm (lev) float64 201.3 204.0 208.0 212.0 216.1 220.2 ... ASR (depth) float64 240.0 ASRcld (depth) float64 0.0 ASRclr (depth) float64 240.0 LW_flux_down (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ... LW_flux_down_clr (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ... LW_flux_net (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ... LW_flux_net_clr (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ... LW_flux_up (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ... LW_flux_up_clr (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ... LW_sfc (depth) float64 128.9 LW_sfc_clr (depth) float64 128.9 OLR (depth) float64 240.1 OLRcld (depth) float64 0.0 OLRclr (depth) float64 240.1 SW_flux_down (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ... SW_flux_down_clr (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ... SW_flux_net (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ... SW_flux_net_clr (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ... SW_flux_up (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ... SW_flux_up_clr (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ... SW_sfc (depth) float64 163.8 SW_sfc_clr (depth) float64 163.8 TdotLW (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ... TdotLW_clr (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ... TdotSW (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ... TdotSW_clr (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ... ### Response: def to_xarray(self, diagnostics=False): """ Convert process variables to ``xarray.Dataset`` format. With ``diagnostics=True``, both state and diagnostic variables are included. Otherwise just the state variables are included. Returns an ``xarray.Dataset`` object with all spatial axes, including 'bounds' axes indicating cell boundaries in each spatial dimension.
:Example: Create a single column radiation model and view as ``xarray`` object:: >>> import climlab >>> state = climlab.column_state(num_lev=20) >>> model = climlab.radiation.RRTMG(state=state) >>> # display model state as xarray: >>> model.to_xarray() <xarray.Dataset> Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21) Coordinates: * depth (depth) float64 0.5 * depth_bounds (depth_bounds) float64 0.0 1.0 * lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ... * lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ... Data variables: Ts (depth) float64 288.0 Tatm (lev) float64 200.0 204.1 208.2 212.3 216.4 220.5 224.6 ... >>> # take a single timestep to populate the diagnostic variables >>> model.step_forward() >>> # Now look at the full output in xarray format >>> model.to_xarray(diagnostics=True) <xarray.Dataset> Dimensions: (depth: 1, depth_bounds: 2, lev: 20, lev_bounds: 21) Coordinates: * depth (depth) float64 0.5 * depth_bounds (depth_bounds) float64 0.0 1.0 * lev (lev) float64 25.0 75.0 125.0 175.0 225.0 275.0 325.0 ... * lev_bounds (lev_bounds) float64 0.0 50.0 100.0 150.0 200.0 250.0 ... Data variables: Ts (depth) float64 288.7 Tatm (lev) float64 201.3 204.0 208.0 212.0 216.1 220.2 ... ASR (depth) float64 240.0 ASRcld (depth) float64 0.0 ASRclr (depth) float64 240.0 LW_flux_down (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ... LW_flux_down_clr (lev_bounds) float64 0.0 12.63 19.47 26.07 32.92 40.1 ... LW_flux_net (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ... LW_flux_net_clr (lev_bounds) float64 240.1 231.2 227.6 224.1 220.5 ... LW_flux_up (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ... LW_flux_up_clr (lev_bounds) float64 240.1 243.9 247.1 250.2 253.4 ... LW_sfc (depth) float64 128.9 LW_sfc_clr (depth) float64 128.9 OLR (depth) float64 240.1 OLRcld (depth) float64 0.0 OLRclr (depth) float64 240.1 SW_flux_down (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ... SW_flux_down_clr (lev_bounds) float64 341.3 323.1 318.0 313.5 309.5 ... SW_flux_net (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ... SW_flux_net_clr (lev_bounds) float64 240.0 223.3 220.2 217.9 215.9 ... SW_flux_up (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ... SW_flux_up_clr (lev_bounds) float64 101.3 99.88 97.77 95.64 93.57 ... SW_sfc (depth) float64 163.8 SW_sfc_clr (depth) float64 163.8 TdotLW (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ... TdotLW_clr (lev) float64 -1.502 -0.6148 -0.5813 -0.6173 -0.6426 ... TdotSW (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ... TdotSW_clr (lev) float64 2.821 0.5123 0.3936 0.3368 0.3174 0.3299 ... """ if diagnostics: dic = self.state.copy() dic.update(self.diagnostics) return state_to_xarray(dic) else: return state_to_xarray(self.state)
def parse_irreg(self, l): """ Constructor for the Irreg class. :param l: Line used to load the irregular forms :type l: str """ ecl = l.split(':') grq = ecl[0] exclusif = False if grq.endswith("*"): grq = grq[:-1] exclusif = True return Irreg( graphie_accentuee=grq, graphie=atone(grq), exclusif=exclusif, morphos=listeI(ecl[2]), lemme=self.lemmatiseur.lemme(ecl[1]), parent=self.lemmatiseur )
Constructor for the Irreg class. :param l: Line used to load the irregular forms :type l: str
Below is the instruction that describes the task: ### Input: Constructor for the Irreg class. :param l: Line used to load the irregular forms :type l: str ### Response: def parse_irreg(self, l): """ Constructor for the Irreg class. :param l: Line used to load the irregular forms :type l: str """ ecl = l.split(':') grq = ecl[0] exclusif = False if grq.endswith("*"): grq = grq[:-1] exclusif = True return Irreg( graphie_accentuee=grq, graphie=atone(grq), exclusif=exclusif, morphos=listeI(ecl[2]), lemme=self.lemmatiseur.lemme(ecl[1]), parent=self.lemmatiseur )
def cell_ends_with_code(lines): """Is the last line of the cell a line with code?""" if not lines: return False if not lines[-1].strip(): return False if lines[-1].startswith('#'): return False return True
Is the last line of the cell a line with code?
Below is the instruction that describes the task: ### Input: Is the last line of the cell a line with code? ### Response: def cell_ends_with_code(lines): """Is the last line of the cell a line with code?""" if not lines: return False if not lines[-1].strip(): return False if lines[-1].startswith('#'): return False return True
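A minimal self-contained check of the predicate above; the function body is copied verbatim so the snippet runs on its own, and the cell contents are made-up examples:

def cell_ends_with_code(lines):
    """Is the last line of the cell a line with code?"""
    if not lines:
        return False
    if not lines[-1].strip():
        return False
    if lines[-1].startswith('#'):
        return False
    return True

# Empty cell, trailing blank line, trailing comment, and trailing code.
assert cell_ends_with_code([]) is False
assert cell_ends_with_code(['x = 1', '   ']) is False
assert cell_ends_with_code(['x = 1', '# trailing comment']) is False
assert cell_ends_with_code(['# leading comment', 'x = 1']) is True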
def fetch_items(self, category, **kwargs): """Fetch packages and summary from Crates.io :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] if category == CATEGORY_CRATES: return self.__fetch_crates(from_date) else: return self.__fetch_summary()
Fetch packages and summary from Crates.io :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items
Below is the instruction that describes the task: ### Input: Fetch packages and summary from Crates.io :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items ### Response: def fetch_items(self, category, **kwargs): """Fetch packages and summary from Crates.io :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] if category == CATEGORY_CRATES: return self.__fetch_crates(from_date) else: return self.__fetch_summary()
async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameGetVersionConfirmation): return False self.version = frame.version self.success = True return True
Handle incoming API frame, return True if this was the expected frame.
Below is the instruction that describes the task: ### Input: Handle incoming API frame, return True if this was the expected frame. ### Response: async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameGetVersionConfirmation): return False self.version = frame.version self.success = True return True
def create(self, access_tokens, days_requested, options=None): ''' Create an asset report. :param [str] access_tokens: A list of access tokens, one token for each Item to be included in the Asset Report. :param int days_requested: Days of transaction history requested to be included in the Asset Report. :param dict options: An optional dictionary. For more information on the options object, see the documentation site listed above. ''' options = options or {} return self.client.post('/asset_report/create', { 'access_tokens': access_tokens, 'days_requested': days_requested, 'options': options, })
Create an asset report. :param [str] access_tokens: A list of access tokens, one token for each Item to be included in the Asset Report. :param int days_requested: Days of transaction history requested to be included in the Asset Report. :param dict options: An optional dictionary. For more information on the options object, see the documentation site listed above.
Below is the instruction that describes the task: ### Input: Create an asset report. :param [str] access_tokens: A list of access tokens, one token for each Item to be included in the Asset Report. :param int days_requested: Days of transaction history requested to be included in the Asset Report. :param dict options: An optional dictionary. For more information on the options object, see the documentation site listed above. ### Response: def create(self, access_tokens, days_requested, options=None): ''' Create an asset report. :param [str] access_tokens: A list of access tokens, one token for each Item to be included in the Asset Report. :param int days_requested: Days of transaction history requested to be included in the Asset Report. :param dict options: An optional dictionary. For more information on the options object, see the documentation site listed above. ''' options = options or {} return self.client.post('/asset_report/create', { 'access_tokens': access_tokens, 'days_requested': days_requested, 'options': options, })
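A hedged usage sketch of the endpoint above. It assumes a legacy plaid-python ``Client`` that exposes this class as ``client.AssetReport``; the sandbox token and report id are placeholders, not values from the source:

# Sketch only: `client` stands for an already-configured plaid-python Client.
response = client.AssetReport.create(
    access_tokens=['access-sandbox-xxxx'],     # placeholder Item access token
    days_requested=60,                         # 60 days of transaction history
    options={'client_report_id': 'report-1'},  # optional, per the docstring
)
asset_report_token = response['asset_report_token']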
def export(self, fn:PathOrStr, **kwargs): "Export the minimal state and save it in `fn` to load an empty version for inference." pickle.dump(self.get_state(**kwargs), open(fn, 'wb'))
Export the minimal state and save it in `fn` to load an empty version for inference.
Below is the instruction that describes the task: ### Input: Export the minimal state and save it in `fn` to load an empty version for inference. ### Response: def export(self, fn:PathOrStr, **kwargs): "Export the minimal state and save it in `fn` to load an empty version for inference." pickle.dump(self.get_state(**kwargs), open(fn, 'wb'))
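A short hedged sketch of a typical call; ``learn`` stands for an already-trained fastai ``Learner`` and is an assumption, not code from the source:

# Sketch only: `learn` is assumed to be a trained fastai Learner.
learn.export('export.pkl')  # pickles the minimal state for later inference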
def hasDependency(self, name, target=None, test_dependencies=False): ''' Check if this module has any dependencies with the specified name in its dependencies list, or in target dependencies for the specified target ''' if name in self.description.get('dependencies', {}).keys(): return True target_deps = self.description.get('targetDependencies', {}) if target is not None: for conf_key, target_conf_deps in target_deps.items(): if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated(): if name in target_conf_deps: return True if test_dependencies: if name in self.description.get('testDependencies', {}).keys(): return True if target is not None: test_target_deps = self.description.get('testTargetDependencies', {}) for conf_key, target_conf_deps in test_target_deps.items(): if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated(): if name in target_conf_deps: return True return False
Check if this module has any dependencies with the specified name in its dependencies list, or in target dependencies for the specified target
Below is the instruction that describes the task: ### Input: Check if this module has any dependencies with the specified name in its dependencies list, or in target dependencies for the specified target ### Response: def hasDependency(self, name, target=None, test_dependencies=False): ''' Check if this module has any dependencies with the specified name in its dependencies list, or in target dependencies for the specified target ''' if name in self.description.get('dependencies', {}).keys(): return True target_deps = self.description.get('targetDependencies', {}) if target is not None: for conf_key, target_conf_deps in target_deps.items(): if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated(): if name in target_conf_deps: return True if test_dependencies: if name in self.description.get('testDependencies', {}).keys(): return True if target is not None: test_target_deps = self.description.get('testTargetDependencies', {}) for conf_key, target_conf_deps in test_target_deps.items(): if _truthyConfValue(target.getConfigValue(conf_key)) or conf_key in target.getSimilarTo_Deprecated(): if name in target_conf_deps: return True return False
def google_get_token(self, config, prefix): """Make request to Google API to get token.""" params = { 'code': self.request_args_get( 'code', default=''), 'client_id': self.google_api_client_id, 'client_secret': self.google_api_client_secret, 'redirect_uri': self.scheme_host_port_prefix( 'http', config.host, config.port, prefix) + '/home', 'grant_type': 'authorization_code', } payload = urlencode(params).encode('utf-8') url = self.google_oauth2_url + 'token' req = Request(url, payload) json_str = urlopen(req).read() return json.loads(json_str.decode('utf-8'))
Make request to Google API to get token.
Below is the instruction that describes the task: ### Input: Make request to Google API to get token. ### Response: def google_get_token(self, config, prefix): """Make request to Google API to get token.""" params = { 'code': self.request_args_get( 'code', default=''), 'client_id': self.google_api_client_id, 'client_secret': self.google_api_client_secret, 'redirect_uri': self.scheme_host_port_prefix( 'http', config.host, config.port, prefix) + '/home', 'grant_type': 'authorization_code', } payload = urlencode(params).encode('utf-8') url = self.google_oauth2_url + 'token' req = Request(url, payload) json_str = urlopen(req).read() return json.loads(json_str.decode('utf-8'))
def getSheet(book=None,sheet=None): """Returns the PyOrigin object for a sheet.""" # figure out what book to use if book and not book.lower() in [x.lower() for x in bookNames()]: print("book %s doesn't exist"%book) return if book is None: book=activeBook().lower() if book is None: print("no book given or selected") return # figure out what sheet to use if sheet and not sheet.lower() in [x.lower() for x in sheetNames(book)]: print("sheet %s doesn't exist"%sheet) return if sheet is None: sheet=activeSheet().lower() if sheet is None: print("no sheet given or selected") return # by now, we know the book/sheet exists and can be found for poSheet in PyOrigin.WorksheetPages(book).Layers(): if poSheet.GetName().lower()==sheet.lower(): return poSheet return False
Returns the PyOrigin object for a sheet.
Below is the instruction that describes the task: ### Input: Returns the PyOrigin object for a sheet. ### Response: def getSheet(book=None,sheet=None): """Returns the PyOrigin object for a sheet.""" # figure out what book to use if book and not book.lower() in [x.lower() for x in bookNames()]: print("book %s doesn't exist"%book) return if book is None: book=activeBook().lower() if book is None: print("no book given or selected") return # figure out what sheet to use if sheet and not sheet.lower() in [x.lower() for x in sheetNames(book)]: print("sheet %s doesn't exist"%sheet) return if sheet is None: sheet=activeSheet().lower() if sheet is None: print("no sheet given or selected") return # by now, we know the book/sheet exists and can be found for poSheet in PyOrigin.WorksheetPages(book).Layers(): if poSheet.GetName().lower()==sheet.lower(): return poSheet return False
def create_data_disk(vm_=None, linode_id=None, data_size=None): r''' Create a data disk for the linode (type is hardcoded to ext4 at the moment) .. versionadded:: 2016.3.0 vm\_ The VM profile to create the data disk for. linode_id The ID of the Linode to create the data disk for. data_size The size of the disk, in MB. ''' kwargs = {} kwargs.update({'LinodeID': linode_id, 'Label': vm_['name']+"_data", 'Type': 'ext4', 'Size': data_size }) result = _query('linode', 'disk.create', args=kwargs) return _clean_data(result)
r''' Create a data disk for the linode (type is hardcoded to ext4 at the moment) .. versionadded:: 2016.3.0 vm\_ The VM profile to create the data disk for. linode_id The ID of the Linode to create the data disk for. data_size The size of the disk, in MB.
Below is the instruction that describes the task: ### Input: r''' Create a data disk for the linode (type is hardcoded to ext4 at the moment) .. versionadded:: 2016.3.0 vm\_ The VM profile to create the data disk for. linode_id The ID of the Linode to create the data disk for. data_size The size of the disk, in MB. ### Response: def create_data_disk(vm_=None, linode_id=None, data_size=None): r''' Create a data disk for the linode (type is hardcoded to ext4 at the moment) .. versionadded:: 2016.3.0 vm\_ The VM profile to create the data disk for. linode_id The ID of the Linode to create the data disk for. data_size The size of the disk, in MB. ''' kwargs = {} kwargs.update({'LinodeID': linode_id, 'Label': vm_['name']+"_data", 'Type': 'ext4', 'Size': data_size }) result = _query('linode', 'disk.create', args=kwargs) return _clean_data(result)
def _build_prior(self, unconstrained_tensor, constrained_tensor): """ Build a tensorflow representation of the prior density. The log Jacobian is included. """ if not misc.is_tensor(unconstrained_tensor): raise GPflowError("Unconstrained input must be a tensor.") if not misc.is_tensor(constrained_tensor): raise GPflowError("Constrained input must be a tensor.") prior_name = 'prior' if self.prior is None: return tf.constant(0.0, settings.float_type, name=prior_name) log_jacobian = self.transform.log_jacobian_tensor(unconstrained_tensor) logp_var = self.prior.logp(constrained_tensor) return tf.squeeze(tf.add(logp_var, log_jacobian, name=prior_name))
Build a tensorflow representation of the prior density. The log Jacobian is included.
Below is the instruction that describes the task: ### Input: Build a tensorflow representation of the prior density. The log Jacobian is included. ### Response: def _build_prior(self, unconstrained_tensor, constrained_tensor): """ Build a tensorflow representation of the prior density. The log Jacobian is included. """ if not misc.is_tensor(unconstrained_tensor): raise GPflowError("Unconstrained input must be a tensor.") if not misc.is_tensor(constrained_tensor): raise GPflowError("Constrained input must be a tensor.") prior_name = 'prior' if self.prior is None: return tf.constant(0.0, settings.float_type, name=prior_name) log_jacobian = self.transform.log_jacobian_tensor(unconstrained_tensor) logp_var = self.prior.logp(constrained_tensor) return tf.squeeze(tf.add(logp_var, log_jacobian, name=prior_name))
def get_sent(self, expired=False, for_all=False): """Retrieve information on previously sent transfers. :param expired: Whether or not to return expired transfers. :param for_all: Get transfers for all users. Requires a Filemail Business account. :type for_all: bool :type expired: bool :rtype: ``list`` of :class:`pyfilemail.Transfer` objects """ method, url = get_URL('get_sent') payload = { 'apikey': self.session.cookies.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'getexpired': expired, 'getforallusers': for_all } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return self._restore_transfers(res) hellraiser(res.json())
Retrieve information on previously sent transfers. :param expired: Whether or not to return expired transfers. :param for_all: Get transfers for all users. Requires a Filemail Business account. :type for_all: bool :type expired: bool :rtype: ``list`` of :class:`pyfilemail.Transfer` objects
Below is the instruction that describes the task: ### Input: Retrieve information on previously sent transfers. :param expired: Whether or not to return expired transfers. :param for_all: Get transfers for all users. Requires a Filemail Business account. :type for_all: bool :type expired: bool :rtype: ``list`` of :class:`pyfilemail.Transfer` objects ### Response: def get_sent(self, expired=False, for_all=False): """Retrieve information on previously sent transfers. :param expired: Whether or not to return expired transfers. :param for_all: Get transfers for all users. Requires a Filemail Business account. :type for_all: bool :type expired: bool :rtype: ``list`` of :class:`pyfilemail.Transfer` objects """ method, url = get_URL('get_sent') payload = { 'apikey': self.session.cookies.get('apikey'), 'logintoken': self.session.cookies.get('logintoken'), 'getexpired': expired, 'getforallusers': for_all } res = getattr(self.session, method)(url, params=payload) if res.status_code == 200: return self._restore_transfers(res) hellraiser(res.json())
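A hedged usage sketch; ``user`` stands for a logged-in pyfilemail ``User`` object, which is assumed rather than shown in the source:

# Sketch only: `user` is assumed to be an authenticated pyfilemail User.
transfers = user.get_sent(expired=True)  # include expired transfers too
for transfer in transfers:
    print(transfer)  # each item is a pyfilemail.Transfer object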
def find(self, instance_id): """ Find an instance. Create a new instance and populate it with stored data if it exists. Args: instance_id (str): UUID of the instance Returns: AtlasServiceInstance.Instance: An instance """ instance = AtlasServiceInstance.Instance(instance_id, self.backend) self.backend.storage.populate(instance) return instance
Find an instance. Create a new instance and populate it with stored data if it exists. Args: instance_id (str): UUID of the instance Returns: AtlasServiceInstance.Instance: An instance
Below is the instruction that describes the task: ### Input: Find an instance. Create a new instance and populate it with stored data if it exists. Args: instance_id (str): UUID of the instance Returns: AtlasServiceInstance.Instance: An instance ### Response: def find(self, instance_id): """ Find an instance. Create a new instance and populate it with stored data if it exists. Args: instance_id (str): UUID of the instance Returns: AtlasServiceInstance.Instance: An instance """ instance = AtlasServiceInstance.Instance(instance_id, self.backend) self.backend.storage.populate(instance) return instance
async def answer_audio(self, audio: typing.Union[base.InputFile, base.String], caption: typing.Union[base.String, None] = None, duration: typing.Union[base.Integer, None] = None, performer: typing.Union[base.String, None] = None, title: typing.Union[base.String, None] = None, disable_notification: typing.Union[base.Boolean, None] = None, reply_markup=None, reply=False) -> Message: """ Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. For sending voice messages, use the sendVoice method instead. Source: https://core.telegram.org/bots/api#sendaudio :param audio: Audio file to send. :type audio: :obj:`typing.Union[base.InputFile, base.String]` :param caption: Audio caption, 0-200 characters :type caption: :obj:`typing.Union[base.String, None]` :param duration: Duration of the audio in seconds :type duration: :obj:`typing.Union[base.Integer, None]` :param performer: Performer :type performer: :obj:`typing.Union[base.String, None]` :param title: Track name :type title: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_markup: Additional interface options. :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :param reply: fill 'reply_to_message_id' :return: On success, the sent Message is returned. :rtype: :obj:`types.Message` """ return await self.bot.send_audio(chat_id=self.chat.id, audio=audio, caption=caption, duration=duration, performer=performer, title=title, disable_notification=disable_notification, reply_to_message_id=self.message_id if reply else None, reply_markup=reply_markup)
Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. For sending voice messages, use the sendVoice method instead. Source: https://core.telegram.org/bots/api#sendaudio :param audio: Audio file to send. :type audio: :obj:`typing.Union[base.InputFile, base.String]` :param caption: Audio caption, 0-200 characters :type caption: :obj:`typing.Union[base.String, None]` :param duration: Duration of the audio in seconds :type duration: :obj:`typing.Union[base.Integer, None]` :param performer: Performer :type performer: :obj:`typing.Union[base.String, None]` :param title: Track name :type title: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_markup: Additional interface options. :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :param reply: fill 'reply_to_message_id' :return: On success, the sent Message is returned. :rtype: :obj:`types.Message`
Below is the instruction that describes the task: ### Input: Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. For sending voice messages, use the sendVoice method instead. Source: https://core.telegram.org/bots/api#sendaudio :param audio: Audio file to send. :type audio: :obj:`typing.Union[base.InputFile, base.String]` :param caption: Audio caption, 0-200 characters :type caption: :obj:`typing.Union[base.String, None]` :param duration: Duration of the audio in seconds :type duration: :obj:`typing.Union[base.Integer, None]` :param performer: Performer :type performer: :obj:`typing.Union[base.String, None]` :param title: Track name :type title: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_markup: Additional interface options. :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :param reply: fill 'reply_to_message_id' :return: On success, the sent Message is returned. :rtype: :obj:`types.Message` ### Response: async def answer_audio(self, audio: typing.Union[base.InputFile, base.String], caption: typing.Union[base.String, None] = None, duration: typing.Union[base.Integer, None] = None, performer: typing.Union[base.String, None] = None, title: typing.Union[base.String, None] = None, disable_notification: typing.Union[base.Boolean, None] = None, reply_markup=None, reply=False) -> Message: """ Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .mp3 format. For sending voice messages, use the sendVoice method instead. Source: https://core.telegram.org/bots/api#sendaudio :param audio: Audio file to send. :type audio: :obj:`typing.Union[base.InputFile, base.String]` :param caption: Audio caption, 0-200 characters :type caption: :obj:`typing.Union[base.String, None]` :param duration: Duration of the audio in seconds :type duration: :obj:`typing.Union[base.Integer, None]` :param performer: Performer :type performer: :obj:`typing.Union[base.String, None]` :param title: Track name :type title: :obj:`typing.Union[base.String, None]` :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: :obj:`typing.Union[base.Boolean, None]` :param reply_markup: Additional interface options. :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup, types.ReplyKeyboardRemove, types.ForceReply, None]` :param reply: fill 'reply_to_message_id' :return: On success, the sent Message is returned. :rtype: :obj:`types.Message` """ return await self.bot.send_audio(chat_id=self.chat.id, audio=audio, caption=caption, duration=duration, performer=performer, title=title, disable_notification=disable_notification, reply_to_message_id=self.message_id if reply else None, reply_markup=reply_markup)
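A hedged sketch of calling the coroutine above from inside an aiogram message handler; the dispatcher ``dp``, the command name, and the audio URL are illustrative assumptions:

# Sketch only: `dp` stands for an aiogram Dispatcher wired to a Bot.
@dp.message_handler(commands=['music'])
async def send_music(message):
    # Replies in the same chat with an .mp3 by URL (placeholder URL).
    await message.answer_audio(
        audio='https://example.com/track.mp3',
        performer='Artist',
        title='Track name',
    )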
def _generate_provisional_name(q, astrom_header, fits_header): """ Generates a name for an object given the information in its astrom observation header and FITS header. :param q: a queue of provisional names to return. :type q: Queue :param astrom_header: :param fits_header: """ while True: ef = get_epoch_field(astrom_header, fits_header) epoch_field = ef[0] + ef[1] count = storage.increment_object_counter(storage.MEASURE3, epoch_field) try: q.put(ef[1] + count) except: break
Generates a name for an object given the information in its astrom observation header and FITS header. :param q: a queue of provisional names to return. :type q: Queue :param astrom_header: :param fits_header:
Below is the instruction that describes the task: ### Input: Generates a name for an object given the information in its astrom observation header and FITS header. :param q: a queue of provisional names to return. :type q: Queue :param astrom_header: :param fits_header: ### Response: def _generate_provisional_name(q, astrom_header, fits_header): """ Generates a name for an object given the information in its astrom observation header and FITS header. :param q: a queue of provisional names to return. :type q: Queue :param astrom_header: :param fits_header: """ while True: ef = get_epoch_field(astrom_header, fits_header) epoch_field = ef[0] + ef[1] count = storage.increment_object_counter(storage.MEASURE3, epoch_field) try: q.put(ef[1] + count) except: break
def land_temp_prob(self, tlow, thigh): """Temperature-based probability of cloud over land Equation 14 (Zhu and Woodcock, 2012) Parameters ---------- tirs1: ndarray tlow: float Low (17.5 percentile) temperature of land thigh: float High (82.5 percentile) temperature of land Output ------ ndarray : probability of cloud over land based on temperature """ temp_diff = 4 # degrees return (thigh + temp_diff - self.tirs1) / (thigh + 4 - (tlow - 4))
Temperature-based probability of cloud over land Equation 14 (Zhu and Woodcock, 2012) Parameters ---------- tirs1: ndarray tlow: float Low (17.5 percentile) temperature of land thigh: float High (82.5 percentile) temperature of land Output ------ ndarray : probability of cloud over land based on temperature
Below is the instruction that describes the task: ### Input: Temperature-based probability of cloud over land Equation 14 (Zhu and Woodcock, 2012) Parameters ---------- tirs1: ndarray tlow: float Low (17.5 percentile) temperature of land thigh: float High (82.5 percentile) temperature of land Output ------ ndarray : probability of cloud over land based on temperature ### Response: def land_temp_prob(self, tlow, thigh): """Temperature-based probability of cloud over land Equation 14 (Zhu and Woodcock, 2012) Parameters ---------- tirs1: ndarray tlow: float Low (17.5 percentile) temperature of land thigh: float High (82.5 percentile) temperature of land Output ------ ndarray : probability of cloud over land based on temperature """ temp_diff = 4 # degrees return (thigh + temp_diff - self.tirs1) / (thigh + 4 - (tlow - 4))
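The equation is a linear rescaling of brightness temperature between the stretched percentile bounds. A self-contained numeric check with illustrative values (not data from the source); the standalone version below folds the hard-coded 4 into ``temp_diff``, which leaves the numerics unchanged:

import numpy as np

def land_temp_prob_standalone(tirs1, tlow, thigh, temp_diff=4.0):
    # Equation 14 (Zhu and Woodcock, 2012), with temp_diff used consistently.
    return (thigh + temp_diff - tirs1) / (thigh + temp_diff - (tlow - temp_diff))

tirs1 = np.array([280.0, 290.0, 300.0])  # illustrative brightness temps (K)
tlow, thigh = 285.0, 300.0               # illustrative 17.5/82.5 percentiles
print(land_temp_prob_standalone(tirs1, tlow, thigh))  # cooler pixel -> higher prob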
def single_val(self): """Return the relative error of the worst point that might make the data non-symmetric. """ sv_t = self._sv(self._tdsphere) sv_p = self._sv(self._tdsphere) return (sv_t, sv_p)
Return the relative error of the worst point that might make the data non-symmetric.
Below is the instruction that describes the task: ### Input: Return the relative error of the worst point that might make the data non-symmetric. ### Response: def single_val(self): """Return the relative error of the worst point that might make the data non-symmetric. """ sv_t = self._sv(self._tdsphere) sv_p = self._sv(self._tdsphere) return (sv_t, sv_p)
def get_names(cs): """Return list of every name.""" records = [] for c in cs: records.extend(c.get('names', [])) return records
Return list of every name.
Below is the instruction that describes the task: ### Input: Return list of every name. ### Response: def get_names(cs): """Return list of every name.""" records = [] for c in cs: records.extend(c.get('names', [])) return records
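A self-contained check of the helper above with made-up records; the function is copied verbatim so the snippet runs on its own:

def get_names(cs):
    """Return list of every name."""
    records = []
    for c in cs:
        records.extend(c.get('names', []))
    return records

cs = [{'names': ['alpha', 'beta']}, {}, {'names': ['gamma']}]  # illustrative
assert get_names(cs) == ['alpha', 'beta', 'gamma']  # missing keys are skipped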
def html_to_pdf(content, encoding="utf-8", link_callback=fetch_resources, **kwargs): """ Converts html ``content`` into PDF document. :param unicode content: html content :returns: PDF content :rtype: :class:`bytes` :raises: :exc:`~easy_pdf.exceptions.PDFRenderingError` """ src = BytesIO(content.encode(encoding)) dest = BytesIO() pdf = pisa.pisaDocument(src, dest, encoding=encoding, link_callback=link_callback, **kwargs) if pdf.err: logger.error("Error rendering PDF document") for entry in pdf.log: if entry[0] == xhtml2pdf.default.PML_ERROR: logger_x2p.error("line %s, msg: %s, fragment: %s", entry[1], entry[2], entry[3]) raise PDFRenderingError("Errors rendering PDF", content=content, log=pdf.log) if pdf.warn: for entry in pdf.log: if entry[0] == xhtml2pdf.default.PML_WARNING: logger_x2p.warning("line %s, msg: %s, fragment: %s", entry[1], entry[2], entry[3]) return dest.getvalue()
Converts html ``content`` into PDF document. :param unicode content: html content :returns: PDF content :rtype: :class:`bytes` :raises: :exc:`~easy_pdf.exceptions.PDFRenderingError`
Below is the instruction that describes the task: ### Input: Converts html ``content`` into PDF document. :param unicode content: html content :returns: PDF content :rtype: :class:`bytes` :raises: :exc:`~easy_pdf.exceptions.PDFRenderingError` ### Response: def html_to_pdf(content, encoding="utf-8", link_callback=fetch_resources, **kwargs): """ Converts html ``content`` into PDF document. :param unicode content: html content :returns: PDF content :rtype: :class:`bytes` :raises: :exc:`~easy_pdf.exceptions.PDFRenderingError` """ src = BytesIO(content.encode(encoding)) dest = BytesIO() pdf = pisa.pisaDocument(src, dest, encoding=encoding, link_callback=link_callback, **kwargs) if pdf.err: logger.error("Error rendering PDF document") for entry in pdf.log: if entry[0] == xhtml2pdf.default.PML_ERROR: logger_x2p.error("line %s, msg: %s, fragment: %s", entry[1], entry[2], entry[3]) raise PDFRenderingError("Errors rendering PDF", content=content, log=pdf.log) if pdf.warn: for entry in pdf.log: if entry[0] == xhtml2pdf.default.PML_WARNING: logger_x2p.warning("line %s, msg: %s, fragment: %s", entry[1], entry[2], entry[3]) return dest.getvalue()
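A hedged usage sketch, assuming ``xhtml2pdf`` is installed and ``html_to_pdf`` is importable from the module above; the markup and output path are placeholders:

# Sketch only: raises PDFRenderingError on failure, per the docstring.
pdf_bytes = html_to_pdf(u"<html><body><h1>Hello PDF</h1></body></html>")
with open("hello.pdf", "wb") as fh:
    fh.write(pdf_bytes)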
def load_object(target, namespace=None): """This helper function loads an object identified by a dotted-notation string. For example: # Load class Foo from example.objects load_object('example.objects:Foo') If a plugin namespace is provided simple name references are allowed. For example: # Load the plugin named 'routing' from the 'web.dispatch' namespace load_object('routing', 'web.dispatch') Providing a namespace does not prevent full object lookup (dot-colon notation) from working. """ if namespace and ':' not in target: allowable = dict((i.name, i) for i in pkg_resources.iter_entry_points(namespace)) if target not in allowable: raise ValueError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable)) return allowable[target].load() parts, target = target.split(':') if ':' in target else (target, None) module = __import__(parts) for part in parts.split('.')[1:] + ([target] if target else []): module = getattr(module, part) return module
This helper function loads an object identified by a dotted-notation string. For example: # Load class Foo from example.objects load_object('example.objects:Foo') If a plugin namespace is provided simple name references are allowed. For example: # Load the plugin named 'routing' from the 'web.dispatch' namespace load_object('routing', 'web.dispatch') Providing a namespace does not prevent full object lookup (dot-colon notation) from working.
Below is the instruction that describes the task: ### Input: This helper function loads an object identified by a dotted-notation string. For example: # Load class Foo from example.objects load_object('example.objects:Foo') If a plugin namespace is provided simple name references are allowed. For example: # Load the plugin named 'routing' from the 'web.dispatch' namespace load_object('routing', 'web.dispatch') Providing a namespace does not prevent full object lookup (dot-colon notation) from working. ### Response: def load_object(target, namespace=None): """This helper function loads an object identified by a dotted-notation string. For example: # Load class Foo from example.objects load_object('example.objects:Foo') If a plugin namespace is provided simple name references are allowed. For example: # Load the plugin named 'routing' from the 'web.dispatch' namespace load_object('routing', 'web.dispatch') Providing a namespace does not prevent full object lookup (dot-colon notation) from working. """ if namespace and ':' not in target: allowable = dict((i.name, i) for i in pkg_resources.iter_entry_points(namespace)) if target not in allowable: raise ValueError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable)) return allowable[target].load() parts, target = target.split(':') if ':' in target else (target, None) module = __import__(parts) for part in parts.split('.')[1:] + ([target] if target else []): module = getattr(module, part) return module
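A self-contained demonstration of the dot-colon lookup paths (the plugin-namespace branch needs installed entry points, so only the plain lookups are exercised; the function is repeated below with the ``pkg_resources`` import it relies on, and its docstring elided):

import pkg_resources  # used by the plugin-namespace branch

def load_object(target, namespace=None):
    if namespace and ':' not in target:
        allowable = dict((i.name, i) for i in pkg_resources.iter_entry_points(namespace))
        if target not in allowable:
            raise ValueError('Unknown plugin "' + target + '"; found: ' + ', '.join(allowable))
        return allowable[target].load()
    parts, target = target.split(':') if ':' in target else (target, None)
    module = __import__(parts)
    for part in parts.split('.')[1:] + ([target] if target else []):
        module = getattr(module, part)
    return module

import json, os
assert load_object('json:dumps') is json.dumps  # module:attribute form
assert load_object('os.path') is os.path        # dotted module form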
def Nu_cylinder_Whitaker(Re, Pr, mu=None, muw=None): r'''Calculates Nusselt number for crossflow across a single tube as shown in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream temperature. Recommends a viscosity exponent correction of 0.25, which is applied only if provided. Also shown in [2]_. .. math:: Nu_D = (0.4 Re_D^{0.5} + 0.06Re_D^{2/3})Pr^{0.4} \left(\frac{\mu}{\mu_w}\right)^{0.25} Parameters ---------- Re : float Reynolds number with respect to cylinder diameter, [-] Pr : float Prandtl number at free stream temperature, [-] mu : float, optional Viscosity of fluid at the free stream temperature [Pa*s] muw : float, optional Viscosity of fluid at the wall temperature [Pa*s] Returns ------- Nu : float Nusselt number with respect to cylinder diameter, [-] Notes ----- Developed considering data from 1 to 1E5 Re, 0.67 to 300 Pr, and range of viscosity ratios from 0.25 to 5.2. Found experimental data to generally agree with it within 25%. Examples -------- >>> Nu_cylinder_Whitaker(6071, 0.7) 45.94527461589126 References ---------- .. [1] Whitaker, Stephen. "Forced Convection Heat Transfer Correlations for Flow in Pipes, Past Flat Plates, Single Cylinders, Single Spheres, and for Flow in Packed Beds and Tube Bundles." AIChE Journal 18, no. 2 (March 1, 1972): 361-371. doi:10.1002/aic.690180219. .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer from a Circular Cylinder in Crossflow to Air and Liquids." International Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805. doi:10.1016/j.ijheatmasstransfer.2004.05.012. ''' Nu = (0.4*Re**0.5 + 0.06*Re**(2/3.))*Pr**0.3 if mu and muw: Nu *= (mu/muw)**0.25 return Nu
r'''Calculates Nusselt number for crossflow across a single tube as shown in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream temperature. Recommends a viscosity exponent correction of 0.25, which is applied only if provided. Also shown in [2]_. .. math:: Nu_D = (0.4 Re_D^{0.5} + 0.06Re_D^{2/3})Pr^{0.4} \left(\frac{\mu}{\mu_w}\right)^{0.25} Parameters ---------- Re : float Reynolds number with respect to cylinder diameter, [-] Pr : float Prandtl number at free stream temperature, [-] mu : float, optional Viscosity of fluid at the free stream temperature [Pa*s] muw : float, optional Viscosity of fluid at the wall temperature [Pa*s] Returns ------- Nu : float Nusselt number with respect to cylinder diameter, [-] Notes ----- Developed considering data from 1 to 1E5 Re, 0.67 to 300 Pr, and range of viscosity ratios from 0.25 to 5.2. Found experimental data to generally agree with it within 25%. Examples -------- >>> Nu_cylinder_Whitaker(6071, 0.7) 45.94527461589126 References ---------- .. [1] Whitaker, Stephen. "Forced Convection Heat Transfer Correlations for Flow in Pipes, Past Flat Plates, Single Cylinders, Single Spheres, and for Flow in Packed Beds and Tube Bundles." AIChE Journal 18, no. 2 (March 1, 1972): 361-371. doi:10.1002/aic.690180219. .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer from a Circular Cylinder in Crossflow to Air and Liquids." International Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805. doi:10.1016/j.ijheatmasstransfer.2004.05.012.
Below is the instruction that describes the task: ### Input: r'''Calculates Nusselt number for crossflow across a single tube as shown in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream temperature. Recommends a viscosity exponent correction of 0.25, which is applied only if provided. Also shown in [2]_. .. math:: Nu_D = (0.4 Re_D^{0.5} + 0.06Re_D^{2/3})Pr^{0.4} \left(\frac{\mu}{\mu_w}\right)^{0.25} Parameters ---------- Re : float Reynolds number with respect to cylinder diameter, [-] Pr : float Prandtl number at free stream temperature, [-] mu : float, optional Viscosity of fluid at the free stream temperature [Pa*s] muw : float, optional Viscosity of fluid at the wall temperature [Pa*s] Returns ------- Nu : float Nusselt number with respect to cylinder diameter, [-] Notes ----- Developed considering data from 1 to 1E5 Re, 0.67 to 300 Pr, and range of viscosity ratios from 0.25 to 5.2. Found experimental data to generally agree with it within 25%. Examples -------- >>> Nu_cylinder_Whitaker(6071, 0.7) 45.94527461589126 References ---------- .. [1] Whitaker, Stephen. "Forced Convection Heat Transfer Correlations for Flow in Pipes, Past Flat Plates, Single Cylinders, Single Spheres, and for Flow in Packed Beds and Tube Bundles." AIChE Journal 18, no. 2 (March 1, 1972): 361-371. doi:10.1002/aic.690180219. .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer from a Circular Cylinder in Crossflow to Air and Liquids." International Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805. doi:10.1016/j.ijheatmasstransfer.2004.05.012. ### Response: def Nu_cylinder_Whitaker(Re, Pr, mu=None, muw=None): r'''Calculates Nusselt number for crossflow across a single tube as shown in [1]_ at a specified `Re` and `Pr`, both evaluated at the free stream temperature. Recommends a viscosity exponent correction of 0.25, which is applied only if provided. Also shown in [2]_. .. math:: Nu_D = (0.4 Re_D^{0.5} + 0.06Re_D^{2/3})Pr^{0.4} \left(\frac{\mu}{\mu_w}\right)^{0.25} Parameters ---------- Re : float Reynolds number with respect to cylinder diameter, [-] Pr : float Prandtl number at free stream temperature, [-] mu : float, optional Viscosity of fluid at the free stream temperature [Pa*s] muw : float, optional Viscosity of fluid at the wall temperature [Pa*s] Returns ------- Nu : float Nusselt number with respect to cylinder diameter, [-] Notes ----- Developed considering data from 1 to 1E5 Re, 0.67 to 300 Pr, and range of viscosity ratios from 0.25 to 5.2. Found experimental data to generally agree with it within 25%. Examples -------- >>> Nu_cylinder_Whitaker(6071, 0.7) 45.94527461589126 References ---------- .. [1] Whitaker, Stephen. "Forced Convection Heat Transfer Correlations for Flow in Pipes, Past Flat Plates, Single Cylinders, Single Spheres, and for Flow in Packed Beds and Tube Bundles." AIChE Journal 18, no. 2 (March 1, 1972): 361-371. doi:10.1002/aic.690180219. .. [2] Sanitjai, S., and R. J. Goldstein. "Forced Convection Heat Transfer from a Circular Cylinder in Crossflow to Air and Liquids." International Journal of Heat and Mass Transfer 47, no. 22 (October 2004): 4795-4805. doi:10.1016/j.ijheatmasstransfer.2004.05.012. ''' Nu = (0.4*Re**0.5 + 0.06*Re**(2/3.))*Pr**0.3 if mu and muw: Nu *= (mu/muw)**0.25 return Nu
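A short usage note: the doctest above omits the viscosity correction, which the function applies only when both viscosities are passed. With the function above in scope (viscosity values below are illustrative):

Nu_plain = Nu_cylinder_Whitaker(6071, 0.7)
Nu_corrected = Nu_cylinder_Whitaker(6071, 0.7, mu=1.0E-3, muw=1.2E-3)
# (mu/muw)**0.25 < 1 here since mu < muw, so the corrected value is lower.
print(Nu_plain, Nu_corrected)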
def open_sciobj_file_by_pid_ctx(pid, write=False): """Open the file containing the Science Object bytes of ``pid`` in the default location within the tree of the local SciObj store. If ``write`` is True, the file is opened for writing and any missing directories are created. Return the file handle and file_url with the file location in a suitable form for storing in the DB. If nothing was written to the file, it is deleted. """ abs_path = get_abs_sciobj_file_path_by_pid(pid) with open_sciobj_file_by_path_ctx(abs_path, write) as sciobj_file: yield sciobj_file
Open the file containing the Science Object bytes of ``pid`` in the default location within the tree of the local SciObj store. If ``write`` is True, the file is opened for writing and any missing directories are created. Return the file handle and file_url with the file location in a suitable form for storing in the DB. If nothing was written to the file, it is deleted.
Below is the instruction that describes the task: ### Input: Open the file containing the Science Object bytes of ``pid`` in the default location within the tree of the local SciObj store. If ``write`` is True, the file is opened for writing and any missing directories are created. Return the file handle and file_url with the file location in a suitable form for storing in the DB. If nothing was written to the file, it is deleted. ### Response: def open_sciobj_file_by_pid_ctx(pid, write=False): """Open the file containing the Science Object bytes of ``pid`` in the default location within the tree of the local SciObj store. If ``write`` is True, the file is opened for writing and any missing directories are created. Return the file handle and file_url with the file location in a suitable form for storing in the DB. If nothing was written to the file, it is deleted. """ abs_path = get_abs_sciobj_file_path_by_pid(pid) with open_sciobj_file_by_path_ctx(abs_path, write) as sciobj_file: yield sciobj_file
def findRequirements(platform): """ Read the requirements.txt file and parse into requirements for setup's install_requires option. """ includePycapnp = platform not in WINDOWS_PLATFORMS requirementsPath = fixPath(os.path.join(PY_BINDINGS, "requirements.txt")) return [ line.strip() for line in open(requirementsPath).readlines() if not line.startswith("#") and (not line.startswith("pycapnp") or includePycapnp) ]
Read the requirements.txt file and parse into requirements for setup's install_requires option.
Below is the instruction that describes the task: ### Input: Read the requirements.txt file and parse into requirements for setup's install_requires option. ### Response: def findRequirements(platform): """ Read the requirements.txt file and parse into requirements for setup's install_requires option. """ includePycapnp = platform not in WINDOWS_PLATFORMS requirementsPath = fixPath(os.path.join(PY_BINDINGS, "requirements.txt")) return [ line.strip() for line in open(requirementsPath).readlines() if not line.startswith("#") and (not line.startswith("pycapnp") or includePycapnp) ]
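The heart of the function is the single filter expression; a self-contained check of that logic on an in-memory list, so the file paths and platform globals of the original are not needed:

lines = ['numpy==1.16\n', '# build-time comment\n', 'pycapnp==0.6.4\n']
include_pycapnp = False  # e.g. when platform is in WINDOWS_PLATFORMS

reqs = [
    line.strip()
    for line in lines
    if not line.startswith("#")
    and (not line.startswith("pycapnp") or include_pycapnp)
]
assert reqs == ['numpy==1.16']  # comments and pycapnp are filtered out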
def read_elastic_tensor(self): """ Parse the elastic tensor data. Returns: 6x6 array corresponding to the elastic tensor from the OUTCAR. """ header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+" \ r"Direction\s+([X-Z][X-Z]\s+)+" \ r"\-+" row_pattern = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6) footer_pattern = r"\-+" et_table = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float) self.data["elastic_tensor"] = et_table
Parse the elastic tensor data. Returns: 6x6 array corresponding to the elastic tensor from the OUTCAR.
Below is the instruction that describes the task: ### Input: Parse the elastic tensor data. Returns: 6x6 array corresponding to the elastic tensor from the OUTCAR. ### Response: def read_elastic_tensor(self): """ Parse the elastic tensor data. Returns: 6x6 array corresponding to the elastic tensor from the OUTCAR. """ header_pattern = r"TOTAL ELASTIC MODULI \(kBar\)\s+" \ r"Direction\s+([X-Z][X-Z]\s+)+" \ r"\-+" row_pattern = r"[X-Z][X-Z]\s+" + r"\s+".join([r"(\-*[\.\d]+)"] * 6) footer_pattern = r"\-+" et_table = self.read_table_pattern(header_pattern, row_pattern, footer_pattern, postprocess=float) self.data["elastic_tensor"] = et_table
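A hedged usage sketch, assuming this method belongs to pymatgen's ``Outcar`` parser and that the OUTCAR comes from an elastic-constants run; the file path is a placeholder:

from pymatgen.io.vasp.outputs import Outcar

outcar = Outcar('OUTCAR')             # placeholder path to a VASP OUTCAR
outcar.read_elastic_tensor()          # populates outcar.data['elastic_tensor']
print(outcar.data['elastic_tensor'])  # 6x6 values, in kBar per the header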
def _build_resolver(cls, session: AppSession): '''Build resolver.''' args = session.args dns_timeout = args.dns_timeout if args.timeout: dns_timeout = args.timeout if args.inet_family == 'IPv4': family = IPFamilyPreference.ipv4_only elif args.inet_family == 'IPv6': family = IPFamilyPreference.ipv6_only elif args.prefer_family == 'IPv6': family = IPFamilyPreference.prefer_ipv6 elif args.prefer_family == 'IPv4': family = IPFamilyPreference.prefer_ipv4 else: family = IPFamilyPreference.any return session.factory.new( 'Resolver', family=family, timeout=dns_timeout, rotate=args.rotate_dns, cache=session.factory.class_map['Resolver'].new_cache() if args.dns_cache else None, )
Build resolver.
Below is the instruction that describes the task: ### Input: Build resolver. ### Response: def _build_resolver(cls, session: AppSession): '''Build resolver.''' args = session.args dns_timeout = args.dns_timeout if args.timeout: dns_timeout = args.timeout if args.inet_family == 'IPv4': family = IPFamilyPreference.ipv4_only elif args.inet_family == 'IPv6': family = IPFamilyPreference.ipv6_only elif args.prefer_family == 'IPv6': family = IPFamilyPreference.prefer_ipv6 elif args.prefer_family == 'IPv4': family = IPFamilyPreference.prefer_ipv4 else: family = IPFamilyPreference.any return session.factory.new( 'Resolver', family=family, timeout=dns_timeout, rotate=args.rotate_dns, cache=session.factory.class_map['Resolver'].new_cache() if args.dns_cache else None, )
def send_file(self, path, contents, shutit_pexpect_child=None, truncate=False, note=None, user=None, echo=False, group=None, loglevel=logging.INFO, encoding=None): """Sends the passed-in string as a file to the passed-in path on the target. @param path: Target location of file on target. @param contents: Contents of file as a string. @param shutit_pexpect_child: See send() @param note: See send() @param user: Set ownership to this user (defaults to whoami) @param group: Set group to this user (defaults to first group in groups) @type path: string @type contents: string """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) return shutit_pexpect_session.send_file(path, contents, truncate=truncate, note=note, echo=echo, user=user, group=group, loglevel=loglevel, encoding=encoding)
Sends the passed-in string as a file to the passed-in path on the target. @param path: Target location of file on target. @param contents: Contents of file as a string. @param shutit_pexpect_child: See send() @param note: See send() @param user: Set ownership to this user (defaults to whoami) @param group: Set group to this user (defaults to first group in groups) @type path: string @type contents: string
Below is the instruction that describes the task: ### Input: Sends the passed-in string as a file to the passed-in path on the target. @param path: Target location of file on target. @param contents: Contents of file as a string. @param shutit_pexpect_child: See send() @param note: See send() @param user: Set ownership to this user (defaults to whoami) @param group: Set group to this user (defaults to first group in groups) @type path: string @type contents: string ### Response: def send_file(self, path, contents, shutit_pexpect_child=None, truncate=False, note=None, user=None, echo=False, group=None, loglevel=logging.INFO, encoding=None): """Sends the passed-in string as a file to the passed-in path on the target. @param path: Target location of file on target. @param contents: Contents of file as a string. @param shutit_pexpect_child: See send() @param note: See send() @param user: Set ownership to this user (defaults to whoami) @param group: Set group to this user (defaults to first group in groups) @type path: string @type contents: string """ shutit_global.shutit_global_object.yield_to_draw() shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child) return shutit_pexpect_session.send_file(path, contents, truncate=truncate, note=note, echo=echo, user=user, group=group, loglevel=loglevel, encoding=encoding)
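A hedged sketch of the call from inside a ShutIt module's build step; ``shutit`` stands for the ShutIt object passed to ``build()``, and the path and contents are placeholders:

# Sketch only: writes the string to /tmp/hello.txt on the target as root.
shutit.send_file(
    '/tmp/hello.txt',        # placeholder target path
    'hello from shutit\n',   # file contents as a string
    user='root',             # ownership, per the docstring
)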
def makePhe(segID, N, CA, C, O, geo): '''Creates a Phenylalanine residue''' ##R-Group CA_CB_length=geo.CA_CB_length C_CA_CB_angle=geo.C_CA_CB_angle N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle CB_CG_length=geo.CB_CG_length CA_CB_CG_angle=geo.CA_CB_CG_angle N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle CG_CD1_length=geo.CG_CD1_length CB_CG_CD1_angle=geo.CB_CG_CD1_angle CA_CB_CG_CD1_diangle=geo.CA_CB_CG_CD1_diangle CG_CD2_length=geo.CG_CD2_length CB_CG_CD2_angle=geo.CB_CG_CD2_angle CA_CB_CG_CD2_diangle= geo.CA_CB_CG_CD2_diangle CD1_CE1_length=geo.CD1_CE1_length CG_CD1_CE1_angle=geo.CG_CD1_CE1_angle CB_CG_CD1_CE1_diangle=geo.CB_CG_CD1_CE1_diangle CD2_CE2_length=geo.CD2_CE2_length CG_CD2_CE2_angle=geo.CG_CD2_CE2_angle CB_CG_CD2_CE2_diangle=geo.CB_CG_CD2_CE2_diangle CE1_CZ_length=geo.CE1_CZ_length CD1_CE1_CZ_angle=geo.CD1_CE1_CZ_angle CG_CD1_CE1_CZ_diangle=geo.CG_CD1_CE1_CZ_diangle carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle) CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C") carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle) CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C") carbon_d1= calculateCoordinates(CA, CB, CG, CG_CD1_length, CB_CG_CD1_angle, CA_CB_CG_CD1_diangle) CD1= Atom("CD1", carbon_d1, 0.0, 1.0, " ", " CD1", 0, "C") carbon_d2= calculateCoordinates(CA, CB, CG, CG_CD2_length, CB_CG_CD2_angle, CA_CB_CG_CD2_diangle) CD2= Atom("CD2", carbon_d2, 0.0, 1.0, " ", " CD2", 0, "C") carbon_e1= calculateCoordinates(CB, CG, CD1, CD1_CE1_length, CG_CD1_CE1_angle, CB_CG_CD1_CE1_diangle) CE1= Atom("CE1", carbon_e1, 0.0, 1.0, " ", " CE1", 0, "C") carbon_e2= calculateCoordinates(CB, CG, CD2, CD2_CE2_length, CG_CD2_CE2_angle, CB_CG_CD2_CE2_diangle) CE2= Atom("CE2", carbon_e2, 0.0, 1.0, " ", " CE2", 0, "C") carbon_z= calculateCoordinates(CG, CD1, CE1, CE1_CZ_length, CD1_CE1_CZ_angle, CG_CD1_CE1_CZ_diangle) CZ= Atom("CZ", carbon_z, 0.0, 1.0, " ", " CZ", 0, "C") ##Create Residue Data Structures res= Residue((' ', segID, ' '), "PHE", ' ') res.add(N) res.add(CA) res.add(C) res.add(O) res.add(CB) res.add(CG) res.add(CD1) res.add(CE1) res.add(CD2) res.add(CE2) res.add(CZ) return res
Creates a Phenylalanine residue
Below is the the instruction that describes the task: ### Input: Creates a Phenylalanine residue ### Response: def makePhe(segID, N, CA, C, O, geo): '''Creates a Phenylalanine residue''' ##R-Group CA_CB_length=geo.CA_CB_length C_CA_CB_angle=geo.C_CA_CB_angle N_C_CA_CB_diangle=geo.N_C_CA_CB_diangle CB_CG_length=geo.CB_CG_length CA_CB_CG_angle=geo.CA_CB_CG_angle N_CA_CB_CG_diangle=geo.N_CA_CB_CG_diangle CG_CD1_length=geo.CG_CD1_length CB_CG_CD1_angle=geo.CB_CG_CD1_angle CA_CB_CG_CD1_diangle=geo.CA_CB_CG_CD1_diangle CG_CD2_length=geo.CG_CD2_length CB_CG_CD2_angle=geo.CB_CG_CD2_angle CA_CB_CG_CD2_diangle= geo.CA_CB_CG_CD2_diangle CD1_CE1_length=geo.CD1_CE1_length CG_CD1_CE1_angle=geo.CG_CD1_CE1_angle CB_CG_CD1_CE1_diangle=geo.CB_CG_CD1_CE1_diangle CD2_CE2_length=geo.CD2_CE2_length CG_CD2_CE2_angle=geo.CG_CD2_CE2_angle CB_CG_CD2_CE2_diangle=geo.CB_CG_CD2_CE2_diangle CE1_CZ_length=geo.CE1_CZ_length CD1_CE1_CZ_angle=geo.CD1_CE1_CZ_angle CG_CD1_CE1_CZ_diangle=geo.CG_CD1_CE1_CZ_diangle carbon_b= calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle, N_C_CA_CB_diangle) CB= Atom("CB", carbon_b, 0.0 , 1.0, " "," CB", 0,"C") carbon_g= calculateCoordinates(N, CA, CB, CB_CG_length, CA_CB_CG_angle, N_CA_CB_CG_diangle) CG= Atom("CG", carbon_g, 0.0, 1.0, " ", " CG", 0, "C") carbon_d1= calculateCoordinates(CA, CB, CG, CG_CD1_length, CB_CG_CD1_angle, CA_CB_CG_CD1_diangle) CD1= Atom("CD1", carbon_d1, 0.0, 1.0, " ", " CD1", 0, "C") carbon_d2= calculateCoordinates(CA, CB, CG, CG_CD2_length, CB_CG_CD2_angle, CA_CB_CG_CD2_diangle) CD2= Atom("CD2", carbon_d2, 0.0, 1.0, " ", " CD2", 0, "C") carbon_e1= calculateCoordinates(CB, CG, CD1, CD1_CE1_length, CG_CD1_CE1_angle, CB_CG_CD1_CE1_diangle) CE1= Atom("CE1", carbon_e1, 0.0, 1.0, " ", " CE1", 0, "C") carbon_e2= calculateCoordinates(CB, CG, CD2, CD2_CE2_length, CG_CD2_CE2_angle, CB_CG_CD2_CE2_diangle) CE2= Atom("CE2", carbon_e2, 0.0, 1.0, " ", " CE2", 0, "C") carbon_z= calculateCoordinates(CG, CD1, CE1, CE1_CZ_length, CD1_CE1_CZ_angle, CG_CD1_CE1_CZ_diangle) CZ= Atom("CZ", carbon_z, 0.0, 1.0, " ", " CZ", 0, "C") ##Create Residue Data Structures res= Residue((' ', segID, ' '), "PHE", ' ') res.add(N) res.add(CA) res.add(C) res.add(O) res.add(CB) res.add(CG) res.add(CD1) res.add(CE1) res.add(CD2) res.add(CE2) res.add(CZ) return res
def write(self, file_name, delim=',', sep='\t'): """Writes an undirected hypergraph to a file, where nodes are represented as strings. Each column is separated by "sep", and the individual nodes are delimited by "delim". A header line is written first, and columns should be of the format: node1[delim]..nodeN[sep]weight As a concrete example, an arbitrary line with delim=',' and sep=' ' (4 spaces) may look like: :: x1,x2,x3,x5 12 which defines a hyperedge of weight 12 from a node set containing nodes "x1", "x2", "x3", and "x5". """ out_file = open(file_name, 'w') # write first header line out_file.write("nodes" + sep + "weight\n") for hyperedge_id in self.get_hyperedge_id_set(): line = "" # Write each node to the line, separated by delim for node in self.get_hyperedge_nodes(hyperedge_id): line += node + delim # Remove last (extra) delim line = line[:-1] # Write the weight to the line and end the line line += sep + str(self.get_hyperedge_weight(hyperedge_id)) + "\n" out_file.write(line) out_file.close()
Writes an undirected hypergraph to a file, where nodes are represented as strings. Each column is separated by "sep", and the individual nodes are delimited by "delim". A header line is written first, and columns should be of the format: node1[delim]..nodeN[sep]weight As a concrete example, an arbitrary line with delim=',' and sep=' ' (4 spaces) may look like: :: x1,x2,x3,x5 12 which defines a hyperedge of weight 12 from a node set containing nodes "x1", "x2", "x3", and "x5".
Below is the instruction that describes the task: ### Input: Writes an undirected hypergraph to a file, where nodes are represented as strings. Each column is separated by "sep", and the individual nodes are delimited by "delim". A header line is written first, and columns should be of the format: node1[delim]..nodeN[sep]weight As a concrete example, an arbitrary line with delim=',' and sep=' ' (4 spaces) may look like: :: x1,x2,x3,x5 12 which defines a hyperedge of weight 12 from a node set containing nodes "x1", "x2", "x3", and "x5". ### Response: def write(self, file_name, delim=',', sep='\t'): """Writes an undirected hypergraph to a file, where nodes are represented as strings. Each column is separated by "sep", and the individual nodes are delimited by "delim". A header line is written first, and columns should be of the format: node1[delim]..nodeN[sep]weight As a concrete example, an arbitrary line with delim=',' and sep=' ' (4 spaces) may look like: :: x1,x2,x3,x5 12 which defines a hyperedge of weight 12 from a node set containing nodes "x1", "x2", "x3", and "x5". """ out_file = open(file_name, 'w') # write first header line out_file.write("nodes" + sep + "weight\n") for hyperedge_id in self.get_hyperedge_id_set(): line = "" # Write each node to the line, separated by delim for node in self.get_hyperedge_nodes(hyperedge_id): line += node + delim # Remove last (extra) delim line = line[:-1] # Write the weight to the line and end the line line += sep + str(self.get_hyperedge_weight(hyperedge_id)) + "\n" out_file.write(line) out_file.close()
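A minimal standalone sketch of the file layout this writer produces, using a plain dict in place of the hypergraph object (the variable names and sample hyperedges below are made up for illustration):

# Two hypothetical hyperedges, mapped to (node list, weight) pairs.
hyperedges = {"e1": (["x1", "x2", "x3", "x5"], 12), "e2": (["x2", "x4"], 3)}
with open("hypergraph.txt", "w") as out_file:
    out_file.write("nodes" + "\t" + "weight\n")  # header line, as in write()
    for nodes, weight in hyperedges.values():
        # delim joins the nodes; sep separates them from the weight
        out_file.write(",".join(nodes) + "\t" + str(weight) + "\n")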
def container(self, name, length, type, *parameters): """Define a container with given length. This is a convenience method creating a `Struct` with `length` containing fields defined in `type`. """ self.new_struct('Container', name, 'length=%s' % length) BuiltIn().run_keyword(type, *parameters) self.end_struct()
Define a container with given length. This is a convenience method creating a `Struct` with `length` containing fields defined in `type`.
Below is the instruction that describes the task: ### Input: Define a container with given length. This is a convenience method creating a `Struct` with `length` containing fields defined in `type`. ### Response: def container(self, name, length, type, *parameters): """Define a container with given length. This is a convenience method creating a `Struct` with `length` containing fields defined in `type`. """ self.new_struct('Container', name, 'length=%s' % length) BuiltIn().run_keyword(type, *parameters) self.end_struct()
def read_folder(directory): """Read text files in a directory and return them as an array. Args: directory: where the text files are Returns: Array of text """ res = [] for filename in os.listdir(directory): with io.open(os.path.join(directory, filename), encoding="utf-8") as f: content = f.read() res.append(content) return res
Read text files in a directory and return them as an array. Args: directory: where the text files are Returns: Array of text
Below is the instruction that describes the task: ### Input: Read text files in a directory and return them as an array. Args: directory: where the text files are Returns: Array of text ### Response: def read_folder(directory): """Read text files in a directory and return them as an array. Args: directory: where the text files are Returns: Array of text """ res = [] for filename in os.listdir(directory): with io.open(os.path.join(directory, filename), encoding="utf-8") as f: content = f.read() res.append(content) return res
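A hedged usage sketch for read_folder, assuming Python 3 and that the function above is in scope; the directory and file contents are throwaway examples:

import io, os, tempfile

tmp = tempfile.mkdtemp()
for name, text in [("a.txt", "hello"), ("b.txt", "world")]:
    with io.open(os.path.join(tmp, name), "w", encoding="utf-8") as f:
        f.write(text)
print(read_folder(tmp))  # e.g. ['hello', 'world'] (listing order is OS-dependent)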
def not26(func): """Function decorator for methods not implemented in Python 2.6.""" @wraps(func) def errfunc(*args, **kwargs): raise NotImplementedError if hexversion < 0x02070000: return errfunc else: return func
Function decorator for methods not implemented in Python 2.6.
Below is the instruction that describes the task: ### Input: Function decorator for methods not implemented in Python 2.6. ### Response: def not26(func): """Function decorator for methods not implemented in Python 2.6.""" @wraps(func) def errfunc(*args, **kwargs): raise NotImplementedError if hexversion < 0x02070000: return errfunc else: return func
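A short sketch of the decorator in use; it assumes not26 above is in scope, along with the functools.wraps and sys.hexversion imports its body relies on:

from functools import wraps   # needed by not26 itself
from sys import hexversion    # needed by not26 itself

@not26
def view_keys(d):
    # dict.viewkeys() only exists from Python 2.7 on; under 2.6 the
    # decorator swaps this body for one raising NotImplementedError.
    return d.viewkeys()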
def cancel(self, job_ids): """ Cancels the jobs specified by a list of job ids Args: job_ids : [<job_id> ...] Returns : [True/False...] : If the cancel operation fails the entire list will be False. """ for job in job_ids: logger.debug("Terminating job/proc_id : {0}".format(job)) # Here we are assuming that for local, the job_ids are the process id's self._delete_deployment(job) self.resources[job]['status'] = 'CANCELLED' rets = [True for i in job_ids] return rets
Cancels the jobs specified by a list of job ids Args: job_ids : [<job_id> ...] Returns : [True/False...] : If the cancel operation fails the entire list will be False.
Below is the instruction that describes the task: ### Input: Cancels the jobs specified by a list of job ids Args: job_ids : [<job_id> ...] Returns : [True/False...] : If the cancel operation fails the entire list will be False. ### Response: def cancel(self, job_ids): """ Cancels the jobs specified by a list of job ids Args: job_ids : [<job_id> ...] Returns : [True/False...] : If the cancel operation fails the entire list will be False. """ for job in job_ids: logger.debug("Terminating job/proc_id : {0}".format(job)) # Here we are assuming that for local, the job_ids are the process id's self._delete_deployment(job) self.resources[job]['status'] = 'CANCELLED' rets = [True for i in job_ids] return rets
def backtrace(node): """ Backtrace according to the parent records and return the path. (including both start and end nodes) """ path = [(node.x, node.y)] while node.parent: node = node.parent path.append((node.x, node.y)) path.reverse() return path
Backtrace according to the parent records and return the path. (including both start and end nodes)
Below is the instruction that describes the task: ### Input: Backtrace according to the parent records and return the path. (including both start and end nodes) ### Response: def backtrace(node): """ Backtrace according to the parent records and return the path. (including both start and end nodes) """ path = [(node.x, node.y)] while node.parent: node = node.parent path.append((node.x, node.y)) path.reverse() return path
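A hedged usage sketch; the Node class here is a minimal stand-in (any object with x, y and parent attributes works with backtrace above):

class Node:
    def __init__(self, x, y, parent=None):
        self.x, self.y, self.parent = x, y, parent

start = Node(0, 0)
mid = Node(1, 0, parent=start)
goal = Node(1, 1, parent=mid)
print(backtrace(goal))  # [(0, 0), (1, 0), (1, 1)]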
def get_volume(self, controller, zone): """ Gets the volume level, which needs to be doubled to map it to the 0..100 range; it is located at a 2-byte offset. """ volume_level = self.get_zone_info(controller, zone, 2) if volume_level is not None: volume_level *= 2 return volume_level
Gets the volume level, which needs to be doubled to map it to the 0..100 range; it is located at a 2-byte offset.
Below is the instruction that describes the task: ### Input: Gets the volume level, which needs to be doubled to map it to the 0..100 range; it is located at a 2-byte offset. ### Response: def get_volume(self, controller, zone): """ Gets the volume level, which needs to be doubled to map it to the 0..100 range; it is located at a 2-byte offset. """ volume_level = self.get_zone_info(controller, zone, 2) if volume_level is not None: volume_level *= 2 return volume_level
def send_file_from_directory(filename, directory, app=None): """Helper to add static rules, like in `abilian.app`.app. Example use:: app.add_url_rule( app.static_url_path + '/abilian/<path:filename>', endpoint='abilian_static', view_func=partial(send_file_from_directory, directory='/path/to/static/files/dir')) """ if app is None: app = current_app cache_timeout = app.get_send_file_max_age(filename) return send_from_directory(directory, filename, cache_timeout=cache_timeout)
Helper to add static rules, like in `abilian.app`.app. Example use:: app.add_url_rule( app.static_url_path + '/abilian/<path:filename>', endpoint='abilian_static', view_func=partial(send_file_from_directory, directory='/path/to/static/files/dir'))
Below is the instruction that describes the task: ### Input: Helper to add static rules, like in `abilian.app`.app. Example use:: app.add_url_rule( app.static_url_path + '/abilian/<path:filename>', endpoint='abilian_static', view_func=partial(send_file_from_directory, directory='/path/to/static/files/dir')) ### Response: def send_file_from_directory(filename, directory, app=None): """Helper to add static rules, like in `abilian.app`.app. Example use:: app.add_url_rule( app.static_url_path + '/abilian/<path:filename>', endpoint='abilian_static', view_func=partial(send_file_from_directory, directory='/path/to/static/files/dir')) """ if app is None: app = current_app cache_timeout = app.get_send_file_max_age(filename) return send_from_directory(directory, filename, cache_timeout=cache_timeout)
def update_internal_boundary_x_y (self, solution_array): """update the inner boundary with the same send/recv pattern as the MPIPartitioner""" nsd_ = self.nsd dtype = solution_array.dtype if nsd_!=len(self.in_lower_buffers) | nsd_!=len(self.out_lower_buffers): print("Buffers for communicating with lower neighbors not ready") return if nsd_!=len(self.in_upper_buffers) | nsd_!=len(self.out_upper_buffers): print("Buffers for communicating with upper neighbors not ready") return loc_nx = self.subd_hi_ix[0]-self.subd_lo_ix[0] loc_ny = self.subd_hi_ix[1]-self.subd_lo_ix[1] lower_x_neigh = self.lower_neighbors[0] upper_x_neigh = self.upper_neighbors[0] lower_y_neigh = self.lower_neighbors[1] upper_y_neigh = self.upper_neighbors[1] trackers = [] flags = dict(copy=False, track=False) # communicate in the x-direction first if lower_x_neigh>-1: if self.slice_copy: self.out_lower_buffers[0] = ascontiguousarray(solution_array[1,:]) else: for i in xrange(0,loc_ny+1): self.out_lower_buffers[0][i] = solution_array[1,i] t = self.comm.west.send(self.out_lower_buffers[0], **flags) trackers.append(t) if upper_x_neigh>-1: msg = self.comm.east.recv(copy=False) self.in_upper_buffers[0] = frombuffer(msg, dtype=dtype) if self.slice_copy: solution_array[loc_nx,:] = self.in_upper_buffers[0] self.out_upper_buffers[0] = ascontiguousarray(solution_array[loc_nx-1,:]) else: for i in xrange(0,loc_ny+1): solution_array[loc_nx,i] = self.in_upper_buffers[0][i] self.out_upper_buffers[0][i] = solution_array[loc_nx-1,i] t = self.comm.east.send(self.out_upper_buffers[0], **flags) trackers.append(t) if lower_x_neigh>-1: msg = self.comm.west.recv(copy=False) self.in_lower_buffers[0] = frombuffer(msg, dtype=dtype) if self.slice_copy: solution_array[0,:] = self.in_lower_buffers[0] else: for i in xrange(0,loc_ny+1): solution_array[0,i] = self.in_lower_buffers[0][i] # communicate in the y-direction afterwards if lower_y_neigh>-1: if self.slice_copy: self.out_lower_buffers[1] = ascontiguousarray(solution_array[:,1]) else: for i in xrange(0,loc_nx+1): self.out_lower_buffers[1][i] = solution_array[i,1] t = self.comm.south.send(self.out_lower_buffers[1], **flags) trackers.append(t) if upper_y_neigh>-1: msg = self.comm.north.recv(copy=False) self.in_upper_buffers[1] = frombuffer(msg, dtype=dtype) if self.slice_copy: solution_array[:,loc_ny] = self.in_upper_buffers[1] self.out_upper_buffers[1] = ascontiguousarray(solution_array[:,loc_ny-1]) else: for i in xrange(0,loc_nx+1): solution_array[i,loc_ny] = self.in_upper_buffers[1][i] self.out_upper_buffers[1][i] = solution_array[i,loc_ny-1] t = self.comm.north.send(self.out_upper_buffers[1], **flags) trackers.append(t) if lower_y_neigh>-1: msg = self.comm.south.recv(copy=False) self.in_lower_buffers[1] = frombuffer(msg, dtype=dtype) if self.slice_copy: solution_array[:,0] = self.in_lower_buffers[1] else: for i in xrange(0,loc_nx+1): solution_array[i,0] = self.in_lower_buffers[1][i] # wait for sends to complete: if flags['track']: for t in trackers: t.wait()
update the inner boundary with the same send/recv pattern as the MPIPartitioner
Below is the the instruction that describes the task: ### Input: update the inner boundary with the same send/recv pattern as the MPIPartitioner ### Response: def update_internal_boundary_x_y (self, solution_array): """update the inner boundary with the same send/recv pattern as the MPIPartitioner""" nsd_ = self.nsd dtype = solution_array.dtype if nsd_!=len(self.in_lower_buffers) | nsd_!=len(self.out_lower_buffers): print("Buffers for communicating with lower neighbors not ready") return if nsd_!=len(self.in_upper_buffers) | nsd_!=len(self.out_upper_buffers): print("Buffers for communicating with upper neighbors not ready") return loc_nx = self.subd_hi_ix[0]-self.subd_lo_ix[0] loc_ny = self.subd_hi_ix[1]-self.subd_lo_ix[1] lower_x_neigh = self.lower_neighbors[0] upper_x_neigh = self.upper_neighbors[0] lower_y_neigh = self.lower_neighbors[1] upper_y_neigh = self.upper_neighbors[1] trackers = [] flags = dict(copy=False, track=False) # communicate in the x-direction first if lower_x_neigh>-1: if self.slice_copy: self.out_lower_buffers[0] = ascontiguousarray(solution_array[1,:]) else: for i in xrange(0,loc_ny+1): self.out_lower_buffers[0][i] = solution_array[1,i] t = self.comm.west.send(self.out_lower_buffers[0], **flags) trackers.append(t) if upper_x_neigh>-1: msg = self.comm.east.recv(copy=False) self.in_upper_buffers[0] = frombuffer(msg, dtype=dtype) if self.slice_copy: solution_array[loc_nx,:] = self.in_upper_buffers[0] self.out_upper_buffers[0] = ascontiguousarray(solution_array[loc_nx-1,:]) else: for i in xrange(0,loc_ny+1): solution_array[loc_nx,i] = self.in_upper_buffers[0][i] self.out_upper_buffers[0][i] = solution_array[loc_nx-1,i] t = self.comm.east.send(self.out_upper_buffers[0], **flags) trackers.append(t) if lower_x_neigh>-1: msg = self.comm.west.recv(copy=False) self.in_lower_buffers[0] = frombuffer(msg, dtype=dtype) if self.slice_copy: solution_array[0,:] = self.in_lower_buffers[0] else: for i in xrange(0,loc_ny+1): solution_array[0,i] = self.in_lower_buffers[0][i] # communicate in the y-direction afterwards if lower_y_neigh>-1: if self.slice_copy: self.out_lower_buffers[1] = ascontiguousarray(solution_array[:,1]) else: for i in xrange(0,loc_nx+1): self.out_lower_buffers[1][i] = solution_array[i,1] t = self.comm.south.send(self.out_lower_buffers[1], **flags) trackers.append(t) if upper_y_neigh>-1: msg = self.comm.north.recv(copy=False) self.in_upper_buffers[1] = frombuffer(msg, dtype=dtype) if self.slice_copy: solution_array[:,loc_ny] = self.in_upper_buffers[1] self.out_upper_buffers[1] = ascontiguousarray(solution_array[:,loc_ny-1]) else: for i in xrange(0,loc_nx+1): solution_array[i,loc_ny] = self.in_upper_buffers[1][i] self.out_upper_buffers[1][i] = solution_array[i,loc_ny-1] t = self.comm.north.send(self.out_upper_buffers[1], **flags) trackers.append(t) if lower_y_neigh>-1: msg = self.comm.south.recv(copy=False) self.in_lower_buffers[1] = frombuffer(msg, dtype=dtype) if self.slice_copy: solution_array[:,0] = self.in_lower_buffers[1] else: for i in xrange(0,loc_nx+1): solution_array[i,0] = self.in_lower_buffers[1][i] # wait for sends to complete: if flags['track']: for t in trackers: t.wait()
def top_parents(papers, topn=20, verbose=False): """ Returns a list of :class:`.Paper` that cite the topn most cited papers. Parameters ---------- papers : list A list of :class:`.Paper` objects. topn : int or float {0.-1.} Number (int) or percentage (float) of top-cited papers. verbose : bool If True, prints status messages. Returns ------- papers : list A list of :class:`.Paper` objects. top : list A list of 'ayjid' keys for the topn most cited papers. counts : dict Citation counts for all papers cited by papers. """ if verbose: print "Getting parents of top "+unicode(topn)+" most cited papers." top, counts = top_cited(papers, topn=topn, verbose=verbose) papers = [ P for P in papers if P['citations'] is not None ] parents = [ P for P in papers if len( set([ c['ayjid'] for c in P['citations'] ]) & set(top) ) > 0 ] if verbose: print "Found " + unicode(len(parents)) + " parents." return parents, top, counts
Returns a list of :class:`.Paper` that cite the topn most cited papers. Parameters ---------- papers : list A list of :class:`.Paper` objects. topn : int or float {0.-1.} Number (int) or percentage (float) of top-cited papers. verbose : bool If True, prints status messages. Returns ------- papers : list A list of :class:`.Paper` objects. top : list A list of 'ayjid' keys for the topn most cited papers. counts : dict Citation counts for all papers cited by papers.
Below is the the instruction that describes the task: ### Input: Returns a list of :class:`.Paper` that cite the topn most cited papers. Parameters ---------- papers : list A list of :class:`.Paper` objects. topn : int or float {0.-1.} Number (int) or percentage (float) of top-cited papers. verbose : bool If True, prints status messages. Returns ------- papers : list A list of :class:`.Paper` objects. top : list A list of 'ayjid' keys for the topn most cited papers. counts : dict Citation counts for all papers cited by papers. ### Response: def top_parents(papers, topn=20, verbose=False): """ Returns a list of :class:`.Paper` that cite the topn most cited papers. Parameters ---------- papers : list A list of :class:`.Paper` objects. topn : int or float {0.-1.} Number (int) or percentage (float) of top-cited papers. verbose : bool If True, prints status messages. Returns ------- papers : list A list of :class:`.Paper` objects. top : list A list of 'ayjid' keys for the topn most cited papers. counts : dict Citation counts for all papers cited by papers. """ if verbose: print "Getting parents of top "+unicode(topn)+" most cited papers." top, counts = top_cited(papers, topn=topn, verbose=verbose) papers = [ P for P in papers if P['citations'] is not None ] parents = [ P for P in papers if len( set([ c['ayjid'] for c in P['citations'] ]) & set(top) ) > 0 ] if verbose: print "Found " + unicode(len(parents)) + " parents." return parents, top, counts
def topng(images, path, prefix="image", overwrite=False, credentials=None): """ Write out PNG files for 2d image data. See also -------- thunder.data.images.topng """ value_shape = images.value_shape if not len(value_shape) in [2, 3]: raise ValueError("Only 2D or 3D images can be exported to png, " "images are %d-dimensional." % len(value_shape)) from scipy.misc import imsave from io import BytesIO from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix+"-"+"%05d.png" % int(key) bytebuf = BytesIO() imsave(bytebuf, img, format='PNG') return fname, bytebuf.getvalue() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x)))
Write out PNG files for 2d image data. See also -------- thunder.data.images.topng
Below is the instruction that describes the task: ### Input: Write out PNG files for 2d image data. See also -------- thunder.data.images.topng ### Response: def topng(images, path, prefix="image", overwrite=False, credentials=None): """ Write out PNG files for 2d image data. See also -------- thunder.data.images.topng """ value_shape = images.value_shape if not len(value_shape) in [2, 3]: raise ValueError("Only 2D or 3D images can be exported to png, " "images are %d-dimensional." % len(value_shape)) from scipy.misc import imsave from io import BytesIO from thunder.writers import get_parallel_writer def tobuffer(kv): key, img = kv fname = prefix+"-"+"%05d.png" % int(key) bytebuf = BytesIO() imsave(bytebuf, img, format='PNG') return fname, bytebuf.getvalue() writer = get_parallel_writer(path)(path, overwrite=overwrite, credentials=credentials) images.foreach(lambda x: writer.write(tobuffer(x)))
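A standalone sketch of the key-to-filename and in-memory PNG pattern used by tobuffer above; Pillow stands in for scipy.misc.imsave, which newer SciPy releases removed, and the array is random test data:

from io import BytesIO
import numpy as np
from PIL import Image

key, img = 3, (np.random.rand(32, 32) * 255).astype(np.uint8)
fname = "image-%05d.png" % int(key)           # image-00003.png
buf = BytesIO()
Image.fromarray(img).save(buf, format="PNG")  # encode to PNG in memory
png_bytes = buf.getvalue()                    # what the writer would persist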
def startJVM(jvm=None, *args, **kwargs): """ Starts a Java Virtual Machine. Without options it will start the JVM with the default classpath and jvm. The default classpath will be determined by jpype.getClassPath(). The default JVM is determined by jpype.getDefaultJVMPath(). Args: jvm (str): Path to the jvm library file (libjvm.so, jvm.dll, ...) default=None will use jpype.getDefaultJVMPath() *args (str[]): Arguments to give to the JVM classpath (Optional[string]): set the classpath for the jvm. This will override any classpath supplied in the arguments list. ignoreUnrecognized (Optional[bool]): option to jvm to ignore invalid jvm arguments. (Default False) """ if jvm is None: jvm = get_default_jvm_path() # Check to see that the user has not set the classpath # Otherwise use the default if not specified if not _hasClassPath(args) and 'classpath' not in kwargs: kwargs['classpath']=_classpath.getClassPath() print("Use default classpath") if 'ignoreUnrecognized' not in kwargs: kwargs['ignoreUnrecognized']=False # Classpath handling args = list(args) if 'classpath' in kwargs and kwargs['classpath']!=None: args.append('-Djava.class.path=%s'%(kwargs['classpath'])) print("Set classpath") _jpype.startup(jvm, tuple(args), kwargs['ignoreUnrecognized']) _initialize() # start the reference daemon thread if _usePythonThreadForDaemon: _refdaemon.startPython() else: _refdaemon.startJava()
Starts a Java Virtual Machine. Without options it will start the JVM with the default classpath and jvm. The default classpath will be determined by jpype.getClassPath(). The default JVM is determined by jpype.getDefaultJVMPath(). Args: jvm (str): Path to the jvm library file (libjvm.so, jvm.dll, ...) default=None will use jpype.getDefaultJVMPath() *args (str[]): Arguments to give to the JVM classpath (Optional[string]): set the classpath for the jvm. This will override any classpath supplied in the arguments list. ignoreUnrecognized (Optional[bool]): option to jvm to ignore invalid jvm arguments. (Default False)
Below is the instruction that describes the task: ### Input: Starts a Java Virtual Machine. Without options it will start the JVM with the default classpath and jvm. The default classpath will be determined by jpype.getClassPath(). The default JVM is determined by jpype.getDefaultJVMPath(). Args: jvm (str): Path to the jvm library file (libjvm.so, jvm.dll, ...) default=None will use jpype.getDefaultJVMPath() *args (str[]): Arguments to give to the JVM classpath (Optional[string]): set the classpath for the jvm. This will override any classpath supplied in the arguments list. ignoreUnrecognized (Optional[bool]): option to jvm to ignore invalid jvm arguments. (Default False) ### Response: def startJVM(jvm=None, *args, **kwargs): """ Starts a Java Virtual Machine. Without options it will start the JVM with the default classpath and jvm. The default classpath will be determined by jpype.getClassPath(). The default JVM is determined by jpype.getDefaultJVMPath(). Args: jvm (str): Path to the jvm library file (libjvm.so, jvm.dll, ...) default=None will use jpype.getDefaultJVMPath() *args (str[]): Arguments to give to the JVM classpath (Optional[string]): set the classpath for the jvm. This will override any classpath supplied in the arguments list. ignoreUnrecognized (Optional[bool]): option to jvm to ignore invalid jvm arguments. (Default False) """ if jvm is None: jvm = get_default_jvm_path() # Check to see that the user has not set the classpath # Otherwise use the default if not specified if not _hasClassPath(args) and 'classpath' not in kwargs: kwargs['classpath']=_classpath.getClassPath() print("Use default classpath") if 'ignoreUnrecognized' not in kwargs: kwargs['ignoreUnrecognized']=False # Classpath handling args = list(args) if 'classpath' in kwargs and kwargs['classpath']!=None: args.append('-Djava.class.path=%s'%(kwargs['classpath'])) print("Set classpath") _jpype.startup(jvm, tuple(args), kwargs['ignoreUnrecognized']) _initialize() # start the reference daemon thread if _usePythonThreadForDaemon: _refdaemon.startPython() else: _refdaemon.startJava()
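A hedged usage sketch mirroring the signature shown above; the jar path is a placeholder and a local JVM installation is assumed:

import jpype

# jvm=None lets get_default_jvm_path() locate the JVM; the classpath
# keyword becomes a -Djava.class.path=... argument, as in the body above.
jpype.startJVM(classpath="/path/to/jars/app.jar", ignoreUnrecognized=True)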
def showMessage(self, message, *args): """ Public method to show a message in the bottom part of the splashscreen. @param message message to be shown (string or QString) """ QSplashScreen.showMessage( self, message, Qt.AlignBottom | Qt.AlignRight | Qt.AlignAbsolute, QColor(Qt.white))
Public method to show a message in the bottom part of the splashscreen. @param message message to be shown (string or QString)
Below is the instruction that describes the task: ### Input: Public method to show a message in the bottom part of the splashscreen. @param message message to be shown (string or QString) ### Response: def showMessage(self, message, *args): """ Public method to show a message in the bottom part of the splashscreen. @param message message to be shown (string or QString) """ QSplashScreen.showMessage( self, message, Qt.AlignBottom | Qt.AlignRight | Qt.AlignAbsolute, QColor(Qt.white))
def make_sshable(c): """ Set up passwordless SSH keypair & authorized_hosts access to localhost. """ user = c.travis.sudo.user home = "~{0}".format(user) # Run sudo() as the new sudo user; means less chown'ing, etc. c.config.sudo.user = user ssh_dir = "{0}/.ssh".format(home) # TODO: worth wrapping in 'sh -c' and using '&&' instead of doing this? for cmd in ("mkdir {0}", "chmod 0700 {0}"): c.sudo(cmd.format(ssh_dir, user)) c.sudo('ssh-keygen -f {0}/id_rsa -N ""'.format(ssh_dir)) c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir))
Set up passwordless SSH keypair & authorized_hosts access to localhost.
Below is the instruction that describes the task: ### Input: Set up passwordless SSH keypair & authorized_hosts access to localhost. ### Response: def make_sshable(c): """ Set up passwordless SSH keypair & authorized_hosts access to localhost. """ user = c.travis.sudo.user home = "~{0}".format(user) # Run sudo() as the new sudo user; means less chown'ing, etc. c.config.sudo.user = user ssh_dir = "{0}/.ssh".format(home) # TODO: worth wrapping in 'sh -c' and using '&&' instead of doing this? for cmd in ("mkdir {0}", "chmod 0700 {0}"): c.sudo(cmd.format(ssh_dir, user)) c.sudo('ssh-keygen -f {0}/id_rsa -N ""'.format(ssh_dir)) c.sudo("cp {0}/{{id_rsa.pub,authorized_keys}}".format(ssh_dir))
def calculate_temperature_equivalent(temperatures): """ Calculates the temperature equivalent from a series of average daily temperatures according to the formula: 0.6 * tempDay0 + 0.3 * tempDay-1 + 0.1 * tempDay-2 Parameters ---------- temperatures : Pandas Series Returns ------- Pandas Series """ ret = 0.6*temperatures + 0.3*temperatures.shift(1) + 0.1*temperatures.shift(2) ret.name = 'temp_equivalent' return ret
Calculates the temperature equivalent from a series of average daily temperatures according to the formula: 0.6 * tempDay0 + 0.3 * tempDay-1 + 0.1 * tempDay-2 Parameters ---------- temperatures : Pandas Series Returns ------- Pandas Series
Below is the instruction that describes the task: ### Input: Calculates the temperature equivalent from a series of average daily temperatures according to the formula: 0.6 * tempDay0 + 0.3 * tempDay-1 + 0.1 * tempDay-2 Parameters ---------- temperatures : Pandas Series Returns ------- Pandas Series ### Response: def calculate_temperature_equivalent(temperatures): """ Calculates the temperature equivalent from a series of average daily temperatures according to the formula: 0.6 * tempDay0 + 0.3 * tempDay-1 + 0.1 * tempDay-2 Parameters ---------- temperatures : Pandas Series Returns ------- Pandas Series """ ret = 0.6*temperatures + 0.3*temperatures.shift(1) + 0.1*temperatures.shift(2) ret.name = 'temp_equivalent' return ret
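A hedged example of the weighting in action (the temperature values are made up); the first two entries come out NaN because shift(1) and shift(2) have no history yet:

import pandas as pd

temps = pd.Series([4.0, 6.0, 5.0, 3.0],
                  index=pd.date_range("2024-01-01", periods=4))
print(calculate_temperature_equivalent(temps))
# 2024-01-03: 0.6*5.0 + 0.3*6.0 + 0.1*4.0 = 5.2, and so on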
def configure(screen_name=None, config_file=None, app=None, **kwargs): """ Set up a config dictionary using a bots.yaml config file and optional keyword args. Args: screen_name (str): screen_name of user to search for in config file config_file (str): Path to read for the config file app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}. default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS. default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES. """ # Use passed config file, or look for it in the default path. # Super-optionally, accept a different place to look for the file dirs = kwargs.pop('default_directories', None) bases = kwargs.pop('default_bases', None) file_config = {} if config_file is not False: config_file = find_file(config_file, dirs, bases) file_config = parse(config_file) # config and keys dicts # Pull non-authentication settings from the file. # Kwargs, user, app, and general settings are included, in that order of preference # Exclude apps and users sections from config config = {k: v for k, v in file_config.items() if k not in ('apps', 'users')} user_conf = file_config.get('users', {}).get(screen_name, {}) app = app or user_conf.get('app') app_conf = file_config.get('apps', {}).get(app, {}) # Pull user and app data from the file config.update(app_conf) config.update(user_conf) # kwargs take precendence over config file config.update({k: v for k, v in kwargs.items() if v is not None}) return config
Set up a config dictionary using a bots.yaml config file and optional keyword args. Args: screen_name (str): screen_name of user to search for in config file config_file (str): Path to read for the config file app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}. default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS. default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES.
Below is the the instruction that describes the task: ### Input: Set up a config dictionary using a bots.yaml config file and optional keyword args. Args: screen_name (str): screen_name of user to search for in config file config_file (str): Path to read for the config file app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}. default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS. default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES. ### Response: def configure(screen_name=None, config_file=None, app=None, **kwargs): """ Set up a config dictionary using a bots.yaml config file and optional keyword args. Args: screen_name (str): screen_name of user to search for in config file config_file (str): Path to read for the config file app (str): Name of the app to look for in the config file. Defaults to the one set in users.{screen_name}. default_directories (str): Directories to read for the bots.yaml/json file. Defaults to CONFIG_DIRS. default_bases (str): File names to look for in the directories. Defaults to CONFIG_BASES. """ # Use passed config file, or look for it in the default path. # Super-optionally, accept a different place to look for the file dirs = kwargs.pop('default_directories', None) bases = kwargs.pop('default_bases', None) file_config = {} if config_file is not False: config_file = find_file(config_file, dirs, bases) file_config = parse(config_file) # config and keys dicts # Pull non-authentication settings from the file. # Kwargs, user, app, and general settings are included, in that order of preference # Exclude apps and users sections from config config = {k: v for k, v in file_config.items() if k not in ('apps', 'users')} user_conf = file_config.get('users', {}).get(screen_name, {}) app = app or user_conf.get('app') app_conf = file_config.get('apps', {}).get(app, {}) # Pull user and app data from the file config.update(app_conf) config.update(user_conf) # kwargs take precendence over config file config.update({k: v for k, v in kwargs.items() if v is not None}) return config
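A standalone sketch of the precedence rule configure implements, with made-up settings: general file settings are overridden by the app section, then the user section, then non-None keyword arguments:

general = {"timeout": 10, "lang": "en"}
app_conf = {"timeout": 20}
user_conf = {"lang": "de"}
kwargs = {"timeout": None, "dry_run": True}  # None values are ignored

config = dict(general)
config.update(app_conf)
config.update(user_conf)
config.update({k: v for k, v in kwargs.items() if v is not None})
print(config)  # {'timeout': 20, 'lang': 'de', 'dry_run': True}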
def Tm(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[]): r'''This function handles the retrieval of a chemical's melting point. Lookup is based on CASRNs. Will automatically select a data source to use if no Method is provided; returns None if the data is not available. Prefered sources are 'Open Notebook Melting Points', with backup sources 'CRC Physical Constants, organic' for organic chemicals, and 'CRC Physical Constants, inorganic' for inorganic chemicals. Function has data for approximately 14000 chemicals. Parameters ---------- CASRN : string CASRN [-] Returns ------- Tm : float Melting temperature, [K] methods : list, only returned if AvailableMethods == True List of methods which can be used to obtain Tm with the given inputs Other Parameters ---------------- Method : string, optional A string for the method name to use, as defined by constants in Tm_methods AvailableMethods : bool, optional If True, function will determine which methods can be used to obtain Tm for the desired chemical, and will return methods instead of Tm IgnoreMethods : list, optional A list of methods to ignore in obtaining the full list of methods Notes ----- A total of three sources are available for this function. They are: * 'OPEN_NTBKM, a compillation of data on organics as published in [1]_ as Open Notebook Melting Points; Averaged (median) values were used when multiple points were available. For more information on this invaluable and excellent collection, see http://onswebservices.wikispaces.com/meltingpoint. * 'CRC_ORG', a compillation of data on organics as published in [2]_. * 'CRC_INORG', a compillation of data on inorganic as published in [2]_. Examples -------- >>> Tm(CASRN='7732-18-5') 273.15 References ---------- .. [1] Bradley, Jean-Claude, Antony Williams, and Andrew Lang. "Jean-Claude Bradley Open Melting Point Dataset", May 20, 2014. https://figshare.com/articles/Jean_Claude_Bradley_Open_Melting_Point_Datset/1031637. .. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014. ''' def list_methods(): methods = [] if CASRN in Tm_ON_data.index: methods.append(OPEN_NTBKM) if CASRN in CRC_inorganic_data.index and not np.isnan(CRC_inorganic_data.at[CASRN, 'Tm']): methods.append(CRC_INORG) if CASRN in CRC_organic_data.index and not np.isnan(CRC_organic_data.at[CASRN, 'Tm']): methods.append(CRC_ORG) if IgnoreMethods: for Method in IgnoreMethods: if Method in methods: methods.remove(Method) methods.append(NONE) return methods if AvailableMethods: return list_methods() if not Method: Method = list_methods()[0] if Method == OPEN_NTBKM: return float(Tm_ON_data.at[CASRN, 'Tm']) elif Method == CRC_INORG: return float(CRC_inorganic_data.at[CASRN, 'Tm']) elif Method == CRC_ORG: return float(CRC_organic_data.at[CASRN, 'Tm']) elif Method == NONE: return None else: raise Exception('Failure in in function')
r'''This function handles the retrieval of a chemical's melting point. Lookup is based on CASRNs. Will automatically select a data source to use if no Method is provided; returns None if the data is not available. Prefered sources are 'Open Notebook Melting Points', with backup sources 'CRC Physical Constants, organic' for organic chemicals, and 'CRC Physical Constants, inorganic' for inorganic chemicals. Function has data for approximately 14000 chemicals. Parameters ---------- CASRN : string CASRN [-] Returns ------- Tm : float Melting temperature, [K] methods : list, only returned if AvailableMethods == True List of methods which can be used to obtain Tm with the given inputs Other Parameters ---------------- Method : string, optional A string for the method name to use, as defined by constants in Tm_methods AvailableMethods : bool, optional If True, function will determine which methods can be used to obtain Tm for the desired chemical, and will return methods instead of Tm IgnoreMethods : list, optional A list of methods to ignore in obtaining the full list of methods Notes ----- A total of three sources are available for this function. They are: * 'OPEN_NTBKM, a compillation of data on organics as published in [1]_ as Open Notebook Melting Points; Averaged (median) values were used when multiple points were available. For more information on this invaluable and excellent collection, see http://onswebservices.wikispaces.com/meltingpoint. * 'CRC_ORG', a compillation of data on organics as published in [2]_. * 'CRC_INORG', a compillation of data on inorganic as published in [2]_. Examples -------- >>> Tm(CASRN='7732-18-5') 273.15 References ---------- .. [1] Bradley, Jean-Claude, Antony Williams, and Andrew Lang. "Jean-Claude Bradley Open Melting Point Dataset", May 20, 2014. https://figshare.com/articles/Jean_Claude_Bradley_Open_Melting_Point_Datset/1031637. .. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
Below is the instruction that describes the task: ### Input: r'''This function handles the retrieval of a chemical's melting point. Lookup is based on CASRNs. Will automatically select a data source to use if no Method is provided; returns None if the data is not available. Prefered sources are 'Open Notebook Melting Points', with backup sources 'CRC Physical Constants, organic' for organic chemicals, and 'CRC Physical Constants, inorganic' for inorganic chemicals. Function has data for approximately 14000 chemicals. Parameters ---------- CASRN : string CASRN [-] Returns ------- Tm : float Melting temperature, [K] methods : list, only returned if AvailableMethods == True List of methods which can be used to obtain Tm with the given inputs Other Parameters ---------------- Method : string, optional A string for the method name to use, as defined by constants in Tm_methods AvailableMethods : bool, optional If True, function will determine which methods can be used to obtain Tm for the desired chemical, and will return methods instead of Tm IgnoreMethods : list, optional A list of methods to ignore in obtaining the full list of methods Notes ----- A total of three sources are available for this function. They are: * 'OPEN_NTBKM, a compillation of data on organics as published in [1]_ as Open Notebook Melting Points; Averaged (median) values were used when multiple points were available. For more information on this invaluable and excellent collection, see http://onswebservices.wikispaces.com/meltingpoint. * 'CRC_ORG', a compillation of data on organics as published in [2]_. * 'CRC_INORG', a compillation of data on inorganic as published in [2]_. Examples -------- >>> Tm(CASRN='7732-18-5') 273.15 References ---------- .. [1] Bradley, Jean-Claude, Antony Williams, and Andrew Lang. "Jean-Claude Bradley Open Melting Point Dataset", May 20, 2014. https://figshare.com/articles/Jean_Claude_Bradley_Open_Melting_Point_Datset/1031637. .. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014. ### Response: def Tm(CASRN, AvailableMethods=False, Method=None, IgnoreMethods=[]): r'''This function handles the retrieval of a chemical's melting point. Lookup is based on CASRNs. Will automatically select a data source to use if no Method is provided; returns None if the data is not available. Prefered sources are 'Open Notebook Melting Points', with backup sources 'CRC Physical Constants, organic' for organic chemicals, and 'CRC Physical Constants, inorganic' for inorganic chemicals. Function has data for approximately 14000 chemicals. Parameters ---------- CASRN : string CASRN [-] Returns ------- Tm : float Melting temperature, [K] methods : list, only returned if AvailableMethods == True List of methods which can be used to obtain Tm with the given inputs Other Parameters ---------------- Method : string, optional A string for the method name to use, as defined by constants in Tm_methods AvailableMethods : bool, optional If True, function will determine which methods can be used to obtain Tm for the desired chemical, and will return methods instead of Tm IgnoreMethods : list, optional A list of methods to ignore in obtaining the full list of methods Notes ----- A total of three sources are available for this function. They are: * 'OPEN_NTBKM, a compillation of data on organics as published in [1]_ as Open Notebook Melting Points; Averaged (median) values were used when multiple points were available. For more information on this invaluable and excellent collection, see http://onswebservices.wikispaces.com/meltingpoint. * 'CRC_ORG', a compillation of data on organics as published in [2]_. * 'CRC_INORG', a compillation of data on inorganic as published in [2]_. Examples -------- >>> Tm(CASRN='7732-18-5') 273.15 References ---------- .. [1] Bradley, Jean-Claude, Antony Williams, and Andrew Lang. "Jean-Claude Bradley Open Melting Point Dataset", May 20, 2014. https://figshare.com/articles/Jean_Claude_Bradley_Open_Melting_Point_Datset/1031637. .. [2] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014. ''' def list_methods(): methods = [] if CASRN in Tm_ON_data.index: methods.append(OPEN_NTBKM) if CASRN in CRC_inorganic_data.index and not np.isnan(CRC_inorganic_data.at[CASRN, 'Tm']): methods.append(CRC_INORG) if CASRN in CRC_organic_data.index and not np.isnan(CRC_organic_data.at[CASRN, 'Tm']): methods.append(CRC_ORG) if IgnoreMethods: for Method in IgnoreMethods: if Method in methods: methods.remove(Method) methods.append(NONE) return methods if AvailableMethods: return list_methods() if not Method: Method = list_methods()[0] if Method == OPEN_NTBKM: return float(Tm_ON_data.at[CASRN, 'Tm']) elif Method == CRC_INORG: return float(CRC_inorganic_data.at[CASRN, 'Tm']) elif Method == CRC_ORG: return float(CRC_organic_data.at[CASRN, 'Tm']) elif Method == NONE: return None else: raise Exception('Failure in in function')
def get_turbine_data_from_oedb(turbine_type, fetch_curve, overwrite=False): r""" Fetches data for one wind turbine type from the OpenEnergy Database (oedb). If turbine data exists in local repository it is loaded from this file. The file is created when turbine data was loaded from oedb in :py:func:`~.load_turbine_data_from_oedb`. Use this function with `overwrite=True` to overwrite your file with newly fetched data. Use :py:func:`~.check_local_turbine_data` to check weather your local file is up to date. Parameters ---------- turbine_type : string Specifies the turbine type data is fetched for. Use :py:func:`~.get_turbine_types` to see a table of all wind turbines for which power (coefficient) curve data is provided. fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. overwrite : boolean If True local file is overwritten by newly fetch data from oedb, if False turbine data is fetched from previously saved file. Returns ------- Tuple (pandas.DataFrame, float) Power curve or power coefficient curve (pandas.DataFrame) and nominal power (float) of one wind turbine type. Power (coefficient) curve DataFrame contains power coefficient curve values (dimensionless) or power curve values in W with the corresponding wind speeds in m/s. """ # hdf5 filename filename = os.path.join(os.path.dirname(__file__), 'data', 'turbine_data_oedb.h5') if os.path.isfile(filename) and not overwrite: logging.debug("Turbine data is fetched from {}".format(filename)) with pd.HDFStore(filename) as hdf_store: turbine_data = hdf_store.get('turbine_data') else: turbine_data = load_turbine_data_from_oedb() turbine_data.set_index('turbine_type', inplace=True) # Set `curve` depending on `fetch_curve` to match names in oedb curve = ('cp_curve' if fetch_curve == 'power_coefficient_curve' else fetch_curve) # Select curve and nominal power of turbine type try: df = turbine_data.loc[turbine_type] except KeyError: raise KeyError("Turbine type '{}' not in database. ".format( turbine_type) + "Use 'get_turbine_types()' to see a table of " + "possible wind turbine types.") if df[curve] is not None: df = pd.DataFrame(df[curve]) else: sys.exit("{} of {} not available in ".format(curve, turbine_type) + "oedb. Use 'get_turbine_types()' to see for which turbine " + "types power coefficient curves are available.") nominal_power = turbine_data.loc[turbine_type][ 'installed_capacity_kw'] * 1000 df.columns = ['wind_speed', 'value'] if fetch_curve == 'power_curve': # power in W df['value'] = df['value'] * 1000 return df, nominal_power
r""" Fetches data for one wind turbine type from the OpenEnergy Database (oedb). If turbine data exists in local repository it is loaded from this file. The file is created when turbine data was loaded from oedb in :py:func:`~.load_turbine_data_from_oedb`. Use this function with `overwrite=True` to overwrite your file with newly fetched data. Use :py:func:`~.check_local_turbine_data` to check weather your local file is up to date. Parameters ---------- turbine_type : string Specifies the turbine type data is fetched for. Use :py:func:`~.get_turbine_types` to see a table of all wind turbines for which power (coefficient) curve data is provided. fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. overwrite : boolean If True local file is overwritten by newly fetch data from oedb, if False turbine data is fetched from previously saved file. Returns ------- Tuple (pandas.DataFrame, float) Power curve or power coefficient curve (pandas.DataFrame) and nominal power (float) of one wind turbine type. Power (coefficient) curve DataFrame contains power coefficient curve values (dimensionless) or power curve values in W with the corresponding wind speeds in m/s.
Below is the instruction that describes the task: ### Input: r""" Fetches data for one wind turbine type from the OpenEnergy Database (oedb). If turbine data exists in local repository it is loaded from this file. The file is created when turbine data was loaded from oedb in :py:func:`~.load_turbine_data_from_oedb`. Use this function with `overwrite=True` to overwrite your file with newly fetched data. Use :py:func:`~.check_local_turbine_data` to check weather your local file is up to date. Parameters ---------- turbine_type : string Specifies the turbine type data is fetched for. Use :py:func:`~.get_turbine_types` to see a table of all wind turbines for which power (coefficient) curve data is provided. fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. overwrite : boolean If True local file is overwritten by newly fetch data from oedb, if False turbine data is fetched from previously saved file. Returns ------- Tuple (pandas.DataFrame, float) Power curve or power coefficient curve (pandas.DataFrame) and nominal power (float) of one wind turbine type. Power (coefficient) curve DataFrame contains power coefficient curve values (dimensionless) or power curve values in W with the corresponding wind speeds in m/s. ### Response: def get_turbine_data_from_oedb(turbine_type, fetch_curve, overwrite=False): r""" Fetches data for one wind turbine type from the OpenEnergy Database (oedb). If turbine data exists in local repository it is loaded from this file. The file is created when turbine data was loaded from oedb in :py:func:`~.load_turbine_data_from_oedb`. Use this function with `overwrite=True` to overwrite your file with newly fetched data. Use :py:func:`~.check_local_turbine_data` to check weather your local file is up to date. Parameters ---------- turbine_type : string Specifies the turbine type data is fetched for. Use :py:func:`~.get_turbine_types` to see a table of all wind turbines for which power (coefficient) curve data is provided. fetch_curve : string Parameter to specify whether a power or power coefficient curve should be retrieved from the provided turbine data. Valid options are 'power_curve' and 'power_coefficient_curve'. Default: None. overwrite : boolean If True local file is overwritten by newly fetch data from oedb, if False turbine data is fetched from previously saved file. Returns ------- Tuple (pandas.DataFrame, float) Power curve or power coefficient curve (pandas.DataFrame) and nominal power (float) of one wind turbine type. Power (coefficient) curve DataFrame contains power coefficient curve values (dimensionless) or power curve values in W with the corresponding wind speeds in m/s. """ # hdf5 filename filename = os.path.join(os.path.dirname(__file__), 'data', 'turbine_data_oedb.h5') if os.path.isfile(filename) and not overwrite: logging.debug("Turbine data is fetched from {}".format(filename)) with pd.HDFStore(filename) as hdf_store: turbine_data = hdf_store.get('turbine_data') else: turbine_data = load_turbine_data_from_oedb() turbine_data.set_index('turbine_type', inplace=True) # Set `curve` depending on `fetch_curve` to match names in oedb curve = ('cp_curve' if fetch_curve == 'power_coefficient_curve' else fetch_curve) # Select curve and nominal power of turbine type try: df = turbine_data.loc[turbine_type] except KeyError: raise KeyError("Turbine type '{}' not in database. ".format( turbine_type) + "Use 'get_turbine_types()' to see a table of " + "possible wind turbine types.") if df[curve] is not None: df = pd.DataFrame(df[curve]) else: sys.exit("{} of {} not available in ".format(curve, turbine_type) + "oedb. Use 'get_turbine_types()' to see for which turbine " + "types power coefficient curves are available.") nominal_power = turbine_data.loc[turbine_type][ 'installed_capacity_kw'] * 1000 df.columns = ['wind_speed', 'value'] if fetch_curve == 'power_curve': # power in W df['value'] = df['value'] * 1000 return df, nominal_power
def topic_over_time(self, k, mode='counts', slice_kwargs={}): """ Calculate the representation of topic ``k`` in the corpus over time. """ return self.corpus.feature_distribution('topics', k, mode=mode, **slice_kwargs)
Calculate the representation of topic ``k`` in the corpus over time.
Below is the instruction that describes the task: ### Input: Calculate the representation of topic ``k`` in the corpus over time. ### Response: def topic_over_time(self, k, mode='counts', slice_kwargs={}): """ Calculate the representation of topic ``k`` in the corpus over time. """ return self.corpus.feature_distribution('topics', k, mode=mode, **slice_kwargs)
def _get_reader(self, network_reader): """ Get a reader or None if another reader is already reading. """ with (yield from self._lock): if self._reader_process is None: self._reader_process = network_reader if self._reader: if self._reader_process == network_reader: self._current_read = asyncio.async(self._reader.read(READ_SIZE)) return self._current_read return None
Get a reader or None if another reader is already reading.
Below is the instruction that describes the task: ### Input: Get a reader or None if another reader is already reading. ### Response: def _get_reader(self, network_reader): """ Get a reader or None if another reader is already reading. """ with (yield from self._lock): if self._reader_process is None: self._reader_process = network_reader if self._reader: if self._reader_process == network_reader: self._current_read = asyncio.async(self._reader.read(READ_SIZE)) return self._current_read return None
def _adjust_beforenext(self, real_wave_mfcc, algo_parameters): """ BEFORENEXT """ def new_time(nsi): """ The new boundary time value is ``delay`` before the end of the nonspeech interval ``nsi``. If ``nsi`` has length less than ``delay``, set the new boundary time to the begin of ``nsi``. """ delay = max(algo_parameters[0], TimeValue("0.000")) return max(nsi.end - delay, nsi.begin) self.log(u"Called _adjust_beforenext") self._adjust_on_nonspeech(real_wave_mfcc, new_time)
BEFORENEXT
Below is the the instruction that describes the task: ### Input: BEFORENEXT ### Response: def _adjust_beforenext(self, real_wave_mfcc, algo_parameters): """ BEFORENEXT """ def new_time(nsi): """ The new boundary time value is ``delay`` before the end of the nonspeech interval ``nsi``. If ``nsi`` has length less than ``delay``, set the new boundary time to the begin of ``nsi``. """ delay = max(algo_parameters[0], TimeValue("0.000")) return max(nsi.end - delay, nsi.begin) self.log(u"Called _adjust_beforenext") self._adjust_on_nonspeech(real_wave_mfcc, new_time)
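A quick numeric check of the boundary rule above, with made-up values instead of TimeValue objects: for a nonspeech interval [10.0, 10.5] and a delay of 0.2, the new boundary is max(10.5 - 0.2, 10.0) = 10.3; a delay longer than the interval clamps to the interval begin.

delay = max(0.2, 0.0)            # mirrors max(algo_parameters[0], TimeValue("0.000"))
print(max(10.5 - delay, 10.0))   # 10.3
print(max(10.5 - 0.8, 10.0))     # 10.0, clamped to nsi.begin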
def calculate_size(name, key): """ Calculates the request payload size""" data_size = 0 data_size += calculate_size_str(name) data_size += calculate_size_data(key) return data_size
Calculates the request payload size
Below is the the instruction that describes the task: ### Input: Calculates the request payload size ### Response: def calculate_size(name, key): """ Calculates the request payload size""" data_size = 0 data_size += calculate_size_str(name) data_size += calculate_size_data(key) return data_size
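The helper composes two codec functions it does not define; a self-contained sketch with stand-in implementations (a 4-byte length prefix plus payload, which is an assumption about the wire format) shows the arithmetic:

def calculate_size_str(s):
    return 4 + len(s.encode('utf-8'))   # assumed: length prefix + UTF-8 bytes

def calculate_size_data(b):
    return 4 + len(b)                   # assumed: length prefix + raw bytes

print(calculate_size('my-map', b'key-1'))   # 10 + 9 = 19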
def from_string(species_string: str): """ Returns a Specie from a string representation. Args: species_string (str): A typical string representation of a species, e.g., "Mn2+", "Fe3+", "O2-". Returns: A Specie object. Raises: ValueError if species_string cannot be interpreted. """ m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-])(.*)", species_string) if m: sym = m.group(1) oxi = 1 if m.group(2) == "" else float(m.group(2)) oxi = -oxi if m.group(3) == "-" else oxi properties = None if m.group(4): toks = m.group(4).replace(",", "").split("=") properties = {toks[0]: float(toks[1])} return Specie(sym, oxi, properties) else: raise ValueError("Invalid Species String")
Returns a Specie from a string representation. Args: species_string (str): A typical string representation of a species, e.g., "Mn2+", "Fe3+", "O2-". Returns: A Specie object. Raises: ValueError if species_string cannot be interpreted.
Below is the the instruction that describes the task: ### Input: Returns a Specie from a string representation. Args: species_string (str): A typical string representation of a species, e.g., "Mn2+", "Fe3+", "O2-". Returns: A Specie object. Raises: ValueError if species_string cannot be interpreted. ### Response: def from_string(species_string: str): """ Returns a Specie from a string representation. Args: species_string (str): A typical string representation of a species, e.g., "Mn2+", "Fe3+", "O2-". Returns: A Specie object. Raises: ValueError if species_string cannot be interpreted. """ m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-])(.*)", species_string) if m: sym = m.group(1) oxi = 1 if m.group(2) == "" else float(m.group(2)) oxi = -oxi if m.group(3) == "-" else oxi properties = None if m.group(4): toks = m.group(4).replace(",", "").split("=") properties = {toks[0]: float(toks[1])} return Specie(sym, oxi, properties) else: raise ValueError("Invalid Species String")
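The regex does the heavy lifting; a self-contained check of how a species string splits into symbol, magnitude, sign, and trailing properties (the spin value is just an example input):

import re
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-])(.*)", "Mn2+,spin=5")
print(m.groups())   # ('Mn', '2', '+', ',spin=5') -> oxidation state +2, properties {'spin': 5.0}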
def DEFINE_spaceseplist( # pylint: disable=invalid-name,redefined-builtin name, default, help, comma_compat=False, flag_values=_flagvalues.FLAGS, **args): """Registers a flag whose value is a whitespace-separated list of strings. Any whitespace can be used as a separator. Args: name: str, the flag name. default: list|str|None, the default value of the flag. help: str, the help message. comma_compat: bool - Whether to support comma as an additional separator. If false then only whitespace is supported. This is intended only for backwards compatibility with flags that used to be comma-separated. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. **args: Dictionary with extra keyword args that are passed to the Flag __init__. """ parser = _argument_parser.WhitespaceSeparatedListParser( comma_compat=comma_compat) serializer = _argument_parser.ListSerializer(' ') DEFINE(parser, name, default, help, flag_values, serializer, **args)
Registers a flag whose value is a whitespace-separated list of strings. Any whitespace can be used as a separator. Args: name: str, the flag name. default: list|str|None, the default value of the flag. help: str, the help message. comma_compat: bool - Whether to support comma as an additional separator. If false then only whitespace is supported. This is intended only for backwards compatibility with flags that used to be comma-separated. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. **args: Dictionary with extra keyword args that are passed to the Flag __init__.
Below is the the instruction that describes the task: ### Input: Registers a flag whose value is a whitespace-separated list of strings. Any whitespace can be used as a separator. Args: name: str, the flag name. default: list|str|None, the default value of the flag. help: str, the help message. comma_compat: bool - Whether to support comma as an additional separator. If false then only whitespace is supported. This is intended only for backwards compatibility with flags that used to be comma-separated. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. **args: Dictionary with extra keyword args that are passed to the Flag __init__. ### Response: def DEFINE_spaceseplist( # pylint: disable=invalid-name,redefined-builtin name, default, help, comma_compat=False, flag_values=_flagvalues.FLAGS, **args): """Registers a flag whose value is a whitespace-separated list of strings. Any whitespace can be used as a separator. Args: name: str, the flag name. default: list|str|None, the default value of the flag. help: str, the help message. comma_compat: bool - Whether to support comma as an additional separator. If false then only whitespace is supported. This is intended only for backwards compatibility with flags that used to be comma-separated. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. **args: Dictionary with extra keyword args that are passed to the Flag __init__. """ parser = _argument_parser.WhitespaceSeparatedListParser( comma_compat=comma_compat) serializer = _argument_parser.ListSerializer(' ') DEFINE(parser, name, default, help, flag_values, serializer, **args)
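A hedged absl-style usage sketch; the flag name 'dirs' and the argv values are invented for the demo.

from absl import flags

flags.DEFINE_spaceseplist('dirs', 'a b', 'Directories to scan.')
flags.FLAGS(['prog', '--dirs=x y z'])   # any whitespace separates items
print(flags.FLAGS.dirs)                 # ['x', 'y', 'z']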
def show_top_losses(self, k:int, max_len:int=70)->None: """ Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of actual class. `max_len` is the maximum number of tokens displayed. """ from IPython.display import display, HTML items = [] tl_val,tl_idx = self.top_losses() for i,idx in enumerate(tl_idx): if k <= 0: break k -= 1 tx,cl = self.data.dl(self.ds_type).dataset[idx] cl = cl.data classes = self.data.classes txt = ' '.join(tx.text.split(' ')[:max_len]) if max_len is not None else tx.text tmp = [txt, f'{classes[self.pred_class[idx]]}', f'{classes[cl]}', f'{self.losses[idx]:.2f}', f'{self.probs[idx][cl]:.2f}'] items.append(tmp) items = np.array(items) names = ['Text', 'Prediction', 'Actual', 'Loss', 'Probability'] df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names) with pd.option_context('display.max_colwidth', -1): display(HTML(df.to_html(index=False)))
Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of actual class. `max_len` is the maximum number of tokens displayed.
Below is the the instruction that describes the task: ### Input: Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of actual class. `max_len` is the maximum number of tokens displayed. ### Response: def show_top_losses(self, k:int, max_len:int=70)->None: """ Create a tabulation showing the first `k` texts in top_losses along with their prediction, actual,loss, and probability of actual class. `max_len` is the maximum number of tokens displayed. """ from IPython.display import display, HTML items = [] tl_val,tl_idx = self.top_losses() for i,idx in enumerate(tl_idx): if k <= 0: break k -= 1 tx,cl = self.data.dl(self.ds_type).dataset[idx] cl = cl.data classes = self.data.classes txt = ' '.join(tx.text.split(' ')[:max_len]) if max_len is not None else tx.text tmp = [txt, f'{classes[self.pred_class[idx]]}', f'{classes[cl]}', f'{self.losses[idx]:.2f}', f'{self.probs[idx][cl]:.2f}'] items.append(tmp) items = np.array(items) names = ['Text', 'Prediction', 'Actual', 'Loss', 'Probability'] df = pd.DataFrame({n:items[:,i] for i,n in enumerate(names)}, columns=names) with pd.option_context('display.max_colwidth', -1): display(HTML(df.to_html(index=False)))
def request_service(self, app_name, port, expected_output=None): """ Make request on service of app. If there is connection error function return False. :param app_name: str, name of the app :param expected_output: str, If not None method will check output returned from request and try to find matching string. :param port: str or int, port of the service :return: bool, True if connection was established False if there was connection error """ # get ip of service ip = [service.get_ip() for service in self.list_services(namespace=self.project) if service.name == app_name][0] # make http request to obtain output if expected_output is not None: try: output = self.http_request(host=ip, port=port) if expected_output not in output.text: raise ConuException( "Connection to service established, but didn't match expected output") else: logger.info("Connection to service established and return expected output!") return True except ConnectionError as e: logger.info("Connection to service failed %s!", e) return False elif check_port(port, host=ip): # check if port is open return True return False
Make request on service of app. If there is connection error function return False. :param app_name: str, name of the app :param expected_output: str, If not None method will check output returned from request and try to find matching string. :param port: str or int, port of the service :return: bool, True if connection was established False if there was connection error
Below is the the instruction that describes the task: ### Input: Make request on service of app. If there is connection error function return False. :param app_name: str, name of the app :param expected_output: str, If not None method will check output returned from request and try to find matching string. :param port: str or int, port of the service :return: bool, True if connection was established False if there was connection error ### Response: def request_service(self, app_name, port, expected_output=None): """ Make request on service of app. If there is connection error function return False. :param app_name: str, name of the app :param expected_output: str, If not None method will check output returned from request and try to find matching string. :param port: str or int, port of the service :return: bool, True if connection was established False if there was connection error """ # get ip of service ip = [service.get_ip() for service in self.list_services(namespace=self.project) if service.name == app_name][0] # make http request to obtain output if expected_output is not None: try: output = self.http_request(host=ip, port=port) if expected_output not in output.text: raise ConuException( "Connection to service established, but didn't match expected output") else: logger.info("Connection to service established and return expected output!") return True except ConnectionError as e: logger.info("Connection to service failed %s!", e) return False elif check_port(port, host=ip): # check if port is open return True return False
def dict_to_literal(dict_container: dict): """ Transforms a JSON+LD PyLD dictionary into an RDFLib object""" if isinstance(dict_container["@value"], int): return dict_container["@value"], else: return dict_container["@value"], dict_container.get("@language", None)
Transforms a JSON+LD PyLD dictionary into an RDFLib object
Below is the the instruction that describes the task: ### Input: Transforms a JSON+LD PyLD dictionary into an RDFLib object ### Response: def dict_to_literal(dict_container: dict): """ Transforms a JSON+LD PyLD dictionary into an RDFLib object""" if isinstance(dict_container["@value"], int): return dict_container["@value"], else: return dict_container["@value"], dict_container.get("@language", None)
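Three tiny worked examples of the branch behavior, using hand-built PyLD-style value objects:

print(dict_to_literal({"@value": 5}))                          # (5,)
print(dict_to_literal({"@value": "chat", "@language": "fr"}))  # ('chat', 'fr')
print(dict_to_literal({"@value": "cat"}))                      # ('cat', None)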
def _traverse_command(self, name, *args, **kwargs): """ Add the key to the args and call the Redis command. """ if not name in self.available_commands: raise AttributeError("%s is not an available command for %s" % (name, self.__class__.__name__)) attr = getattr(self.connection, "%s" % name) key = self.key log.debug(u"Requesting %s with key %s and args %s" % (name, key, args)) result = attr(key, *args, **kwargs) result = self.post_command( sender=self, name=name, result=result, args=args, kwargs=kwargs ) return result
Add the key to the args and call the Redis command.
Below is the the instruction that describes the task: ### Input: Add the key to the args and call the Redis command. ### Response: def _traverse_command(self, name, *args, **kwargs): """ Add the key to the args and call the Redis command. """ if not name in self.available_commands: raise AttributeError("%s is not an available command for %s" % (name, self.__class__.__name__)) attr = getattr(self.connection, "%s" % name) key = self.key log.debug(u"Requesting %s with key %s and args %s" % (name, key, args)) result = attr(key, *args, **kwargs) result = self.post_command( sender=self, name=name, result=result, args=args, kwargs=kwargs ) return result
def page(self, email=values.unset, status=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of AuthorizationDocumentInstance records from the API. Request is executed immediately :param unicode email: Email. :param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument. :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of AuthorizationDocumentInstance :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage """ params = values.of({ 'Email': email, 'Status': status, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return AuthorizationDocumentPage(self._version, response, self._solution)
Retrieve a single page of AuthorizationDocumentInstance records from the API. Request is executed immediately :param unicode email: Email. :param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument. :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of AuthorizationDocumentInstance :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage
Below is the the instruction that describes the task: ### Input: Retrieve a single page of AuthorizationDocumentInstance records from the API. Request is executed immediately :param unicode email: Email. :param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument. :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of AuthorizationDocumentInstance :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage ### Response: def page(self, email=values.unset, status=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of AuthorizationDocumentInstance records from the API. Request is executed immediately :param unicode email: Email. :param AuthorizationDocumentInstance.Status status: The Status of this AuthorizationDocument. :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of AuthorizationDocumentInstance :rtype: twilio.rest.preview.hosted_numbers.authorization_document.AuthorizationDocumentPage """ params = values.of({ 'Email': email, 'Status': status, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return AuthorizationDocumentPage(self._version, response, self._solution)
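A hypothetical call-site sketch, assuming a configured twilio-python-style client that exposes this list resource; the email and page size are placeholders.

page = client.preview.hosted_numbers.authorization_documents.page(
    email='owner@example.com', page_size=20)
for document in page:
    print(document.sid)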
def add_template_events_to_network(self, columns, vectors): """ Add a vector indexed """ # Just call through to the standard function self.template_events = self.template_event_dict['network'] self.add_template_network_events(columns, vectors) self.template_event_dict['network'] = self.template_events self.template_events = None
Add a vector indexed
Below is the the instruction that describes the task: ### Input: Add a vector indexed ### Response: def add_template_events_to_network(self, columns, vectors): """ Add a vector indexed """ # Just call through to the standard function self.template_events = self.template_event_dict['network'] self.add_template_network_events(columns, vectors) self.template_event_dict['network'] = self.template_events self.template_events = None
def filter_duplicate(self, url): """ URL deduplication """ if self.filterDuplicate: if url in self.historys: raise Exception('duplicate exception: %s is duplicate' % url) else: self.historys.add(url) else: pass
URL deduplication
Below is the the instruction that describes the task: ### Input: URL deduplication ### Response: def filter_duplicate(self, url): """ URL deduplication """ if self.filterDuplicate: if url in self.historys: raise Exception('duplicate exception: %s is duplicate' % url) else: self.historys.add(url) else: pass
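A minimal stand-in showing the dedup behavior; the Crawler class and its attribute values are made up, and the function above is assumed to be available at module level.

class Crawler:
    filterDuplicate = True
    filter_duplicate = filter_duplicate   # attach the function above as a method
    def __init__(self):
        self.historys = set()

c = Crawler()
c.filter_duplicate('http://a.example')      # first visit: recorded
try:
    c.filter_duplicate('http://a.example')  # second visit: rejected
except Exception as exc:
    print(exc)   # duplicate exception: http://a.example is duplicate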
def get_forwarding_address_details(destination_address, api_key, callback_url=None, coin_symbol='btc'): """ Give a destination address and return the details of the input address that will automatically forward to the destination address Note: a blockcypher api_key is required for this method """ assert is_valid_coin_symbol(coin_symbol) assert api_key, 'api_key required' url = make_url(coin_symbol, 'payments') logger.info(url) params = {'token': api_key} data = { 'destination': destination_address, } if callback_url: data['callback_url'] = callback_url r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS) return get_valid_json(r)
Give a destination address and return the details of the input address that will automatically forward to the destination address Note: a blockcypher api_key is required for this method
Below is the the instruction that describes the task: ### Input: Give a destination address and return the details of the input address that will automatically forward to the destination address Note: a blockcypher api_key is required for this method ### Response: def get_forwarding_address_details(destination_address, api_key, callback_url=None, coin_symbol='btc'): """ Give a destination address and return the details of the input address that will automatically forward to the destination address Note: a blockcypher api_key is required for this method """ assert is_valid_coin_symbol(coin_symbol) assert api_key, 'api_key required' url = make_url(coin_symbol, 'payments') logger.info(url) params = {'token': api_key} data = { 'destination': destination_address, } if callback_url: data['callback_url'] = callback_url r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS) return get_valid_json(r)
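A hedged usage sketch for the forwarding helper; the address and token are placeholders, the call performs a live HTTP request, and the 'input_address' field name is an assumption about the payments payload.

details = get_forwarding_address_details(
    destination_address='DESTINATION_ADDRESS_PLACEHOLDER',
    api_key='YOUR_BLOCKCYPHER_TOKEN',
    coin_symbol='btc')
print(details.get('input_address'))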
def depth_december_average_ground_temperature(self, value=None): """Corresponds to IDD Field `depth_december_average_ground_temperature` Args: value (float): value for IDD Field `depth_december_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError( 'value {} need to be of type float ' 'for field `depth_december_average_ground_temperature`'.format(value)) self._depth_december_average_ground_temperature = value
Corresponds to IDD Field `depth_december_average_ground_temperature` Args: value (float): value for IDD Field `depth_december_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
Below is the the instruction that describes the task: ### Input: Corresponds to IDD Field `depth_december_average_ground_temperature` Args: value (float): value for IDD Field `depth_december_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value ### Response: def depth_december_average_ground_temperature(self, value=None): """Corresponds to IDD Field `depth_december_average_ground_temperature` Args: value (float): value for IDD Field `depth_december_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError( 'value {} need to be of type float ' 'for field `depth_december_average_ground_temperature`'.format(value)) self._depth_december_average_ground_temperature = value
def copy(self): """Return a new :class:`Card` just like me.""" d = {} for att in ( 'deck', 'idx', 'ud', 'foreground_source', 'foreground_color', 'foreground_image', 'foreground_texture', 'background_source', 'background_color', 'background_image', 'background_texture', 'outline_color', 'content_outline_color', 'foreground_outline_color', 'art_outline_color', 'art_source', 'art_color', 'art_image', 'art_texture', 'show_art', 'headline_text', 'headline_markup', 'headline_font_name', 'headline_font_size', 'headline_color', 'midline_text', 'midline_markup', 'midline_font_name', 'midline_font_size', 'midline_color', 'footer_text', 'footer_markup', 'footer_font_name', 'footer_font_size', 'footer_color', 'text', 'text_color', 'markup', 'font_name', 'font_size' ): v = getattr(self, att) if v is not None: d[att] = v return Card(**d)
Return a new :class:`Card` just like me.
Below is the the instruction that describes the task: ### Input: Return a new :class:`Card` just like me. ### Response: def copy(self): """Return a new :class:`Card` just like me.""" d = {} for att in ( 'deck', 'idx', 'ud', 'foreground_source', 'foreground_color', 'foreground_image', 'foreground_texture', 'background_source', 'background_color', 'background_image', 'background_texture', 'outline_color', 'content_outline_color', 'foreground_outline_color', 'art_outline_color', 'art_source', 'art_color', 'art_image', 'art_texture', 'show_art', 'headline_text', 'headline_markup', 'headline_font_name', 'headline_font_size', 'headline_color', 'midline_text', 'midline_markup', 'midline_font_name', 'midline_font_size', 'midline_color', 'footer_text', 'footer_markup', 'footer_font_name', 'footer_font_size', 'footer_color', 'text', 'text_color', 'markup', 'font_name', 'font_size' ): v = getattr(self, att) if v is not None: d[att] = v return Card(**d)
def lease_storage_container(kwargs=None, storage_conn=None, call=None): ''' .. versionadded:: 2015.8.0 Lease a container associated with the storage account CLI Example: .. code-block:: bash salt-cloud -f lease_storage_container my-azure name=mycontainer name: Name of container to create. lease_action: Required. Possible values: acquire|renew|release|break|change lease_id: Required if the container has an active lease. lease_duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. For backwards compatibility, the default is 60, and the value is only used on an acquire operation. lease_break_period: Optional. For a break operation, this is the proposed duration of seconds that the lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. proposed_lease_id: Optional for acquire, required for change. Proposed lease ID, in a GUID string format. ''' if call != 'function': raise SaltCloudSystemExit( 'The lease_storage_container function must be called with -f or --function.' ) if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('An storage container name must be specified as "name"') lease_actions = ('acquire', 'renew', 'release', 'break', 'change') if kwargs.get('lease_action', None) not in lease_actions: raise SaltCloudSystemExit( 'A lease_action must be one of: {0}'.format( ', '.join(lease_actions) ) ) if kwargs['lease_action'] != 'acquire' and 'lease_id' not in kwargs: raise SaltCloudSystemExit( 'A lease ID must be specified for the "{0}" lease action ' 'as "lease_id"'.format(kwargs['lease_action']) ) if not storage_conn: storage_conn = get_storage_conn(conn_kwargs=kwargs) data = storage_conn.lease_container( container_name=kwargs['name'], x_ms_lease_action=kwargs['lease_action'], x_ms_lease_id=kwargs.get('lease_id', None), x_ms_lease_duration=kwargs.get('lease_duration', 60), x_ms_lease_break_period=kwargs.get('lease_break_period', None), x_ms_proposed_lease_id=kwargs.get('proposed_lease_id', None), ) return data
.. versionadded:: 2015.8.0 Lease a container associated with the storage account CLI Example: .. code-block:: bash salt-cloud -f lease_storage_container my-azure name=mycontainer name: Name of container to create. lease_action: Required. Possible values: acquire|renew|release|break|change lease_id: Required if the container has an active lease. lease_duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. For backwards compatibility, the default is 60, and the value is only used on an acquire operation. lease_break_period: Optional. For a break operation, this is the proposed duration of seconds that the lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. proposed_lease_id: Optional for acquire, required for change. Proposed lease ID, in a GUID string format.
Below is the the instruction that describes the task: ### Input: .. versionadded:: 2015.8.0 Lease a container associated with the storage account CLI Example: .. code-block:: bash salt-cloud -f lease_storage_container my-azure name=mycontainer name: Name of container to create. lease_action: Required. Possible values: acquire|renew|release|break|change lease_id: Required if the container has an active lease. lease_duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. For backwards compatibility, the default is 60, and the value is only used on an acquire operation. lease_break_period: Optional. For a break operation, this is the proposed duration of seconds that the lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. proposed_lease_id: Optional for acquire, required for change. Proposed lease ID, in a GUID string format. ### Response: def lease_storage_container(kwargs=None, storage_conn=None, call=None): ''' .. versionadded:: 2015.8.0 Lease a container associated with the storage account CLI Example: .. code-block:: bash salt-cloud -f lease_storage_container my-azure name=mycontainer name: Name of container to create. lease_action: Required. Possible values: acquire|renew|release|break|change lease_id: Required if the container has an active lease. lease_duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration cannot be changed using renew or change. For backwards compatibility, the default is 60, and the value is only used on an acquire operation. lease_break_period: Optional. For a break operation, this is the proposed duration of seconds that the lease should continue before it is broken, between 0 and 60 seconds. This break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately. proposed_lease_id: Optional for acquire, required for change. Proposed lease ID, in a GUID string format. ''' if call != 'function': raise SaltCloudSystemExit( 'The lease_storage_container function must be called with -f or --function.' ) if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('An storage container name must be specified as "name"') lease_actions = ('acquire', 'renew', 'release', 'break', 'change') if kwargs.get('lease_action', None) not in lease_actions: raise SaltCloudSystemExit( 'A lease_action must be one of: {0}'.format( ', '.join(lease_actions) ) ) if kwargs['lease_action'] != 'acquire' and 'lease_id' not in kwargs: raise SaltCloudSystemExit( 'A lease ID must be specified for the "{0}" lease action ' 'as "lease_id"'.format(kwargs['lease_action']) ) if not storage_conn: storage_conn = get_storage_conn(conn_kwargs=kwargs) data = storage_conn.lease_container( container_name=kwargs['name'], x_ms_lease_action=kwargs['lease_action'], x_ms_lease_id=kwargs.get('lease_id', None), x_ms_lease_duration=kwargs.get('lease_duration', 60), x_ms_lease_break_period=kwargs.get('lease_break_period', None), x_ms_proposed_lease_id=kwargs.get('proposed_lease_id', None), ) return data
def as_svg_data_uri(matrix, version, scale=1, border=None, color='#000', background=None, xmldecl=False, svgns=True, title=None, desc=None, svgid=None, svgclass='segno', lineclass='qrline', omitsize=False, unit='', encoding='utf-8', svgversion=None, nl=False, encode_minimal=False, omit_charset=False): """\ Converts the matrix to a SVG data URI. The XML declaration is omitted by default (set ``xmldecl`` to ``True`` to enable it), further the newline is omitted by default (set ``nl`` to ``True`` to enable it). Aside from the missing ``out`` parameter and the different ``xmldecl`` and ``nl`` default values and the additional parameter ``encode_minimal`` and ``omit_charset`` this function uses the same parameters as the usual SVG serializer. :param bool encode_minimal: Indicates if the resulting data URI should use minimal percent encoding (disabled by default). :param bool omit_charset: Indicates if the ``;charset=...`` should be omitted (disabled by default) :rtype: str """ encode = partial(quote, safe=b"") if not encode_minimal else partial(quote, safe=b" :/='") buff = io.BytesIO() write_svg(matrix, version, buff, scale=scale, color=color, background=background, border=border, xmldecl=xmldecl, svgns=svgns, title=title, desc=desc, svgclass=svgclass, lineclass=lineclass, omitsize=omitsize, encoding=encoding, svgid=svgid, unit=unit, svgversion=svgversion, nl=nl) return 'data:image/svg+xml{0},{1}' \ .format(';charset=' + encoding if not omit_charset else '', # Replace " quotes with ' and URL encode the result # See also https://codepen.io/tigt/post/optimizing-svgs-in-data-uris encode(_replace_quotes(buff.getvalue())))
\ Converts the matrix to a SVG data URI. The XML declaration is omitted by default (set ``xmldecl`` to ``True`` to enable it), further the newline is omitted by default (set ``nl`` to ``True`` to enable it). Aside from the missing ``out`` parameter and the different ``xmldecl`` and ``nl`` default values and the additional parameter ``encode_minimal`` and ``omit_charset`` this function uses the same parameters as the usual SVG serializer. :param bool encode_minimal: Indicates if the resulting data URI should use minimal percent encoding (disabled by default). :param bool omit_charset: Indicates if the ``;charset=...`` should be omitted (disabled by default) :rtype: str
Below is the the instruction that describes the task: ### Input: \ Converts the matrix to a SVG data URI. The XML declaration is omitted by default (set ``xmldecl`` to ``True`` to enable it), further the newline is omitted by default (set ``nl`` to ``True`` to enable it). Aside from the missing ``out`` parameter and the different ``xmldecl`` and ``nl`` default values and the additional parameter ``encode_minimal`` and ``omit_charset`` this function uses the same parameters as the usual SVG serializer. :param bool encode_minimal: Indicates if the resulting data URI should use minimal percent encoding (disabled by default). :param bool omit_charset: Indicates if the ``;charset=...`` should be omitted (disabled by default) :rtype: str ### Response: def as_svg_data_uri(matrix, version, scale=1, border=None, color='#000', background=None, xmldecl=False, svgns=True, title=None, desc=None, svgid=None, svgclass='segno', lineclass='qrline', omitsize=False, unit='', encoding='utf-8', svgversion=None, nl=False, encode_minimal=False, omit_charset=False): """\ Converts the matrix to a SVG data URI. The XML declaration is omitted by default (set ``xmldecl`` to ``True`` to enable it), further the newline is omitted by default (set ``nl`` to ``True`` to enable it). Aside from the missing ``out`` parameter and the different ``xmldecl`` and ``nl`` default values and the additional parameter ``encode_minimal`` and ``omit_charset`` this function uses the same parameters as the usual SVG serializer. :param bool encode_minimal: Indicates if the resulting data URI should use minimal percent encoding (disabled by default). :param bool omit_charset: Indicates if the ``;charset=...`` should be omitted (disabled by default) :rtype: str """ encode = partial(quote, safe=b"") if not encode_minimal else partial(quote, safe=b" :/='") buff = io.BytesIO() write_svg(matrix, version, buff, scale=scale, color=color, background=background, border=border, xmldecl=xmldecl, svgns=svgns, title=title, desc=desc, svgclass=svgclass, lineclass=lineclass, omitsize=omitsize, encoding=encoding, svgid=svgid, unit=unit, svgversion=svgversion, nl=nl) return 'data:image/svg+xml{0},{1}' \ .format(';charset=' + encoding if not omit_charset else '', # Replace " quotes with ' and URL encode the result # See also https://codepen.io/tigt/post/optimizing-svgs-in-data-uris encode(_replace_quotes(buff.getvalue())))
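For orientation, a sketch of how this serializer is typically reached in segno; it assumes segno is installed and that QRCode.svg_data_uri forwards to the function above (an assumption about the library's wiring).

import segno

qr = segno.make('Hello')
uri = qr.svg_data_uri(scale=2)
print(uri[:30])   # data:image/svg+xml;charset=...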
async def fetch_postcodes_from_coordinates(lat: float, long: float) -> Optional[List[Postcode]]: """ Gets a postcode object from the lat and long. :param lat: The latitude to look up. :param long: The longitude to look up. :return: The mapping corresponding to the lat and long or none if the postcode does not exist. :raises ApiError: When there was an error connecting to the API. :raises CircuitBreakerError: When the circuit breaker is open. """ postcode_lookup = f"/postcodes?lat={lat}&lon={long}" return await _get_postcode_from_url(postcode_lookup)
Gets a postcode object from the lat and long. :param lat: The latitude to look up. :param long: The longitude to look up. :return: The mapping corresponding to the lat and long or none if the postcode does not exist. :raises ApiError: When there was an error connecting to the API. :raises CircuitBreakerError: When the circuit breaker is open.
Below is the the instruction that describes the task: ### Input: Gets a postcode object from the lat and long. :param lat: The latitude to look up. :param long: The longitude to look up. :return: The mapping corresponding to the lat and long or none if the postcode does not exist. :raises ApiError: When there was an error connecting to the API. :raises CircuitBreakerError: When the circuit breaker is open. ### Response: async def fetch_postcodes_from_coordinates(lat: float, long: float) -> Optional[List[Postcode]]: """ Gets a postcode object from the lat and long. :param lat: The latitude to look up. :param long: The longitude to look up. :return: The mapping corresponding to the lat and long or none if the postcode does not exist. :raises ApiError: When there was an error connecting to the API. :raises CircuitBreakerError: When the circuit breaker is open. """ postcode_lookup = f"/postcodes?lat={lat}&lon={long}" return await _get_postcode_from_url(postcode_lookup)
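A minimal driver for the coroutine above; the coordinates are illustrative and the lookup hits the live API, so failures surface as the documented exceptions.

import asyncio

async def main():
    postcodes = await fetch_postcodes_from_coordinates(51.501, -0.142)
    if postcodes:
        print(postcodes[0])

asyncio.run(main())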
def login_user(user, remember=None): """Perform the login routine. If SECURITY_TRACKABLE is used, make sure you commit changes after this request (i.e. ``app.security.datastore.commit()``). :param user: The user to login :param remember: Flag specifying if the remember cookie should be set. Defaults to ``False`` """ if remember is None: remember = config_value('DEFAULT_REMEMBER_ME') if not _login_user(user, remember): # pragma: no cover return False if _security.trackable: remote_addr = request.remote_addr or None # make sure it is None old_current_login, new_current_login = ( user.current_login_at, _security.datetime_factory() ) old_current_ip, new_current_ip = user.current_login_ip, remote_addr user.last_login_at = old_current_login or new_current_login user.current_login_at = new_current_login user.last_login_ip = old_current_ip user.current_login_ip = new_current_ip user.login_count = user.login_count + 1 if user.login_count else 1 _datastore.put(user) identity_changed.send(current_app._get_current_object(), identity=Identity(user.id)) return True
Perform the login routine. If SECURITY_TRACKABLE is used, make sure you commit changes after this request (i.e. ``app.security.datastore.commit()``). :param user: The user to login :param remember: Flag specifying if the remember cookie should be set. Defaults to ``False``
Below is the the instruction that describes the task: ### Input: Perform the login routine. If SECURITY_TRACKABLE is used, make sure you commit changes after this request (i.e. ``app.security.datastore.commit()``). :param user: The user to login :param remember: Flag specifying if the remember cookie should be set. Defaults to ``False`` ### Response: def login_user(user, remember=None): """Perform the login routine. If SECURITY_TRACKABLE is used, make sure you commit changes after this request (i.e. ``app.security.datastore.commit()``). :param user: The user to login :param remember: Flag specifying if the remember cookie should be set. Defaults to ``False`` """ if remember is None: remember = config_value('DEFAULT_REMEMBER_ME') if not _login_user(user, remember): # pragma: no cover return False if _security.trackable: remote_addr = request.remote_addr or None # make sure it is None old_current_login, new_current_login = ( user.current_login_at, _security.datetime_factory() ) old_current_ip, new_current_ip = user.current_login_ip, remote_addr user.last_login_at = old_current_login or new_current_login user.current_login_at = new_current_login user.last_login_ip = old_current_ip user.current_login_ip = new_current_ip user.login_count = user.login_count + 1 if user.login_count else 1 _datastore.put(user) identity_changed.send(current_app._get_current_object(), identity=Identity(user.id)) return True
def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info): """ Called when an exception has been raised in the code run by ZeroRPC """ # Hide the zerorpc internal frames for readability, for a REQ/REP or # REQ/STREAM server the frames to hide are: # - core.ServerBase._async_task # - core.Pattern*.process_call # - core.DecoratorBase.__call__ # # For a PUSH/PULL or PUB/SUB server the frame to hide is: # - core.Puller._receiver if self._hide_zerorpc_frames: traceback = exc_info[2] while traceback: zerorpc_frame = traceback.tb_frame zerorpc_frame.f_locals['__traceback_hide__'] = True frame_info = inspect.getframeinfo(zerorpc_frame) # Is there a better way than this (or looking up the filenames # or hardcoding the number of frames to skip) to know when we # are out of zerorpc? if frame_info.function == '__call__' \ or frame_info.function == '_receiver': break traceback = traceback.tb_next self._sentry_client.captureException( exc_info, extra=task_ctx )
Called when an exception has been raised in the code run by ZeroRPC
Below is the the instruction that describes the task: ### Input: Called when an exception has been raised in the code run by ZeroRPC ### Response: def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info): """ Called when an exception has been raised in the code run by ZeroRPC """ # Hide the zerorpc internal frames for readability, for a REQ/REP or # REQ/STREAM server the frames to hide are: # - core.ServerBase._async_task # - core.Pattern*.process_call # - core.DecoratorBase.__call__ # # For a PUSH/PULL or PUB/SUB server the frame to hide is: # - core.Puller._receiver if self._hide_zerorpc_frames: traceback = exc_info[2] while traceback: zerorpc_frame = traceback.tb_frame zerorpc_frame.f_locals['__traceback_hide__'] = True frame_info = inspect.getframeinfo(zerorpc_frame) # Is there a better way than this (or looking up the filenames # or hardcoding the number of frames to skip) to know when we # are out of zerorpc? if frame_info.function == '__call__' \ or frame_info.function == '_receiver': break traceback = traceback.tb_next self._sentry_client.captureException( exc_info, extra=task_ctx )
def get_items(self): """ Return the item models associated with this Publish group. """ from .layers import Layer # no expansion support, just URLs results = [] for url in self.items: if '/layers/' in url: r = self._client.request('GET', url) results.append(self._client.get_manager(Layer).create_from_result(r.json())) else: raise NotImplementedError("No support for %s" % url) return results
Return the item models associated with this Publish group.
Below is the the instruction that describes the task: ### Input: Return the item models associated with this Publish group. ### Response: def get_items(self): """ Return the item models associated with this Publish group. """ from .layers import Layer # no expansion support, just URLs results = [] for url in self.items: if '/layers/' in url: r = self._client.request('GET', url) results.append(self._client.get_manager(Layer).create_from_result(r.json())) else: raise NotImplementedError("No support for %s" % url) return results
def _parse_boolean(value, default=False): """ Attempt to cast *value* into a bool, returning *default* if it fails. """ if value is None: return default try: return bool(value) except ValueError: return default
Attempt to cast *value* into a bool, returning *default* if it fails.
Below is the the instruction that describes the task: ### Input: Attempt to cast *value* into a bool, returning *default* if it fails. ### Response: def _parse_boolean(value, default=False): """ Attempt to cast *value* into a bool, returning *default* if it fails. """ if value is None: return default try: return bool(value) except ValueError: return default
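Worked examples, runnable as-is; note that bool() never raises ValueError for the common types, so a non-empty string like "false" comes back True. This helper tests truthiness rather than parsing booleans.

print(_parse_boolean(None))               # False (the default)
print(_parse_boolean(0))                  # False
print(_parse_boolean("false"))            # True: any non-empty string is truthy
print(_parse_boolean("", default=True))   # False: empty string is falsy, default unused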
def load_profiles(self, overwrite=False): """Load the profiles into the dropdown list. :param overwrite: If we overwrite existing profiles from the plugin. :type overwrite: bool """ for profile in self.minimum_needs.get_profiles(overwrite): self.profile_combo.addItem(profile) minimum_needs = self.minimum_needs.get_full_needs() self.profile_combo.setCurrentIndex( self.profile_combo.findText(minimum_needs['profile']))
Load the profiles into the dropdown list. :param overwrite: If we overwrite existing profiles from the plugin. :type overwrite: bool
Below is the the instruction that describes the task: ### Input: Load the profiles into the dropdown list. :param overwrite: If we overwrite existing profiles from the plugin. :type overwrite: bool ### Response: def load_profiles(self, overwrite=False): """Load the profiles into the dropdown list. :param overwrite: If we overwrite existing profiles from the plugin. :type overwrite: bool """ for profile in self.minimum_needs.get_profiles(overwrite): self.profile_combo.addItem(profile) minimum_needs = self.minimum_needs.get_full_needs() self.profile_combo.setCurrentIndex( self.profile_combo.findText(minimum_needs['profile']))
def __vCmdSetCamAperture(self, args): '''ToDo: Validate CAM number and Valid Aperture Value''' if len(args) == 1: for cam in self.camera_list: cam.boSetAperture(int(args[0])) elif len(args) == 2: cam = self.camera_list[int(args[1])] cam.boSetAperture(int(args[0])) else: print ("Usage: setCamAperture APERTURE [CAMNUMBER], APERTURE is value x10")
ToDo: Validate CAM number and Valid Aperture Value
Below is the the instruction that describes the task: ### Input: ToDo: Validate CAM number and Valid Aperture Value ### Response: def __vCmdSetCamAperture(self, args): '''ToDo: Validate CAM number and Valid Aperture Value''' if len(args) == 1: for cam in self.camera_list: cam.boSetAperture(int(args[0])) elif len(args) == 2: cam = self.camera_list[int(args[1])] cam.boSetAperture(int(args[0])) else: print ("Usage: setCamAperture APERTURE [CAMNUMBER], APERTURE is value x10")
def detachKernelDriver(self, interface): r"""Detach a kernel driver from the interface (if one is attached, we have permission and the operation is supported by the OS) Arguments: interface: interface number or an Interface object. """ if isinstance(interface, Interface): interface = interface.interfaceNumber self.dev.detach_kernel_driver(interface)
r"""Detach a kernel driver from the interface (if one is attached, we have permission and the operation is supported by the OS) Arguments: interface: interface number or an Interface object.
Below is the the instruction that describes the task: ### Input: r"""Detach a kernel driver from the interface (if one is attached, we have permission and the operation is supported by the OS) Arguments: interface: interface number or an Interface object. ### Response: def detachKernelDriver(self, interface): r"""Detach a kernel driver from the interface (if one is attached, we have permission and the operation is supported by the OS) Arguments: interface: interface number or an Interface object. """ if isinstance(interface, Interface): interface = interface.interfaceNumber self.dev.detach_kernel_driver(interface)
def get_dual_rmetric( self, invert_h = False, mode_inv = 'svd' ): """ Compute the dual Riemannian Metric This is not satisfactory, because if mdimG<mdimY the shape of H will not be the same as the shape of G. TODO(maybe): return a (copied) smaller H with only the rows and columns in G. """ if self.H is None: self.H, self.G, self.Hvv, self.Hsvals, self.Gsvals = riemann_metric(self.Y, self.L, self.mdimG, invert_h = invert_h, mode_inv = mode_inv) if invert_h: return self.H, self.G else: return self.H
Compute the dual Riemannian Metric This is not satisfactory, because if mdimG<mdimY the shape of H will not be the same as the shape of G. TODO(maybe): return a (copied) smaller H with only the rows and columns in G.
Below is the the instruction that describes the task: ### Input: Compute the dual Riemannian Metric This is not satisfactory, because if mdimG<mdimY the shape of H will not be the same as the shape of G. TODO(maybe): return a (copied) smaller H with only the rows and columns in G. ### Response: def get_dual_rmetric( self, invert_h = False, mode_inv = 'svd' ): """ Compute the dual Riemannian Metric This is not satisfactory, because if mdimG<mdimY the shape of H will not be the same as the shape of G. TODO(maybe): return a (copied) smaller H with only the rows and columns in G. """ if self.H is None: self.H, self.G, self.Hvv, self.Hsvals, self.Gsvals = riemann_metric(self.Y, self.L, self.mdimG, invert_h = invert_h, mode_inv = mode_inv) if invert_h: return self.H, self.G else: return self.H
def QA_fetch_get_goods_list(ip=None, port=None): """[summary] Keyword Arguments: ip {[type]} -- [description] (default: {None}) port {[type]} -- [description] (default: {None}) 42 3 Commodity index TI 60 3 Main futures contracts MA 28 3 Zhengzhou commodities QZ 29 3 Dalian commodities QD 30 3 Shanghai futures (crude oil + precious metals) QS 47 3 CFFEX financial futures CZ 50 3 Bohai commodities BH 76 3 Qilu commodities QL 46 11 Shanghai gold (London gold T+D) SG """ global extension_market_list extension_market_list = QA_fetch_get_extensionmarket_list( ) if extension_market_list is None else extension_market_list return extension_market_list.query('market==50 or market==76 or market==46')
[summary] Keyword Arguments: ip {[type]} -- [description] (default: {None}) port {[type]} -- [description] (default: {None}) 42 3 Commodity index TI 60 3 Main futures contracts MA 28 3 Zhengzhou commodities QZ 29 3 Dalian commodities QD 30 3 Shanghai futures (crude oil + precious metals) QS 47 3 CFFEX financial futures CZ 50 3 Bohai commodities BH 76 3 Qilu commodities QL 46 11 Shanghai gold (London gold T+D) SG
Below is the the instruction that describes the task: ### Input: [summary] Keyword Arguments: ip {[type]} -- [description] (default: {None}) port {[type]} -- [description] (default: {None}) 42 3 Commodity index TI 60 3 Main futures contracts MA 28 3 Zhengzhou commodities QZ 29 3 Dalian commodities QD 30 3 Shanghai futures (crude oil + precious metals) QS 47 3 CFFEX financial futures CZ 50 3 Bohai commodities BH 76 3 Qilu commodities QL 46 11 Shanghai gold (London gold T+D) SG ### Response: def QA_fetch_get_goods_list(ip=None, port=None): """[summary] Keyword Arguments: ip {[type]} -- [description] (default: {None}) port {[type]} -- [description] (default: {None}) 42 3 Commodity index TI 60 3 Main futures contracts MA 28 3 Zhengzhou commodities QZ 29 3 Dalian commodities QD 30 3 Shanghai futures (crude oil + precious metals) QS 47 3 CFFEX financial futures CZ 50 3 Bohai commodities BH 76 3 Qilu commodities QL 46 11 Shanghai gold (London gold T+D) SG """ global extension_market_list extension_market_list = QA_fetch_get_extensionmarket_list( ) if extension_market_list is None else extension_market_list return extension_market_list.query('market==50 or market==76 or market==46')
def _integrate_segmentation_masks(segmasks): """ `segmasks` should be in sorted order of [coarsest, ..., finest]. Integrates the given list of segmentation masks together to form one segmentation mask by having each segment subsume ones that exist in the finer masks. """ if len(segmasks) == 1: return segmasks assert len(segmasks) > 0, "Passed in empty list of segmentation masks" coarse_mask = np.copy(segmasks[0]) mask_ids = [id for id in np.unique(coarse_mask) if id != 0] for id in mask_ids: for mask in segmasks[1:]: finer_ids = [i for i in np.unique(mask) if i != 0] for finer_id in finer_ids: _update_segmentation_mask_if_overlap(coarse_mask, mask, id, finer_id) # Lastly, merge all adjacent blocks, but just kidding, since this algorithm is waaaay too slow #_merge_adjacent_segments(coarse_mask) return coarse_mask
`segmasks` should be in sorted order of [coarsest, ..., finest]. Integrates the given list of segmentation masks together to form one segmentation mask by having each segment subsume ones that exist in the finer masks.
Below is the the instruction that describes the task: ### Input: `segmasks` should be in sorted order of [coarsest, ..., finest]. Integrates the given list of segmentation masks together to form one segmentation mask by having each segment subsume ones that exist in the finer masks. ### Response: def _integrate_segmentation_masks(segmasks): """ `segmasks` should be in sorted order of [coarsest, ..., finest]. Integrates the given list of segmentation masks together to form one segmentation mask by having each segment subsume ones that exist in the finer masks. """ if len(segmasks) == 1: return segmasks assert len(segmasks) > 0, "Passed in empty list of segmentation masks" coarse_mask = np.copy(segmasks[0]) mask_ids = [id for id in np.unique(coarse_mask) if id != 0] for id in mask_ids: for mask in segmasks[1:]: finer_ids = [i for i in np.unique(mask) if i != 0] for finer_id in finer_ids: _update_segmentation_mask_if_overlap(coarse_mask, mask, id, finer_id) # Lastly, merge all adjacent blocks, but just kidding, since this algorithm is waaaay too slow #_merge_adjacent_segments(coarse_mask) return coarse_mask
def update_models_recursively(state_m, expected=True): """ If a state model is reused the model depth may be too low. Therefore this method checks if all library state models are created with reliable depth :param bool expected: Define newly generated library models as expected or triggers logger warnings if False """ assert isinstance(state_m, AbstractStateModel) if isinstance(state_m, LibraryStateModel): if not state_m.state_copy_initialized: if not expected: logger.warning("State {0} generates unexpected missing state copy models.".format(state_m)) state_m.recursive_generate_models(load_meta_data=False) import rafcon.gui.helpers.meta_data as gui_helper_meta_data gui_helper_meta_data.scale_library_content(state_m) if isinstance(state_m, ContainerStateModel): for child_state_m in state_m.states.values(): update_models_recursively(child_state_m, expected)
If a state model is reused the model depth may be too low. Therefore this method checks if all library state models are created with reliable depth :param bool expected: Define newly generated library models as expected or triggers logger warnings if False
Below is the the instruction that describes the task: ### Input: If a state model is reused the model depth may be too low. Therefore this method checks if all library state models are created with reliable depth :param bool expected: Define newly generated library models as expected or triggers logger warnings if False ### Response: def update_models_recursively(state_m, expected=True): """ If a state model is reused the model depth may be too low. Therefore this method checks if all library state models are created with reliable depth :param bool expected: Define newly generated library models as expected or triggers logger warnings if False """ assert isinstance(state_m, AbstractStateModel) if isinstance(state_m, LibraryStateModel): if not state_m.state_copy_initialized: if not expected: logger.warning("State {0} generates unexpected missing state copy models.".format(state_m)) state_m.recursive_generate_models(load_meta_data=False) import rafcon.gui.helpers.meta_data as gui_helper_meta_data gui_helper_meta_data.scale_library_content(state_m) if isinstance(state_m, ContainerStateModel): for child_state_m in state_m.states.values(): update_models_recursively(child_state_m, expected)
def get_normal_connection(config): """ Get the connection either with a username and password or without """ if config.username and config.password: log.debug("connecting with username and password") queue_manager = pymqi.connect( config.queue_manager_name, config.channel, config.host_and_port, config.username, config.password ) else: log.debug("connecting without a username and password") queue_manager = pymqi.connect(config.queue_manager_name, config.channel, config.host_and_port) return queue_manager
Get the connection either with a username and password or without
Below is the instruction that describes the task:
### Input:
Get the connection either with a username and password or without
### Response:
def get_normal_connection(config):
    """Get the connection either with a username and password or without
    """
    if config.username and config.password:
        log.debug("connecting with username and password")
        queue_manager = pymqi.connect(
            config.queue_manager_name,
            config.channel,
            config.host_and_port,
            config.username,
            config.password
        )
    else:
        log.debug("connecting without a username and password")
        queue_manager = pymqi.connect(config.queue_manager_name, config.channel, config.host_and_port)

    return queue_manager
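A hedged usage sketch. The `Config` namedtuple below is a hypothetical stand-in for whatever configuration object the caller supplies, and actually connecting of course requires a reachable IBM MQ queue manager:

from collections import namedtuple

# Hypothetical config object exposing the attributes the function reads.
Config = namedtuple(
    "Config",
    ["queue_manager_name", "channel", "host_and_port", "username", "password"],
)
config = Config("QM1", "DEV.APP.SVRCONN", "localhost(1414)", "app", "secret")

queue_manager = get_normal_connection(config)
# ... put/get messages via pymqi.Queue(queue_manager, ...) ...
queue_manager.disconnect()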
def solve(self):
    '''
    Solves a one-period consumption-saving problem with risky income, with
    persistent income explicitly tracked as a state variable.

    Parameters
    ----------
    None

    Returns
    -------
    solution : ConsumerSolution
        The solution to the one-period problem, including a consumption
        function (defined over market resources and persistent income), a
        marginal value function, bounding MPCs, and human wealth as a
        function of persistent income.  Might also include a value function
        and marginal marginal value function, depending on options selected.
    '''
    aLvl, pLvl = self.prepareToCalcEndOfPrdvP()
    EndOfPrdvP = self.calcEndOfPrdvP()
    if self.vFuncBool:
        self.makeEndOfPrdvFunc(EndOfPrdvP)
    if self.CubicBool:
        interpolator = self.makeCubiccFunc
    else:
        interpolator = self.makeLinearcFunc
    solution = self.makeBasicSolution(EndOfPrdvP, aLvl, pLvl, interpolator)
    solution = self.addMPCandHumanWealth(solution)
    if self.vFuncBool:
        solution.vFunc = self.makevFunc(solution)
    if self.CubicBool:
        solution = self.addvPPfunc(solution)
    return solution
Solves a one-period consumption-saving problem with risky income, with
persistent income explicitly tracked as a state variable.

Parameters
----------
None

Returns
-------
solution : ConsumerSolution
    The solution to the one-period problem, including a consumption
    function (defined over market resources and persistent income), a
    marginal value function, bounding MPCs, and human wealth as a
    function of persistent income.  Might also include a value function
    and marginal marginal value function, depending on options selected.
Below is the instruction that describes the task:
### Input:
Solves a one-period consumption-saving problem with risky income, with
persistent income explicitly tracked as a state variable.

Parameters
----------
None

Returns
-------
solution : ConsumerSolution
    The solution to the one-period problem, including a consumption
    function (defined over market resources and persistent income), a
    marginal value function, bounding MPCs, and human wealth as a
    function of persistent income.  Might also include a value function
    and marginal marginal value function, depending on options selected.
### Response:
def solve(self):
    '''
    Solves a one-period consumption-saving problem with risky income, with
    persistent income explicitly tracked as a state variable.

    Parameters
    ----------
    None

    Returns
    -------
    solution : ConsumerSolution
        The solution to the one-period problem, including a consumption
        function (defined over market resources and persistent income), a
        marginal value function, bounding MPCs, and human wealth as a
        function of persistent income.  Might also include a value function
        and marginal marginal value function, depending on options selected.
    '''
    aLvl, pLvl = self.prepareToCalcEndOfPrdvP()
    EndOfPrdvP = self.calcEndOfPrdvP()
    if self.vFuncBool:
        self.makeEndOfPrdvFunc(EndOfPrdvP)
    if self.CubicBool:
        interpolator = self.makeCubiccFunc
    else:
        interpolator = self.makeLinearcFunc
    solution = self.makeBasicSolution(EndOfPrdvP, aLvl, pLvl, interpolator)
    solution = self.addMPCandHumanWealth(solution)
    if self.vFuncBool:
        solution.vFunc = self.makevFunc(solution)
    if self.CubicBool:
        solution = self.addvPPfunc(solution)
    return solution
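A hedged sketch of how a one-period solver like this is typically driven during backward induction; `terminal_solution`, `T`, and `build_period_solver` are hypothetical names for illustration, not part of the library shown above:

# Backward induction: solve period T-1, then T-2, ..., down to 0,
# feeding each period's solution into the previous period's solver.
solutions = []
solution_next = terminal_solution  # assumed terminal-period solution
for t in reversed(range(T)):
    solver = build_period_solver(t, solution_next)  # hypothetical factory
    solution_next = solver.solve()
    solutions.insert(0, solution_next)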