code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def call_ping(*args, **kwargs):
    """Ping the lamps by issuing a short inversion blink to all devices.

    Returns True when every device responded, otherwise a dict mapping
    each failing device id to False.
    """
    failed = {
        dev_id: False
        for dev_id, dev_status in call_blink().items()
        if not dev_status['result']
    }
    return failed if failed else True
Ping the lamps by issuing a short inversion blink to all available devices. CLI Example: .. code-block:: bash salt '*' hue.ping
def do_chunked_gzip(infh, outfh, filename):
    """A memory-friendly way of compressing the data.

    Reads `infh` in GZIP_CHUNK_SIZE chunks and writes them gzip-compressed
    into `outfh`, drawing a progress bar on stdout.
    """
    import gzip
    # Stream gzip output into outfh; the archive member is named 'rawlogs'.
    gzfh = gzip.GzipFile('rawlogs', mode='wb', fileobj=outfh)
    if infh.closed:
        # Caller already closed the handle; reopen it by name.
        infh = open(infh.name, 'r')
    else:
        infh.seek(0)
    readsize = 0
    sys.stdout.write('Gzipping {0}: '.format(filename))
    if os.stat(infh.name).st_size:
        infh.seek(0)
        progressbar = ProgressBar(sys.stdout, os.stat(infh.name).st_size,
                                  "bytes gzipped")
        while True:
            chunk = infh.read(GZIP_CHUNK_SIZE)
            if not chunk:
                break
            if sys.version_info[0] >= 3:
                # Text-mode reads yield str on Python 3; gzip needs bytes.
                gzfh.write(bytes(chunk, "utf-8"))
            else:
                gzfh.write(chunk)
            readsize += len(chunk)
            progressbar.redraw(readsize)
    gzfh.close()
A memory-friendly way of compressing the data.
def generate_notes(notes):
    """Generate the notes list.

    :param dict notes: dict of converted notes from the old topology
    :return: list of note dicts for the topology
    :rtype: list
    """
    # Each note becomes a shallow copy of its item dict.
    return [dict(note_data) for note_data in notes.values()]
Generate the notes list :param dict notes: A dict of converted notes from the old topology :return: List of notes for the topology :rtype: list
def delete(self, expected_value=None, return_values=None):
    """Delete the item from DynamoDB.

    :type expected_value: dict
    :param expected_value: attribute name/value pairs you expect to hold;
        a value of False means you expect the attribute to be absent.
    :type return_values: str
    :param return_values: None or 'ALL_OLD'; with 'ALL_OLD', the content
        of the overwritten item is returned.
    """
    layer2 = self.table.layer2
    return layer2.delete_item(self, expected_value, return_values)
Delete the item from DynamoDB. :type expected_value: dict :param expected_value: A dictionary of name/value pairs that you expect. This dictionary should have name/value pairs where the name is the name of the attribute and the value is either the value you are expecting or False if you expect the attribute not to exist. :type return_values: str :param return_values: Controls the return of attribute name-value pairs before they were changed. Possible values are: None or 'ALL_OLD'. If 'ALL_OLD' is specified and the item is overwritten, the content of the old item is returned.
def bulk_update_resourcedata(scenario_ids, resource_scenarios, **kwargs):
    """Update the data associated with a list of scenarios.

    All scenarios must belong to the same network. For each scenario,
    resource scenarios with a dataset are updated; those without are
    deleted.

    :param scenario_ids: ids of the scenarios to update
    :param resource_scenarios: resource scenario objects to apply
    :return: dict mapping scenario id to the list of updated resource
        scenarios
    :raises HydraError: if the scenarios span more than one network
    """
    user_id = kwargs.get('user_id')
    # Fix: removed dead `res = None` that was immediately overwritten.
    res = {}
    net_ids = db.DBSession.query(Scenario.network_id).filter(
        Scenario.id.in_(scenario_ids)).all()
    if len(set(net_ids)) != 1:
        raise HydraError("Scenario IDS are not in the same network")
    for scenario_id in scenario_ids:
        # Consistently use the local user_id (was kwargs['user_id']).
        _check_can_edit_scenario(scenario_id, user_id)
        scen_i = _get_scenario(scenario_id, user_id)
        res[scenario_id] = []
        for rs in resource_scenarios:
            if rs.dataset is not None:
                updated_rs = _update_resourcescenario(
                    scen_i, rs, user_id=user_id,
                    source=kwargs.get('app_name'))
                res[scenario_id].append(updated_rs)
            else:
                # No dataset supplied: remove the resource scenario.
                _delete_resourcescenario(scenario_id, rs.resource_attr_id)
    db.DBSession.flush()
    return res
Update the data associated with a list of scenarios.
async def export_wallet(handle: int, export_config_json: str) -> None:
    """Export the opened wallet to a file.

    :param handle: wallet handle returned by indy_open_wallet.
    :param export_config_json: JSON with export settings (path, key,
        optional key_derivation_method).
    :return: None
    """
    logger = logging.getLogger(__name__)
    logger.debug("export_wallet: >>> handle: %r, export_config_json: %r",
                 handle, export_config_json)
    # The C callback is created lazily, once, and cached on the function.
    if not hasattr(export_wallet, "cb"):
        logger.debug("export_wallet: Creating callback")
        export_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
    config_p = c_char_p(export_config_json.encode('utf-8'))
    await do_call('indy_export_wallet',
                  handle,
                  config_p,
                  export_wallet.cb)
    logger.debug("export_wallet: <<<")
Exports opened wallet to the file. :param handle: wallet handle returned by indy_open_wallet. :param export_config_json: JSON containing settings for the export operation. { "path": path of the file that contains exported wallet content "key": string, Key or passphrase used for wallet export key derivation. Look to key_derivation_method param for information about supported key derivation methods. "key_derivation_method": optional<string> algorithm to use for export key derivation: ARGON2I_MOD - derive secured wallet export key (used by default) ARGON2I_INT - derive secured wallet export key (less secured but faster) RAW - raw wallet export key provided (skip derivation). RAW keys can be generated with generate_wallet_key call } :return:
def get_initial_broks_from_satellites(self):
    """Get initial broks from my internal satellite links.

    :return: None
    """
    all_satellite_lists = (self.conf.brokers, self.conf.schedulers,
                           self.conf.pollers, self.conf.reactionners,
                           self.conf.receivers)
    for satellites in all_satellite_lists:
        for satellite in satellites:
            # Skip links we cannot currently reach.
            if not satellite.reachable:
                continue
            logger.debug("Getting initial brok from: %s", satellite.name)
            brok = satellite.get_initial_status_brok()
            logger.debug("Satellite '%s' initial brok: %s",
                         satellite.name, brok)
            self.add(brok)
Get initial broks from my internal satellite links :return: None
def record_span(self, span):
    """Convert the passed BasicSpan into a JsonSpan and queue it."""
    # Only record when the agent can ship spans (or under test).
    if not (instana.singletons.agent.can_send()
            or "INSTANA_TEST" in os.environ):
        return
    if span.operation_name in self.registered_spans:
        json_span = self.build_registered_span(span)
    else:
        json_span = self.build_sdk_span(span)
    self.queue.put(json_span)
Convert the passed BasicSpan into a JsonSpan and add it to the span queue
def revisions_diff(self, doc_id, *revisions):
    """Return the differences in the remote database for the given
    document id and list of revision values.

    :param str doc_id: document id to check against.
    :param revisions: document revision values to check.
    :returns: the revision differences in JSON format
    """
    url = '/'.join((self.database_url, '_revs_diff'))
    payload = json.dumps({doc_id: list(revisions)},
                         cls=self.client.encoder)
    resp = self.r_session.post(
        url,
        headers={'Content-Type': 'application/json'},
        data=payload,
    )
    resp.raise_for_status()
    return response_to_json_dict(resp)
Returns the differences in the current remote database for the specified document id and specified list of revision values. :param str doc_id: Document id to check for revision differences against. :param list revisions: List of document revisions values to check against. :returns: The revision differences in JSON format
def _get_verts_and_connect(self, paths): verts = np.vstack(paths) gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1 connect = np.ones(gaps[-1], dtype=bool) connect[gaps[:-1]] = False return verts, connect
retrieve vertices and connects from given paths-list
def uid_something_colon(self, node):
    """Creates op_pos for node from uid to colon.

    Records the node's own uid position, then locates the ':' operator
    preceding the first statement of the node's body and appends it.

    :return: last position of the colon operator
    """
    node.op_pos = [
        NodeWithPosition(node.uid, (node.first_line, node.first_col))
    ]
    # The colon must appear before the first body statement.
    position = (node.body[0].first_line, node.body[0].first_col)
    last, first = self.operators[':'].find_previous(position)
    node.op_pos.append(NodeWithPosition(last, first))
    return last
Creates op_pos for node from uid to colon
def _disjoint_qubits(op1: ops.Operation, op2: ops.Operation) -> bool: return not set(op1.qubits) & set(op2.qubits)
Returns true only if the operations have no qubits in common.
def check_namespace(namespace_id):
    """Verify that a namespace ID is well-formed.

    Returns True when `namespace_id` is a string accepted by
    is_namespace_valid(), False otherwise (including non-string input).
    """
    # `unicode` only exists on Python 2; on Python 3 the original
    # `type(x) in [str, unicode]` check raised NameError.
    try:
        string_types = (str, unicode)
    except NameError:
        string_types = (str,)
    if not isinstance(namespace_id, string_types):
        return False
    if not is_namespace_valid(namespace_id):
        return False
    return True
Verify that a namespace ID is well-formed >>> check_namespace(123) False >>> check_namespace(None) False >>> check_namespace('') False >>> check_namespace('abcd') True >>> check_namespace('Abcd') False >>> check_namespace('a+bcd') False >>> check_namespace('.abcd') False >>> check_namespace('abcdabcdabcdabcdabcd') False >>> check_namespace('abcdabcdabcdabcdabc') True
def predict_task(self, X, t=0, break_ties="random", **kwargs):
    """Predicts int labels for an input X on task t.

    Args:
        X: input for the predict_task_proba method
        t: task index to predict
        break_ties: tie-breaking strategy passed to _break_ties
    Returns:
        An n-dim tensor of int predictions for the specified task
    """
    task_probs = self.predict_task_proba(X, t=t, **kwargs)
    return self._break_ties(task_probs, break_ties)
Predicts int labels for an input X on task t Args: X: The input for the predict_task_proba method t: The task index to predict Returns: An n-dim tensor of int predictions for the specified task
def get_historical_minute_data(self, ticker: str):
    """Request historical minute (60-second interval) data from DTN and
    write it to InfluxDB.

    :param ticker: symbol to request history for.
    """
    start = self._start
    stop = self._stop
    # Only the year portion of the configured start/stop stamps is used.
    if len(stop) > 4:
        stop = stop[:4]
    if len(start) > 4:
        start = start[:4]
    for year in range(int(start), int(stop) + 1):
        beg_time = ('%s0101000000' % year)
        end_time = ('%s1231235959' % year)
        # HIT request with a 60-second bar interval for one full year.
        msg = "HIT,%s,60,%s,%s,,,,1,,,s\r\n" % (ticker, beg_time, end_time)
        try:
            data = iq.iq_query(message=msg)
            iq.add_data_to_df(data=data)
        except Exception as err:
            log.error('No data returned because %s', err)
    try:
        self.dfdb.write_points(self._ndf, ticker)
    except InfluxDBClientError as err:
        log.error('Write to database failed: %s' % err)
Request historical minute (60-second interval) data from DTN.
def main(argv=None):
    """Main entry point when the user runs the `trytravis` command."""
    try:
        colorama.init()
        argv = sys.argv[1:] if argv is None else argv
        _main(argv)
    except RuntimeError as e:
        # Print the failure in red, then exit non-zero.
        print(colorama.Fore.RED + 'ERROR: ' + str(e)
              + colorama.Style.RESET_ALL)
        sys.exit(1)
    else:
        sys.exit(0)
Main entry point when the user runs the `trytravis` command.
def parse_text_urls(mesg):
    """Parse a block of text, splitting it into url and non-url chunks."""
    chunks = []
    pos = 0
    for match in URLRE.finditer(mesg):
        # Text between the previous match and this one is a plain chunk.
        if pos < match.start():
            chunks.append(Chunk(mesg[pos:match.start()], None))
        email = match.group("email")
        if email and "mailto" not in email:
            link = "mailto:{}".format(email)
        else:
            link = match.group(1)
        chunks.append(Chunk(None, link))
        pos = match.end()
    if pos < len(mesg):
        chunks.append(Chunk(mesg[pos:], None))
    return chunks
Parse a block of text, splitting it into its url and non-url components.
def retrieve(self, *args, **kwargs):
    """Retrieve the permission function for the provided arguments."""
    table, key = self._lookup(*args, **kwargs)
    return table[key]
Retrieve the permission function for the provided arguments.
def create_contour_metadata(contour_path):
    """Create metadata file for contour layer.

    :param contour_path: Path where the contour is located.
    :type contour_path: basestring
    """
    inasafe_fields = {
        contour_field['key']: contour_field['field_name']
        for contour_field in contour_fields
    }
    metadata = {
        'title': tr('Earthquake Contour'),
        'layer_purpose': layer_purpose_earthquake_contour['key'],
        'layer_geometry': layer_geometry_line['key'],
        'layer_mode': layer_mode_classified['key'],
        'inasafe_fields': inasafe_fields,
    }
    write_iso19115_metadata(contour_path, metadata)
Create metadata file for contour layer. :param contour_path: Path where the contour is located. :type contour_path: basestring
def locate(self, point, _verify=True):
    r"""Find a point on the current surface.

    Solves for :math:`s` and :math:`t` in :math:`B(s, t) = p`.

    :param point: a ``D x 1`` point on the surface, where ``D`` is the
        dimension of the surface.
    :param _verify: if True, validate dimension and point shape; can be
        disabled to speed up execution.
    :returns: the ``(s, t)`` values for ``point``, or None if the point
        is not on the surface.
    :raises NotImplementedError: if the surface is not 2D.
    :raises ValueError: if ``point`` has the wrong shape.
    """
    if _verify:
        if self._dimension != 2:
            raise NotImplementedError("Only 2D surfaces supported.")
        if point.shape != (self._dimension, 1):
            point_dimensions = " x ".join(
                str(dimension) for dimension in point.shape
            )
            msg = _LOCATE_ERROR_TEMPLATE.format(
                self._dimension, self._dimension, point, point_dimensions
            )
            raise ValueError(msg)
    return _surface_intersection.locate_point(
        self._nodes, self._degree, point[0, 0], point[1, 0]
    )
r"""Find a point on the current surface. Solves for :math:`s` and :math:`t` in :math:`B(s, t) = p`. This method acts as a (partial) inverse to :meth:`evaluate_cartesian`. .. warning:: A unique solution is only guaranteed if the current surface is valid. This code assumes a valid surface, but doesn't check. .. image:: ../../images/surface_locate.png :align: center .. doctest:: surface-locate >>> nodes = np.asfortranarray([ ... [0.0, 0.5 , 1.0, 0.25, 0.75, 0.0], ... [0.0, -0.25, 0.0, 0.5 , 0.75, 1.0], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> point = np.asfortranarray([ ... [0.59375], ... [0.25 ], ... ]) >>> s, t = surface.locate(point) >>> s 0.5 >>> t 0.25 .. testcleanup:: surface-locate import make_images make_images.surface_locate(surface, point) Args: point (numpy.ndarray): A (``D x 1``) point on the surface, where :math:`D` is the dimension of the surface. _verify (Optional[bool]): Indicates if extra caution should be used to verify assumptions about the inputs. Can be disabled to speed up execution time. Defaults to :data:`True`. Returns: Optional[Tuple[float, float]]: The :math:`s` and :math:`t` values corresponding to ``point`` or :data:`None` if the point is not on the surface. Raises: NotImplementedError: If the surface isn't in :math:`\mathbf{R}^2`. ValueError: If the dimension of the ``point`` doesn't match the dimension of the current surface.
def axis_to_data_points(ax, points_axis):
    """Map points in axis coordinates to data coordinates.

    Uses matplotlib transforms.

    Parameters
    ----------
    ax : matplotlib.axis
        Axis object from matplotlib.
    points_axis : np.array
        Points in axis coordinates.
    """
    transform = ax.transAxes + ax.transData.inverted()
    return transform.transform(points_axis)
Map points in axis coordinates to data coordinates. Uses matplotlib.transform. Parameters ---------- ax : matplotlib.axis Axis object from matplotlib. points_axis : np.array Points in axis coordinates.
def search(self, q, **kw):
    """Search Gnip for the given query, returning the deserialized
    response.
    """
    url = '{base_url}/search/{stream}'.format(**vars(self))
    # NOTE: self.params and then kw may override 'q', matching the
    # original precedence.
    params = {'q': q}
    params.update(self.params)
    params.update(kw)
    response = self.session.get(url, params=params)
    response.raise_for_status()
    return response.json()
Search Gnip for given query, returning deserialized response.
def coerce(self, value):
    """Convert whatever is given to a list of scalars for the
    lookup_field.
    """
    # Dicts and non-iterables (or strings) are wrapped in a list first.
    if isinstance(value, dict) or not isiterable_notstring(value):
        value = [value]
    return [coerce_single_instance(self.lookup_field, item)
            for item in value]
Convert from whatever is given to a list of scalars for the lookup_field.
def replace_u_start_day(day):
    """Find the earliest legitimate day for a partially-unknown day
    string, replacing 'u' placeholders.
    """
    day = day.lstrip('-')
    special_cases = {'uu': '01', '0u': '01', 'u0': '10'}
    if day in special_cases:
        return special_cases[day]
    return day.replace('u', '0')
Find the earliest legitimate day.
def generate_modules_cache(self, modules, underlined=None,
                           task_handle=taskhandle.NullTaskHandle()):
    """Generate global name cache for modules listed in `modules`.

    Module names ending in '.*' are expanded to every submodule of that
    package; other names are cached directly.
    """
    # Fix: corrected typo in the user-visible progress message
    # ('Generatig' -> 'Generating').
    job_set = task_handle.create_jobset(
        'Generating autoimport cache for modules', len(modules))
    for modname in modules:
        job_set.started_job('Working on <%s>' % modname)
        if modname.endswith('.*'):
            # Package wildcard: cache each submodule resource.
            mod = self.project.find_module(modname[:-2])
            if mod:
                for sub in submodules(mod):
                    self.update_resource(sub, underlined)
        else:
            self.update_module(modname, underlined)
        job_set.finished_job()
Generate global name cache for modules listed in `modules`
def start(name, quiet=False, path=None):
    """Start the named container.

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0

    .. code-block:: bash

        salt-run lxc.start name
    """
    data = _do_names(name, 'start', path=path)
    # Fire a progress event unless the caller asked for quiet mode.
    if not quiet and data:
        __jid_event__.fire_event(
            {'data': data, 'outputter': 'lxc_start'}, 'progress')
    return data
Start the named container. path path to the container parent default: /var/lib/lxc (system default) .. versionadded:: 2015.8.0 .. code-block:: bash salt-run lxc.start name
def _set_attributes(self):
    """Transform the config dictionary into instance attributes.

    Values are first wrapped by ``obj`` so they can be read back with
    dot attribute access instead of dictionary access.
    """
    config = obj(self._config_dict)
    # Fix: iterate keys only; the original iterated .items() but never
    # used the value.
    for key in self._config_dict:
        setattr(self, key, getattr(config, key))
Recursively transforms config dictionaries into instance attrs to make for easy dot attribute access instead of dictionary access.
def value(self, new_value):
    """Set the value of this measurement.

    Raises:
        AttributeError: if the new value isn't of the correct units.
    """
    units_declared = self.unit != units.Undefined
    if units_declared and new_value.unit != self.unit:
        raise AttributeError("%s must be in %s" % (
            self.__class__, self.unit))
    self._value = new_value
Set the value of this measurement. Raises: AttributeError: if the new value isn't of the correct units.
def setup_logging(namespace):
    """Set up global logging from the parsed CLI namespace.

    Verbosity 0-3 maps to ERROR..DEBUG; anything higher means DEBUG.
    """
    level_map = {
        0: logging.ERROR,
        1: logging.WARNING,
        2: logging.INFO,
        3: logging.DEBUG,
    }
    level = level_map.get(namespace.verbosity, logging.DEBUG)
    detailed = namespace.verbosity > 1
    fmt = ('%(levelname)s csvpandas %(lineno)s %(message)s'
           if detailed else 'csvpandas %(message)s')
    logging.basicConfig(stream=namespace.log, format=fmt, level=level)
setup global logging
def interval(coro, interval=1, times=None, loop=None):
    """Schedule repeated execution of a coroutine function every
    `interval` seconds, at most `times` times (infinite by default).

    Returns a wrapper function; calling it yields an asyncio.Task which
    can be cancelled to stop the cycle. Usable as a decorator.

    :raises TypeError: if `coro` is not a coroutine function.
    """
    assert_corofunction(coro=coro)

    # 0 or None means run forever.
    times = int(times or 0) or float('inf')

    @asyncio.coroutine
    def schedule(times, *args, **kw):
        while times > 0:
            times -= 1
            # Run one execution, then wait before the next.
            yield from coro(*args, **kw)
            yield from asyncio.sleep(interval)

    def wrapper(*args, **kw):
        return ensure_future(schedule(times, *args, **kw), loop=loop)

    return wrapper
Schedules the execution of a coroutine function every `x` amount of seconds. The function returns an `asyncio.Task`, which implements also an `asyncio.Future` interface, allowing the user to cancel the execution cycle. This function can be used as decorator. Arguments: coro (coroutinefunction): coroutine function to defer. interval (int/float): number of seconds to repeat the coroutine execution. times (int): optional maximum time of executions. Infinite by default. loop (asyncio.BaseEventLoop, optional): loop to run. Defaults to asyncio.get_event_loop(). Raises: TypeError: if coro argument is not a coroutine function. Returns: future (asyncio.Task): coroutine wrapped as task future. Useful for cancellation and state checking. Usage:: # Usage as function future = paco.interval(coro, 1) # Cancel it after a while... await asyncio.sleep(5) future.cancel() # Usage as decorator @paco.interval(10) async def metrics(): await send_metrics() future = await metrics()
def MessageToDict(message, including_default_value_fields=False, preserving_proto_field_name=False):
    """Converts a protobuf message to a JSON dictionary.

    Args:
        message: the protocol buffers message instance to serialize.
        including_default_value_fields: if True, always serialize
            singular primitive, repeated and map fields.
        preserving_proto_field_name: if True, keep original proto field
            names instead of lowerCamelCase.

    Returns:
        A dict representation of the message.
    """
    return _Printer(
        including_default_value_fields,
        preserving_proto_field_name,
    )._MessageToJsonObject(message)
Converts protobuf message to a JSON dictionary. Args: message: The protocol buffers message instance to serialize. including_default_value_fields: If True, singular primitive fields, repeated fields, and map fields will always be serialized. If False, only serialize non-empty fields. Singular message fields and oneof fields are not affected by this option. preserving_proto_field_name: If True, use the original proto field names as defined in the .proto file. If False, convert the field names to lowerCamelCase. Returns: A dict representation of the JSON formatted protocol buffer message.
def get_vendor(self, mac):
    """Get vendor company name.

    Keyword arguments:
    mac -- MAC address or OUI for searching
    """
    query = {
        self._SEARCH_F: mac,
        self._FORMAT_F: self._VERBOSE_T,
    }
    raw = self.__call_api(self.__url, query)
    return self.__decode_str(raw, 'utf-8')
Get vendor company name. Keyword arguments: mac -- MAC address or OUI for searching
def _sinusoid(x, p, L, y): N = int(len(p)/2) n = np.linspace(0, N, N+1) k = n*np.pi/L func = 0 for n in range(0, N): func += p[2*n]*np.sin(k[n]*x)+p[2*n+1]*np.cos(k[n]*x) return func
Return the sinusoid cont func evaluated at input x for the continuum. Parameters ---------- x: float or np.array data, input to function p: ndarray coefficients of fitting function L: float width of x data y: float or np.array output data corresponding to input x Returns ------- func: float function evaluated for the input x
def make_function_arguments(args, kwonly, varargs, varkwargs, defaults,
                            kw_defaults, annotations):
    """Make an ast.arguments from the args parsed out of a code object."""
    def make_arg(name):
        # Annotation is None when the name has no recorded annotation.
        return ast.arg(arg=name, annotation=annotations.get(name))

    vararg_node = make_arg(varargs) if varargs is not None else None
    kwarg_node = make_arg(varkwargs) if varkwargs is not None else None
    return ast.arguments(
        args=[make_arg(name) for name in args],
        kwonlyargs=[make_arg(name) for name in kwonly],
        defaults=defaults,
        kw_defaults=[kw_defaults.get(name) for name in kwonly],
        vararg=vararg_node,
        kwarg=kwarg_node,
    )
Make an ast.arguments from the args parsed out of a code object.
def tokenize(text):
    """Yield tokens.

    Args:
        text (str): The original text.

    Yields:
        dict: The next token with 'stemmed', 'unstemmed' and 'offset'.
    """
    stem = PorterStemmer().stem
    matches = re.finditer('[a-z]+', text.lower())
    for offset, match in enumerate(matches):
        word = match.group(0)
        yield {
            'stemmed': stem(word),
            'unstemmed': word,
            'offset': offset,
        }
Yield tokens. Args: text (str): The original text. Yields: dict: The next token.
def start_http_server(self, port, host='0.0.0.0', endpoint=None):
    """Start an HTTP server exposing the metrics, if
    `should_start_http_server` says we should; otherwise just return.

    Uses the `prometheus_client` implementation rather than a Flask app.

    :param port: HTTP port to expose the metrics endpoint on
    :param host: HTTP host to listen on (default: `0.0.0.0`)
    :param endpoint: ignored; the server responds on any path
    """
    if not self.should_start_http_server():
        return
    pc_start_http_server(port, host, registry=self.registry)
Start an HTTP server for exposing the metrics, if the `should_start_http_server` function says we should, otherwise just return. Uses the implementation from `prometheus_client` rather than a Flask app. :param port: the HTTP port to expose the metrics endpoint on :param host: the HTTP host to listen on (default: `0.0.0.0`) :param endpoint: **ignored**, the HTTP server will respond on any path
def parse_csv_header(line):
    """Parse the CSV header returned by TDS.

    Returns (names, units): column names in order, and a mapping from a
    column name to the unit parsed from the 'name[unit="..."]' form.
    """
    units = {}
    names = []
    for var in line.split(','):
        bracket = var.find('[')
        if bracket < 0:
            # Plain column without a bracketed unit annotation.
            names.append(str(var))
            continue
        names.append(str(var[:bracket]))
        end = var.find(']', bracket)
        unitstr = var[bracket + 1:end]
        eq = unitstr.find('=')
        if eq >= 0:
            # Strip the surrounding quotes from the unit value.
            units[names[-1]] = unitstr[eq + 2:-1]
    return names, units
Parse the CSV header returned by TDS.
def ledger(self, start=None, end=None):
    """Returns a sequence of LedgerEntry's for this account, matching
    the criteria, in chronological order.

    The returned sequence can be boolean-tested for emptiness. If
    `start` is given (tz-aware), only entries on or after it are
    returned and the running balance begins at the balance at `start`.
    If `end` is given (tz-aware), only entries before it are returned.
    """
    DEBIT_IN_DB = self._DEBIT_IN_DB()
    # Flip the sign for accounts where credits count as positive.
    flip = 1
    if self._positive_credit():
        flip *= -1
    qs = self._entries_range(start=start, end=end)
    qs = qs.order_by("transaction__t_stamp", "transaction__tid")
    balance = Decimal("0.00")
    if start:
        balance = self.balance(start)
    if not qs:
        return []
    def helper(balance_in):
        # Generator yielding each entry with its before/after balances.
        balance = balance_in
        for e in qs.all():
            amount = e.amount * DEBIT_IN_DB
            o_balance = balance
            balance += flip * amount
            yield LedgerEntry(amount, e, o_balance, balance)
    return helper(balance)
Returns a list of entries for this account. Ledger returns a sequence of LedgerEntry's matching the criteria in chronological order. The returned sequence can be boolean-tested (ie. test that nothing was returned). If 'start' is given, only entries on or after that datetime are returned. 'start' must be given with a timezone. If 'end' is given, only entries before that datetime are returned. 'end' must be given with a timezone.
def find_hass_config():
    """Try to find the Home Assistant configuration directory."""
    # Under the Hass.io supervisor the config is always mounted here.
    if "HASSIO_TOKEN" in os.environ:
        return "/config"
    candidate = default_hass_config_dir()
    if os.path.isdir(candidate):
        return candidate
    raise ValueError(
        "Unable to automatically find the location of Home Assistant "
        "config. Please pass it in."
    )
Try to find HASS config.
def get_nt_7z_dir ():
    """Return 7-Zip directory from registry, or an empty string.

    Windows only: relies on winreg (or the Python 2 _winreg module).
    """
    try:
        import _winreg as winreg
    except ImportError:
        import winreg
    try:
        key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\7-Zip")
        try:
            return winreg.QueryValueEx(key, "Path")[0]
        finally:
            winreg.CloseKey(key)
    except WindowsError:
        # Key not present -> assume 7-Zip is not installed.
        return ""
Return 7-Zip directory from registry, or an empty string.
def get_month(datestring):
    """Transforms a written month into the corresponding month number.

    E.g. November -> 11, or May -> 05.

    Keyword arguments:
    datestring -- a string

    Returns:
    String, or None if the transformation fails
    """
    convert_written = re.compile(
        r"jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec", re.IGNORECASE)
    month = convert_written.search(datestring)
    if not month:
        # Fix: previously fell through to `month_number < 10` with
        # month_number=None, raising TypeError on Python 3 instead of
        # returning None as documented.
        return None
    month_number = strptime(month.group(), "%b").tm_mon
    if month_number < 10:
        month_number = add_zero(month_number)
    return str(month_number)
Transforms a written month into corresponding month number. E.g. November -> 11, or May -> 05. Keyword arguments: datestring -- a string Returns: String, or None if the transformation fails
def salt_ssh_create_dirs(self):
    """Creates the `salt-ssh` required directory structure."""
    logger.debug('Creating salt-ssh dirs into: %s', self.settings_dir)
    required = [
        ('salt',),
        ('pillar',),
        ('etc', 'salt'),
        ('var', 'cache', 'salt'),
        ('var', 'log', 'salt'),
    ]
    for parts in required:
        utils.create_dir(os.path.join(self.settings_dir, *parts))
Creates the `salt-ssh` required directory structure
def color(string, status=True, warning=False, bold=True):
    """Change text color for the linux terminal; defaults to green.

    Set warning=True for red.
    """
    codes = []
    if status:
        codes.append('32')   # green
    if warning:
        codes.append('31')   # red
    if bold:
        codes.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(codes), string)
Change text color for the linux terminal, defaults to green. Set "warning=True" for red.
def get_url_distribution(self, params=None):
    """Retrieves a live feed with the latest URLs submitted to VT.

    Args:
        params: dict with name/value for optional arguments.

    Returns:
        A dict with the VT report.
    """
    params = params or {}
    api_name = 'virustotal-url-distribution'
    all_responses = {}
    names = list(params.keys())
    values = list(params.values())
    response_chunks = self._request_reports(names, values,
                                            'url/distribution')
    self._extract_response_chunks(all_responses, response_chunks, api_name)
    return all_responses
Retrieves a live feed with the latest URLs submitted to VT. Args: resources: a dictionary with name and value for optional arguments Returns: A dict with the VT report.
def membuf_tempfile(memfile):
    """Write an in-memory file object to a real temporary file.

    Returns the path of the new '.rar' temp file. On any error the
    temp file is removed and the exception re-raised.
    """
    memfile.seek(0, 0)
    tmpfd, tmpname = mkstemp(suffix='.rar')
    tmpf = os.fdopen(tmpfd, "wb")
    try:
        while True:
            block = memfile.read(BSIZE)
            if not block:
                break
            tmpf.write(block)
        tmpf.close()
    except:
        # Clean up the partial file, then propagate the error.
        tmpf.close()
        os.unlink(tmpname)
        raise
    return tmpname
Write in-memory file object to real file.
def to_image(self, shape):
    """Return an image of the mask in a 2D array of the given shape,
    taking any edge effects into account.

    Parameters
    ----------
    shape : tuple of int
        The ``(ny, nx)`` shape of the output array.

    Returns
    -------
    result : `~numpy.ndarray`
        A 2D array of the mask.
    """
    if len(shape) != 2:
        raise ValueError('input shape must have 2 elements.')
    image = np.zeros(shape)
    # Mask extends past the lower/left edge: partial-overlap path.
    if self.bbox.ixmin < 0 or self.bbox.iymin < 0:
        return self._to_image_partial_overlap(image)
    try:
        image[self.bbox.slices] = self.data
    except ValueError:
        # Shape mismatch: mask extends past the upper/right edge.
        image = self._to_image_partial_overlap(image)
    return image
Return an image of the mask in a 2D array of the given shape, taking any edge effects into account. Parameters ---------- shape : tuple of int The ``(ny, nx)`` shape of the output array. Returns ------- result : `~numpy.ndarray` A 2D array of the mask.
def http_reply(self):
    """Return a Flask reply object describing this error."""
    data = {
        'status': self.status,
        'error': self.code.upper(),
        'error_description': str(self)
    }
    # Optional diagnostic fields, included only when populated.
    if self.error_caught:
        data['error_caught'] = pformat(self.error_caught)
    if self.error_id:
        data['error_id'] = self.error_id
    if self.user_message:
        data['user_message'] = self.user_message
    r = jsonify(data)
    r.status_code = self.status
    # Log every non-200 reply.
    if str(self.status) != "200":
        log.warn("ERROR: caught error %s %s [%s]" % (self.status, self.code, str(self)))
    return r
Return a Flask reply object describing this error
def date_to_datetime(date, fraction=0.0):
    """Combine a date with a time-of-day offset.

    `fraction` is how far through the day: 0 = start of the day,
    1 = end of the day (23:59:59).
    """
    seconds_in_day = 24 * 60 * 60 - 1
    offset = datetime.timedelta(seconds=int(seconds_in_day * fraction))
    return datetime.datetime.combine(date, datetime.time()) + offset
fraction is how much through the day you are. 0=start of the day, 1=end of the day.
def filebrowser(request):
    """JavaScript callback function for django-filebrowser.

    :param request: Django http request
    :return: http response with minified filebrowser JavaScript for
        TinyMCE 4
    """
    # Old-style URL name first, namespaced name as a fallback.
    try:
        fb_url = reverse('fb_browse')
    except:
        fb_url = reverse('filebrowser:fb_browse')
    js = render_to_string('tinymce/filebrowser.js',
                          context={'fb_url': fb_url},
                          request=request)
    return HttpResponse(
        jsmin(js),
        content_type='application/javascript; charset=utf-8')
JavaScript callback function for `django-filebrowser`_ :param request: Django http request :type request: django.http.request.HttpRequest :return: Django http response with filebrowser JavaScript code for for TinyMCE 4 :rtype: django.http.HttpResponse .. _django-filebrowser: https://github.com/sehmaschine/django-filebrowser
def _taskify(func):
    """Convert a function into a task.

    Wraps `func` in a _Task, records its positional args, keyword
    defaults and CLI-style option names, and registers public tasks
    in TASKS.
    """
    if not isinstance(func, _Task):
        func = _Task(func)
    # Fix: inspect.getargspec() was removed in Python 3.11;
    # getfullargspec() provides the same .args/.defaults fields.
    spec = inspect.getfullargspec(func.func)
    if spec.args:
        num_args = len(spec.args)
        num_kwargs = len(spec.defaults or [])
        # Options defaulting to False are flags (no '=VALUE' suffix).
        isflag = lambda f, key: '' if f.defaults[key] is False else '='
        func.args = spec.args[:(num_args - num_kwargs)]
        func.defaults = {spec.args[i - num_kwargs]: spec.defaults[i]
                         for i in range(num_kwargs)}
        func.kwargs = [key.replace('_', '-') + isflag(func, key)
                       for key in func.defaults]
    if not func.name.startswith('_'):
        TASKS.append(func)
    return func
Convert a function into a task.
def step_prev(self):
    """Go to the previous step.

    Moves the window back by window_length / window_step and updates the
    overview position; does nothing when that would go before the start.
    """
    window_start = around(self.parent.value('window_start') - self.parent.value('window_length') / self.parent.value('window_step'), 2)
    if window_start < 0:
        return
    self.parent.overview.update_position(window_start)
Go to the previous step.
def image_summary(predictions, targets, hparams):
    """Reshapes predictions and passes them to tensorboard.

    Args:
        predictions: the predicted image (logits).
        targets: the ground truth.
        hparams: model hparams (unused).

    Returns:
        summary proto with the images, and zero weights of the same
        shape as predictions.
    """
    del hparams  # unused, kept for the summary-fn signature
    predicted = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8)
    gold = tf.cast(targets, tf.uint8)
    merged = tf.summary.merge([
        tf.summary.image("prediction", predicted, max_outputs=2),
        tf.summary.image("data", gold, max_outputs=2),
    ])
    return merged, tf.zeros_like(predictions)
Reshapes predictions and passes it to tensorboard. Args: predictions : The predicted image (logits). targets : The ground truth. hparams: model hparams. Returns: summary_proto: containing the summary images. weights: A Tensor of zeros of the same shape as predictions.
def _pp(dict_data): for key, val in dict_data.items(): print('{0:<11}: {1}'.format(key, val))
Pretty print.
def cs_axis_mapping(cls, part_info, axes_to_move ):
    """Given the motor infos for the parts, filter those with scannable
    names in axes_to_move, check they are all in the same CS, and
    return (cs_port, axis_mapping).

    Raises AssertionError when a scannable has no 1-1 cs_axis mapping,
    when a requested axis is missing, when the axes span multiple CS
    ports, or when two raw motors share one CS axis definition.
    """
    cs_ports = set()
    axis_mapping = {}
    for motor_info in cls.filter_values(part_info):
        if motor_info.scannable in axes_to_move:
            assert motor_info.cs_axis in cs_axis_names, \
                "Can only scan 1-1 mappings, %r is %r" % \
                (motor_info.scannable, motor_info.cs_axis)
            cs_ports.add(motor_info.cs_port)
            axis_mapping[motor_info.scannable] = motor_info
    missing = list(set(axes_to_move) - set(axis_mapping))
    assert not missing, \
        "Some scannables %s are not in the CS mapping %s" % (
            missing, axis_mapping)
    assert len(cs_ports) == 1, \
        "Requested axes %s are in multiple CS numbers %s" % (
            axes_to_move, list(cs_ports))
    # Each CS axis may be driven by at most one raw motor.
    cs_axis_counts = Counter([x.cs_axis for x in axis_mapping.values()])
    overlap = [k for k, v in cs_axis_counts.items() if v > 1]
    assert not overlap, \
        "CS axis defs %s have more that one raw motor attached" % overlap
    return cs_ports.pop(), axis_mapping
Given the motor infos for the parts, filter those with scannable names in axes_to_move, check they are all in the same CS, and return the cs_port and mapping of cs_axis to MotorInfo
def _parseIsTag(self): el = self._element self._istag = el and el[0] == "<" and el[-1] == ">"
Detect whether the element is HTML tag or not. Result is saved to the :attr:`_istag` property.
def _contiguous_slices(self): k = j = None for i in self._sorted(): if k is None: k = j = i if i - j > 1: yield slice(k, j + 1, 1) k = i j = i if k is not None: yield slice(k, j + 1, 1)
Internal iterator over contiguous slices in RangeSet.
def get_mount_points():
    """Get all current mount points of the system.

    Changes to the mount points during iteration may be reflected in the
    result.

    @return a generator of (source, target, fstype, options): options is
    a set of bytes instances, the others are bytes instances (avoids
    encoding problems with problematic mount-point characters).
    """
    def decode_path(path):
        # /proc/self/mounts escapes special characters as octal
        # sequences; translate them back to the literal bytes:
        # \011 tab, \040 space, \012 newline, \134 backslash.
        return path.replace(br"\011", b"\011").replace(br"\040", b"\040").replace(br"\012", b"\012").replace(br"\134", b"\134")
    with open("/proc/self/mounts", "rb") as mounts:
        for mount in mounts:
            source, target, fstype, options, unused1, unused2 = mount.split(b" ")
            options = set(options.split(b","))
            yield (decode_path(source), decode_path(target), fstype, options)
Get all current mount points of the system. Changes to the mount points during iteration may be reflected in the result. @return a generator of (source, target, fstype, options), where options is a set of bytes instances, and the others are bytes instances (this avoids encoding problems with mount points with problematic characters).
def minimum_valid_values_in_any_group(df, levels=None, n=1, invalid=np.nan):
    """Filter rows to keep those with at least `n` valid values in at
    least one column group.

    Groups are column-MultiIndex levels given by `levels` (defaults to
    the level named 'Group' when present). Cells equal to `invalid`
    (NaN by default) count as invalid.

    :param df: Pandas DataFrame with a column MultiIndex
    :param levels: list of int column-MultiIndex levels to group by
    :param n: minimum number of valid values threshold
    :param invalid: value treated as invalid
    :return: filtered copy of the DataFrame
    """
    df = df.copy()
    if levels is None:
        if 'Group' in df.columns.names:
            levels = [df.columns.names.index('Group')]
    # Boolean mask of valid cells.
    if invalid is np.nan:
        dfx = ~np.isnan(df)
    else:
        dfx = df != invalid
    # Count valid values per group, keep rows whose best group reaches n.
    dfc = dfx.astype(int).sum(axis=1, level=levels)
    dfm = dfc.max(axis=1) >= n
    mask = dfm.values
    return df.iloc[mask, :]
Filter ``DataFrame`` by at least n valid values in at least one group. Taking a Pandas ``DataFrame`` with a ``MultiIndex`` column index, filters rows to remove rows where there are less than `n` valid values per group. Groups are defined by the `levels` parameter indexing into the column index. For example, a ``MultiIndex`` with top and second level Group (A,B,C) and Replicate (1,2,3) using ``levels=[0,1]`` would filter on `n` valid values per replicate. Alternatively, ``levels=[0]`` would filter on `n` valid values at the Group level only, e.g. A, B or C. By default valid values are determined by `np.nan`. However, alternatives can be supplied via `invalid`. :param df: Pandas ``DataFrame`` :param levels: ``list`` of ``int`` specifying levels of column ``MultiIndex`` to group by :param n: ``int`` minimum number of valid values threshold :param invalid: matching invalid value :return: filtered Pandas ``DataFrame``
def check(self):
    """Check if the dependency is installed."""
    return programs.is_module_installed(self.modname,
                                        self.required_version,
                                        self.installed_version)
Check if dependency is installed
def _call_brew(cmd, failhard=True):
    """Call the brew command as the user account that owns Homebrew.

    :raises CommandExecutionError: when failhard is True and brew exits
        non-zero.
    """
    user = __salt__['file.get_user'](_homebrew_bin())
    # Only switch user when brew is owned by someone else.
    runas = user if user != __opts__['user'] else None
    full_cmd = '{} {}'.format(salt.utils.path.which('brew'), cmd)
    result = __salt__['cmd.run_all'](full_cmd,
                                     runas=runas,
                                     output_loglevel='trace',
                                     python_shell=False)
    if failhard and result['retcode'] != 0:
        raise CommandExecutionError('Brew command failed',
                                    info={'result': result})
    return result
Calls the brew command with the user account of brew
def describe(value):
    """Describe any value as a descriptor.

    Args:
        value: value to describe as a descriptor.

    Returns:
        Descriptor message class if the object is describable as a
        descriptor, else None.
    """
    if isinstance(value, types.ModuleType):
        return describe_file(value)
    if isinstance(value, messages.Field):
        return describe_field(value)
    if isinstance(value, messages.Enum):
        return describe_enum_value(value)
    if isinstance(value, type):
        if issubclass(value, messages.Message):
            return describe_message(value)
        if issubclass(value, messages.Enum):
            return describe_enum(value)
    return None
Describe any value as a descriptor. Helper function for describing any object with an appropriate descriptor object. Args: value: Value to describe as a descriptor. Returns: Descriptor message class if object is describable as a descriptor, else None.
def run_total_dos(self, sigma=None, freq_min=None, freq_max=None, freq_pitch=None, use_tetrahedron_method=True):
    """Calculate total DOS from phonons on the sampling mesh.

    Requires run_mesh() to have been called first; the result is stored
    in self._total_dos.

    Parameters
    ----------
    sigma : float, optional
        Smearing width for the smearing method.
    freq_min, freq_max, freq_pitch : float, optional
        Frequency window and interval; auto-determined when None.
    use_tetrahedron_method : bool, optional
        Use tetrahedron method when True (smearing when sigma is set).
    """
    if self._mesh is None:
        msg = "run_mesh has to be done before DOS calculation."
        raise RuntimeError(msg)
    total_dos = TotalDos(self._mesh, sigma=sigma, use_tetrahedron_method=use_tetrahedron_method)
    total_dos.set_draw_area(freq_min, freq_max, freq_pitch)
    total_dos.run()
    self._total_dos = total_dos
Calculate total DOS from phonons on sampling mesh. Parameters ---------- sigma : float, optional Smearing width for smearing method. Default is None freq_min, freq_max, freq_pitch : float, optional Minimum and maximum frequencies in which range DOS is computed with the specified interval (freq_pitch). Defaults are None and they are automatically determined. use_tetrahedron_method : float, optional Use tetrahedron method when this is True. When sigma is set, smearing method is used.
def debug(self, value):
    """Set the debug status.

    Toggles all registered loggers between DEBUG and WARNING and
    switches httplib's connection debug output accordingly.

    :param value: The debug status, True or False.
    :type: bool
    """
    self.__debug = value
    # One shared decision for every logger plus httplib.
    level = logging.DEBUG if self.__debug else logging.WARNING
    for _, logger in iteritems(self.logger):
        logger.setLevel(level)
    httplib.HTTPConnection.debuglevel = 1 if self.__debug else 0
Sets the debug status. :param value: The debug status, True or False. :type: bool
def plot_posterior_marginal(self, idx_param=0, res=100, smoothing=0,
        range_min=None, range_max=None,
        label_xaxis=True,
        other_plot_args=None,
        true_model=None
    ):
    """
    Plots a marginal of the requested parameter.

    :param int idx_param: Index of parameter to be marginalized.
    :param int res: Resolution of the axis.
    :param float smoothing: Standard deviation of the Gaussian kernel
        used to smooth; same units as parameter.
    :param float range_min: Minimum range of the output axis.
    :param float range_max: Maximum range of the output axis.
    :param bool label_xaxis: Labels the :math:`x`-axis with the model
        parameter name given by this updater's model.
    :param dict other_plot_args: Keyword arguments to be passed to
        matplotlib's ``plot`` function. Defaults to an empty dict.
    :param np.ndarray true_model: Plots a given model parameter vector
        as the "true" model for comparison.

    .. seealso::

        :meth:`SMCUpdater.posterior_marginal`
    """
    # Fix: the original used a mutable default argument ({}), which is
    # shared across calls; use None as the sentinel instead.
    if other_plot_args is None:
        other_plot_args = {}
    res = plt.plot(*self.posterior_marginal(
        idx_param, res, smoothing,
        range_min, range_max
    ), **other_plot_args)
    if label_xaxis:
        plt.xlabel('${}$'.format(self.model.modelparam_names[idx_param]))
    if true_model is not None:
        # Accept either a (1, n_modelparams) array or a flat vector.
        true_model = true_model[0, idx_param] if true_model.ndim == 2 else true_model[idx_param]
        old_ylim = plt.ylim()
        # Extend slightly past the current limits so the line spans the axes.
        plt.vlines(true_model, old_ylim[0] - 0.1, old_ylim[1] + 0.1,
                   color='k', linestyles='--')
        plt.ylim(old_ylim)
    return res
Plots a marginal of the requested parameter.

        :param int idx_param: Index of parameter to be marginalized.
        :param int res: Resolution of the axis.
        :param float smoothing: Standard deviation of the Gaussian kernel
            used to smooth; same units as parameter.
        :param float range_min: Minimum range of the output axis.
        :param float range_max: Maximum range of the output axis.
        :param bool label_xaxis: Labels the :math:`x`-axis with the model
            parameter name given by this updater's model.
        :param dict other_plot_args: Keyword arguments to be passed to
            matplotlib's ``plot`` function.
        :param np.ndarray true_model: Plots a given model parameter vector
            as the "true" model for comparison.

        .. seealso::

            :meth:`SMCUpdater.posterior_marginal`
def is_equal_strings_ignore_case(first, second):
    """Compare two strings, ignoring case.

    Two falsy values (None or empty string) compare equal; a falsy and a
    non-empty value compare unequal.
    """
    if not first or not second:
        # Equal only when both are empty/None.
        return not (first or second)
    return first.upper() == second.upper()
The function compares strings ignoring case
def _init_kws(self): if 'fmtgo' not in self.kws: self.kws['fmtgo'] = self.grprdflt.gosubdag.prt_attr['fmt'] + "\n" if 'fmtgo2' not in self.kws: self.kws['fmtgo2'] = self.grprdflt.gosubdag.prt_attr['fmt'] + "\n" if 'fmtgene' not in self.kws: if 'itemid2name' not in self.kws: self.kws['fmtgene'] = "{AART} {ID}\n" else: self.kws['fmtgene'] = "{AART} {ID} {NAME}\n" if 'fmtgene2' not in self.kws: self.kws['fmtgene2'] = self.kws['fmtgene']
Fill default values for keyword args, if necessary.
def get_current_user(self):
    """Override get_current_user for Google AppEngine.

    Prefer the OAuth API in production; fall back to the standard
    Users API on the dev server or when no OAuth request is present.
    """
    from google.appengine.api import users
    if _IS_DEVELOPMENT_SERVER:
        return users.get_current_user()
    from google.appengine.api import oauth
    try:
        return oauth.get_current_user()
    except oauth.OAuthRequestError:
        return users.get_current_user()
Override get_current_user for Google AppEngine Checks for oauth capable request first, if this fails fall back to standard users API
def _Insert(cursor, table, values):
    """Inserts one or multiple rows into the given table.

    Args:
      cursor: The MySQL cursor to perform the insertion.
      table: The table name, where rows should be inserted.
      values: A list of dicts, associating column names to values.

    Raises:
      ValueError: If the dicts do not all share the same keys.
    """
    precondition.AssertIterableType(values, dict)
    if not values:
        return
    column_names = sorted(values[0])
    expected_columns = set(column_names)
    for value_dict in values:
        if set(value_dict) != expected_columns:
            raise ValueError("Given value dictionaries must have identical keys. "
                             "Expecting columns {!r}, but got value {!r}".format(
                                 column_names, value_dict))
    query = ("INSERT IGNORE INTO %s {cols} VALUES {vals}" % table).format(
        cols=mysql_utils.Columns(column_names),
        vals=mysql_utils.Placeholders(
            num=len(column_names), values=len(values)))
    # Flatten the rows into a single parameter list in column order.
    args = [value_dict[column]
            for value_dict in values
            for column in column_names]
    cursor.execute(query, args)
Inserts one or multiple rows into the given table. Args: cursor: The MySQL cursor to perform the insertion. table: The table name, where rows should be inserted. values: A list of dicts, associating column names to values.
def option_hook(self, function):
    """Decorator for adding an option hook function.

    The decorated function MUST accept an argument named ``options``;
    it receives (and may modify) the commandline options dictionary
    right before a run is created.
    """
    arguments = Signature(function).arguments
    if "options" not in arguments:
        raise KeyError("option_hook functions must have an argument called"
                       " 'options', but got {}".format(arguments))
    self.option_hooks.append(function)
    return function
Decorator for adding an option hook function. An option hook is a function that is called right before a run is created. It receives (and potentially modifies) the options dictionary. That is, the dictionary of commandline options used for this run. .. note:: The decorated function MUST have an argument called options. The options also contain ``'COMMAND'`` and ``'UPDATE'`` entries, but changing them has no effect. Only modification on flags (entries starting with ``'--'``) are considered.
def options(self, new):
    """Set options from list, or instance of named item.

    Overwrites old options; selects the first option as the value.
    """
    opts = self._create_options(new)
    first_value = list(opts.values())[:1]
    if self.widget.value:
        # Atomically update both options and value.
        self.widget.set_param(options=opts, value=first_value)
    else:
        self.widget.options = opts
        self.widget.value = first_value
Set options from a list, or from an instance of a named item.

        Overwrites the old options.
def validateAQLQuery(self, query, bindVars=None, options=None):
    """Return the server answer if the query is valid; raise AQLQueryError otherwise."""
    payload = {
        'query': query,
        'bindVars': {} if bindVars is None else bindVars,
        'options': {} if options is None else options,
    }
    response = self.connection.session.post(
        self.cursorsURL, data=json.dumps(payload, default=str))
    data = response.json()
    if response.status_code == 201 and not data["error"]:
        return data
    raise AQLQueryError(data["errorMessage"], query, data)
returns the server answer if the query is valid. Raises an AQLQueryError if not
def extensions(self):
    """Generate the regular expression to match all the known extensions.

    @return: the compiled, case-insensitive regular expression.
    @rtype: regular expression object
    """
    # NOTE: relies on Python 2 dict.keys() returning lists (list concat).
    known = self.mimes.encodings_map.keys() + \
        self.mimes.suffix_map.keys() + \
        self.mimes.types_map[1].keys() + \
        cfg['CFG_BIBDOCFILE_ADDITIONAL_KNOWN_FILE_EXTENSIONS']
    # Normalize to a leading-dot form.
    dotted = [ext if ext.startswith('.') else '.' + ext for ext in known]
    dotted.sort()
    dotted.reverse()
    uniq = set(ext.lower() for ext in dotted)
    pattern = '\\' + '$|\\'.join(uniq) + '$'
    pattern = pattern.replace('+', '\\+')
    return re.compile(pattern, re.I)
Generate the regular expression to match all the known extensions. @return: the regular expression. @rtype: regular expression object
def load(self, instance, xblock):
    """Get the filesystem for the field in 'instance' and the given xblock.

    The filesystem is locally scoped via scope_key.
    """
    if not djpyfs:
        raise NotImplementedError("djpyfs not available")
    return djpyfs.get_filesystem(scope_key(instance, xblock))
Get the filesystem for the field specified in 'instance' and the xblock in 'xblock' It is locally scoped.
def recipe_create(backend, kitchen, name):
    """Create a new Recipe"""
    err_str, use_kitchen = Backend.get_kitchen_from_user(kitchen)
    if use_kitchen is None:
        raise click.ClickException(err_str)
    message = "%s - Creating Recipe %s for Kitchen '%s'" % (
        get_datetime(), name, use_kitchen)
    click.secho(message, fg='green')
    check_and_print(
        DKCloudCommandRunner.recipe_create(backend.dki, use_kitchen, name))
Create a new Recipe
def writeBinaryItemContainer(filelike, binaryItemContainer, compress=True):
    """Serialize the binaryItems of binaryItemContainer into a zip archive.

    Writes two archive members: ``metadata`` (JSON list of
    ``[item JSON repr, numpy array metadata]`` entries) and
    ``binarydata`` (the serialized numpy arrays).

    :param filelike: path to a file (str) or a file-like object
    :param binaryItemContainer: a dictionary containing binaryItems
    :param compress: bool, True to use zip file compression
    """
    allMetadata = {}
    arrayBuffer = io.BytesIO()
    for index, item in enumerate(viewvalues(binaryItemContainer)):
        arrayMetadata = _dumpArrayDictToFile(arrayBuffer, item.arrays)
        allMetadata[index] = [item._reprJSON(), arrayMetadata]
    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    with zipfile.ZipFile(filelike, 'w', allowZip64=True) as archive:
        archive.writestr('metadata',
                         json.dumps(allMetadata, cls=MaspyJsonEncoder),
                         compression)
        archive.writestr('binarydata', arrayBuffer.getvalue(), compression)
Serializes the binaryItems contained in binaryItemContainer and writes them into a zipfile archive. Examples of binaryItem classes are :class:`maspy.core.Ci` and :class:`maspy.core.Sai`. A binaryItem class has to define the function ``_reprJSON()`` which returns a JSON formated string representation of the class instance. In addition it has to contain an attribute ``.arrays``, a dictionary which values are ``numpy.array``, that are serialized to bytes and written to the ``binarydata`` file of the zip archive. See :func:`_dumpArrayDictToFile()` The JSON formated string representation of the binaryItems, together with the metadata, necessary to restore serialized numpy arrays, is written to the ``metadata`` file of the archive in this form: ``[[serialized binaryItem, [metadata of a numpy array, ...]], ...]`` Use the method :func:`loadBinaryItemContainer()` to restore a binaryItemContainer from a zipfile. :param filelike: path to a file (str) or a file-like object :param binaryItemContainer: a dictionary containing binaryItems :param compress: bool, True to use zip file compression
def describe_topic_rule(ruleName,
                        region=None, key=None, keyid=None, profile=None):
    """Given a topic rule name describe its properties.

    Returns a dictionary of interesting properties, or an ``error`` key
    on AWS client failure.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.describe_topic_rule myrule
    """
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        response = conn.get_topic_rule(ruleName=ruleName)
        if not response or 'rule' not in response:
            return {'rule': None}
        rule = response['rule']
        interesting = ('ruleName', 'sql', 'description', 'actions',
                       'ruleDisabled')
        return {'rule': dict([(k, rule.get(k)) for k in interesting])}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
Given a topic rule name describe its properties. Returns a dictionary of interesting properties. CLI Example: .. code-block:: bash salt myminion boto_iot.describe_topic_rule myrule
def create_pars_from_dict(name, pars_dict, rescale=True, update_bounds=False):
    """Create a dictionary for the parameters of a function.

    Parameters
    ----------
    name : str
        Name of the function.
    pars_dict : dict
        Existing parameter dict merged over the function defaults.
    rescale : bool
        Rescale parameter values (disabled per-parameter when an
        explicit 'scale' is given).
    update_bounds : bool
        Update bounds (disabled per-parameter when explicit
        'min'/'max' are given).
    """
    defaults = get_function_defaults(name)
    pars_dict = pars_dict.copy()
    for par_name in defaults.keys():
        if par_name not in pars_dict:
            continue
        override = pars_dict[par_name]
        if not isinstance(override, dict):
            override = {'name': par_name, 'value': override}
        defaults[par_name].update(override)
        kw = dict(update_bounds=update_bounds, rescale=rescale)
        # Explicit bounds/scale take precedence over automatic handling.
        if 'min' in override or 'max' in override:
            kw['update_bounds'] = False
        if 'scale' in override:
            kw['rescale'] = False
        defaults[par_name] = make_parameter_dict(defaults[par_name], **kw)
    return defaults
Create a dictionary for the parameters of a function. Parameters ---------- name : str Name of the function. pars_dict : dict Existing parameter dict that will be merged with the default dictionary created by this method. rescale : bool Rescale parameter values.
def get_changes_since(self, change_number, app_changes=True,
                      package_changes=False):
    """Get changes since a change number.

    :param change_number: change number to use as starting point
    :type change_number: :class:`int`
    :param app_changes: whether to include app changes
    :type app_changes: :class:`bool`
    :param package_changes: whether to include package changes
    :type package_changes: :class:`bool`
    :return: ``CMsgClientPICSChangesSinceResponse`` proto message
        instance, or :class:`None` on timeout
    """
    body = {
        'since_change_number': change_number,
        'send_app_info_changes': app_changes,
        'send_package_info_changes': package_changes,
    }
    return self.send_job_and_wait(
        MsgProto(EMsg.ClientPICSChangesSinceRequest), body, timeout=15)
Get changes since a change number

        :param change_number: change number to use as starting point
        :type change_number: :class:`int`
        :param app_changes: whether to include app changes
        :type app_changes: :class:`bool`
        :param package_changes: whether to include package changes
        :type package_changes: :class:`bool`
        :return: `CMsgClientPICSChangesSinceResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver.proto#L1171-L1191>`_
        :rtype: proto message instance, or :class:`None` on timeout
def _dy(self): min_y = max_y = self._start_y for drawing_operation in self: if hasattr(drawing_operation, 'y'): min_y = min(min_y, drawing_operation.y) max_y = max(max_y, drawing_operation.y) return max_y - min_y
Return integer height of this shape's path in local units.
def merge_ownership_periods(mappings):
    """Close the gaps between OwnershipPeriods.

    Given a dict whose values are lists of OwnershipPeriod objects,
    return a dict of the same shape whose periods are ordered
    chronologically with each period's end pushed forward to the next
    period's start; the last period ends at the max Timestamp.
    """
    merged = {}
    for key, periods in mappings.items():
        ordered = sorted(periods)
        # Sentinel so the last real period extends to the max Timestamp.
        ordered.append(OwnershipPeriod(
            pd.Timestamp.max.tz_localize('utc'), None, None, None))
        merged[key] = tuple(
            OwnershipPeriod(a.start, b.start, a.sid, a.value)
            for a, b in zip(ordered, ordered[1:])
        )
    return merged
Given a dict of mappings where the values are lists of OwnershipPeriod objects, returns a dict with the same structure with new OwnershipPeriod objects adjusted so that the periods have no gaps. Orders the periods chronologically, and pushes forward the end date of each period to match the start date of the following period. The end date of the last period pushed forward to the max Timestamp.
def _combine_attr_fast_update(self, attr, typ):
    """Avoids having to call _update for each intermediate base.

    Only works for class attr of type UpdateDict.
    """
    merged = dict(getattr(self, attr, {}))
    for base in self._class_data.bases:
        base_values = dict(getattr(base, attr, {}))
        preserve_attr_data(base_values, merged)
        merged = combine(base_values, merged)
    setattr(self, attr, typ(merged))
Avoids having to call _update for each intermediate base. Only works for class attr of type UpdateDict.
def draw_image(self, ax, image):
    """Process a matplotlib image object and call renderer.draw_image"""
    style = {"alpha": image.get_alpha(),
             "zorder": image.get_zorder()}
    self.renderer.draw_image(imdata=utils.image_to_base64(image),
                             extent=image.get_extent(),
                             coordinates="data",
                             style=style,
                             mplobj=image)
Process a matplotlib image object and call renderer.draw_image
def add_styles(self, **styles):
    """Add ODF styles to the current document, in name order."""
    for _name, style in sorted(styles.items()):
        self._doc.styles.addElement(style)
Add ODF styles to the current document.
def mogrify(self, sql, params):
    """Return the query string with parameters bound in.

    Uses a short-lived raw DBAPI connection and cursor (e.g. psycopg2's
    ``cursor.mogrify``).

    :param sql: the SQL statement with placeholders
    :param params: the parameters to bind
    :return: the rendered query string
    """
    conn = self.engine.raw_connection()
    try:
        cursor = conn.cursor()
        try:
            return cursor.mogrify(sql, params)
        finally:
            # Fix: the original leaked the cursor and connection.
            cursor.close()
    finally:
        # For pooled engines, close() returns the connection to the pool.
        conn.close()
Return the query string with parameters added
def _score_macro_average(self, n_classes):
    """Compute the macro average scores for the ROCAUC curves."""
    # Union of all per-class FPR grid points.
    all_fpr = np.unique(
        np.concatenate([self.fpr[i] for i in range(n_classes)]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        # Interpolate each class curve onto the common grid.
        mean_tpr = mean_tpr + interp(all_fpr, self.fpr[i], self.tpr[i])
    mean_tpr /= n_classes
    self.fpr[MACRO] = all_fpr
    self.tpr[MACRO] = mean_tpr
    self.roc_auc[MACRO] = auc(all_fpr, mean_tpr)
Compute the macro average scores for the ROCAUC curves.
def readBuffer(self, newLength):
    """Read the next `newLength` bytes as another Buffer and advance past them."""
    chunk = Buffer(self.buf, self.offset, newLength)
    self.skip(newLength)
    return chunk
Read next chunk as another buffer.
def get_chart(chart_type, time_span=None, rolling_average=None, api_code=None):
    """Get chart data of a specific chart type.

    :param str chart_type: type of chart
    :param str time_span: duration of the chart (optional)
    :param str rolling_average: averaging duration (optional)
    :param str api_code: Blockchain.info API code (optional)
    :return: an instance of :class:`Chart` class
    """
    parts = ['charts/', chart_type, '?format=json']
    if time_span is not None:
        parts.append('&timespan=' + time_span)
    if rolling_average is not None:
        parts.append('&rollingAverage=' + rolling_average)
    if api_code is not None:
        parts.append('&api_code=' + api_code)
    response = util.call_api(''.join(parts))
    return Chart(json.loads(response))
Get chart data of a specific chart type. :param str chart_type: type of chart :param str time_span: duration of the chart. Default is 1 year for most charts, 1 week for mempool charts (optional) (Example: 5weeks) :param str rolling_average: duration over which the data should be averaged (optional) :param str api_code: Blockchain.info API code (optional) :return: an instance of :class:`Chart` class
def listflat(path, ext=None):
    """List files without recursion.

    If *path* is a directory, list its entries (optionally filtered by
    *ext*; 'tif' and 'tiff' both match '.tif' and '.tiff').  Otherwise
    treat *path* as a glob pattern.  Directories are excluded and the
    result is sorted.
    """
    if not os.path.isdir(path):
        candidates = glob.glob(path)
    elif not ext:
        candidates = [os.path.join(path, fname)
                      for fname in os.listdir(path)]
    elif ext in ('tif', 'tiff'):
        candidates = (glob.glob(os.path.join(path, '*.tif')) +
                      glob.glob(os.path.join(path, '*.tiff')))
    else:
        candidates = glob.glob(os.path.join(path, '*.' + ext))
    return sorted(fpath for fpath in candidates
                  if not isinstance(fpath, list)
                  and not os.path.isdir(fpath))
List files without recursion
def recover(self, requeue=False):
    """Redeliver unacknowledged messages.

    :param bool requeue: Re-queue the messages
    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection
                                 encountered an error.
    :rtype: dict
    """
    if not isinstance(requeue, bool):
        raise AMQPInvalidArgument('requeue should be a boolean')
    return self._channel.rpc_request(
        specification.Basic.Recover(requeue=requeue))
Redeliver unacknowledged messages. :param bool requeue: Re-queue the messages :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict
def _needs_region_update(out_file, samples):
    """Check if we need to update BED file of regions, supporting back compatibility."""
    nblock_files = [d["regions"]["nblock"] for d in samples if "regions" in d]
    # Back-compat: an old '-analysisblocks' file means no update needed.
    if any(os.path.exists(f.replace("-nblocks", "-analysisblocks"))
           for f in nblock_files):
        return False
    return any(not utils.file_uptodate(out_file, f) for f in nblock_files)
Check if we need to update BED file of regions, supporting back compatibility.
def status(self, **kwargs):
    """Get the status of the geo node.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabGetError: If the server failed to perform the request

    Returns:
        dict: The status of the geo node
    """
    return self.manager.gitlab.http_get(
        '/geo_nodes/%s/status' % self.get_id(), **kwargs)
Get the status of the geo node. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the server failed to perform the request Returns: dict: The status of the geo node
def get_build_platform():
    """Return this platform's string for platform-specific distributions.

    Same as ``sysconfig.get_platform()`` plus a fix-up for Mac OS X
    framework builds whose platform string lacks the 'macosx-' prefix.
    """
    from sysconfig import get_platform
    plat = get_platform()
    if sys.platform != "darwin" or plat.startswith('macosx-'):
        return plat
    try:
        version = _macosx_vers()
        machine = os.uname()[4].replace(" ", "_")
        return "macosx-%d.%d-%s" % (
            int(version[0]), int(version[1]), _macosx_arch(machine))
    except ValueError:
        # Fall back to the generic platform string when the macOS
        # version cannot be parsed.
        return plat
Return this platform's string for platform-specific distributions XXX Currently this is the same as ``distutils.util.get_platform()``, but it needs some hacks for Linux and Mac OS X.
def min(self, expr, extra_constraints=(), solver=None, model_callback=None):
    """Return the minimum value of `expr`.

    :param expr: expression (an AST) to evaluate
    :param extra_constraints: extra constraints (as ASTs) to add to the
        solver for this solve
    :param solver: a solver object, native to the backend, to assist in
        the evaluation
    :param model_callback: a function executed with recovered models
    :return: the minimum possible value of expr (backend object)
    """
    if self._solver_required and solver is None:
        raise BackendError(
            "%s requires a solver for evaluation" % self.__class__.__name__)
    converted_expr = self.convert(expr)
    converted_extra = self.convert_list(extra_constraints)
    return self._min(converted_expr,
                     extra_constraints=converted_extra,
                     solver=solver,
                     model_callback=model_callback)
Return the minimum value of `expr`. :param expr: expression (an AST) to evaluate :param solver: a solver object, native to the backend, to assist in the evaluation (for example, a z3.Solver) :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve :param model_callback: a function that will be executed with recovered models (if any) :return: the minimum possible value of expr (backend object)
def load_mldataset(filename):
    """Parse a tab-separated ratings file and load it into three NDArrays.

    Lines that do not have exactly four fields are skipped.  Returns a
    ``gluon.data.ArrayDataset`` of (user, item, score).
    """
    users = []
    items = []
    scores = []
    with open(filename) as fh:
        for line in fh:
            fields = line.strip().split('\t')
            if len(fields) != 4:
                continue  # skip malformed lines
            users.append(int(fields[0]))
            items.append(int(fields[1]))
            scores.append(float(fields[2]))
    return gluon.data.ArrayDataset(
        mx.nd.array(users), mx.nd.array(items), mx.nd.array(scores))
Not particularly fast code to parse the text file, load it into three NDArrays, and produce an ArrayDataset
def showAddColumnDialog(self, triggered):
    """Slot: display the dialog to add a column to the model.

    Args:
        triggered (bool): Only when the corresponding button was
            activated is the dialog created and shown.
    """
    if not triggered:
        return
    dialog = AddAttributesDialog(self)
    dialog.accepted.connect(self.addColumn)
    dialog.rejected.connect(self.uncheckButton)
    dialog.show()
Display the dialog to add a column to the model. This method is also a slot. Args: triggered (bool): If the corresponding button was activated, the dialog will be created and shown.
def _extend_settings(settings, configurator_config, prefix=None): for key in configurator_config: settings_key = '.'.join([prefix, key]) if prefix else key if hasattr(configurator_config[key], 'keys') and\ hasattr(configurator_config[key], '__getitem__'): _extend_settings( settings, configurator_config[key], prefix=settings_key ) else: settings[settings_key] = configurator_config[key]
Extend settings dictionary with content of yaml's configurator key. .. note:: This methods changes multilayered subkeys defined within **configurator** into dotted keys in settings dictionary: .. code-block:: yaml configurator: sqlalchemy: url: mysql://user:password@host/dbname will result in **sqlalchemy.url**: mysql://user:password@host/dbname key value in settings dictionary. :param dict settings: settings dictionary :param dict configurator_config: yml defined settings :param str prefix: prefix for settings dict key
def cfg_lldp_interface(self, protocol_interface, phy_interface=None):
    """Configure LLDP on the interface and create its attribute object."""
    if phy_interface is None:
        phy_interface = protocol_interface
    self.create_attr_obj(protocol_interface, phy_interface)
    status = self.pub_lldp.enable_lldp(protocol_interface)
    self.get_attr_obj(protocol_interface).update_lldp_status(status)
Cfg LLDP on interface and create object.
def load_data(self, table_name, obj, database=None, **kwargs):
    """Wraps the LOAD DATA DDL statement.

    Loads data into a MapD table by physically moving data files.
    The previously selected database is always restored, even when the
    load fails.

    Parameters
    ----------
    table_name : string
    obj : pandas.DataFrame or pyarrow.Table
    database : string, default None (optional)
    """
    previous_database = self.db_name
    self.set_database(database)
    try:
        self.con.load_table(table_name, obj, **kwargs)
    finally:
        # Fix: the original left the connection on the wrong database
        # when load_table raised.
        self.set_database(previous_database)
Wraps the LOAD DATA DDL statement. Loads data into an MapD table by physically moving data files. Parameters ---------- table_name : string obj: pandas.DataFrame or pyarrow.Table database : string, default None (optional)
def get_induced_subhypergraph(self, nodes):
    """Return the subhypergraph induced by the provided set of nodes.

    The induced subhypergraph's node set is exactly `nodes`; hyperedges
    whose tail or head reach outside `nodes` are dropped by the node
    removal.

    :param nodes: the set of nodes to induce on.
    :returns: DirectedHypergraph -- the induced subhypergraph.
    """
    induced = self.copy()
    extraneous_nodes = induced.get_node_set() - set(nodes)
    induced.remove_nodes(extraneous_nodes)
    return induced
Gives a new hypergraph that is the subhypergraph of the current hypergraph induced by the provided set of nodes. That is, the induced subhypergraph's node set corresponds precisely to the nodes provided, and the coressponding hyperedges in the subhypergraph are only those from the original graph consist of tail and head sets that are subsets of the provided nodes. :param nodes: the set of nodes to find the induced subhypergraph of. :returns: DirectedHypergraph -- the subhypergraph induced on the provided nodes.
def _get_args_to_parse(args, sys_argv):
    """Return `args` when given, else the command-line arguments.

    Args:
        args: argument to be parsed, or None.
        sys_argv: arguments of the command line, i.e. sys.argv.
    """
    arguments = sys_argv[1:] if args is None else args
    _LOG.debug("Parsing arguments: %s", arguments)
    return arguments
Return the given arguments if it is not None else sys.argv if it contains something, an empty list otherwise. Args: args: argument to be parsed sys_argv: arguments of the command line i.e. sys.argv