code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def write_wrapped(self, s, extra_room=0):
    """Write *s*, emitting a soft line break first if it would not fit.

    Args:
        s: The string to write.
        extra_room: Extra space to reserve beyond ``len(s)``.
    """
    needed = len(s) + extra_room
    if needed > self.room:
        self.write_soft_break()
    self.write_str(s)
Add a soft line break if needed, then write s.
def _login(self, csrf_token):
    """Attempt to log in the session on easyname.

    Posts the configured credentials along with the CSRF token, then
    verifies both the HTTP status and the post-login redirect target.
    """
    credentials = {
        'username': self._get_provider_option('auth_username') or '',
        'password': self._get_provider_option('auth_password') or '',
        'submit': '',
        'loginxtoken': csrf_token,
    }
    login_response = self.session.post(self.URLS['login'], data=credentials)
    self._log('Login', login_response)
    # Non-200 means a transport-level failure, not bad credentials.
    assert login_response.status_code == 200, \
        'Could not login due to a network error.'
    # Easyname redirects to the overview page only on a successful login.
    assert login_response.url == self.URLS['overview'], \
        'Easyname login failed, bad EASYNAME_USER or EASYNAME_PASS.'
Attempt to login session on easyname.
def _prep_binary_content(self):
    '''
    Sets delivery method of either payload or header.
    Favors the Content-Location header if set; otherwise promotes
    self.location to that header, and finally falls back to self.data
    as the request payload.

    Args:
        None

    Returns:
        None: sets attributes in self.binary and headers

    Raises:
        Exception: when no content source is available at all.
    '''
    headers = self.resource.headers
    has_location_header = 'Content-Location' in headers.keys()

    # No content anywhere: nothing to create or update with.
    if not self.data and not self.location and not has_location_header:
        raise Exception('creating/updating NonRDFSource requires content from self.binary.data, self.binary.location, or the Content-Location header')

    if has_location_header:
        logger.debug('Content-Location header found, using')
        self.delivery = 'header'
        return

    # Header absent: self.location trumps self.data.
    if self.location:
        headers['Content-Location'] = self.location
        self.delivery = 'header'
    elif self.data:
        # file-like objects and plain bytes are both sent as payload;
        # only the log message differs.
        if isinstance(self.data, io.BufferedIOBase):
            logger.debug('detected file-like object')
        else:
            logger.debug('detected bytes')
        self.delivery = 'payload'
Sets delivery method of either payload or header Favors Content-Location header if set Args: None Returns: None: sets attributes in self.binary and headers
def _cleanup(self):
    '''Drop all locally-held callback references, then defer to the parent.'''
    # Release every stored callback so nothing keeps these objects alive.
    for cb_name in ('_declare_cb', '_bind_cb', '_unbind_cb',
                    '_delete_cb', '_purge_cb'):
        setattr(self, cb_name, None)
    super(QueueClass, self)._cleanup()
Cleanup all the local data.
def choose_branch(exclude=None):
    # type: (List[str]) -> str
    """ Show the user a menu to pick a branch from the existing ones.

    Args:
        exclude (list[str]):
            List of branch names to exclude from the menu. By default it will
            exclude master and develop branches. To show all branches pass an
            empty array here.

    Returns:
        str: The name of the branch chosen by the user. If the user inputs an
        invalid choice, he will be asked again (and again) until he picks a
        valid branch.
    """
    if exclude is None:
        master = conf.get('git.master_branch', 'master')
        develop = conf.get('git.devel_branch', 'develop')
        exclude = {master, develop}

    # BUG FIX: callers may pass a list (as documented), and `set - list`
    # raises TypeError. Normalise to a set before taking the difference.
    branches = list(set(git.branches()) - set(exclude))

    # Print the menu
    for i, branch_name in enumerate(branches):
        shell.cprint('<90>[{}] <33>{}'.format(i + 1, branch_name))

    # Keep prompting until the user picks a number inside the menu range.
    choice = 0
    while choice < 1 or choice > len(branches):
        prompt = "Pick a base branch from the above [1-{}]".format(
            len(branches)
        )
        choice = click.prompt(prompt, value_proc=int)
        if not (1 <= choice <= len(branches)):
            fmt = "Invalid choice {}, you must pick a number between {} and {}"
            log.err(fmt.format(choice, 1, len(branches)))

    return branches[choice - 1]
Show the user a menu to pick a branch from the existing ones. Args: exclude (list[str]): List of branch names to exclude from the menu. By default it will exclude master and develop branches. To show all branches pass an empty array here. Returns: str: The name of the branch chosen by the user. If the user inputs an invalid choice, he will be asked again (and again) until he picks a valid branch.
def _qteUpdateLabelWidths(self):
    """
    Ensure all but the last ``QLabel`` are only as wide as necessary.

    The width of the last label is manually set to a large value to
    ensure that it stretches as much as possible. The height of all
    widgets is also set appropriately.

    The method also takes care of rearranging the widgets in the correct
    order, ie. in the order specified by ``self._qteModeList``.

    |Args|

    * **None**

    |Returns|

    * **None**

    |Raises|

    * **None**
    """
    layout = self.layout()

    # Empty the layout so the labels can be re-added in the order
    # dictated by self._qteModeList.
    # BUG FIX: the previous loop iterated indices 0..count-1 while
    # removing, so the shifting indices skipped every other item and
    # left stale entries in the layout. Repeatedly removing item 0
    # until the layout is empty is safe.
    while layout.count():
        layout.removeItem(layout.itemAt(0))

    # Add all labels and constrain each to exactly the width its text
    # requires.
    for item in self._qteModeList:
        label = item[2]
        width = label.fontMetrics().size(0, str(item[1])).width()
        label.setMaximumWidth(width)
        label.setMinimumWidth(width)
        layout.addWidget(label)

    # Remove the width constraint from the last label so that it can
    # expand to the right.
    _, _, label = self._qteModeList[-1]
    label.setMaximumWidth(1600000)
Ensure all but the last ``QLabel`` are only as wide as necessary. The width of the last label is manually set to a large value to ensure that it stretches as much as possible. The height of all widgets is also set appropriately. The method also takes care of rearranging the widgets in the correct order, ie. in the order specified by ``self._qteModeList``. |Args| * **None** |Returns| * **None** |Raises| * **None**
def publish(self, name, data, userList):
    """Broadcast *data* under *name* to every user in *userList*."""
    payload = {
        "name": name,
        "data": SockJSDefaultHandler._parser.encode(data),
    }
    # Publish data to all room users
    self.broadcast(userList, payload)
Publish data
def provider(self, name, history=None):
    """
    Find the provider of the property by I{name}.
    @param name: The property name.
    @type name: str
    @param history: A history of nodes checked to prevent
        circular hunting.
    @type history: [L{Properties},..]
    @return: The provider when found. Otherwise, None (when nested)
        and I{self} when not nested.
    @rtype: L{Properties}
    """
    if history is None:
        history = []
    history.append(self)
    # A local definition wins immediately.
    if name in self.definitions:
        return self
    # Hunt through linked nodes, skipping any already visited.
    for link in self.links:
        if link in history:
            continue
        found = link.provider(name, history)
        if found is not None:
            return found
    history.remove(self)
    # Only the top of the recursion (empty history) falls back to self.
    return None if history else self
Find the provider of the property by I{name}. @param name: The property name. @type name: str @param history: A history of nodes checked to prevent circular hunting. @type history: [L{Properties},..] @return: The provider when found. Otherwise, None (when nested) and I{self} when not nested. @rtype: L{Properties}
def AsPrimitiveProto(self):
    """Return an old style protocol buffer object.

    Returns None when no primitive protobuf class is associated.
    """
    if not self.protobuf:
        return None
    result = self.protobuf()
    # Round-trip through serialization to populate the old-style object.
    result.ParseFromString(self.SerializeToString())
    return result
Return an old style protocol buffer object.
def targetSigma2(self, R, log=False):
    """
    NAME:
       targetSigma2
    PURPOSE:
       evaluate the target Sigma_R^2(R)
    INPUT:
       R - radius at which to evaluate (can be Quantity)
       log - if True, return the log (default: False)
    OUTPUT:
       target Sigma_R^2(R)
    HISTORY:
       2010-03-28 - Written - Bovy (NYU)
    """
    # Delegate straight to the configured surface-density profile.
    profile = self._surfaceSigmaProfile
    return profile.sigma2(R, log=log)
NAME: targetSigma2 PURPOSE: evaluate the target Sigma_R^2(R) INPUT: R - radius at which to evaluate (can be Quantity) OUTPUT: target Sigma_R^2(R) log - if True, return the log (default: False) HISTORY: 2010-03-28 - Written - Bovy (NYU)
def _validate_arch(self, arch = None): """ @type arch: str @param arch: Name of the processor architecture. If not provided the current processor architecture is assumed. For more details see L{win32.version._get_arch}. @rtype: str @return: Name of the processor architecture. If not provided the current processor architecture is assumed. For more details see L{win32.version._get_arch}. @raise NotImplementedError: This disassembler doesn't support the requested processor architecture. """ # Use the default architecture if none specified. if not arch: arch = win32.arch # Validate the architecture. if arch not in self.supported: msg = "The %s engine cannot decode %s code." msg = msg % (self.name, arch) raise NotImplementedError(msg) # Return the architecture. return arch
@type arch: str @param arch: Name of the processor architecture. If not provided the current processor architecture is assumed. For more details see L{win32.version._get_arch}. @rtype: str @return: Name of the processor architecture. If not provided the current processor architecture is assumed. For more details see L{win32.version._get_arch}. @raise NotImplementedError: This disassembler doesn't support the requested processor architecture.
def ver(self, value):
    """The ver property.

    Args:
        value (int). the property value.
    """
    default = self._defaults['ver']
    stored = self._values
    # Storing the default is pointless; drop any explicit entry instead.
    # (Note: a default value is still stored when no entry exists yet —
    # this mirrors the sibling property setters.)
    if value == default and 'ver' in stored:
        del stored['ver']
    else:
        stored['ver'] = value
The ver property. Args: value (int). the property value.
def create_namespaced_endpoints(self, namespace, body, **kwargs):
    """
    create Endpoints

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_endpoints(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Endpoints body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted.
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes.
    :return: V1Endpoints
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the (data, status, headers) tuple for the caller.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Asynchronous path: hand the thread straight back.
        return self.create_namespaced_endpoints_with_http_info(
            namespace, body, **kwargs)
    data = self.create_namespaced_endpoints_with_http_info(
        namespace, body, **kwargs)
    return data
create Endpoints This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_endpoints(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Endpoints body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1Endpoints If the method is called asynchronously, returns the request thread.
def _call_geocoder(
        self,
        url,
        timeout=DEFAULT_SENTINEL,
        raw=False,
        requester=None,
        deserializer=json.loads,
        **kwargs
):
    """
    For a generated query URL, get the results.

    :param url: URL string, or a ``Request`` instance, to fetch.
    :param timeout: request timeout in seconds; ``DEFAULT_SENTINEL``
        means "use this geocoder's configured timeout".
    :param raw: when True, return the raw response object undecoded.
    :param requester: optional callable used to issue the HTTP request
        instead of ``self.urlopen``.
    :param deserializer: callable applied to the decoded page text
        (``json.loads`` by default); ``None`` returns the text as-is.
    """
    if requester:
        req = url  # Don't construct an urllib's Request for a custom requester.

        # `requester` might be anything which can issue an HTTP request.
        # Assume that `requester` is a method of the `requests` library.
        # Requests, however, doesn't accept SSL context in its HTTP
        # request methods. A custom HTTP adapter has to be created for that.
        # So the current usage is not directly compatible with `requests`.
        requester = functools.partial(requester, context=self.ssl_context,
                                      proxies=self.proxies,
                                      headers=self.headers)
    else:
        if isinstance(url, Request):
            # copy Request, merging the geocoder's default headers in.
            headers = self.headers.copy()
            headers.update(url.header_items())
            req = Request(url=url.get_full_url(), headers=headers)
        else:
            req = Request(url=url, headers=self.headers)

    requester = requester or self.urlopen

    if timeout is None:
        # Deprecated calling convention: None used to mean "default".
        warnings.warn(
            ('`timeout=None` has been passed to a geocoder call. Using '
             'default geocoder timeout. In geopy 2.0 the '
             'behavior will be different: None will mean "no timeout" '
             'instead of "default geocoder timeout". Pass '
             'geopy.geocoders.base.DEFAULT_SENTINEL instead of None '
             'to get rid of this warning.'), DeprecationWarning, stacklevel=3)
        timeout = DEFAULT_SENTINEL

    timeout = (timeout if timeout is not DEFAULT_SENTINEL
               else self.timeout)

    try:
        page = requester(req, timeout=timeout, **kwargs)
    except Exception as error:
        # Extract a human-readable message across py2/py3 error shapes.
        message = (
            str(error) if not py3k
            else (
                str(error.args[0])
                if len(error.args)
                else str(error)
            )
        )
        # Give subclasses a chance to translate/replace the error first.
        self._geocoder_exception_handler(error, message)
        if isinstance(error, HTTPError):
            code = error.getcode()
            body = self._read_http_error_body(error)
            if body:
                logger.info('Received an HTTP error (%s): %s', code, body,
                            exc_info=False)
            try:
                raise ERROR_CODE_MAP[code](message)
            except KeyError:
                # Unknown status code: fall back to the generic error.
                raise GeocoderServiceError(message)
        elif isinstance(error, URLError):
            if "timed out" in message:
                raise GeocoderTimedOut('Service timed out')
            elif "unreachable" in message:
                raise GeocoderUnavailable('Service not available')
        elif isinstance(error, SocketTimeout):
            raise GeocoderTimedOut('Service timed out')
        elif isinstance(error, SSLError):
            if "timed out" in message:
                raise GeocoderTimedOut('Service timed out')
        raise GeocoderServiceError(message)

    # Different requesters expose the status code differently
    # (urllib: getcode(); requests-like: status_code).
    if hasattr(page, 'getcode'):
        status_code = page.getcode()
    elif hasattr(page, 'status_code'):
        status_code = page.status_code
    else:
        status_code = None
    if status_code in ERROR_CODE_MAP:
        raise ERROR_CODE_MAP[page.status_code]("\n%s" % decode_page(page))

    if raw:
        return page

    page = decode_page(page)

    if deserializer is not None:
        try:
            return deserializer(page)
        except ValueError:
            raise GeocoderParseError(
                "Could not deserialize using deserializer:\n%s" % page
            )
    else:
        return page
For a generated query URL, get the results.
def every_other(x, name=None):
    """Drops every other value from the tensor and returns a 1D tensor.

    This is useful if you are running multiple inputs through a model tower
    before splitting them and you want to line it up with some other data.

    Args:
      x: the target tensor.
      name: the name for this op, defaults to every_other
    Returns:
      A tensorflow op.
    """
    with tf.name_scope(name, 'every_other', [x]) as scope:
        x = tf.convert_to_tensor(x, name='x')
        # Pair up consecutive elements, keep the first of each pair,
        # then flatten back to 1D.
        pairs = tf.reshape(x, [-1, 2])
        firsts = tf.slice(pairs, [0, 0], [-1, 1])
        return tf.reshape(firsts, [-1], name=scope)
Drops every other value from the tensor and returns a 1D tensor. This is useful if you are running multiple inputs through a model tower before splitting them and you want to line it up with some other data. Args: x: the target tensor. name: the name for this op, defaults to every_other Returns: A tensorflow op.
def forum_topic_undelete(self, topic_id):
    """Undelete a topic (login required) (Moderator+) (UNTESTED).

    Parameters:
        topic_id (int): The id of the topic to undelete.
    """
    path = 'forum_topics/{0}/undelete.json'.format(topic_id)
    return self._get(path, method='POST', auth=True)
Undelete a topic (login required) (Moderator+) (UNTESTED). Parameters: topic_id (int): Where topic_id is the topic id.
def textwidth(self, text, config):
    """Calculate the rendered width of *text* using the configured font."""
    # A throwaway SVG surface is enough to measure text extents.
    surface = cairo.SVGSurface(None, 1280, 200)
    context = cairo.Context(surface)
    context.select_font_face(config['font_face'],
                             cairo.FONT_SLANT_NORMAL,
                             cairo.FONT_WEIGHT_BOLD)
    context.set_font_size(int(config['font_size']))
    # extents[2] is the advance width; +2 adds a small safety margin.
    return context.text_extents(text)[2] + 2
Calculates the width of the specified text.
def match_status_code(self, entry, status_code, regex=True):
    """
    Helper function that returns entries with a status code matching the
    given `status_code` argument.

    NOTE: This is doing a STRING comparison NOT NUMERICAL

    :param entry: entry object to analyze
    :param status_code: ``str`` of status code to search for
    :param regex: when True (default), treat ``status_code`` as a
        regular expression pattern
    """
    actual = str(entry['response']['status'])
    if regex:
        return re.search(status_code, actual) is not None
    return actual == status_code
Helper function that returns entries with a status code matching the given `status_code` argument. NOTE: This is doing a STRING comparison NOT NUMERICAL :param entry: entry object to analyze :param status_code: ``str`` of status code to search for :param regex: when True, treat ``status_code`` as a regular expression to match
def map(self, options=None):
    """Trigger find of serialized sources and build objects"""
    for path, data in self.paths.items():
        refs = data.get("references", [])
        for item in data["items"]:
            # Each item may expand into several objects.
            for built in self.create_class(item, options, references=refs):
                self.add_object(built)
    self.organize_objects()
Trigger find of serialized sources and build objects
def build_loss(model_logits, sparse_targets):
    """Compute the log loss given predictions and targets."""
    time_major_shape = [FLAGS.unroll_steps, FLAGS.batch_size]
    flat_batch_shape = [FLAGS.unroll_steps * FLAGS.batch_size, -1]
    # Flatten (time, batch) so the per-token cross entropy can be computed.
    flat_logits = tf.reshape(model_logits, flat_batch_shape)
    flat_labels = tf.reshape(sparse_targets, flat_batch_shape[:-1])
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=flat_logits, labels=flat_labels)
    xent = tf.reshape(xent, time_major_shape)
    # Sum over the sequence, then average over the batch.
    sequence_neg_log_prob = tf.reduce_sum(xent, axis=0)
    return tf.reduce_mean(sequence_neg_log_prob, axis=0)
Compute the log loss given predictions and targets.
def find_ports(device):
    """
    Find the port chain a device is plugged on.

    This is done by searching sysfs for a device that matches the device
    bus/address combination.

    Useful when the underlying usb lib does not return device.port_number
    for whatever reason.
    """
    bus_id = device.bus
    dev_id = device.address
    for dirent in os.listdir(USB_SYS_PREFIX):
        matches = re.match(USB_PORTS_STR + '$', dirent)
        if not matches:
            continue
        bus_str = readattr(dirent, 'busnum')
        busnum = float(bus_str) if bus_str else None
        dev_str = readattr(dirent, 'devnum')
        devnum = float(dev_str) if dev_str else None
        if busnum == bus_id and devnum == dev_id:
            return str(matches.groups()[1])
Find the port chain a device is plugged on. This is done by searching sysfs for a device that matches the device bus/address combination. Useful when the underlying usb lib does not return device.port_number for whatever reason.
def initialize_page_data(self):
    """Initialize the page data for the given screen."""
    if self.term.is_a_tty:
        self.display_initialize()
    self.character_generator = self.character_factory(self.screen.wide)
    # Drain the generator into a list of characters for this page.
    page_data = list(self.character_generator)
    if LIMIT_UCS == 0x10000:
        echo(self.term.center('press any key.').rstrip())
        flushout()
        self.term.inkey(timeout=None)
    return page_data
Initialize the page data for the given screen.
def labeled_intervals(intervals, labels, label_set=None, base=None, height=None, extend_labels=True, ax=None, tick=True, **kwargs): '''Plot labeled intervals with each label on its own row. Parameters ---------- intervals : np.ndarray, shape=(n, 2) segment intervals, in the format returned by :func:`mir_eval.io.load_intervals` or :func:`mir_eval.io.load_labeled_intervals`. labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. label_set : list An (ordered) list of labels to determine the plotting order. If not provided, the labels will be inferred from ``ax.get_yticklabels()``. If no ``yticklabels`` exist, then the sorted set of unique values in ``labels`` is taken as the label set. base : np.ndarray, shape=(n,), optional Vertical positions of each label. By default, labels are positioned at integers ``np.arange(len(labels))``. height : scalar or np.ndarray, shape=(n,), optional Height for each label. If scalar, the same value is applied to all labels. By default, each label has ``height=1``. extend_labels : bool If ``False``, only values of ``labels`` that also exist in ``label_set`` will be shown. If ``True``, all labels are shown, with those in `labels` but not in `label_set` appended to the top of the plot. A horizontal line is drawn to indicate the separation between values in or out of ``label_set``. ax : matplotlib.pyplot.axes An axis handle on which to draw the intervals. If none is provided, a new set of axes is created. tick : bool If ``True``, sets tick positions and labels on the y-axis. kwargs Additional keyword arguments to pass to `matplotlib.collection.BrokenBarHCollection`. 
Returns ------- ax : matplotlib.pyplot.axes._subplots.AxesSubplot A handle to the (possibly constructed) plot axes ''' # Get the axes handle ax, _ = __get_axes(ax=ax) # Make sure we have a numpy array intervals = np.atleast_2d(intervals) if label_set is None: # If we have non-empty pre-existing tick labels, use them label_set = [_.get_text() for _ in ax.get_yticklabels()] # If none of the label strings have content, treat it as empty if not any(label_set): label_set = [] else: label_set = list(label_set) # Put additional labels at the end, in order if extend_labels: ticks = label_set + sorted(set(labels) - set(label_set)) elif label_set: ticks = label_set else: ticks = sorted(set(labels)) style = dict(linewidth=1) style.update(next(ax._get_patches_for_fill.prop_cycler)) # Swap color -> facecolor here so we preserve edgecolor on rects style['facecolor'] = style.pop('color') style.update(kwargs) if base is None: base = np.arange(len(ticks)) if height is None: height = 1 if np.isscalar(height): height = height * np.ones_like(base) seg_y = dict() for ybase, yheight, lab in zip(base, height, ticks): seg_y[lab] = (ybase, yheight) xvals = defaultdict(list) for ival, lab in zip(intervals, labels): if lab not in seg_y: continue xvals[lab].append((ival[0], ival[1] - ival[0])) for lab in seg_y: ax.add_collection(BrokenBarHCollection(xvals[lab], seg_y[lab], **style)) # Pop the label after the first time we see it, so we only get # one legend entry style.pop('label', None) # Draw a line separating the new labels from pre-existing labels if label_set != ticks: ax.axhline(len(label_set), color='k', alpha=0.5) if tick: ax.grid(True, axis='y') ax.set_yticks([]) ax.set_yticks(base) ax.set_yticklabels(ticks, va='bottom') ax.yaxis.set_major_formatter(IntervalFormatter(base, ticks)) if base.size: __expand_limits(ax, [base.min(), (base + height).max()], which='y') if intervals.size: __expand_limits(ax, [intervals.min(), intervals.max()], which='x') return ax
Plot labeled intervals with each label on its own row. Parameters ---------- intervals : np.ndarray, shape=(n, 2) segment intervals, in the format returned by :func:`mir_eval.io.load_intervals` or :func:`mir_eval.io.load_labeled_intervals`. labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. label_set : list An (ordered) list of labels to determine the plotting order. If not provided, the labels will be inferred from ``ax.get_yticklabels()``. If no ``yticklabels`` exist, then the sorted set of unique values in ``labels`` is taken as the label set. base : np.ndarray, shape=(n,), optional Vertical positions of each label. By default, labels are positioned at integers ``np.arange(len(labels))``. height : scalar or np.ndarray, shape=(n,), optional Height for each label. If scalar, the same value is applied to all labels. By default, each label has ``height=1``. extend_labels : bool If ``False``, only values of ``labels`` that also exist in ``label_set`` will be shown. If ``True``, all labels are shown, with those in `labels` but not in `label_set` appended to the top of the plot. A horizontal line is drawn to indicate the separation between values in or out of ``label_set``. ax : matplotlib.pyplot.axes An axis handle on which to draw the intervals. If none is provided, a new set of axes is created. tick : bool If ``True``, sets tick positions and labels on the y-axis. kwargs Additional keyword arguments to pass to `matplotlib.collection.BrokenBarHCollection`. Returns ------- ax : matplotlib.pyplot.axes._subplots.AxesSubplot A handle to the (possibly constructed) plot axes
def run(addr, *commands, **kwargs):
    """
    Non-threaded batch command runner returning output results

    :param addr: address of the varnish instance to connect to.
    :param commands: either a single command name (string) followed by
        its arguments, or a tuple of (name, *args) command tuples.
    :param kwargs: passed through to the handler constructor.
    :return: list of per-command results.
    """
    results = []
    handler = VarnishHandler(addr, **kwargs)
    for cmd in commands:
        if isinstance(cmd, tuple) and len(cmd) > 1:
            # NOTE(review): despite checking `cmd` itself is a tuple, each
            # element `c` is unpacked as (name, *args) — callers appear to
            # pass a tuple OF command tuples here; confirm against callers.
            # Dots in command names map to underscores on the handler.
            results.extend([getattr(handler, c[0].replace('.', '_'))(*c[1:])
                            for c in cmd])
        else:
            # Single string command: the remaining positional arguments are
            # its args, and processing deliberately stops after this one.
            results.append(getattr(handler,
                                   cmd.replace('.', '_'))(*commands[1:]))
            break
    handler.close()
    return results
Non-threaded batch command runner returning output results
def getWidget(self, **kwargs):
    """
    Wrapper function that returns a new widget attached to this simulation.

    Widgets provide real-time 3D visualizations from within a Jupyter
    notebook. See the Widget class for more details on the possible
    arguments.

    Arguments
    ---------
    All arguments passed to this wrapper function will be passed to
    /Widget class.

    Returns
    -------
    A rebound.Widget object.

    Examples
    --------
    >>> sim = rebound.Simulation()
    >>> sim.add(m=1.)
    >>> sim.add(m=1.e-3,x=1.,vy=1.)
    >>> sim.getWidget()
    """
    from .widget import Widget  # ondemand
    from ipywidgets import DOMWidget
    from IPython.display import display, HTML
    if not hasattr(self, '_widgets'):
        # First widget for this simulation: set up display machinery once.
        self._widgets = []

        def display_heartbeat(simp):
            # Fan each heartbeat out to every attached widget.
            for w in self._widgets:
                w.refresh(simp, isauto=1)

        self.visualization = VISUALIZATIONS["webgl"]
        clibrebound.reb_display_init_data(byref(self))
        # Keep a reference to the ctypes callback so it is not
        # garbage-collected while the C library still holds it.
        self._dhbf = AFF(display_heartbeat)
        self._display_heartbeat = self._dhbf
        display(HTML(Widget.getClientCode()))  # HACK! Javascript should go into custom.js
    newWidget = Widget(self, **kwargs)
    self._widgets.append(newWidget)
    # Initial manual refresh so the widget shows the current state.
    newWidget.refresh(isauto=0)
    return newWidget
Wrapper function that returns a new widget attached to this simulation. Widgets provide real-time 3D visualizations from within an Jupyter notebook. See the Widget class for more details on the possible arguments. Arguments --------- All arguments passed to this wrapper function will be passed to /Widget class. Returns ------- A rebound.Widget object. Examples -------- >>> sim = rebound.Simulation() >>> sim.add(m=1.) >>> sim.add(m=1.e-3,x=1.,vy=1.) >>> sim.getWidget()
def find_warnings(content):
    """Yield lines that carry warning/error/info text instead of data."""
    keywords = [kw.lower() for kw in (
        "WARNING", "Couldn't find device", "Configuration setting",
        "read failed", "Was device resized?", "Invalid argument",
        "leaked on lvs", "Checksum error", "is exported", "failed.",
        "Invalid metadata", "response failed", "unknown device",
        "duplicate", "not found", "Missing device", "Internal error",
        "Input/output error", "Incorrect metadata", "Cannot process volume",
        "No such file or directory", "Logging initialised", "changed sizes",
        "vsnprintf failed", "write failed", "correction failed",
        "Failed to write", "Couldn't read", "marked missing",
        "Attempt to close device", "Ignoring supplied major",
        "not match metadata",
    )]
    for line in content:
        lowered = line.strip().lower()
        # Real data lines start with 'lvm2'; don't scan those for keywords.
        if lowered.startswith('lvm2'):
            continue
        if any(kw in lowered for kw in keywords):
            yield line
Look for lines containing warning/error/info strings instead of data.
def add(self, value):
    """
    Add a value to the reservoir

    The value will be casted to a floating-point, so a TypeError or a
    ValueError may be raised.
    """
    # Keep an existing float untouched; coerce anything else.
    coerced = value if isinstance(value, float) else float(value)
    return self._do_add(coerced)
Add a value to the reservoir The value will be casted to a floating-point, so a TypeError or a ValueError may be raised.
def __create(self, options, collation, session):
    """Send a ``create`` command for this collection.

    :param options: optional dict of collection-creation options; a
        ``size`` entry is coerced to float as the server expects.
    :param collation: optional collation to apply.
    :param session: client session the command runs under.
    """
    command = SON([("create", self.__name)])
    if options:
        if "size" in options:
            # The server expects capped-collection sizes as a double.
            options["size"] = float(options["size"])
        command.update(options)
    with self._socket_for_writes(session) as sock_info:
        self._command(
            sock_info,
            command,
            read_preference=ReadPreference.PRIMARY,
            write_concern=self._write_concern_for(session),
            collation=collation,
            session=session,
        )
Sends a create command with the given options.
def hangup_all_calls(self):
    """REST helper: hang up all live calls."""
    path = '/{0}/HangupAllCalls/'.format(self.api_version)
    return self.request(path, 'POST')
REST Hangup All Live Calls Helper
def argsort(self, axis=-1, kind="quicksort", order=None):
    """
    Returns the indices that would sort the array.

    See the documentation of ndarray.argsort for details about the
    keyword arguments.

    Example
    -------
    >>> from unyt import km
    >>> data = [3, 8, 7]*km
    >>> print(np.argsort(data))
    [0 2 1]
    >>> print(data.argsort())
    [0 2 1]
    """
    # Strip the unit wrapper and defer to the plain ndarray implementation.
    plain = self.view(np.ndarray)
    return plain.argsort(axis, kind, order)
Returns the indices that would sort the array. See the documentation of ndarray.argsort for details about the keyword arguments. Example ------- >>> from unyt import km >>> data = [3, 8, 7]*km >>> print(np.argsort(data)) [0 2 1] >>> print(data.argsort()) [0 2 1]
def retweets(self, tweet_id):
    """Retrieves up to the last 100 retweets for the provided tweet."""
    log.info("retrieving retweets of %s", tweet_id)
    url = ("https://api.twitter.com/1.1/statuses/retweets/"
           "{}.json".format(tweet_id))
    resp = self.get(url, params={"count": 100})
    # Yield each retweet from the decoded JSON array.
    yield from resp.json()
Retrieves up to the last 100 retweets for the provided tweet.
def all(self, command, params=None):
    """Return the rows of the response produced by :meth:`query`.

    > db.query('SELECT * FORM users WHERE id=:id', {"id":MY_USER_ID})

    :param command: SQL query
    :param params: parameters for prepared statements
    :rtype: list of dict
    """
    response = self.query(command, params)
    return response['rows']
Возвращает строки ответа, полученного через query > db.query('SELECT * FORM users WHERE id=:id', {"id":MY_USER_ID}) :param command: SQL запрос :param params: Параметры для prepared statements :rtype: list of dict
def try_handle_route(self, route_uri, method, request, uri, headers):
    """Try to handle the supplied request on the specified routing URI.

    :param route_uri: string - URI of the request
    :param method: string - HTTP Verb
    :param request: request object describing the HTTP request
    :param uri: URI of the request
    :param headers: case-insensitive headers dict

    :returns: tuple - (int, dict, string) containing:
                  int - the http response status code
                  dict - the headers for the http response
                  string - http string response
    """
    uri_path = route_uri
    if '?' in uri:
        logger.debug('StackInABoxService ({0}:{1}): Found query string '
                     'removing for match operation.'
                     .format(self.__id, self.name))
        # BUG FIX: split on the first '?' only, so URIs whose query string
        # itself contains '?' no longer raise a ValueError on unpacking.
        uri_path, uri_qs = uri.split('?', 1)
        logger.debug('StackInABoxService ({0}:{1}): uri = "{2}", '
                     'query = "{3}"'
                     .format(self.__id, self.name, uri_path, uri_qs))

    for k, v in six.iteritems(self.routes):
        logger.debug('StackInABoxService ({0}:{1}): Checking if '
                     'route {2} handles...'
                     .format(self.__id, self.name, v['uri']))
        logger.debug('StackInABoxService ({0}:{1}): ...using regex '
                     'pattern {2} against {3}'
                     .format(self.__id, self.name, v['regex'].pattern,
                             uri_path))
        if v['regex'].match(uri_path):
            # BUG FIX: the format string used {2} twice, logging the route
            # URI in place of the HTTP method.
            logger.debug('StackInABoxService ({0}:{1}): Checking if '
                         'route {2} handles method {3}...'
                         .format(self.__id, self.name, v['uri'], method))
            return v['handlers'](method, request, uri, headers)

    # 595 is this framework's sentinel for "no route matched".
    return (595, headers, 'Route ({0}) Not Handled'.format(uri))
Try to handle the supplied request on the specified routing URI. :param route_uri: string - URI of the request :param method: string - HTTP Verb :param request: request object describing the HTTP request :param uri: URI of the request :param headers: case-insensitive headers dict :returns: tuple - (int, dict, string) containing: int - the http response status code dict - the headers for the http response string - http string response
def decorate_client(api_client, func, name):
    """A helper for decorating :class:`bravado.client.SwaggerClient`.

    Looks up ``name`` on ``api_client`` (raising :class:`AttributeError`
    when absent, via ``getattr``). Plain attributes are returned as-is;
    callable attributes are wrapped so every invocation goes through
    ``func`` with ``name`` pre-bound as its first argument.

    Example usage:

    .. code-block:: python

        class SomeClientDecorator(object):
            def __init__(self, api_client, ...):
                self.api_client = api_client

            # First arg should be sufficiently unique to not conflict
            # with any of the kwargs
            def wrap_call(self, client_call_name, *args, **kwargs):
                ...

            def __getattr__(self, name):
                return decorate_client(self.api_client, self.wrap_call, name)

    :param api_client: the client which is being decorated
    :type api_client: :class:`bravado.client.SwaggerClient`
    :param func: a callable which accepts `name`, `*args`, `**kwargs`
    :type func: callable
    :param name: the attribute being accessed
    :type name: string
    :returns: the attribute from the `api_client` or a partial of `func`
    :raises: :class:`AttributeError`
    """
    attribute = getattr(api_client, name)
    if callable(attribute):
        return OperationDecorator(attribute, functools.partial(func, name))
    return attribute
A helper for decorating :class:`bravado.client.SwaggerClient`. :class:`bravado.client.SwaggerClient` can be extended by creating a class which wraps all calls to it. This helper is used in a :func:`__getattr__` to check if the attr exists on the api_client. If the attr does not exist raise :class:`AttributeError`, if it exists and is not callable return it, and if it is callable return a partial function calling `func` with `name`. Example usage: .. code-block:: python class SomeClientDecorator(object): def __init__(self, api_client, ...): self.api_client = api_client # First arg should be sufficiently unique to not conflict with any of # the kwargs def wrap_call(self, client_call_name, *args, **kwargs): ... def __getattr__(self, name): return decorate_client(self.api_client, self.wrap_call, name) :param api_client: the client which is being decorated :type api_client: :class:`bravado.client.SwaggerClient` :param func: a callable which accepts `name`, `*args`, `**kwargs` :type func: callable :param name: the attribute being accessed :type name: string :returns: the attribute from the `api_client` or a partial of `func` :raises: :class:`AttributeError`
def set_state(self, state=None, **kwargs):
    """ Set the view state of the camera

    Accepts a dict (and/or keyword arguments) in the format returned
    by get_state. The dict may be incomplete, in which case only the
    listed properties are updated.

    Parameters
    ----------
    state : dict
        The camera state.
    **kwargs : dict
        Unused keyword arguments.
    """
    props = state or {}
    props.update(kwargs)
    for name, value in props.items():
        # Reject anything that is not a declared camera state property.
        if name not in self._state_props:
            raise KeyError('Not a valid camera state property %r' % name)
        setattr(self, name, value)
Set the view state of the camera Should be a dict (or kwargs) as returned by get_state. It can be an incomplete dict, in which case only the specified properties are set. Parameters ---------- state : dict The camera state. **kwargs : dict Unused keyword arguments.
def change_svc_snapshot_command(self, service, snapshot_command):
    """Modify a service's snapshot command

    Format of the line that triggers function call::

        CHANGE_SVC_SNAPSHOT_COMMAND;<host_name>;<service_description>;<snapshot_command>

    (NOTE(review): the original docstring showed the CHANGE_HOST_* trigger
    line with an <event_handler_command> argument; this handler operates on
    a service -- confirm the exact external-command format against the
    command dispatcher.)

    :param service: service to modify snapshot command
    :type service: alignak.objects.service.Service
    :param snapshot_command: snapshot command command line (presumably an
        alignak command object -- verify against callers)
    :return: None
    """
    # NOTE(review): this sets the MODATTR_EVENT_HANDLER_COMMAND modified
    # attribute bit, not a snapshot-specific one -- confirm intentional.
    service.modified_attributes |= DICT_MODATTR["MODATTR_EVENT_HANDLER_COMMAND"].value
    # Hand the known commands plus the requested call to the service so it
    # can resolve and install its new snapshot command.
    data = {"commands": self.commands, "call": snapshot_command}
    service.change_snapshot_command(data)
    # Broadcast the updated service status so other daemons see the change.
    self.send_an_element(service.get_update_status_brok())
Modify a service's snapshot command Format of the line that triggers function call:: CHANGE_SVC_SNAPSHOT_COMMAND;<host_name>;<service_description>;<snapshot_command> :param service: service to modify snapshot command :type service: alignak.objects.service.Service :param snapshot_command: snapshot command command line :type snapshot_command: :return: None
def blastparse(self):
    """
    Parse the BLAST results and store, per sample, the genus frequency of
    the best hits plus the most frequent (top) genus.

    For each sample with an assembly, the 16S reference database is loaded
    and each BLAST hit's subject is mapped back to its genus; the genus seen
    most often becomes ``sample[self.analysistype].genus``. Samples without
    an assembly or without a BLAST report get 'NA' placeholders.
    """
    logging.info('Parsing BLAST results')
    # Load the NCBI 16S reference database as a dictionary
    for sample in self.runmetadata.samples:
        if sample.general.bestassemblyfile != 'NA':
            # Load the NCBI 16S reference database as a dictionary
            dbrecords = SeqIO.to_dict(SeqIO.parse(sample[self.analysistype].baitfile, 'fasta'))
            # Allow for no BLAST results
            if os.path.isfile(sample[self.analysistype].blastreport):
                # Initialise a dictionary to store the number of times a genus is the best hit
                sample[self.analysistype].frequency = dict()
                # Open the sequence profile file as a dictionary
                blastdict = DictReader(open(sample[self.analysistype].blastreport),
                                       fieldnames=self.fieldnames, dialect='excel-tab')
                # NOTE(review): recorddict is populated below but never read
                # afterwards -- candidate for removal (kept for behavior parity).
                recorddict = dict()
                for record in blastdict:
                    # Create the subject id. It will look like this:
                    # gi|1018196593|ref|NR_136472.1|
                    subject = record['subject_id']
                    # Extract the genus name. Use the subject id as a key in the
                    # dictionary of the reference db. It will return the full record
                    # e.g. gi|1018196593|ref|NR_136472.1| Escherichia marmotae
                    # strain HT073016 16S ribosomal RNA, partial sequence
                    # This full description can be manipulated to extract the genus
                    # e.g. Escherichia
                    genus = dbrecords[subject].description.split('|')[-1].split()[0]
                    # Increment the number of times this genus was found, or initialise
                    # the dictionary with this genus the first time it is seen
                    try:
                        sample[self.analysistype].frequency[genus] += 1
                    except KeyError:
                        sample[self.analysistype].frequency[genus] = 1
                    try:
                        recorddict[dbrecords[subject].description] += 1
                    except KeyError:
                        recorddict[dbrecords[subject].description] = 1
                # Sort the dictionary based on the number of times a genus is seen
                sample[self.analysistype].sortedgenera = sorted(sample[self.analysistype].frequency.items(),
                                                                key=operator.itemgetter(1), reverse=True)
                try:
                    # Extract the top result, and set it as the genus of the sample
                    sample[self.analysistype].genus = sample[self.analysistype].sortedgenera[0][0]
                    # Previous code relies on having the closest refseq genus, so set this as above
                    # sample.general.closestrefseqgenus = sample[self.analysistype].genus
                except IndexError:
                    # Populate attributes with 'NA'
                    sample[self.analysistype].sortedgenera = 'NA'
                    sample[self.analysistype].genus = 'NA'
                    # sample.general.closestrefseqgenus = 'NA'
            else:
                # Populate attributes with 'NA'
                sample[self.analysistype].sortedgenera = 'NA'
                sample[self.analysistype].genus = 'NA'
                # sample.general.closestrefseqgenus = 'NA'
        else:
            # Populate attributes with 'NA'
            sample[self.analysistype].sortedgenera = 'NA'
            sample[self.analysistype].genus = 'NA'
Parse the blast results, and store necessary data in dictionaries in sample object
def _get_ctx(self):
    """ Return the per-request web.ctx storage for the Template helper,
    creating the empty structure (javascript header/footer lists and a
    css list) on first access. """
    if self._WEB_CTX_KEY not in web.ctx:
        fresh = {"javascript": {"footer": [], "header": []},
                 "css": []}
        web.ctx[self._WEB_CTX_KEY] = fresh
    return web.ctx.get(self._WEB_CTX_KEY)
Get web.ctx object for the Template helper
def get_resource(self, resource_key, **variables):
    """Get a resource.

    Attempts to get and return a cached version of the resource if
    available, otherwise a new resource object is created and returned.

    Args:
        resource_key (`str`): Name of the type of `Resources` to find
        variables: data to identify / store on the resource

    Returns:
        `PackageRepositoryResource` instance.
    """
    resource_handle = self.make_resource_handle(resource_key, **variables)
    return self.get_resource_from_handle(resource_handle, verify_repo=False)
Get a resource. Attempts to get and return a cached version of the resource if available, otherwise a new resource object is created and returned. Args: resource_key (`str`): Name of the type of `Resources` to find variables: data to identify / store on the resource Returns: `PackageRepositoryResource` instance.
def sph2cart(r, az, elev):
    """
    Convert spherical to cartesian coordinates.

    Parameters
    ----------
    r : float
        radius
    az : float
        azimuth (angle about the z axis)
    elev : float
        inclination (polar) angle, measured from the positive z axis.
        NOTE: despite the former docs saying "elevation from xy plane",
        the math below uses z = r*cos(elev), i.e. elev is the polar
        angle: elev=0 points along +z, elev=pi/2 lies in the xy plane.

    Returns
    -------
    float
        x-coordinate
    float
        y-coordinate
    float
        z-coordinate
    """
    x = r * np.cos(az) * np.sin(elev)
    y = r * np.sin(az) * np.sin(elev)
    z = r * np.cos(elev)
    return x, y, z
Convert spherical to cartesian coordinates. Parameters ---------- r : float radius az : float azimuth (angle about the z axis) elev : float inclination (polar) angle, measured from the positive z axis (the implementation uses z = r*cos(elev)) Returns ------- float x-coordinate float y-coordinate float z-coordinate
def dataset_publication_finished(self, ignore_exception=False):
    '''
    This is the "commit". It triggers the creation/update of handles.

    * Check if the set of files corresponds to the previously published
      set (if applicable, and if solr url given, and if solr replied)
    * The dataset publication message is created and sent to the queue.
    * All file publication messages are sent to the queue.

    :param ignore_exception: passed through to the data-consistency
        check; presumably suppresses consistency errors when True --
        confirm against __check_data_consistency.
    :return: None
    '''
    # Guard: refuse to commit unless the internal state machine allows
    # publication at this point.
    self.__check_if_dataset_publication_allowed_right_now()
    self.__check_data_consistency(ignore_exception)
    self.__coupler.start_rabbit_business() # Synchronous: Opens connection. Asynchronous: Ignored.
    # Send the dataset message first, then one message per file.
    self.__create_and_send_dataset_publication_message_to_queue()
    self.__send_existing_file_messages_to_queue()
    self.__coupler.done_with_rabbit_business() # Synchronous: Closes connection. Asynchronous: Ignored.
    # After this, the publication is sealed; no more files can be added.
    self.__set_machine_state_to_finished()
    loginfo(LOGGER, 'Requesting to publish PID for dataset "%s" (version %s) and its files at "%s" (handle %s).', self.__drs_id, self.__version_number, self.__data_node, self.__dataset_handle)
This is the "commit". It triggers the creation/update of handles. * Check if the set of files corresponds to the previously published set (if applicable, and if solr url given, and if solr replied) * The dataset publication message is created and sent to the queue. * All file publication messages are sent to the queue.
def prepare_payload(op, method, uri, data):
    """Return the URI (modified perhaps) and body and headers.

    - For GET requests, encode parameters in the query string.
    - Otherwise always encode parameters in the request body.
    - Except op; this can always go in the query string.

    :param method: The HTTP method.
    :param uri: The URI of the action.
    :param data: An iterable of ``name, value`` or ``name, opener``
        tuples (see `name_value_pair`) to pack into the body or
        query, depending on the type of request.
    """
    query = [] if op is None else [("op", op)]

    def resolve(value):
        # A value may be a zero-argument "opener" returning a
        # file-like object; read it fully and close it.
        if callable(value):
            with value() as fd:
                return fd.read()
        return value

    if method == "GET":
        body, headers = None, []
        for name, value in data:
            query.append((name, resolve(value)))
    else:
        # Even if data is empty, construct a multipart request body.
        # Piston (server-side) sets `request.data` to `None` if
        # there's no payload.
        message = build_multipart_message(data)
        headers, body = encode_multipart_message(message)

    uri = urlparse(uri)._replace(query=urlencode(query)).geturl()
    return uri, body, headers
Return the URI (modified perhaps) and body and headers. - For GET requests, encode parameters in the query string. - Otherwise always encode parameters in the request body. - Except op; this can always go in the query string. :param method: The HTTP method. :param uri: The URI of the action. :param data: An iterable of ``name, value`` or ``name, opener`` tuples (see `name_value_pair`) to pack into the body or query, depending on the type of request.
def boxed(msg, ch="=", pad=5):
    """
    Render *msg* inside a box drawn with *ch* characters.

    Args:
        msg: Input string.
        ch: Character used to form the box.
        pad: Number of characters ch added before and after msg.

    >>> print(boxed("hello", ch="*", pad=2))
    ***********
    ** hello **
    ***********
    """
    if pad > 0:
        # Surround the stripped message with pad copies of ch on each side.
        msg = "{0} {1} {0}".format(pad * ch, msg.strip())

    border = len(msg) * ch
    return "\n".join([border, msg, border])
Returns a string in a box Args: msg: Input string. ch: Character used to form the box. pad: Number of characters ch added before and after msg. >>> print(boxed("hello", ch="*", pad=2)) *********** ** hello ** ***********
def get_results(self, metadata=False):
    """
    Return the detected results of the analysis as a list of dicts,
    one per result (sorted), optionally including extra metadata.
    """
    self.process_har()
    self.process_from_splash()

    entries = []
    for result in sorted(self._results.get_results()):
        entry = {'name': result.name}
        if result.version:
            entry['version'] = result.version
        if metadata:
            entry['homepage'] = result.homepage
            entry['type'] = result.type
            entry['from_url'] = result.from_url
            entry['plugin'] = result.plugin
        entries.append(entry)

    return entries
Return results of the analysis.
def _get_json(endpoint, params, referer='scores'):
    """
    Internal helper that performs the API request and returns the
    parsed JSON payload.

    Args:
        endpoint (str): endpoint to be called from the API
        params (dict): parameters to be passed to the API

    Raises:
        HTTPError: if requests hits a status code != 200

    Returns:
        json (json): json object for selected API call
    """
    request_headers = dict(HEADERS)
    request_headers['referer'] = 'http://stats.nba.com/{ref}/'.format(ref=referer)
    response = get(BASE_URL.format(endpoint=endpoint), params=params,
                   headers=request_headers)
    response.raise_for_status()
    return response.json()
Internal method to streamline our requests / json getting Args: endpoint (str): endpoint to be called from the API params (dict): parameters to be passed to the API Raises: HTTPError: if requests hits a status code != 200 Returns: json (json): json object for selected API call
def group_dict(self, group: str) -> Dict[str, Any]:
    """The names and values of options in a group.

    Useful for copying options into Application settings::

        from tornado.options import define, parse_command_line, options

        define('template_path', group='application')
        define('static_path', group='application')

        parse_command_line()

        application = Application(
            handlers, **options.group_dict('application'))

    .. versionadded:: 3.1
    """
    # An empty/falsy group name selects every option.
    return {
        opt.name: opt.value()
        for opt in self._options.values()
        if not group or group == opt.group_name
    }
The names and values of options in a group. Useful for copying options into Application settings:: from tornado.options import define, parse_command_line, options define('template_path', group='application') define('static_path', group='application') parse_command_line() application = Application( handlers, **options.group_dict('application')) .. versionadded:: 3.1
def apply_connectivity_changes(self, request):
    """
    Handle apply connectivity changes request json, trigger add or remove
    vlan methods, get response from them and create json response

    :param request: json with all required actions to configure or remove
        vlans from certain port
    :return Serialized DriverResponseRoot to json
    :rtype json
    """
    if request is None or request == "":
        raise Exception(self.__class__.__name__, "request is None or empty")

    holder = JsonRequestDeserializer(jsonpickle.decode(request))
    if not holder or not hasattr(holder, "driverRequest"):
        raise Exception(self.__class__.__name__, "Deserialized request is None or empty")

    driver_response = DriverResponse()
    add_vlan_thread_list = []
    remove_vlan_thread_list = []
    driver_response_root = DriverResponseRoot()

    # First pass: build (but do not start) one worker thread per vlan id
    # per action. Thread names carry the actionId so results can be
    # correlated afterwards via self.result.
    for action in holder.driverRequest.actions:
        # NOTE(review): logging.info("Action: ", dict) passes the dict as a
        # %-style argument with no placeholder in the message -- the dict is
        # effectively dropped from the log line.
        self._logger.info("Action: ", action.__dict__)
        self._validate_request_action(action)

        action_id = action.actionId
        full_name = action.actionTarget.fullName
        port_mode = action.connectionParams.mode.lower()

        if action.type == "setVlan":
            # Extract optional QnQ / C-tag settings from the service attributes.
            qnq = False
            ctag = ""
            for attribute in action.connectionParams.vlanServiceAttributes:
                if attribute.attributeName.lower() == "qnq" and attribute.attributeValue.lower() == "true":
                    qnq = True
                if attribute.attributeName.lower() == "ctag":
                    ctag = attribute.attributeValue
            for vlan_id in self._get_vlan_list(action.connectionParams.vlanId):
                add_vlan_thread = Thread(target=self.add_vlan, name=action_id,
                                         args=(vlan_id, full_name, port_mode, qnq, ctag))
                add_vlan_thread_list.append(add_vlan_thread)
        elif action.type == "removeVlan":
            for vlan_id in self._get_vlan_list(action.connectionParams.vlanId):
                remove_vlan_thread = Thread(target=self.remove_vlan, name=action_id,
                                            args=(vlan_id, full_name, port_mode,))
                remove_vlan_thread_list.append(remove_vlan_thread)
        else:
            self._logger.warning("Undefined action type determined '{}': {}".format(action.type, action.__dict__))
            continue

    # Removals are run (and fully joined) before additions so a port is
    # cleared before new vlans are configured on it.
    # Start all created remove_vlan_threads
    for thread in remove_vlan_thread_list:
        thread.start()

    # Join all remove_vlan_threads. Main thread will wait completion of all remove_vlan_thread
    for thread in remove_vlan_thread_list:
        thread.join()

    # Start all created add_vlan_threads
    for thread in add_vlan_thread_list:
        thread.start()

    # Join all add_vlan_threads. Main thread will wait completion of all add_vlan_thread
    for thread in add_vlan_thread_list:
        thread.join()

    # Second pass: collect per-action results (populated by the workers
    # keyed on actionId) and translate them into success/error responses.
    # NOTE(review): the response text always says "Add Vlan", even for
    # removeVlan actions -- confirm whether this wording is intended.
    request_result = []
    for action in holder.driverRequest.actions:
        result_statuses, message = zip(*self.result.get(action.actionId))
        if all(result_statuses):
            action_result = ConnectivitySuccessResponse(action,
                                                        "Add Vlan {vlan} configuration successfully completed"
                                                        .format(vlan=action.connectionParams.vlanId))
        else:
            message_details = "\n\t".join(message)
            action_result = ConnectivityErrorResponse(action,
                                                      "Add Vlan {vlan} configuration failed."
                                                      "\nAdd Vlan configuration details:\n{message_details}"
                                                      .format(vlan=action.connectionParams.vlanId,
                                                              message_details=message_details))
        request_result.append(action_result)

    driver_response.actionResults = request_result
    driver_response_root.driverResponse = driver_response
    return serialize_to_json(driver_response_root)
Handle apply connectivity changes request json, trigger add or remove vlan methods, get response from them and create json response :param request: json with all required actions to configure or remove vlans from certain port :return Serialized DriverResponseRoot to json :rtype json
def get_client(client=None):
    """
    Get an ElasticAPM client.

    The module-level ``_client`` cache is a ``(dotted_path, instance)``
    tuple. When ``client`` is None the class path is read from Django's
    ``ELASTIC_APM["CLIENT"]`` setting (falling back to
    ``default_client_class``) and the created instance is cached.
    When an explicit ``client`` path is passed, a fresh instance is
    returned WITHOUT updating the cache (temporary client).

    :param client: optional dotted import path of the client class to use
    :return:
    :rtype: elasticapm.base.Client
    """
    global _client

    # True when the caller asked for a specific (temporary) client class.
    tmp_client = client is not None
    if not tmp_client:
        config = getattr(django_settings, "ELASTIC_APM", {})
        client = config.get("CLIENT", default_client_class)

    # Cache miss (or different class requested): instantiate a new client.
    if _client[0] != client:
        client_class = import_string(client)
        instance = client_class()
        if not tmp_client:
            # Only cache instances created from the configured class.
            _client = (client, instance)
        return instance
    # Cache hit: reuse the existing instance.
    return _client[1]
Get an ElasticAPM client. :param client: :return: :rtype: elasticapm.base.Client
def generate_password(self) -> list:
    """Generate a list of random characters."""
    characterset = self._get_password_characters()
    if self.passwordlen is None or not characterset:
        raise ValueError("Can't generate password: character set is "
                         "empty or passwordlen isn't set")

    # Draw passwordlen independent random characters from the set.
    password = [randchoice(characterset) for _ in range(self.passwordlen)]
    self.last_result = password
    return password
Generate a list of random characters.
def _add_cloned_sers(self, plotArea, count):
    """
    Append *count* new `c:ser` elements to the last xChart element in
    *plotArea*. Each one is a deep copy of the previous last `c:ser`,
    assigned the next available idx/order values and inserted directly
    after its source.
    """
    last_ser = plotArea.last_ser
    for _ in range(count):
        new_ser = deepcopy(last_ser)
        new_ser.idx.val = plotArea.next_idx
        new_ser.order.val = plotArea.next_order
        last_ser.addnext(new_ser)
        # The clone becomes the template for the next iteration.
        last_ser = new_ser
Add `c:ser` elements to the last xChart element in *plotArea*, cloned from the last `c:ser` child of that last xChart.
def connectAlt(cls, redisConnectionParams):
    '''
    connectAlt - Create a class of this model which will use an alternate
    connection than the one specified by REDIS_CONNECTION_PARAMS on this
    model.

    @param redisConnectionParams <dict> - Dictionary of arguments to
        redis.Redis, same as REDIS_CONNECTION_PARAMS.

    @return - A class usable in all the same ways as the existing
        IndexedRedisModel, but connecting to a different instance. Fields
        and key stay the same; only the connection differs. Use #copyModel
        if you want an independent class for the model.
    '''
    if not isinstance(redisConnectionParams, dict):
        raise ValueError('redisConnectionParams must be a dictionary!')

    # Hash the params so each distinct connection gets a distinct class name.
    paramsHash = hashDictOneLevel(redisConnectionParams)

    classDict = copy.deepcopy(dict(cls.__dict__))
    classDict['REDIS_CONNECTION_PARAMS'] = redisConnectionParams

    altClassName = 'AltConnect' + cls.__name__ + str(paramsHash)
    return type(altClassName, cls.__bases__, classDict)
connectAlt - Create a class of this model which will use an alternate connection than the one specified by REDIS_CONNECTION_PARAMS on this model. @param redisConnectionParams <dict> - Dictionary of arguments to redis.Redis, same as REDIS_CONNECTION_PARAMS. @return - A class that can be used in all the same ways as the existing IndexedRedisModel, but that connects to a different instance. The fields and key will be the same here, but the connection will be different. use #copyModel if you want an independent class for the model
def copy(self):
    """
    Deepcopy the parameter (with a new uniqueid). All other tags remain
    the same, so some other tag should be changed before attaching back
    to a ParameterSet or Bundle.

    :return: the copied :class:`Parameter` object
    """
    serialized = self.to_json()
    duplicate = parameter_from_json(serialized)
    # TODO: may need to subclass for Parameters that require bundle by using this line instead:
    # duplicate = parameter_from_json(serialized, bundle=self._bundle)
    duplicate.set_uniqueid(_uniqueid())
    return duplicate
Deepcopy the parameter (with a new uniqueid). All other tags will remain the same... so some other tag should be changed before attaching back to a ParameterSet or Bundle. :return: the copied :class:`Parameter` object
def get_mode_group(self, group):
    """While a reference is kept by the caller, the returned mode group
    will compare equal with mode group returned by each subsequent call
    of this method with the same index and mode group returned from
    :attr:`~libinput.event.TabletPadEvent.mode_group`, provided the
    event was generated by this mode group.

    Args:
        group (int): A mode group index.
    Returns:
        ~libinput.define.TabletPadModeGroup: The mode group with
        the given index or :obj:`None` if an invalid index is given.
    """
    handle = self._libinput.libinput_device_tablet_pad_get_mode_group(
        self._handle, group)
    if not handle:
        # libinput returned NULL: the index is out of range.
        return None
    return TabletPadModeGroup(handle, self._libinput)
While a reference is kept by the caller, the returned mode group will compare equal with mode group returned by each subsequent call of this method with the same index and mode group returned from :attr:`~libinput.event.TabletPadEvent.mode_group`, provided the event was generated by this mode group. Args: group (int): A mode group index. Returns: ~libinput.define.TabletPadModeGroup: The mode group with the given index or :obj:`None` if an invalid index is given.
def load_metadata_for_topics(self, *topics):
    """
    Fetch broker and topic-partition metadata from the server,
    and update internal data:
    broker list, topic/partition list, and topic/partition -> broker map

    This method should be called after receiving any error

    Arguments:
        *topics (optional): If a list of topics is provided,
            the metadata refresh will be limited to the specified topics only.

    Exceptions:
    ----------
    If the broker is configured to not auto-create topics,
    expect UnknownTopicOrPartitionError for topics that don't exist

    If the broker is configured to auto-create topics,
    expect LeaderNotAvailableError for new topics
    until partitions have been initialized.

    Exceptions *will not* be raised in a full refresh (i.e. no topic list)
    In this case, error codes will be logged as errors

    Partition-level errors will also not be raised here
    (a single partition w/o a leader, for example)
    """
    # Normalize topic names to bytes for the wire protocol.
    topics = [kafka_bytestring(t) for t in topics]

    # Drop stale cached metadata before refreshing.
    if topics:
        for topic in topics:
            self.reset_topic_metadata(topic)
    else:
        self.reset_all_metadata()

    resp = self.send_metadata_request(topics)

    log.debug('Updating broker metadata: %s', resp.brokers)
    log.debug('Updating topic metadata: %s', resp.topics)

    self.brokers = dict([(broker.nodeId, broker)
                         for broker in resp.brokers])

    for topic_metadata in resp.topics:
        topic = topic_metadata.topic
        partitions = topic_metadata.partitions

        # Errors expected for new topics
        try:
            kafka_common.check_error(topic_metadata)
        except (UnknownTopicOrPartitionError, LeaderNotAvailableError) as e:
            # Raise if the topic was passed in explicitly
            if topic in topics:
                raise

            # Otherwise, just log a warning
            log.error('Error loading topic metadata for %s: %s', topic, type(e))
            continue

        self.topic_partitions[topic] = {}
        for partition_metadata in partitions:
            partition = partition_metadata.partition
            leader = partition_metadata.leader

            self.topic_partitions[topic][partition] = partition_metadata

            # Populate topics_to_brokers dict
            topic_part = TopicAndPartition(topic, partition)

            # Check for partition errors
            try:
                kafka_common.check_error(partition_metadata)

            # If No Leader, topics_to_brokers topic_partition -> None
            except LeaderNotAvailableError:
                log.error('No leader for topic %s partition %d', topic, partition)
                self.topics_to_brokers[topic_part] = None
                continue
            # If one of the replicas is unavailable -- ignore
            # this error code is provided for admin purposes only
            # we never talk to replicas, only the leader
            except ReplicaNotAvailableError:
                log.debug('Some (non-leader) replicas not available for topic %s partition %d',
                          topic, partition)

            # If Known Broker, topic_partition -> BrokerMetadata
            if leader in self.brokers:
                self.topics_to_brokers[topic_part] = self.brokers[leader]

            # If Unknown Broker, fake BrokerMetadata so we dont lose the id
            # (not sure how this could happen. server could be in bad state)
            else:
                self.topics_to_brokers[topic_part] = BrokerMetadata(
                    leader, None, None
                )
Fetch broker and topic-partition metadata from the server, and update internal data: broker list, topic/partition list, and topic/partition -> broker map This method should be called after receiving any error Arguments: *topics (optional): If a list of topics is provided, the metadata refresh will be limited to the specified topics only. Exceptions: ---------- If the broker is configured to not auto-create topics, expect UnknownTopicOrPartitionError for topics that don't exist If the broker is configured to auto-create topics, expect LeaderNotAvailableError for new topics until partitions have been initialized. Exceptions *will not* be raised in a full refresh (i.e. no topic list) In this case, error codes will be logged as errors Partition-level errors will also not be raised here (a single partition w/o a leader, for example)
def ldirectory(inpath, outpath, args, scope):
    """Compile all *.less files in directory

    Each ``*.less`` file found directly in *inpath* is compiled to a
    ``.css`` (or ``.min.css``) file in *outpath*; unchanged files are
    skipped unless ``args.force`` is set, and subdirectories are
    processed recursively when ``args.recurse`` is set.

    Args:
        inpath (str): Path to compile
        outpath (str): Output directory
        args (object): Argparse Object
        scope (Scope): Scope object or None
    """
    yacctab = 'yacctab' if args.debug else None
    if not outpath:
        sys.exit("Compile directory option needs -o ...")
    else:
        # Create the output directory on demand (unless dry-running).
        if not os.path.isdir(outpath):
            if args.verbose:
                print("Creating '%s'" % outpath, file=sys.stderr)
            if not args.dry_run:
                os.mkdir(outpath)
    less = glob.glob(os.path.join(inpath, '*.less'))
    f = formatter.Formatter(args)
    for lf in less:
        outf = os.path.splitext(os.path.basename(lf))
        minx = '.min' if args.min_ending else ''
        outf = "%s/%s%s.css" % (outpath, outf[0], minx)
        # Recompile when forced, when no output exists yet, or when the
        # source is newer than the existing output (mtime comparison).
        if not args.force and os.path.exists(outf):
            recompile = os.path.getmtime(outf) < os.path.getmtime(lf)
        else:
            recompile = True
        if recompile:
            print('%s -> %s' % (lf, outf))
            p = parser.LessParser(
                yacc_debug=(args.debug),
                lex_optimize=True,
                yacc_optimize=(not args.debug),
                scope=scope,
                tabfile=yacctab,
                verbose=args.verbose)
            p.parse(filename=lf, debuglevel=0)
            css = f.format(p)
            if not args.dry_run:
                with open(outf, 'w') as outfile:
                    outfile.write(css)
        elif args.verbose:
            print('skipping %s, not modified' % lf, file=sys.stderr)
        sys.stdout.flush()
    # Recurse into subdirectories (skipping hidden ones and the output
    # directory itself), mirroring the directory layout under outpath.
    if args.recurse:
        [
            ldirectory(
                os.path.join(inpath, name), os.path.join(outpath, name),
                args, scope) for name in os.listdir(inpath)
            if os.path.isdir(os.path.join(inpath, name))
            and not name.startswith('.') and not name == outpath
        ]
Compile all *.less files in directory Args: inpath (str): Path to compile outpath (str): Output directory args (object): Argparse Object scope (Scope): Scope object or None
def close(self):
    """
    Closes this IOU VM.

    Runs the base-class close first; a falsy return there means the VM
    was already closed (or must not be closed), in which case False is
    returned immediately. Otherwise every UDP NIO attached to the
    Ethernet and serial adapters has its local port released back to the
    project's port manager, and the VM process is stopped.

    NOTE: generator-based coroutine (``yield from``); must be awaited/
    yielded from by the caller.
    """
    if not (yield from super().close()):
        return False

    adapters = self._ethernet_adapters + self._serial_adapters
    for adapter in adapters:
        if adapter is not None:
            for nio in adapter.ports.values():
                # Only UDP NIOs hold ports reserved from the port
                # manager; release them so they can be reused.
                if nio and isinstance(nio, NIOUDP):
                    self.manager.port_manager.release_udp_port(nio.lport, self._project)

    yield from self.stop()
Closes this IOU VM.
def waitPuppetCatalogToBeApplied(self, key, sleepTime=5):
    """ Function waitPuppetCatalogToBeApplied
    Wait for puppet catalog to be applied

    @param key: The host name or ID
    @param sleepTime: Seconds to wait between two status polls
    @return RETURN: None on success, False if provisioning failed
    """
    # Poll the host's status until provisioning either succeeds or fails.
    while True:
        status = self[key].getStatus()
        if status in ('No Changes', 'Active'):
            self.__printProgression__(True, key + ' creation: provisioning OK')
            # Success: keep the historical contract of returning None.
            return None
        if status == 'Error':
            self.__printProgression__(False, key + ' creation: Error - '
                                      'Error during provisioning')
            # Failure is signalled by returning False (previous code set a
            # dead loop flag before this return; removed).
            return False
        # Still in progress: report and retry after a short pause.
        self.__printProgression__('In progress',
                                  key + ' creation: provisioning ({})'
                                  .format(status), eol='\r')
        time.sleep(sleepTime)
Function waitPuppetCatalogToBeApplied Wait for puppet catalog to be applied @param key: The host name or ID @return RETURN: None on success, False if provisioning ends in error
def get_resource(request):
    """Retrieve a file's data."""
    hash = request.matchdict['hash']

    # Look the file up by its hash.
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            cursor.execute(SQL['get-resource'], dict(hash=hash))
            row = cursor.fetchone()

    # No row means the hash is unknown.
    if row is None:
        raise httpexceptions.HTTPNotFound()
    mimetype, file = row

    resp = request.response
    resp.status = "200 OK"
    resp.content_type = mimetype
    resp.body = file[:]
    return resp
Retrieve a file's data.
def watched_file_handler(name, logname, filename, mode='a', encoding=None,
                         delay=False):
    """
    A Bark logging handler that writes to a named file, reopening it
    whenever the file changes between log messages. Similar to
    logging.handlers.WatchedFileHandler.
    """
    handler = logging.handlers.WatchedFileHandler(
        filename, mode=mode, encoding=encoding, delay=delay)
    return wrap_log_handler(handler)
A Bark logging handler logging output to a named file. If the file has changed since the last log message was written, it will be closed and reopened. Similar to logging.handlers.WatchedFileHandler.
def unregister(self, cleanup_mode):
    """Unregister this machine (previously registered with
    :py:func:`IVirtualBox.register_machine`), optionally performing
    additional cleanup first.

    No files are deleted; only the machine configuration and the list of
    registered machines in the VirtualBox object change.  To delete the
    machine's files (including its XML file), pass the returned media to
    :py:func:`delete_config`.

    How thorough the cleanup is depends on @a cleanupMode:

    - "UnregisterOnly": unregister only; the call fails if the machine
      is in "Saved" state or has snapshots or attached media
      (see :py:class:`IMediumAttachment`).
    - "DetachAllReturnNone": succeeds even with saved state, snapshots
      or attached media; detaches everything but returns no medium
      objects (all of the machine's media remain open).
    - "DetachAllReturnHardDisksOnly": as above, but returns the detached
      hard-disk media, ready to pass to :py:func:`delete_config`.
    - "Full": as above, but returns *all* detached media, including
      removable media such as DVDs and floppies — be careful before
      forwarding those to :py:func:`delete_config`, since users usually
      want to keep ISO and RAW images.

    Media shared with other machines may be moved to those machines'
    media registries (see :py:class:`IMedium`).  This method implicitly
    calls :py:func:`save_settings` (possibly on other machines too) and
    fires :py:class:`IMachineRegisteredEvent` on success.  The call
    fails while the machine is locked (see :py:class:`ISession`).  An
    inaccessible machine (see :py:func:`accessible`) is unregistered and
    fully uninitialized, after which the returned machine object is
    unusable and any method call returns "Object not ready".

    in cleanup_mode of type :class:`CleanupMode`
        How to clean up after the machine has been unregistered.

    return media of type :class:`IMedium`
        List of media detached from the machine, depending on the
        @a cleanupMode parameter.

    raises :class:`VBoxErrorInvalidObjectState`
        Machine is currently locked for a session.
    """
    if not isinstance(cleanup_mode, CleanupMode):
        raise TypeError("cleanup_mode can only be an instance of type CleanupMode")
    detached = self._call("unregister", in_p=[cleanup_mode])
    return [IMedium(handle) for handle in detached]
Unregisters a machine previously registered with :py:func:`IVirtualBox.register_machine` and optionally do additional cleanup before the machine is unregistered. This method does not delete any files. It only changes the machine configuration and the list of registered machines in the VirtualBox object. To delete the files which belonged to the machine, including the XML file of the machine itself, call :py:func:`delete_config` , optionally with the array of IMedium objects which was returned from this method. How thoroughly this method cleans up the machine configuration before unregistering the machine depends on the @a cleanupMode argument. With "UnregisterOnly", the machine will only be unregistered, but no additional cleanup will be performed. The call will fail if the machine is in "Saved" state or has any snapshots or any media attached (see :py:class:`IMediumAttachment` ). It is the responsibility of the caller to delete all such configuration in this mode. In this mode, the API behaves like the former @c IVirtualBox::unregisterMachine() API which it replaces. With "DetachAllReturnNone", the call will succeed even if the machine is in "Saved" state or if it has snapshots or media attached. All media attached to the current machine state or in snapshots will be detached. No medium objects will be returned; all of the machine's media will remain open. With "DetachAllReturnHardDisksOnly", the call will behave like with "DetachAllReturnNone", except that all the hard disk medium objects which were detached from the machine will be returned as an array. This allows for quickly passing them to the :py:func:`delete_config` API for closing and deletion. With "Full", the call will behave like with "DetachAllReturnHardDisksOnly", except that all media will be returned in the array, including removable media like DVDs and floppies. This might be useful if the user wants to inspect in detail which media were attached to the machine. 
Be careful when passing the media array to :py:func:`delete_config` in that case because users will typically want to preserve ISO and RAW image files. A typical implementation will use "DetachAllReturnHardDisksOnly" and then pass the resulting IMedium array to :py:func:`delete_config` . This way, the machine is completely deleted with all its saved states and hard disk images, but images for removable drives (such as ISO and RAW files) will remain on disk. This API does not verify whether the media files returned in the array are still attached to other machines (i.e. shared between several machines). If such a shared image is passed to :py:func:`delete_config` however, closing the image will fail there and the image will be silently skipped. This API may, however, move media from this machine's media registry to other media registries (see :py:class:`IMedium` for details on media registries). For machines created with VirtualBox 4.0 or later, if media from this machine's media registry are also attached to another machine (shared attachments), each such medium will be moved to another machine's registry. This is because without this machine's media registry, the other machine cannot find its media any more and would become inaccessible. This API implicitly calls :py:func:`save_settings` to save all current machine settings before unregistering it. It may also silently call :py:func:`save_settings` on other machines if media are moved to other machines' media registries. After successful method invocation, the :py:class:`IMachineRegisteredEvent` event is fired. The call will fail if the machine is currently locked (see :py:class:`ISession` ). If the given machine is inaccessible (see :py:func:`accessible` ), it will be unregistered and fully uninitialized right afterwards. As a result, the returned machine object will be unusable and an attempt to call **any** method will return the "Object not ready" error. 
in cleanup_mode of type :class:`CleanupMode` How to clean up after the machine has been unregistered. return media of type :class:`IMedium` List of media detached from the machine, depending on the @a cleanupMode parameter. raises :class:`VBoxErrorInvalidObjectState` Machine is currently locked for a session.
def errcat(self):
    '''
    List the posts to be modified.
    '''
    candidates = MPost.query_random(limit=1000)
    outrecs = []   # posts with no category at all
    errrecs = []   # posts whose cached def_cat_uid disagrees with the category
    checked = 0
    for postinfo in candidates:
        # Stop once enough problematic posts have been collected.
        if checked > 16:
            break
        cat = MPost2Catalog.get_first_category(postinfo.uid)
        if cat:
            ext = postinfo.extinfo
            # Flag the post when the cached default-category id is
            # missing or does not match the actual first category.
            if 'def_cat_uid' not in ext or ext['def_cat_uid'] != cat.tag_id:
                errrecs.append(postinfo)
                checked += 1
        else:
            outrecs.append(postinfo)
            checked += 1
    self.render('list/errcat.html',
                kwd={},
                norecs=outrecs,
                errrecs=errrecs,
                userinfo=self.userinfo)
List the posts to be modified.
def draw_variable(loc, scale, shape, skewness, nsims):
    """ Draws random variables from the Cauchy distribution

    Parameters
    ----------
    loc : float
        location parameter for the distribution

    scale : float
        scale parameter for the distribution

    shape : float
        tail thickness parameter for the distribution
        (accepted for interface compatibility; the Cauchy draw does
        not use it)

    skewness : float
        skewness parameter for the distribution
        (accepted for interface compatibility; the Cauchy draw does
        not use it)

    nsims : int or list
        number of draws to take from the distribution

    Returns
    ----------
    - Random draws from the distribution
    """
    # Positional args: loc, scale, size — matches the original call.
    return ss.cauchy.rvs(loc, scale, nsims)
Draws random variables from this distribution Parameters ---------- loc : float location parameter for the distribution scale : float scale parameter for the distribution shape : float tail thickness parameter for the distribution skewness : float skewness parameter for the distribution nsims : int or list number of draws to take from the distribution Returns ---------- - Random draws from the distribution
def object_patch_rm_link(self, root, link, **kwargs):
    """Creates a new merkledag object based on an existing one, with the
    link of the given name removed.

    .. code-block:: python

        >>> c.object_patch_rm_link(
        ...     'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k',
        ...     'Johnny'
        ... )
        {'Hash': 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2'}

    Parameters
    ----------
    root : str
        IPFS hash of the object to modify
    link : str
        name of the link to remove

    Returns
    -------
        dict : Hash of new object
    """
    # The endpoint takes the (root, link) pair as a single argument tuple.
    return self._client.request('/object/patch/rm-link',
                                ((root, link),),
                                decoder='json', **kwargs)
Creates a new merkledag object based on an existing one. The new object will lack a link to the specified object. .. code-block:: python >>> c.object_patch_rm_link( ... 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k', ... 'Johnny' ... ) {'Hash': 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2'} Parameters ---------- root : str IPFS hash of the object to modify link : str name of the link to remove Returns ------- dict : Hash of new object
def twilight(self, direction=SUN_RISING, date=None, local=True, use_elevation=True):
    """Returns the start and end times of Twilight when the sun is
    traversing in the specified direction.

    This method defines twilight as being between the time
    when the sun is at -6 degrees and sunrise/sunset.

    :param direction:  Determines whether the time is for the sun rising
                       or setting. Use ``astral.SUN_RISING`` or
                       ``astral.SUN_SETTING``.
    :type direction:   int
    :param date:       The date for which to calculate the times.
                       Defaults to today.
    :type date:        :class:`datetime.date`
    :param local:      True  = Times returned in the location's time zone;
                       False = Times returned in UTC.
    :type local:       bool
    :param use_elevation: True  = Take the location's elevation into
                          account; False = ignore elevation.
    :type use_elevation:  bool

    :return: A tuple of the date and time at which twilight starts and
             ends.
    :rtype: (:class:`~datetime.datetime`, :class:`~datetime.datetime`)
    """
    if local and self.timezone is None:
        raise ValueError("Local time requested but Location has no timezone set.")

    when = datetime.date.today() if date is None else date
    elev = self.elevation if use_elevation else 0

    start, end = self.astral.twilight_utc(
        direction, when, self.latitude, self.longitude, elev
    )

    if not local:
        return start, end
    return start.astimezone(self.tz), end.astimezone(self.tz)
Returns the start and end times of Twilight in the UTC timezone when the sun is traversing in the specified direction. This method defines twilight as being between the time when the sun is at -6 degrees and sunrise/sunset. :param direction: Determines whether the time is for the sun rising or setting. Use ``astral.SUN_RISING`` or ``astral.SUN_SETTING``. :type direction: int :param date: The date for which to calculate the times. :type date: :class:`datetime.date` :param local: True = Time to be returned in location's time zone; False = Time to be returned in UTC. If not specified then the time will be returned in local time :type local: bool :param use_elevation: True = Return times that allow for the location's elevation; False = Return times that don't use elevation. If not specified then times will take elevation into account. :type use_elevation: bool :return: A tuple of the UTC date and time at which twilight starts and ends. :rtype: (:class:`~datetime.datetime`, :class:`~datetime.datetime`)
def hash_sha256(self): """Calculate sha256 fingerprint.""" fp_plain = hashlib.sha256(self._decoded_key).digest() return (b"SHA256:" + base64.b64encode(fp_plain).replace(b"=", b"")).decode("utf-8")
Calculate sha256 fingerprint.
def dumps(*args, **kwargs):
    """
    Wrapper for json.dumps that uses the JSONArgonautsEncoder.
    """
    import json

    from django.conf import settings

    from argonauts.serializers import JSONArgonautsEncoder

    # Caller-supplied options always win; fill in project defaults only
    # where absent.  Pretty-print in DEBUG mode, compact otherwise.
    defaults = {'cls': JSONArgonautsEncoder}
    if settings.DEBUG:
        defaults['indent'] = 4
        defaults['separators'] = (',', ': ')
    else:
        defaults['separators'] = (',', ':')
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    return json.dumps(*args, **kwargs)
Wrapper for json.dumps that uses the JSONArgonautsEncoder.
def _highlightBracket(self, bracket, qpart, block, columnIndex):
    """Highlight a bracket and its matching counterpart.

    Returns a list of QTextEdit.ExtraSelection objects: two selections
    when the match is found, one "mismatch" selection when it is not,
    and an empty list when the search timed out.
    """
    try:
        matchedBlock, matchedColumnIndex = \
            self._findMatchingBracket(bracket, qpart, block, columnIndex)
    except _TimeoutException:
        # Search ran out of time; highlight nothing.
        return []

    if matchedBlock is None:
        # No matching bracket: mark this one as a mismatch.
        self.currentMatchedBrackets = None
        return [self._makeMatchSelection(block, columnIndex, False)]

    self.currentMatchedBrackets = ((block, columnIndex),
                                   (matchedBlock, matchedColumnIndex))
    return [self._makeMatchSelection(block, columnIndex, True),
            self._makeMatchSelection(matchedBlock, matchedColumnIndex, True)]
Highlight bracket and matching bracket Return tuple of QTextEdit.ExtraSelection's
def exclude_from(l, containing=(), equal_to=()):
    """Filter *l*, dropping elements that contain any item of
    *containing* or that equal any item of *equal_to*.

    :param l: iterable of elements to filter
    :param containing: items whose presence inside an element excludes it
    :param equal_to: values an element may not be equal to
    :return: list of the remaining elements, in original order

    Example:
    >>> l = ['bob', 'r', 'rob\r', '\r\nrobert']
    >>> containing = ['\n', '\r']
    >>> equal_to = ['r']
    >>> exclude_from(l, containing, equal_to)
    ['bob']
    """
    # Defaults are immutable tuples: the original used mutable list
    # defaults, which is a well-known Python pitfall (shared between
    # calls if ever mutated).
    def excluded(item):
        return any(c in item for c in containing) or item in equal_to

    return [item for item in l if not excluded(item)]
Exclude elements in list l containing any elements from list ex. Example: >>> l = ['bob', 'r', 'rob\r', '\r\nrobert'] >>> containing = ['\n', '\r'] >>> equal_to = ['r'] >>> exclude_from(l, containing, equal_to) ['bob']
def do_gate(self, gate: Gate):
    """
    Perform a gate.

    :return: ``self`` to support method chaining.
    """
    matrix, target_qubits = _get_gate_tensor_and_qubits(gate=gate)
    # Developers: an einsum-based application works as well; tensordot
    # benchmarks slightly faster, so it is used here.
    # self.wf = targeted_einsum(gate=matrix, wf=self.wf, wf_target_inds=target_qubits)
    self.wf = targeted_tensordot(gate=matrix, wf=self.wf,
                                 wf_target_inds=target_qubits)
    return self
Perform a gate. :return: ``self`` to support method chaining.
def parent_org_sdo_ids(self):
    '''The SDO IDs of the compositions this RTC belongs to.'''
    ids = []
    for sdo in self._obj.get_organizations():
        if not sdo:
            continue
        owner = sdo.get_owner()._narrow(SDOPackage.SDO)
        ids.append(owner.get_sdo_id())
    return ids
The SDO IDs of the compositions this RTC belongs to.
def star(self):
    """
    Stars the project

    .. deprecated:: 0.8.5
        Update Taiga and use like instead
    """
    warnings.warn(
        "Deprecated! Update Taiga and use .like() instead",
        DeprecationWarning
    )
    url_template = '/{endpoint}/{id}/star'
    self.requester.post(url_template, endpoint=self.endpoint, id=self.id)
    return self
Stars the project .. deprecated:: 0.8.5 Update Taiga and use like instead
def _post_read_flds(flds, header):
    """Process flds to handle sphericity.

    When the fields hold at least three components and the geometry is
    spherical (rcmb > 0), convert each block's vector components to
    spherical coordinates, rebuilding the phi mesh in ``header`` before
    and after the conversion.

    Args:
        flds: field array; axis 0 indexes components, last axis blocks.
        header: metadata dict; reads 'rcmb', 'ntb', 'x_mesh', 'y_mesh'
            and writes 'p_mesh'.

    Returns:
        the (possibly converted) flds array.
    """
    # >= 3 components and a positive core radius => spherical vector data.
    if flds.shape[0] >= 3 and header['rcmb'] > 0:
        # spherical vector
        # Phi mesh consumed by _to_spherical.
        # NOTE(review): the roll by -1 along axis 1 presumably aligns the
        # mesh with the staggered grid layout — confirm against the reader.
        header['p_mesh'] = np.roll(
            np.arctan2(header['y_mesh'], header['x_mesh']), -1, 1)
        for ibk in range(header['ntb']):
            flds[..., ibk] = _to_spherical(flds[..., ibk], header)
        # Rebuild p_mesh shifted into the [0, 2*pi) range for later use.
        header['p_mesh'] = np.roll(
            np.arctan2(header['y_mesh'], -header['x_mesh']) + np.pi, -1, 1)
    return flds
Process flds to handle sphericity.
def shutdown(self):
    """
    Shuts down this HazelcastClient.

    Fires LIFECYCLE_STATE_SHUTTING_DOWN, tears the client subsystems
    down (near caches, statistics, partition service, heartbeat,
    cluster, reactor — in that order), then fires
    LIFECYCLE_STATE_SHUTDOWN and logs completion.  Does nothing unless
    the lifecycle service reports the client as live.
    """
    # Only shut down a client that is actually running.
    if self.lifecycle.is_live:
        self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTTING_DOWN)
        self.near_cache_manager.destroy_all_near_caches()
        self.statistics.shutdown()
        self.partition_service.shutdown()
        self.heartbeat.shutdown()
        self.cluster.shutdown()
        # NOTE(review): the reactor is stopped last, presumably so the
        # services above can still use the network while closing — keep
        # this ordering.
        self.reactor.shutdown()
        self.lifecycle.fire_lifecycle_event(LIFECYCLE_STATE_SHUTDOWN)
        self.logger.info("Client shutdown.", extra=self._logger_extras)
Shuts down this HazelcastClient.
def wrap_args_with_process_isolation(self, args):
    '''
    Wrap existing command line with bwrap to restrict access to:
     - self.process_isolation_path (generally, /tmp) (except for own /tmp files)

    :param args: the original command line as a list of strings
    :return: a new argument list with the bwrap executable and its
             bind-mount/chdir options prepended to ``args``
    '''
    cwd = os.path.realpath(self.cwd)
    pi_temp_dir = self.build_process_isolation_temp_dir()
    # Base bwrap invocation: new PID namespace, the whole filesystem
    # bound through, and a fresh /proc.
    new_args = [self.process_isolation_executable or 'bwrap', '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']

    # Hide each requested path by bind-mounting an empty temp file or
    # directory (matching the path's type) over it.
    for path in sorted(set(self.process_isolation_hide_paths or [])):
        if not os.path.exists(path):
            logger.debug('hide path not found: {0}'.format(path))
            continue
        path = os.path.realpath(path)
        if os.path.isdir(path):
            new_path = tempfile.mkdtemp(dir=pi_temp_dir)
            os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        else:
            handle, new_path = tempfile.mkstemp(dir=pi_temp_dir)
            os.close(handle)
            os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
        new_args.extend(['--bind', '{0}'.format(new_path), '{0}'.format(path)])

    # Paths that must remain visible read-write inside the sandbox.
    if self.private_data_dir:
        show_paths = [self.private_data_dir]
    else:
        show_paths = [cwd]

    # Read-only binds requested by configuration.
    for path in sorted(set(self.process_isolation_ro_paths or [])):
        if not os.path.exists(path):
            logger.debug('read-only path not found: {0}'.format(path))
            continue
        path = os.path.realpath(path)
        new_args.extend(['--ro-bind', '{0}'.format(path), '{0}'.format(path)])

    show_paths.extend(self.process_isolation_show_paths or [])
    for path in sorted(set(show_paths)):
        if not os.path.exists(path):
            logger.debug('show path not found: {0}'.format(path))
            continue
        path = os.path.realpath(path)
        new_args.extend(['--bind', '{0}'.format(path), '{0}'.format(path)])

    if self.execution_mode == ExecutionMode.ANSIBLE_PLAYBOOK:
        # playbook runs should cwd to the SCM checkout dir
        if self.directory_isolation_path is not None:
            new_args.extend(['--chdir', os.path.realpath(self.directory_isolation_path)])
        else:
            new_args.extend(['--chdir', self.project_dir])
    elif self.execution_mode == ExecutionMode.ANSIBLE:
        # ad-hoc runs should cwd to the root of the private data dir
        new_args.extend(['--chdir', os.path.realpath(self.private_data_dir)])

    new_args.extend(args)
    return new_args
Wrap existing command line with bwrap to restrict access to: - self.process_isolation_path (generally, /tmp) (except for own /tmp files)
def generate(self, src=None, identifier=None):
    """Generate static files for one source image.

    Computes the tile layout for the image at *src*, writes every
    partial tile and reduced full-size image under the destination
    directory using *identifier* as the IIIF identifier, renders any
    extra requested images, and finally writes the info.json
    description (or only logs what it would write in dryrun mode).

    Args:
        src: path of the source image.
        identifier: IIIF identifier used in output paths.
    """
    self.src = src
    self.identifier = identifier
    # Get image details and calculate tiles
    im = self.manipulator_klass()
    im.srcfile = self.src
    im.set_max_image_pixels(self.max_image_pixels)
    im.do_first()
    width = im.width
    height = im.height
    scale_factors = im.scale_factors(self.tilesize)
    # Setup destination and IIIF identifier
    self.setup_destination()
    # Write out images
    for (region, size) in static_partial_tile_sizes(width, height, self.tilesize, scale_factors):
        self.generate_tile(region, size)
    sizes = []
    for size in static_full_sizes(width, height, self.tilesize):
        # See https://github.com/zimeon/iiif/issues/9
        sizes.append({'width': size[0], 'height': size[1]})
        self.generate_tile('full', size)
    # Render any extra requested images; record sizes for scaled
    # full-image requests so they appear in info.json.
    for request in self.extras:
        request.identifier = self.identifier
        if (request.is_scaled_full_image()):
            sizes.append({'width': request.size_wh[0], 'height': request.size_wh[1]})
        self.generate_file(request)
    # Write info.json
    # API versions up to 1.1 used the quality name 'native' instead of
    # 'default'.
    qualities = ['default'] if (self.api_version > '1.1') else ['native']
    info = IIIFInfo(level=0, server_and_prefix=self.prefix, identifier=self.identifier, width=width, height=height, scale_factors=scale_factors, tile_width=self.tilesize, tile_height=self.tilesize, formats=['jpg'], qualities=qualities, sizes=sizes, api_version=self.api_version)
    json_file = os.path.join(self.dst, self.identifier, 'info.json')
    if (self.dryrun):
        self.logger.warning(
            "dryrun mode, would write the following files:")
        self.logger.warning("%s / %s/%s" % (self.dst, self.identifier, 'info.json'))
    else:
        with open(json_file, 'w') as f:
            f.write(info.as_json())
            f.close()
        self.logger.info("%s / %s/%s" % (self.dst, self.identifier, 'info.json'))
        self.logger.debug("Written %s" % (json_file))
Generate static files for one source image.
def add_tokens_for_group(self, with_pass=False):
    """Add the tokens for the group signature"""
    super_kls = self.groups.super_kls
    kls_name = self.groups.kls_name

    # Start the describe block back at column zero.
    self.reset_indentation('')
    self.result.extend(self.tokens.make_describe(super_kls, kls_name))

    # An otherwise-empty group needs a pass statement as its body.
    if with_pass:
        self.add_tokens_for_pass()

    self.groups.finish_signature()
Add the tokens for the group signature
async def get_entity(self):
    """
    Returns `entity` but will make an API call if necessary.
    """
    # Only fetch when no entity is cached and an input entity exists.
    if not self.entity and await self.get_input_entity():
        try:
            fetched = await self._client.get_entity(self._input_entity)
        except ValueError:
            # Lookup failed; keep whatever was cached (possibly None).
            pass
        else:
            self._entity = fetched
    return self._entity
Returns `entity` but will make an API call if necessary.
def save(self, *args, **kwargs):
    """Save animation into a movie file.

    [NOTE] If 'writer' is not specified, default writer defined
    in this module will be used to generate the movie file.

    [TODO] Implement docstring inheritance.
    """
    # Use the caller's writer when given, else fall back to this
    # module's default writer.  (Replaces a verbose `.keys()`
    # membership test and redundant `writer = None` initialisation.)
    writer = kwargs.pop('writer', default_writer)
    super().save(*args, writer=writer, **kwargs)
Save animation into a movie file. [NOTE] If 'writer' is not specified, default writer defined in this module will be used to generate the movie file. [TODO] Implement docstring inheritance.
def Draw(self, *args, **kwargs):
    """
    Loop over subfiles, draw each, and sum the output into a single
    histogram (graphs are appended instead of summed).

    Returns the combined output object, or None if no subfile produced
    any output.
    """
    self.reset()
    output = None
    # _rollover() presumably advances to the next subfile and returns
    # falsy when exhausted — confirm against its definition.
    while self._rollover():
        if output is None:
            # Make our own copy of the drawn histogram
            output = self._tree.Draw(*args, **kwargs)
            if output is not None:
                output = output.Clone()
                # Make it memory resident (histograms)
                if hasattr(output, 'SetDirectory'):
                    # Detach from the current ROOT file so the clone
                    # survives subsequent file rollovers.
                    output.SetDirectory(0)
        else:
            newoutput = self._tree.Draw(*args, **kwargs)
            if newoutput is not None:
                # Graphs accumulate via Append; histograms support +=.
                if isinstance(output, _GraphBase):
                    output.Append(newoutput)
                else:
                    # histogram
                    output += newoutput
    return output
Loop over subfiles, draw each, and sum the output into a single histogram.
def _map_tril_1d_on_2d(indices, dims): """Map 1d indices on lower triangular matrix in 2d. """ N = (dims * dims - dims) / 2 m = np.ceil(np.sqrt(2 * N)) c = m - np.round(np.sqrt(2 * (N - indices))) - 1 r = np.mod(indices + (c + 1) * (c + 2) / 2 - 1, m) + 1 return np.array([r, c], dtype=np.int64)
Map 1d indices on lower triangular matrix in 2d.
def _set_ghost_ios(self, vm):
    """
    Manages Ghost IOS support.

    Ensures a shared "ghost" IOS RAM image exists for this VM's IOS
    image (creating it once via a temporary ghost router), then points
    *vm* at the ghost file so multiple routers can share one image.

    :param vm: VM instance
    """
    if not vm.mmap:
        raise DynamipsError("mmap support is required to enable ghost IOS support")

    # Known incompatibility: skip ghost IOS for c7200 with NPE-G2.
    if vm.platform == "c7200" and vm.npe == "npe-g2":
        log.warning("Ghost IOS is not supported for c7200 with NPE-G2")
        return

    ghost_file = vm.formatted_ghost_file()
    module_workdir = vm.project.module_working_directory(self.module_name.lower())
    ghost_file_path = os.path.join(module_workdir, ghost_file)
    if ghost_file_path not in self._ghost_files:
        # create a new ghost IOS instance
        ghost_id = str(uuid4())
        ghost = Router("ghost-" + ghost_file, ghost_id, vm.project, vm.manager, platform=vm.platform, hypervisor=vm.hypervisor, ghost_flag=True)
        try:
            yield from ghost.create()
            yield from ghost.set_image(vm.image)
            # NOTE(review): ghost_status 1 appears to mean "generate the
            # ghost RAM file" — confirm against the Dynamips hypervisor docs.
            yield from ghost.set_ghost_status(1)
            yield from ghost.set_ghost_file(ghost_file_path)
            yield from ghost.set_ram(vm.ram)
            try:
                # A start/stop cycle makes the hypervisor write the
                # ghost file to disk; record it only on success.
                yield from ghost.start()
                yield from ghost.stop()
                self._ghost_files.add(ghost_file_path)
            except DynamipsError:
                raise
            finally:
                # The ghost router only exists to produce the file.
                yield from ghost.clean_delete()
        except DynamipsError as e:
            # Best-effort: a failed ghost creation is logged, not fatal.
            log.warn("Could not create ghost instance: {}".format(e))

    if vm.ghost_file != ghost_file and os.path.isfile(ghost_file_path):
        # set the ghost file to the router
        # NOTE(review): ghost_status 2 appears to mean "use an existing
        # ghost file" — confirm against the Dynamips hypervisor docs.
        yield from vm.set_ghost_status(2)
        yield from vm.set_ghost_file(ghost_file_path)
Manages Ghost IOS support. :param vm: VM instance
def request_verification(self, user, identity):
    """Send the user a verification email with a link to verify
    ownership of the email address.

    :param user: User id or object
    :param identity: Identity id or object
    :return: requests Response object
    """
    req = UserIdentityRequest(self)
    return req.put(self.endpoint.request_verification, user, identity)
Sends the user a verification email with a link to verify ownership of the email address. :param user: User id or object :param identity: Identity id or object :return: requests Response object
def get_item(self, path, project=None, file_name=None, download=None, scope_path=None, recursion_level=None, version_descriptor=None, include_content=None):
    """GetItem.
    Get Item Metadata and/or Content for a single item. The download parameter is to indicate whether the content should be available as a download or just sent as a stream in the response. Doesn't apply to zipped content which is always returned as a download.
    :param str path: Version control path of an individual item to return.
    :param str project: Project ID or project name
    :param str file_name: file name of item returned.
    :param bool download: If true, create a downloadable attachment.
    :param str scope_path: Version control path of a folder to return multiple items.
    :param str recursion_level: None (just the item), or OneLevel (contents of a folder).
    :param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor: Version descriptor.  Default is null.
    :param bool include_content: Set to true to include item content when requesting json. Default is false.
    :rtype: :class:`<TfvcItem> <azure.devops.v5_0.tfvc.models.TfvcItem>`
    """
    # Project is part of the URL route; every other argument is sent as
    # a query parameter, serialized only when supplied.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if path is not None:
        query_parameters['path'] = self._serialize.query('path', path, 'str')
    if file_name is not None:
        query_parameters['fileName'] = self._serialize.query('file_name', file_name, 'str')
    if download is not None:
        query_parameters['download'] = self._serialize.query('download', download, 'bool')
    if scope_path is not None:
        query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str')
    if recursion_level is not None:
        query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str')
    if version_descriptor is not None:
        # The version descriptor is flattened into three dotted query keys.
        if version_descriptor.version_option is not None:
            query_parameters['versionDescriptor.versionOption'] = version_descriptor.version_option
        if version_descriptor.version_type is not None:
            query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type
        if version_descriptor.version is not None:
            query_parameters['versionDescriptor.version'] = version_descriptor.version
    if include_content is not None:
        query_parameters['includeContent'] = self._serialize.query('include_content', include_content, 'bool')
    # location_id identifies the TFVC items endpoint; API version 5.0.
    response = self._send(http_method='GET',
                          location_id='ba9fc436-9a38-4578-89d6-e4f3241f5040',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('TfvcItem', response)
GetItem. Get Item Metadata and/or Content for a single item. The download parameter is to indicate whether the content should be available as a download or just sent as a stream in the response. Doesn't apply to zipped content which is always returned as a download. :param str path: Version control path of an individual item to return. :param str project: Project ID or project name :param str file_name: file name of item returned. :param bool download: If true, create a downloadable attachment. :param str scope_path: Version control path of a folder to return multiple items. :param str recursion_level: None (just the item), or OneLevel (contents of a folder). :param :class:`<TfvcVersionDescriptor> <azure.devops.v5_0.tfvc.models.TfvcVersionDescriptor>` version_descriptor: Version descriptor. Default is null. :param bool include_content: Set to true to include item content when requesting json. Default is false. :rtype: :class:`<TfvcItem> <azure.devops.v5_0.tfvc.models.TfvcItem>`
def _update_geography(self, countries, regions, cities, city_country_mapping):
    """ Update database with new countries, regions and cities

    Creates any country, region or city not already present in the
    database.  Existing records are loaded once and kept in sets so the
    membership tests inside the loops are O(1) instead of the original
    O(n) list scans (which made the whole update quadratic).

    :param countries: iterable of ISO country codes
    :param regions: iterable of dicts with 'name' and 'country__code'
    :param cities: iterable of dicts with 'id', 'name', 'region__name'
        and optional 'latitude'/'longitude'
    :param city_country_mapping: maps city id -> country code
    """
    existing_cities = set(City.objects.values_list('id', flat=True))
    # Compare regions by (name, country code) pairs rather than whole
    # dicts, so extra keys on incoming entries cannot defeat the check.
    existing_regions = {
        (entry['name'], entry['country__code'])
        for entry in Region.objects.values('name', 'country__code')
    }
    existing_countries = set(Country.objects.values_list('code', flat=True))

    for country_code in countries:
        if country_code not in existing_countries:
            Country.objects.create(code=country_code,
                                   name=ISO_CODES.get(country_code, country_code))

    for entry in regions:
        if (entry['name'], entry['country__code']) not in existing_regions:
            Region.objects.create(name=entry['name'],
                                  country_id=entry['country__code'])

    for entry in cities:
        if int(entry['id']) not in existing_cities:
            code = city_country_mapping.get(entry['id'])
            # Skip cities whose country cannot be resolved.
            if code:
                region = Region.objects.get(name=entry['region__name'],
                                            country__code=code)
                City.objects.create(id=entry['id'],
                                    name=entry['name'],
                                    region=region,
                                    latitude=entry.get('latitude'),
                                    longitude=entry.get('longitude'))
Update database with new countries, regions and cities
def thorium(opts, functions, runners):
    '''
    Load the thorium runtime modules
    '''
    pack = {
        '__salt__': functions,
        '__runner__': runners,
        '__context__': {},
    }
    loader = LazyLoader(
        _module_dirs(opts, 'thorium'),
        opts,
        tag='thorium',
        pack=pack,
    )
    # Give thorium modules access to each other through __thorium__.
    loader.pack['__thorium__'] = loader
    return loader
Load the thorium runtime modules
def accept(self):  # type: () -> str
    """The content-type for the response to the client.

    Returns:
        (str): The value of the header 'Accept' or the user-supplied
               SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT environment variable.
    """
    requested = self.headers.get('Accept')
    # Fall back to the configured default when the client sent no
    # preference or accepts anything.
    if not requested or requested == _content_types.ANY:
        return self._default_accept
    return requested
The content-type for the response to the client. Returns: (str): The value of the header 'Accept' or the user-supplied SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT environment variable.
def sample_indexes(segyfile, t0=0.0, dt_override=None):
    """
    Creates a list of values representing the samples in a trace at depth or
    time. The list starts at *t0* and is incremented with a *dt* for the
    number of samples. If a *dt_override* is not provided it will try to find
    a *dt* in the file.

    Parameters
    ----------
    segyfile : segyio.SegyFile
    t0 : float
        initial sample, or delay-recording-time
    dt_override : float or None

    Returns
    -------
    samples : array_like of float

    Notes
    -----
    .. versionadded:: 1.1
    """
    interval = dt(segyfile) if dt_override is None else dt_override
    sample_count = len(segyfile.samples)
    return [t0 + i * interval for i in range(sample_count)]
Creates a list of values representing the samples in a trace at depth or time. The list starts at *t0* and is incremented with a *dt* for the number of samples. If a *dt_override* is not provided it will try to find a *dt* in the file. Parameters ---------- segyfile : segyio.SegyFile t0 : float initial sample, or delay-recording-time dt_override : float or None Returns ------- samples : array_like of float Notes ----- .. versionadded:: 1.1
def distributive(self):
    """
    Return a term where the leading AND or OR terms are switched.

    This is done by applying the distributive laws:

        A & (B|C) = (A&B) | (A&C)
        A | (B&C) = (A|B) & (A|C)
    """
    # `dual` is the opposite operation class (AND <-> OR).
    dual = self.dual
    args = list(self.args)
    # Normalize every argument to a tuple of terms: arguments that are
    # instances of the dual operation are flattened into their own args,
    # plain arguments become a 1-tuple.
    for i, arg in enumerate(args):
        if isinstance(arg, dual):
            args[i] = arg.args
        else:
            args[i] = (arg,)
    # The cartesian product performs the distribution: every combination
    # of one term from each argument becomes one simplified sub-term.
    prod = itertools.product(*args)
    args = tuple(self.__class__(*arg).simplify() for arg in prod)
    if len(args) == 1:
        # A single combination needs no surrounding dual operation.
        return args[0]
    else:
        return dual(*args)
Return a term where the leading AND or OR terms are switched. This is done by applying the distributive laws: A & (B|C) = (A&B) | (A&C) A | (B&C) = (A|B) & (A|C)
def get_location(self, location_id: int, timeout: int = None):
    """Get a location information

    Parameters
    ----------
    location_id: int
        A location ID
        See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json
        for a list of acceptable location IDs
    timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    endpoint = '{}/{}'.format(self.api.LOCATIONS, location_id)
    return self._get_model(endpoint, timeout=timeout)
Get a location information Parameters ---------- location_id: int A location ID See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json for a list of acceptable location IDs timeout: Optional[int] = None Custom timeout that overwrites Client.timeout
def exec_command(
        client, container, command, interactive=True, stdout=None, stderr=None, stdin=None):
    """
    Run provided command via exec API in provided container.

    This is just a wrapper for PseudoTerminal(client, container).exec_command()

    :param client: Docker client used to create and drive the exec instance
    :param container: container in which the command is executed
    :param command: the command to run
    :param interactive: passed through to both exec creation and the operation
    :param stdout: optional stream for the command's stdout (operation default if None)
    :param stderr: optional stream for the command's stderr (operation default if None)
    :param stdin: optional stream for the command's stdin (operation default if None)
    """
    # Create the exec instance first, then hand it to a pseudo terminal
    # which pumps the attached streams until the command finishes.
    exec_id = exec_create(client, container, command, interactive=interactive)
    operation = ExecOperation(client, exec_id,
                              interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin)
    PseudoTerminal(client, operation).start()
Run provided command via exec API in provided container. This is just a wrapper for PseudoTerminal(client, container).exec_command()
def inis2dict(ini_paths: Union[str, Sequence[str]]) -> dict:
    """
    Take one or more ini files and return a dict with configuration from all,
    interpolating bash-style variables ${VAR} or ${VAR:-DEFAULT}.

    :param ini_paths: path or paths to .ini files
    :return: mapping of section name to a dict of that section's options
    :raises FileNotFoundError: if any given path is not an existing file
    """
    # ${VAR:-DEFAULT}: group 1 is the variable name, group 2 the default.
    var_dflt = r'\${(.*?):-(.*?)}'

    def _interpolate(content: str) -> str:
        # Expand plain ${VAR} references first.
        rv = expandvars(content)
        while True:
            match = re.search(var_dflt, rv)
            if match is None:
                return rv
            bash_var = '${{{}}}'.format(match.group(1))
            value = expandvars(bash_var)
            # expandvars() leaves unset variables untouched, so an unchanged
            # result means the variable is unset -> fall back to the default.
            repl = match.group(2) if value == bash_var else value
            # Splice the replacement in literally. The previous re.sub()
            # call treated backslashes and group references inside `repl`
            # as regex escapes, corrupting (or crashing on) such values.
            rv = rv[:match.start()] + repl + rv[match.end():]

    parser = ConfigParser()
    for ini in [ini_paths] if isinstance(ini_paths, str) else ini_paths:
        if not isfile(ini):
            raise FileNotFoundError('No such file: {}'.format(ini))
        with open(ini, 'r') as ini_fh:
            parser.read_string(_interpolate(ini_fh.read()))
    return {s: dict(parser[s].items()) for s in parser.sections()}
Take one or more ini files and return a dict with configuration from all, interpolating bash-style variables ${VAR} or ${VAR:-DEFAULT}. :param ini_paths: path or paths to .ini files
def get_terminal_size():
    '''Finds the size of the terminal, or returns a suitable default value.

    Probes the TIOCGWINSZ ioctl on stdin/stdout/stderr, then on the
    controlling terminal, and finally falls back to 25 rows x 80 columns
    (overridable through the ROWS/COLUMNS environment variables).

    Returns:
        (rows, columns) tuple of ints.
    '''
    def read_terminal_size_by_ioctl(fd):
        # Returns (columns, rows) for *fd*, or None when it is not a tty.
        try:
            import struct, fcntl, termios
            # Use the fd that was passed in (the original hardcoded fd 1,
            # making the 0/2/ctermid probes pointless), and a bytes buffer
            # (Python 3's ioctl rejects str buffers with a TypeError).
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, b'\x00' * 4))
        except ImportError:
            # Non-POSIX platform without fcntl/termios.
            return None
        except OSError:
            # fd is not a terminal, or the ioctl failed.
            return None
        return cr[1], cr[0]

    cr = read_terminal_size_by_ioctl(0) or \
         read_terminal_size_by_ioctl(1) or \
         read_terminal_size_by_ioctl(2)
    if not cr:
        # stdin/stdout/stderr may be redirected; ask the controlling tty.
        try:
            import os
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = read_terminal_size_by_ioctl(fd)
            os.close(fd)
        except (AttributeError, OSError):
            # No controlling terminal (or no os.ctermid on this platform).
            pass
    if not cr:
        import os
        cr = [80, 25]  # 25 rows, 80 columns is the default value
        if os.getenv('ROWS'):
            cr[1] = int(os.getenv('ROWS'))
        if os.getenv('COLUMNS'):
            cr[0] = int(os.getenv('COLUMNS'))
    return cr[1], cr[0]
Finds the width of the terminal, or returns a suitable default value.
def authenticate(self, email=None, password=None):
    """
    Attempt to authenticate the user against Lending Club.

    Parameters
    ----------
    email : string
        The email of a user on Lending Club
    password : string
        The user's password, for authentication.

    Returns
    -------
    boolean
        True if the user authenticated or raises an exception if not

    Raises
    ------
    session.AuthenticationError
        If authentication failed
    session.NetworkError
        If a network error occurred
    """
    authenticated = self.session.authenticate(email, password)
    if authenticated:
        return True
Attempt to authenticate the user. Parameters ---------- email : string The email of a user on Lending Club password : string The user's password, for authentication. Returns ------- boolean True if the user authenticated or raises an exception if not Raises ------ session.AuthenticationError If authentication failed session.NetworkError If a network error occurred
def input_from_history(a, n, bias=False):
    """
    This is function for creation of input matrix.

    **Args:**

    * `a` : series (1 dimensional array)

    * `n` : size of input matrix row (int). It means how many samples \
      of previous history you want to use \
      as the filter input. It also represents the filter length.

    **Kwargs:**

    * `bias` : decides if the bias is used (Boolean). If True, \
      array of all ones is appended as a last column to matrix `x`. \
      So matrix `x` has `n`+1 columns.

    **Returns:**

    * `x` : input matrix (2 dimensional array) \
      constructed from an array `a`. The length of `x` \
      is calculated as length of `a` - `n` + 1. \
      If the `bias` is used, then the amount of columns is `n`+1, \
      if not, then the amount of columns is `n`.

    **Raises:**

    * ValueError if `n` is not a positive int, or `a` cannot be \
      converted to a float array.
    """
    if not isinstance(n, int):
        raise ValueError('The argument n must be int.')
    if not n > 0:
        raise ValueError('The argument n must be greater than 0')
    try:
        a = np.array(a, dtype="float64")
    except Exception:
        # Narrowed from a bare except: only conversion failures are expected.
        raise ValueError('The argument a is not numpy array or similar.')
    # Sliding window of length n over the series.
    x = np.array([a[i:i+n] for i in range(len(a)-n+1)])
    if bias:
        # Append a column of ones for the bias term.
        x = np.vstack((x.T, np.ones(len(x)))).T
    return x
This is a function for creation of input matrix. **Args:** * `a` : series (1 dimensional array) * `n` : size of input matrix row (int). It means how many samples \ of previous history you want to use \ as the filter input. It also represents the filter length. **Kwargs:** * `bias` : decides if the bias is used (Boolean). If True, \ array of all ones is appended as a last column to matrix `x`. \ So matrix `x` has `n`+1 columns. **Returns:** * `x` : input matrix (2 dimensional array) \ constructed from an array `a`. The length of `x` \ is calculated as length of `a` - `n` + 1. \ If the `bias` is used, then the amount of columns is `n`+1, \ if not, then the amount of columns is `n`.
def template_scheduler_yaml(cl_args, masters):
  '''Render standalone/scheduler.yaml from its template.

  Points <scheduler_uri> at the first master's endpoint on port 4646.
  '''
  config_path = cl_args["config_path"]
  target = "%s/standalone/scheduler.yaml" % config_path
  template = "%s/standalone/templates/scheduler.template.yaml" % config_path
  replacements = {"<scheduler_uri>": "http://%s:4646" % masters[0]}
  template_file(template, target, replacements)
Template scheduler.yaml
def _addConfig(instance, config, parent_section): """ Writes a section for a plugin. Args: instance (object): Class instance for plugin config (object): Object (ConfigParser) which the current config parent_section (str): Parent section for plugin. Usually 'checkers' or 'reports' """ try: section_name = "{p}/{n}".format(p = parent_section, n=instance.NAME.lower()) config.add_section(section_name) for k in instance.CONFIG.keys(): config.set(section_name, k, instance.CONFIG[k]) except Exception as e: print "[!] %s" % e
Writes a section for a plugin. Args: instance (object): Class instance for plugin config (object): Object (ConfigParser) which the current config parent_section (str): Parent section for plugin. Usually 'checkers' or 'reports'
def process_line(self, idx, line):
    """Process a single line of the C header file.

    Translates ``///`` comments, ``#define`` constants and ``typedef``
    declarations; accumulated defines are flushed as a C# ``enum`` when
    the matching ``typedef char`` line is reached.

    :param idx: line index (unused here)
    :param line: raw line text, including the trailing newline
    :return: the translated Python line ('' when nothing is emitted)
    """
    if '///' in line:
        # Comment line: turn "///" into a Python "#" comment.
        py_line = '#' + line[3:]
        # A line like "/// ...FtdcInvestorRangeType是一个投资者范围类型" is turned into:
        # /// <summary>
        # /// 投资者范围类型
        # /// </summary>
        if py_line.find('是一个') > 0:
            # Type-description comment ('是一个' = "is a"): remember the summary
            # keyed by the Ftdc* name (text before 'Ftdc' is stripped so it can
            # be matched against the typedef later).
            self.enum_comment[py_line[py_line.find('Ftdc'):py_line.find('是一个')]] = '/// <summary>\n/// %s\n///</summary>' % py_line[py_line.find('是一个') + 3:-1]
        else:
            # Plain comment: keep it for the next #define ([:-1] drops the newline).
            self.tmp_comment = '/// <summary>\n\t/// {0}\n\t///</summary>\n\t'.format(line[3:-1])
    elif '#define' in line:
        # Constant definition, e.g.:
        # #define THOST_FTDC_IR_All '1'  ==>  defineDict["THOST_FTDC_IR_All"] = '1'
        content = line.split(' ')
        constant = content[1]
        if len(content) > 2:
            value = content[-1][:-1]  # value still carries the trailing \n
            py_line = 'defineDict["%s"] = %s\n' % (constant, value)
        else:
            py_line = ''
        # enum relate define
        if py_line:
            # Keep the original names unabbreviated for consistency.
            if len(value) > 3:
                # Handle values longer than a single quoted char, e.g. 102001.
                self.define.append("{2}{0} = {1},".format(constant, value[1:-1], self.tmp_comment))
            else:
                self.define.append("{2}{0} = (byte){1},".format(constant, value, self.tmp_comment))
    elif 'typedef' in line:
        # Type declaration, e.g.:
        # typedef char TThostFtdcInvestorRangeType;  ==>  typedefDict["TThostFtdcInvestorRangeType"] = "c_char"
        py_line = self.process_typedef(line)
        # When a "typedef char TThost...;" is seen, flush the collected
        # defines as a C# enum, e.g.:
        # public enum TThostFtdcInvestorRangeType : byte
        # {
        #     /// <summary>
        #     ///所有
        #     /// </summary>
        #     THOST_FTDC_IR_All = (byte)'1',
        #     ...
        # }
        if line.find(' char ') > 0 and line.find('[') < 0:
            # "TThostFtdcInvestorRangeType;" -> "FtdcInvestorRangeType"
            # ([6:-2] drops the leading "TThost" and the trailing ";\n").
            key = line.split(' ')[2][6:-2]
            enum_line = self.enum_comment[key]
            enum_line += '\npublic enum TThost%s : byte\n{\n' % key
            for l in self.define:
                enum_line += '\t%s\n' % l
            enum_line += '}\n\n'
            # Values like 102001 do not fit in a byte: fall back to int.
            if enum_line.find("(byte)") < 0:
                enum_line = enum_line.replace(': byte', ': int')
            self.fenum.write(enum_line)
            self.define.clear()
    elif line == '\n':
        # Blank line: pass it through unchanged.
        py_line = line
    else:
        py_line = ''
    return py_line
处理每行
def _cast_field(self, cast_to, value):
    """
    Convert field type from raw bytes to native python type

    :param cast_to: native python type to cast to
    :type cast_to: a type object (one of bytes, int, unicode (str for py3k))
    :param value: raw value from the database
    :type value: bytes
    :return: converted value
    :rtype: value of native python type (one of bytes, int, unicode (str for py3k))
    :raises InvalidData: if the bytes cannot be decoded with self.charset
    :raises TypeError: if cast_to is not a supported type
    """
    if cast_to in (int, long, str):
        return cast_to(value)
    elif cast_to == unicode:
        try:
            # bytes -> unicode goes through decode(); failures raise
            # UnicodeDecodeError. (The original caught UnicodeEncodeError,
            # which decode() never raises, so errors escaped uncaught.)
            value = value.decode(self.charset, self.errors)
        except UnicodeDecodeError as e:
            raise InvalidData("Error decoding unicode value '%s': %s" % (repr(value), e))
        return value
    elif cast_to in (any, bytes):
        return value
    else:
        raise TypeError("Invalid field type %s" % (cast_to))
Convert field type from raw bytes to native python type :param cast_to: native python type to cast to :type cast_to: a type object (one of bytes, int, unicode (str for py3k)) :param value: raw value from the database :type value: bytes :return: converted value :rtype: value of native python type (one of bytes, int, unicode (str for py3k))
def _periodically_flush_profile_events(self): """Drivers run this as a thread to flush profile data in the background.""" # Note(rkn): This is run on a background thread in the driver. It uses # the raylet client. This should be ok because it doesn't read # from the raylet client and we have the GIL here. However, # if either of those things changes, then we could run into issues. while True: # Sleep for 1 second. This will be interrupted if # self.threads_stopped is set. self.threads_stopped.wait(timeout=1) # Exit if we received a signal that we should stop. if self.threads_stopped.is_set(): return self.flush_profile_data()
Drivers run this as a thread to flush profile data in the background.
def _add_months(p_sourcedate, p_months): """ Adds a number of months to the source date. Takes into account shorter months and leap years and such. https://stackoverflow.com/questions/4130922/how-to-increment-datetime-month-in-python """ month = p_sourcedate.month - 1 + p_months year = p_sourcedate.year + month // 12 month = month % 12 + 1 day = min(p_sourcedate.day, calendar.monthrange(year, month)[1]) return date(year, month, day)
Adds a number of months to the source date. Takes into account shorter months and leap years and such. https://stackoverflow.com/questions/4130922/how-to-increment-datetime-month-in-python