code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def throw(self, type, value=None, traceback=None):
    """Raise an exception inside the wrapped generator.

    Delegates directly to the wrapped object's ``throw()`` and returns
    whatever it yields.
    """
    wrapped = self.__wrapped__
    return wrapped.throw(type, value, traceback)
Raise an exception in this element
def _resolve_value(self, name):
    """Returns an appropriate value for the given name.

    Resolution order:
    1. If ``name`` is a known, editable metadata element with a truthy
       stored value, return the stored value.
    2. Otherwise honour the element's ``populate_from`` setting, which may
       be a callable, a ``Literal``, or the name of another element
       (resolved recursively).
    3. Finally look the attribute up on ``self._metadata``, calling it when
       it is callable.
    """
    name = str(name)
    if name in self._metadata._meta.elements:
        element = self._metadata._meta.elements[name]
        if element.editable:
            value = getattr(self, name)
            if value:
                return value
        populate_from = element.populate_from
        # NOTE(review): collections.Callable is the pre-3.3 location; modern
        # Python requires collections.abc.Callable — confirm target runtime.
        if isinstance(populate_from, collections.Callable):
            return populate_from(self, **self._populate_from_kwargs())
        elif isinstance(populate_from, Literal):
            return populate_from.value
        elif populate_from is not NotSet:
            # populate_from names another element: resolve it recursively.
            return self._resolve_value(populate_from)
    try:
        value = getattr(self._metadata, name)
    except AttributeError:
        pass
    else:
        if isinstance(value, collections.Callable):
            # Bound methods receive this object; unbound callables receive
            # the metadata instance plus this object as the obj keyword.
            if getattr(value, '__self__', None):
                return value(self)
            else:
                return value(self._metadata, obj=self)
        return value
Returns an appropriate value for the given name.
def print(self, output_file=sys.stdout, log=False):
    """Print the solution stream.

    Writes each solution followed by SOLN_SEP, then a footer:
    SEARCH_COMPLETE when the search finished (status 0), or a status
    marker for no-solution/abnormal outcomes.  Captured stderr is echoed
    to sys.stderr; otherwise the solver log is printed when ``log`` is
    True.
    """
    # `print` here is the builtin: the method name lives on the class,
    # so the bare name inside the body does not shadow it.
    for soln in iter(self):
        print(soln, file=output_file)
        print(SOLN_SEP, file=output_file)
    if self.status == 0:
        print(SEARCH_COMPLETE, file=output_file)
    # Status 1 with zero solutions, or status >= 2, is an abnormal outcome.
    if (self.status == 1 and self._n_solns == 0) or self.status >= 2:
        print({
            Status.INCOMPLETE: ERROR,
            Status.UNKNOWN: UNKNOWN,
            Status.UNSATISFIABLE: UNSATISFIABLE,
            Status.UNBOUNDED: UNBOUNDED,
            Status.UNSATorUNBOUNDED: UNSATorUNBOUNDED,
            Status.ERROR: ERROR
        }[self.status], file=output_file)
    if self.stderr:
        print(self.stderr.strip(), file=sys.stderr)
    elif log:
        print(str(self.log), file=output_file)
Print the solution stream
def print_error(msg, color=True):
    """Print an error message.

    :param string msg: the message
    :param bool color: if ``True`` (and running on POSIX), wrap the
        message in ANSI error coloring
    """
    if color and is_posix():
        safe_print(u"%s[ERRO] %s%s" % (ANSI_ERROR, msg, ANSI_END))
        return
    safe_print(u"[ERRO] %s" % (msg))
Print an error message. :param string msg: the message :param bool color: if ``True``, print with POSIX color
def Printer(open_file=sys.stdout, closing=False):
    """Coroutine that writes each received string to ``open_file``, one
    per line.

    :param open_file: writable file-like object to print to.
    :param closing: when True, close ``open_file`` once the coroutine is
        closed (i.e. on GeneratorExit).
    """
    try:
        while True:
            logstr = (yield)
            open_file.write(logstr)
            open_file.write('\n')
    except GeneratorExit:
        if closing:
            try:
                open_file.close()
            # Bug fix: the bare `except:` here also swallowed SystemExit
            # and KeyboardInterrupt; only ignore ordinary close() failures.
            except Exception:
                pass
Coroutine that prints each received item to the given file, one per line (no timestamp is added); optionally closes the file when the generator exits.
def encrypt(self, plaintext, n=''):
    """Encrypt some plaintext.

    plaintext = a string of binary data
    n = the 'tweak' value when the chaining mode is XTS

    Marks the cipher direction as encrypting and delegates to the
    underlying chain; XTS mode additionally forwards the tweak.
    """
    self.ed = 'e'
    if self.mode == MODE_XTS:
        return self.chain.update(plaintext, 'e', n)
    return self.chain.update(plaintext, 'e')
Encrypt some plaintext plaintext = a string of binary data n = the 'tweak' value when the chaining mode is XTS The encrypt function will encrypt the supplied plaintext. The behavior varies slightly depending on the chaining mode. ECB, CBC: --------- When the supplied plaintext is not a multiple of the blocksize of the cipher, then the remaining plaintext will be cached. The next time the encrypt function is called with some plaintext, the new plaintext will be concatenated to the cache and then cache+plaintext will be encrypted. CFB, OFB, CTR: -------------- When the chaining mode allows the cipher to act as a stream cipher, the encrypt function will always encrypt all of the supplied plaintext immediately. No cache will be kept. XTS: ---- Because the handling of the last two blocks is linked, it needs the whole block of plaintext to be supplied at once. Every encrypt function called on a XTS cipher will output an encrypted block based on the current supplied plaintext block. CMAC: ----- Everytime the function is called, the hash from the input data is calculated. No finalizing needed. The hashlength is equal to block size of the used block cipher.
def get_peers_public_keys(self):
    """Returns the list of public keys for all peers.

    Peers without a resolvable public key are skipped.
    """
    with self._lock:
        peers_snapshot = copy.copy(self._peers)
        keys = []
        for peer in peers_snapshot:
            public_key = self._network.connection_id_to_public_key(peer)
            if public_key is not None:
                keys.append(public_key)
        return keys
Returns the list of public keys for all peers.
def allow_inbound_connection(self):
    """Determines if an additional incoming network connection
    should be permitted.

    Returns:
        bool: True when accepting one more connection keeps the total at
        or below the configured maximum.
    """
    LOGGER.debug("Determining whether inbound connection should "
                 "be allowed. num connections: %s max %s",
                 len(self._connections),
                 self._max_incoming_connections)
    # Bug fix: the original `max >= len(connections)` still admitted a new
    # connection when already at the maximum, overshooting it by one.
    return len(self._connections) < self._max_incoming_connections
Determines if an additional incoming network connection should be permitted. Returns: bool
def clear(self):
    """Remove all elements from this set.

    Also resets the n-gram index and the per-item length table.

    >>> from ngram import NGram
    >>> n = NGram(['spam', 'eggs'])
    >>> sorted(list(n))
    ['eggs', 'spam']
    >>> n.clear()
    >>> list(n)
    []
    """
    super(NGram, self).clear()
    self._grams = {}
    self.length = {}
Remove all elements from this set. >>> from ngram import NGram >>> n = NGram(['spam', 'eggs']) >>> sorted(list(n)) ['eggs', 'spam'] >>> n.clear() >>> list(n) []
def _get_msiexec(use_msiexec): if use_msiexec is False: return False, '' if isinstance(use_msiexec, six.string_types): if os.path.isfile(use_msiexec): return True, use_msiexec else: log.warning( "msiexec path '%s' not found. Using system registered " "msiexec instead", use_msiexec ) use_msiexec = True if use_msiexec is True: return True, 'msiexec'
Return if msiexec.exe will be used and the command to invoke it.
def _assert_category(self, category): category = category.lower() valid_categories = ['cable', 'broadcast', 'final', 'tv'] assert_msg = "%s is not a valid category." % (category) assert (category in valid_categories), assert_msg
Validate category argument
def download_preview(self, image, url_field='url'):
    """Download the binary data of an image attachment at preview size.

    :param image: the image attachment to download
    :param str url_field: the field of the image with the right URL
    :return: binary image data
    :rtype: bytes
    """
    return self.download(image, url_field=url_field, suffix='preview')
Download the binary data of an image attachment at preview size. :param str url_field: the field of the image with the right URL :return: binary image data :rtype: bytes
def measures(self): from ambry.valuetype.core import ROLE return [c for c in self.columns if c.role == ROLE.MEASURE]
Iterate over all measures
def connect(provider_id): provider = get_provider_or_404(provider_id) callback_url = get_authorize_callback('connect', provider_id) allow_view = get_url(config_value('CONNECT_ALLOW_VIEW')) pc = request.form.get('next', allow_view) session[config_value('POST_OAUTH_CONNECT_SESSION_KEY')] = pc return provider.authorize(callback_url)
Starts the provider connection OAuth flow
def fit_to_structure(self, structure, symprec=0.1): sga = SpacegroupAnalyzer(structure, symprec) symm_ops = sga.get_symmetry_operations(cartesian=True) return sum([self.transform(symm_op) for symm_op in symm_ops]) / len(symm_ops)
Returns a tensor that is invariant with respect to symmetry operations corresponding to a structure Args: structure (Structure): structure from which to generate symmetry operations symprec (float): symmetry tolerance for the Spacegroup Analyzer used to generate the symmetry operations
def get(self, key, default=None, index=-1, type=None):
    """Return the most recent value for a key.

    :param default: The default value to be returned if the key is not
        present or the type conversion fails.
    :param index: An index for the list of available values.
    :param type: If defined, this callable is used to cast the value into
        a specific type.  Exceptions are suppressed and result in the
        default value being returned.
    """
    try:
        val = self.dict[key][index]
        return type(val) if type else val
    # Bug fix: `except Exception, e` is Python-2-only syntax (and `e` was
    # unused); a missing key, bad index, or failed conversion all fall
    # back to the default.
    except Exception:
        return default
Return the most recent value for a key. :param default: The default value to be returned if the key is not present or the type conversion fails. :param index: An index for the list of available values. :param type: If defined, this callable is used to cast the value into a specific type. Exception are suppressed and result in the default value to be returned.
def populate_obj(self, obj=None, form=None): if not form: form = current_context.data.form if obj is None: obj = AttrDict() form.populate_obj(obj) return obj
Populates an object with the form's data
def step_interpolation(x, xp, fp, **kwargs):
    """Multi-dimensional step interpolation.

    Evaluates the step interpolant of the discrete data points (xp, fp)
    at x: values below xp[0] take fp[0]; values in [xp[i], xp[i+1]) take
    fp[i]; values at or above xp[-1] take fp[-1].

    Args:
        x: array of shape [*N], x-coordinates to interpolate at.
        xp: np.array of shape [D], increasing x-coordinates of the data.
        fp: np.array of shape [D, *M], y-coordinates of the data.
        **kwargs: unused, accepted for interface compatibility.

    Returns:
        float32 array of shape [*N, *M] with the interpolated values.
    """
    del kwargs
    boundaries = np.expand_dims(xp, -1)
    # One boolean row per half-open bucket [xp[i], xp[i+1]).
    in_bucket = (x >= boundaries[:-1]) & (x < boundaries[1:])
    conditions = np.concatenate(
        [[x < boundaries[0]], in_bucket, [x >= boundaries[-1]]])
    # Pad with fp[0] so the below-range row maps to the first value.
    padded_values = np.concatenate([[fp[0]], fp])
    # Exactly one condition row must hold per sample; otherwise xp was
    # not increasing.
    assert np.all(np.sum(conditions, 0) == 1), 'xp must be increasing.'
    bucket_index = np.argmax(conditions, 0)
    return padded_values[bucket_index].astype(np.float32)
Multi-dimensional step interpolation. Returns the multi-dimensional step interpolant to a function with given discrete data points (xp, fp), evaluated at x. Note that *N and *M indicate zero or more dimensions. Args: x: An array of shape [*N], the x-coordinates of the interpolated values. xp: An np.array of shape [D], the x-coordinates of the data points, must be increasing. fp: An np.array of shape [D, *M], the y-coordinates of the data points. **kwargs: Unused. Returns: An array of shape [*N, *M], the interpolated values.
def create_thumbnail(self, image, geometry, upscale=True, crop=None, colorspace='RGB'):
    """Basic thumbnailing pipeline: colorspace -> scale -> crop.

    :param image: the engine's Image object (e.g. PIL.Image for PIL).
    :param tuple geometry: target geometry as (x, y).
    :keyword crop: cropping offset string, e.g. '50% 50%' or '50px'.
    :keyword str colorspace: colorspace to convert to, e.g. 'RGB' or 'GRAY'.
    :returns: the thumbnailed image (type depends on the engine).
    """
    converted = self.colorspace(image, colorspace)
    scaled = self.scale(converted, geometry, upscale, crop)
    return self.crop(scaled, geometry, crop)
This serves as a really basic example of a thumbnailing method. You may want to implement your own logic, but this will work for simple cases. :param Image image: This is your engine's ``Image`` object. For PIL it's PIL.Image. :param tuple geometry: Geometry of the image in the format of (x,y). :keyword str crop: A cropping offset string. This is either one or two space-separated values. If only one value is specified, the cropping amount (pixels or percentage) for both X and Y dimensions is the amount given. If two values are specified, X and Y dimension cropping may be set independently. Some examples: '50% 50%', '50px 20px', '50%', '50px'. :keyword str colorspace: The colorspace to set/convert the image to. This is typically 'RGB' or 'GRAY'. :returns: The thumbnailed image. The returned type depends on your choice of Engine.
def selectAll( self ):
    """Selects all the items in the scene.

    An item is selected when it has no layer or belongs to the current
    layer.
    """
    active_layer = self._currentLayer
    for scene_item in self.items():
        item_layer = scene_item.layer()
        if item_layer == active_layer or not item_layer:
            scene_item.setSelected(True)
Selects all the items in the scene.
def FillDeviceCapabilities(device, descriptor):
    """Fill out device capabilities.

    Fills the HidCapabilities of the device into descriptor.

    Args:
        device: A handle to the open device
        descriptor: DeviceDescriptor to populate with the capabilities

    Returns:
        none

    Raises:
        WindowsError when unable to obtain capabilities.
    """
    preparsed_data = PHIDP_PREPARSED_DATA(0)
    ret = hid.HidD_GetPreparsedData(device, ctypes.byref(preparsed_data))
    if not ret:
        raise ctypes.WinError()
    try:
        caps = HidCapabilities()
        ret = hid.HidP_GetCaps(preparsed_data, ctypes.byref(caps))
        if ret != HIDP_STATUS_SUCCESS:
            raise ctypes.WinError()
        descriptor.usage = caps.Usage
        descriptor.usage_page = caps.UsagePage
        descriptor.internal_max_in_report_len = caps.InputReportByteLength
        descriptor.internal_max_out_report_len = caps.OutputReportByteLength
    finally:
        # Always release the preparsed data blob allocated by the HID
        # driver, even when HidP_GetCaps fails.
        hid.HidD_FreePreparsedData(preparsed_data)
Fill out device capabilities. Fills the HidCapabilitites of the device into descriptor. Args: device: A handle to the open device descriptor: DeviceDescriptor to populate with the capabilities Returns: none Raises: WindowsError when unable to obtain capabilitites.
def what(self):
    """May return a 'postponed' or 'rescheduled' string depending what the
    start and finish time of the event has been changed to.

    Compares the original occurrence (self.overrides at self.except_date)
    with the changed one (self.date / self.time_from / self.num_days):
    a later start means "Postponed"; an earlier start or a changed end
    means "Rescheduled"; otherwise None.
    """
    # Start times as naive datetimes (a consistent, comparable pair).
    originalFromDt = dt.datetime.combine(self.except_date,
                                         timeFrom(self.overrides.time_from))
    changedFromDt = dt.datetime.combine(self.date, timeFrom(self.time_from))
    # End times as timezone-aware datetimes; num_days - 1 converts a
    # duration in days into an offset from the start date.
    originalDaysDelta = dt.timedelta(days=self.overrides.num_days - 1)
    originalToDt = getAwareDatetime(self.except_date + originalDaysDelta,
                                    self.overrides.time_to, self.tz)
    changedDaysDelta = dt.timedelta(days=self.num_days - 1)
    # NOTE(review): the changed end is anchored on self.except_date, not
    # self.date — confirm this is intentional.
    changedToDt = getAwareDatetime(self.except_date + changedDaysDelta,
                                   self.time_to, self.tz)
    if originalFromDt < changedFromDt:
        return _("Postponed")
    elif originalFromDt > changedFromDt or originalToDt != changedToDt:
        return _("Rescheduled")
    else:
        return None
May return a 'postponed' or 'rescheduled' string depending what the start and finish time of the event has been changed to.
def _filter_child_model_fields(cls, fields): indexes_to_remove = set([]) for index1, field1 in enumerate(fields): for index2, field2 in enumerate(fields): if index1 < index2 and index1 not in indexes_to_remove and\ index2 not in indexes_to_remove: if issubclass(field1.related_model, field2.related_model): indexes_to_remove.add(index1) if issubclass(field2.related_model, field1.related_model): indexes_to_remove.add(index2) fields = [field for index, field in enumerate(fields) if index not in indexes_to_remove] return fields
Keep only related model fields. Example: Inherited models: A -> B -> C B has one-to-many relationship to BMany. after inspection BMany would have links to B and C. Keep only B. Parent model A could not be used (It would not be in fields) :param list fields: model fields. :return list fields: filtered fields.
def dbmin05years(self, value=None):
    """Corresponds to IDD Field `dbmin05years`

    5-year return period values for minimum extreme dry-bulb temperature.

    Args:
        value (float): value for IDD Field `dbmin05years` (unit: C).
            ``None`` means a missing value and is stored as-is.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is None:
        self._dbmin05years = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `dbmin05years`'.format(value))
    self._dbmin05years = converted
Corresponds to IDD Field `dbmin05years` 5-year return period values for minimum extreme dry-bulb temperature Args: value (float): value for IDD Field `dbmin05years` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
def get_downstream_causal_subgraph(graph, nbunch: Union[BaseEntity, Iterable[BaseEntity]]):
    """Induce a sub-graph over the downstream causal entities of the nodes
    in the nbunch.

    :type graph: pybel.BELGraph
    :rtype: pybel.BELGraph
    """
    edge_predicate = build_downstream_edge_predicate(nbunch)
    return get_subgraph_by_edge_filter(graph, edge_predicate)
Induce a sub-graph from all of the downstream causal entities of the nodes in the nbunch. :type graph: pybel.BELGraph :rtype: pybel.BELGraph
async def show_help(self): e = discord.Embed() messages = ['Welcome to the interactive paginator!\n'] messages.append('This interactively allows you to see pages of text by navigating with ' \ 'reactions. They are as follows:\n') for (emoji, func) in self.reaction_emojis: messages.append('%s %s' % (emoji, func.__doc__)) e.description = '\n'.join(messages) e.colour = 0x738bd7 e.set_footer(text='We were on page %s before this message.' % self.current_page) await self.bot.edit_message(self.message, embed=e) async def go_back_to_current_page(): await asyncio.sleep(60.0) await self.show_current_page() self.bot.loop.create_task(go_back_to_current_page())
shows this message
def languages(self): language = PageView.headers['Accept-Language'] first_language = fn.SubStr( language, 1, fn.StrPos(language, ';')) return (self.get_query() .select(first_language, fn.Count(PageView.id)) .group_by(first_language) .order_by(fn.Count(PageView.id).desc()) .tuples())
Retrieve languages, sorted by most common to least common. The Accept-Languages header sometimes looks weird, i.e. "en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi- colon.
def graphql_impl(
    schema,
    source,
    root_value,
    context_value,
    variable_values,
    operation_name,
    field_resolver,
    type_resolver,
    middleware,
    execution_context_class,
) -> AwaitableOrValue[ExecutionResult]:
    """Execute a query, return asynchronously only if necessary.

    Pipeline: validate the schema, parse the source, validate the document,
    then execute.  Each failing stage short-circuits into an
    ExecutionResult with ``data=None`` and the collected errors.
    """
    # 1. A structurally invalid schema never executes.
    schema_validation_errors = validate_schema(schema)
    if schema_validation_errors:
        return ExecutionResult(data=None, errors=schema_validation_errors)
    # 2. Parse; non-GraphQL parser exceptions are wrapped as GraphQLError.
    try:
        document = parse(source)
    except GraphQLError as error:
        return ExecutionResult(data=None, errors=[error])
    except Exception as error:
        error = GraphQLError(str(error), original_error=error)
        return ExecutionResult(data=None, errors=[error])
    # 3. Validate the document against the schema.  Imported locally,
    # presumably to avoid a circular import with the validation package.
    from .validation import validate
    validation_errors = validate(schema, document)
    if validation_errors:
        return ExecutionResult(data=None, errors=validation_errors)
    # 4. Execute; may return an awaitable when resolvers are asynchronous.
    return execute(
        schema,
        document,
        root_value,
        context_value,
        variable_values,
        operation_name,
        field_resolver,
        type_resolver,
        middleware,
        execution_context_class,
    )
Execute a query, return asynchronously only if necessary.
def new(cls, mode, size, color=0, depth=8, **kwargs): header = cls._make_header(mode, size, depth) image_data = ImageData.new(header, color=color, **kwargs) return cls(PSD( header=header, image_data=image_data, image_resources=ImageResources.new(), ))
Create a new PSD document. :param mode: The color mode to use for the new image. :param size: A tuple containing (width, height) in pixels. :param color: What color to use for the image. Default is black. :return: A :py:class:`~psd_tools.api.psd_image.PSDImage` object.
def get_token(self, request): return stripe.Token.create( card={ "number": request.data["number"], "exp_month": request.data["exp_month"], "exp_year": request.data["exp_year"], "cvc": request.data["cvc"] } )
Create a stripe token for a card
def delete(self, **kwargs):
    """Deletes the document if it is saved in the collection.

    Note that failures are *returned* as error objects rather than
    raised; an invalid document yields None implicitly.
    """
    if self.is_valid:
        if '_id' in self._document:
            to_delete = self.find_one({'_id': self._id})
            if to_delete:
                # A truthy before_delete() result aborts the deletion and
                # is returned to the caller.
                before = self.before_delete()
                if before:
                    return before
                try:
                    self.delete_one({'_id': self._id})
                    self.after_delete()
                    return self._document
                except PyMongoException as exc:
                    return PyMongoError(
                        error_message=exc.details.get(
                            'errmsg', exc.details.get(
                                'err', 'PyMongoError.'
                            )
                        ),
                        operation='delete',
                        collection=type(self).__name__,
                        document=self._document,
                    )
            else:
                return DocumentNotFoundError(type(self).__name__, self._id)
        else:
            return UnidentifiedDocumentError(
                type(self).__name__, self._document
            )
Deletes the document if it is saved in the collection.
def extract_lrzip (archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a LRZIP archive.

    Builds and returns the lrzip decompression command list; the archive
    is decompressed into a single output file inside ``outdir``.
    """
    args = [cmd, '-d']
    if verbosity > 1:
        args.append('-v')
    target = util.get_single_outfile(outdir, archive)
    args += ['-o', target, os.path.abspath(archive)]
    return args
Extract a LRZIP archive.
def cmd_logcat(self, *args): self.check_requirements() serial = self.serials[0:] if not serial: return filters = self.buildozer.config.getrawdefault( "app", "android.logcat_filters", "", section_sep=":", split_char=" ") filters = " ".join(filters) self.buildozer.environ['ANDROID_SERIAL'] = serial[0] self.buildozer.cmd('{adb} logcat {filters}'.format(adb=self.adb_cmd, filters=filters), cwd=self.buildozer.global_platform_dir, show_output=True) self.buildozer.environ.pop('ANDROID_SERIAL', None)
Show the log from the device
def do_load(self, arg): from os import path import json fullpath = path.expanduser(arg) if path.isfile(fullpath): with open(fullpath) as f: data = json.load(f) for stagepath in data["tests"]: self.do_parse(stagepath) self.args = data["args"]
Loads a saved session variables, settings and test results to the shell.
def _to_pandas(ob): if isinstance(ob, (pd.Series, pd.DataFrame)): return ob if ob.ndim == 1: return pd.Series(ob) elif ob.ndim == 2: return pd.DataFrame(ob) else: raise ValueError( 'cannot convert array of dim > 2 to a pandas structure', )
Convert an array-like to a pandas object. Parameters ---------- ob : array-like The object to convert. Returns ------- pandas_structure : pd.Series or pd.DataFrame The correct structure based on the dimensionality of the data.
def _url(self, uri): prefix = "{}://{}{}".format( self._protocol, self.real_connection.host, self._port_postfix(), ) return uri.replace(prefix, '', 1)
Returns request selector url from absolute URI
def parse_xml(self, node): self._set_properties(node) self.extend(TiledObject(self.parent, child) for child in node.findall('object')) return self
Parse an Object Group from ElementTree xml node :param node: ElementTree xml node :return: self
def put(self, name_to_val):
    """Loads a dictionary of variable names into the matlab com client.

    Each value is first pushed as a full matrix; values the COM client
    rejects as matrices fall back to the generic workspace setter.
    """
    self._check_open()
    # Bug fix: .iteritems() is Python-2-only; .items() works everywhere.
    for name, val in name_to_val.items():
        try:
            self.client.PutFullMatrix(name, 'base', val, None)
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only COM/value failures should fall back.
        except Exception:
            self.client.PutWorkspaceData(name, 'base', val)
Loads a dictionary of variable names into the matlab com client.
def require_condition(cls, expr, message, *format_args, **format_kwds):
    """Used to assert a certain state.

    If the expression renders a false value, an exception of type ``cls``
    is raised with the supplied message.

    :param: expr: A boolean value indicating an evaluated expression
    :param: message: The failure message to attach to the raised exception
    :param: format_args: Format arguments. Follows str.format convention
    :param: format_kwds: Format keyword args. Follows str.format convention
    """
    if expr:
        return
    raise cls(message, *format_args, **format_kwds)
Used to assert a certain state. If the expression renders a false value, an exception will be raised with the supplied message :param: message: The failure message to attach to the raised Buzz :param: expr: A boolean value indicating an evaluated expression :param: format_args: Format arguments. Follows str.format convention :param: format_kwds: Format keyword args. Follows str.format convention
def append_note(self, note, root, scale=0): root_val = note_to_val(root) note_val = note_to_val(note) - root_val + scale * 12 if note_val not in self.components: self.components.append(note_val) self.components.sort()
Append a note to quality :param str note: note to append on quality :param str root: root note of chord :param int scale: key scale
def delete_all_banks(self): for file in glob(str(self.data_path) + "/*.json"): Persistence.delete(file)
Delete all bank files. Utility for manual saving, because it isn't possible to know which banks were removed.
def set_led(self, led_id, color): if not set_leds_color(self.corsair_sdk, LedColor(led_id, *color)): self._raise_corsair_error() return True
Set color of an led :param led_id: id of led to set color :type led_id: int :param color: list of rgb values of new colors. eg. [255, 255, 255] :type color: list :returns: true if successful :rtype: bool
def create_307_response(self): request = get_current_request() msg_mb = UserMessageMember(self.message) coll = request.root['_messages'] coll.add(msg_mb) qs = self.__get_new_query_string(request.query_string, self.message.slug) resubmit_url = "%s?%s" % (request.path_url, qs) headers = [('Warning', '299 %s' % self.message.text), ] http_exc = HttpWarningResubmit(location=resubmit_url, detail=self.message.text, headers=headers) return request.get_response(http_exc)
Creates a 307 "Temporary Redirect" response including a HTTP Warning header with code 299 that contains the user message received during processing the request.
def get_sorted_pointlist(self):
    """Make sure that the points and strokes are in order.

    Returns
    -------
    list
        A list of all strokes in the recording, each stroke a list of
        dicts {'time': 123, 'x': 45, 'y': 67}.  Points within a stroke
        are sorted by time, and strokes are sorted by their first point's
        time.
    """
    strokes = self.get_pointlist()
    for index, stroke in enumerate(strokes):
        strokes[index] = sorted(stroke, key=lambda point: point['time'])
    return sorted(strokes, key=lambda stroke: stroke[0]['time'])
Make sure that the points and strokes are in order. Returns ------- list A list of all strokes in the recording. Each stroke is represented as a list of dicts {'time': 123, 'x': 45, 'y': 67}
def get_meta_clusters(self, clusters):
    """Returns a dictionary keyed off of meta cluster names, where the
    values are lists of clusters associated with the meta cluster name.

    Meta cluster names with no entry in ``self.meta_clusters`` are logged
    as errors and removed from the mapping.
    """
    meta_clusters = collections.defaultdict(list)
    for cluster in clusters:
        if not cluster.meta_cluster:
            continue
        meta_clusters[cluster.meta_cluster].append(cluster)
    unconfigured_meta_clusters = [
        name for name in meta_clusters.keys()
        if name not in self.meta_clusters
    ]
    for name in unconfigured_meta_clusters:
        # Bug fix: the format string had no argument, so the log line
        # always printed the literal "%s" instead of the cluster name.
        logger.error("Meta cluster %s not configured!", name)
        del meta_clusters[name]
    return meta_clusters
Returns a dictionary keyed off of meta cluster names, where the values are lists of clusters associated with the meta cluster name. If a meta cluster name doesn't have a port defined in the `meta_cluster_ports` attribute an error is given and the meta cluster is removed from the mapping.
def listDatasetParents(self, dataset=""): if( dataset == "" ): dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/listDatasetParents. Child Dataset name is required.") conn = self.dbi.connection() try: result = self.datasetparentlist.execute(conn, dataset) return result finally: if conn: conn.close()
takes required dataset parameter returns only parent dataset name
def get_group_headers(self, table_name, group_name):
    """Return the index labels (headers) of all rows in a table whose
    'group' column equals ``group_name``.
    """
    table = self.dm[table_name]
    in_group = table['group'] == group_name
    return table[in_group].index
Return a list of all headers for a given group
def deep_merge(*dicts):
    """Recursively merge all input dicts into a single dict.

    Later dicts win on conflicting non-dict values; nested dicts are
    merged recursively.  Raises when any argument is not a dict.
    """
    merged = {}
    for current in dicts:
        if not isinstance(current, dict):
            raise Exception('Can only deep_merge dicts, got {}'.format(current))
        for key, value in current.items():
            if isinstance(value, dict):
                value = deep_merge(merged.get(key, {}), value)
            merged[key] = value
    return merged
Recursively merge all input dicts into a single dict.
def normalize(x:TensorImage, mean:FloatTensor,std:FloatTensor)->TensorImage: "Normalize `x` with `mean` and `std`." return (x-mean[...,None,None]) / std[...,None,None]
Normalize `x` with `mean` and `std`.
def dateint_to_weekday(dateint, first_day='Monday'): weekday_ix = dateint_to_datetime(dateint).weekday() return (weekday_ix - WEEKDAYS.index(first_day)) % 7
Returns the weekday of the given dateint. Arguments --------- dateint : int An integer object depicting a specific calendaric day; e.g. 20161225. first_day : str, default 'Monday' The first day of the week. Returns ------- int The weekday of the given dateint, when first day of the week = 0, last day of the week = 6. Example ------- >>> dateint_to_weekday(20170213) 0 >>> dateint_to_weekday(20170212) 6 >>> dateint_to_weekday(20170214) 1 >>> dateint_to_weekday(20170212, 'Sunday') 0 >>> dateint_to_weekday(20170214, 'Sunday') 2
def address_(): ret = {} cmd = 'hciconfig' out = __salt__['cmd.run'](cmd).splitlines() dev = '' for line in out: if line.startswith('hci'): comps = line.split(':') dev = comps[0] ret[dev] = { 'device': dev, 'path': '/sys/class/bluetooth/{0}'.format(dev), } if 'BD Address' in line: comps = line.split() ret[dev]['address'] = comps[2] if 'DOWN' in line: ret[dev]['power'] = 'off' if 'UP RUNNING' in line: ret[dev]['power'] = 'on' return ret
Get the many addresses of the Bluetooth adapter CLI Example: .. code-block:: bash salt '*' bluetooth.address
def sha1sum(filename):
    """Calculates sha1 hash of a file.

    The file is read in fixed-size chunks so arbitrarily large files can
    be hashed without loading them into memory.
    """
    digest = hashlib.sha1()
    chunk_size = 128 * digest.block_size
    with open(filename, 'rb') as stream:
        while True:
            chunk = stream.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
Calculates sha1 hash of a file
def _pack_output(self, init_parameter): output = {} for i, param in enumerate(init_parameter): output[self.key_order[i]] = param return output
Pack the output Parameters ---------- init_parameter : dict Returns ------- output : dict
def buffer_to_audio(buffer: bytes) -> np.ndarray:
    """Convert a raw mono audio byte string (16-bit little-endian PCM) to
    a numpy float32 array scaled to [-1.0, 1.0).
    """
    # Bug fix: np.fromstring is deprecated for binary data (and removed in
    # newer numpy); np.frombuffer is the supported equivalent.  astype()
    # copies, so the read-only frombuffer view is never mutated.
    int_samples = np.frombuffer(buffer, dtype='<i2')
    return int_samples.astype(np.float32, order='C') / 32768.0
Convert a raw mono audio byte string to numpy array of floats
def optimize(self): with open(LIBRARIES_FILE, 'r') as f: libs_data = json.load(f) for alg, libs_names in libs_data.items(): libs = self.get_libs(alg) if not libs: continue self.libs[alg] = [lib for lib in libs if [lib.module_name, lib.func_name] in libs_names] self.libs[alg].sort(key=lambda lib: libs_names.index([lib.module_name, lib.func_name]))
Sort algorithm implementations by speed.
def _make_futures(futmap_keys, class_check, make_result_fn): futmap = {} for key in futmap_keys: if class_check is not None and not isinstance(key, class_check): raise ValueError("Expected list of {}".format(type(class_check))) futmap[key] = concurrent.futures.Future() if not futmap[key].set_running_or_notify_cancel(): raise RuntimeError("Future was cancelled prematurely") f = concurrent.futures.Future() f.add_done_callback(lambda f: make_result_fn(f, futmap)) if not f.set_running_or_notify_cancel(): raise RuntimeError("Future was cancelled prematurely") return f, futmap
Create futures and a futuremap for the keys in futmap_keys, and create a request-level future to be passed to the C API.
def codes_get_long_array(handle, key, size): values = ffi.new('long[]', size) size_p = ffi.new('size_t *', size) _codes_get_long_array(handle, key.encode(ENC), values, size_p) return list(values)
Get long array values from a key. :param bytes key: the keyword whose value(s) are to be extracted :rtype: List(int)
def create_instructor_answer(self, post, content, revision, anonymous=False): try: cid = post["id"] except KeyError: cid = post params = { "cid": cid, "type": "i_answer", "content": content, "revision": revision, "anonymous": "yes" if anonymous else "no", } return self._rpc.content_instructor_answer(params)
Create an instructor's answer to a post `post`. It seems like if the post has `<p>` tags, then it's treated as HTML, but is treated as text otherwise. You'll want to provide `content` accordingly. :type post: dict|str|int :param post: Either the post dict returned by another API method, or the `cid` field of that post. :type content: str :param content: The content of the answer. :type revision: int :param revision: The number of revisions the answer has gone through. The first responder should out 0, the first editor 1, etc. :type anonymous: bool :param anonymous: Whether or not to post anonymously. :rtype: dict :returns: Dictionary with information about the created answer.
def start_tty(self, conf, interactive): try: api = conf.harpoon.docker_context_maker().api container_id = conf.container_id stdin = conf.harpoon.tty_stdin stdout = conf.harpoon.tty_stdout stderr = conf.harpoon.tty_stderr if callable(stdin): stdin = stdin() if callable(stdout): stdout = stdout() if callable(stderr): stderr = stderr() dockerpty.start(api, container_id, interactive=interactive, stdout=stdout, stderr=stderr, stdin=stdin) except KeyboardInterrupt: pass
Startup a tty
def get_sigmoid_parameters(self, filename='shlp_sigmoid_factors.csv'):
    """Retrieve the sigmoid parameters from csv-files.

    Selects the row matching this object's building class, shlp type and
    wind class, and returns its (a, b, c, d) coefficients; d is 0 when
    warm water is not included.

    Parameters
    ----------
    filename : string
        name of file where sigmoid factors are stored
    """
    file = os.path.join(self.datapath, filename)
    sigmoid = pd.read_csv(file, index_col=0)
    sigmoid = sigmoid.query(
        'building_class=={0} and '.format(self.building_class) +
        'shlp_type=="{0}" and '.format(self.shlp_type) +
        'wind_impact=={0}'.format(self.wind_class))
    # Fix: float(Series) is deprecated in pandas; select the single
    # matching row explicitly with .iloc[0].
    a = float(sigmoid['parameter_a'].iloc[0])
    b = float(sigmoid['parameter_b'].iloc[0])
    c = float(sigmoid['parameter_c'].iloc[0])
    if self.ww_incl:
        d = float(sigmoid['parameter_d'].iloc[0])
    else:
        d = 0
    return a, b, c, d
Retrieve the sigmoid parameters from csv-files Parameters ---------- filename : string name of file where sigmoid factors are stored
def bam_conversion(job, samfile, sample_type, univ_options, samtools_options): work_dir = os.getcwd() input_files = { sample_type + '.sam': samfile} input_files = get_files_from_filestore(job, input_files, work_dir, docker=True) bamfile = '/'.join([work_dir, sample_type + '.bam']) parameters = ['view', '-bS', '-o', docker_path(bamfile), input_files[sample_type + '.sam'] ] docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=samtools_options['version']) output_file = job.fileStore.writeGlobalFile(bamfile) job.fileStore.deleteGlobalFile(samfile) job.fileStore.logToMaster('Ran sam2bam on %s:%s successfully' % (univ_options['patient'], sample_type)) return output_file
Convert a sam to a bam. :param dict samfile: The input sam file :param str sample_type: Description of the sample to inject into the filename :param dict univ_options: Dict of universal options used by almost all tools :param dict samtools_options: Options specific to samtools :return: fsID for the generated bam :rtype: toil.fileStore.FileID
def _get_response(self, parse_result=True): self.vw_process.expect_exact('\r\n', searchwindowsize=-1) if parse_result: output = self.vw_process.before result_struct = VWResult(output, active_mode=self.active_mode) else: result_struct = None return result_struct
If 'parse_result' is False, ignore the received output and return None.
def unique_string(length=UUID_LENGTH): string = str(uuid4()) * int(math.ceil(length / float(UUID_LENGTH))) return string[:length] if length else string
Generate a unique string
def format_datetime(date):
    """Convert datetime to a UTC ISO 8601 string with a trailing 'Z'.

    Naive datetimes are emitted as-is (assumed to already be UTC); aware
    datetimes are shifted to UTC and the offset dropped.
    """
    offset = date.utcoffset()
    if offset is None:
        # Naive datetime: no conversion possible, assume UTC.
        return date.isoformat() + 'Z'
    shifted_to_utc = (date - offset).replace(tzinfo=None)
    return shifted_to_utc.isoformat() + 'Z'
Convert datetime to UTC ISO 8601
def _split_after_delimiter(self, item, indent_amt): self._delete_whitespace() if self.fits_on_current_line(item.size): return last_space = None for item in reversed(self._lines): if ( last_space and (not isinstance(item, Atom) or not item.is_colon) ): break else: last_space = None if isinstance(item, self._Space): last_space = item if isinstance(item, (self._LineBreak, self._Indent)): return if not last_space: return self.add_line_break_at(self._lines.index(last_space), indent_amt)
Split the line only after a delimiter.
def authenticate(self, request): jwt_value = self.get_jwt_value(request) if jwt_value is None: return None try: payload = jwt_decode_handler(jwt_value) except jwt.ExpiredSignature: msg = _('Signature has expired.') raise exceptions.AuthenticationFailed(msg) except jwt.DecodeError: msg = _('Error decoding signature.') raise exceptions.AuthenticationFailed(msg) except jwt.InvalidTokenError: raise exceptions.AuthenticationFailed() user = self.authenticate_credentials(payload) return (user, jwt_value)
Returns a two-tuple of `User` and token if a valid signature has been supplied using JWT-based authentication. Otherwise returns `None`.
def serve_forever(args=None): class Unbuffered(object): def __init__(self, stream): self.stream = stream def write(self, data): self.stream.write(data) self.stream.flush() def __getattr__(self, attr): return getattr(self.stream, attr) sys.stdout = Unbuffered(sys.stdout) sys.stderr = Unbuffered(sys.stderr) server = JsonServer(args=args) server.serve_forever()
Creates the server and serves forever :param args: Optional args if you decided to use your own argument parser. Default is None to let the JsonServer setup its own parser and parse command line arguments.
def prepare_dispatches(cls, message, recipients=None):
    """Creates Dispatch models for a given message and return them.

    :param Message message: Message model instance
    :param list|None recipients: a list of Recipient objects; defaults to
        the current subscribers when omitted
    :return: list of created Dispatch models
    :rtype: list
    """
    recipient_list = recipients or cls.get_subscribers()
    return Dispatch.create(message, recipient_list)
Creates Dispatch models for a given message and return them. :param Message message: Message model instance :param list|None recipients: A list or Recipient objects :return: list of created Dispatch models :rtype: list
def search(self, pattern, count=None):
    """Search the keys of the given hash using the specified pattern.

    :param str pattern: Pattern used to match keys.
    :param int count: Limit number of results returned.
    :returns: An iterator yielding matching key/value pairs.
    """
    scan_kwargs = {'match': pattern, 'count': count}
    return self._scan(**scan_kwargs)
Search the keys of the given hash using the specified pattern. :param str pattern: Pattern used to match keys. :param int count: Limit number of results returned. :returns: An iterator yielding matching key/value pairs.
def get_requires(self, requires_types):
    """Extract requires of the given types from the metadata file.

    Windows-specific requirements (entries whose ``environment``
    mentions ``win``) are filtered out.

    :param requires_types: a requires-section name, or an iterable of
        such names (e.g. ``'run_requires'``).
    :return: flat list of extracted requirement strings.
    """
    if isinstance(requires_types, str):
        # BUG FIX: list('run_requires') would explode a single string
        # into individual characters; wrap it in a list instead.
        requires_types = [requires_types]
    elif not isinstance(requires_types, list):
        requires_types = list(requires_types)

    extracted_requires = []
    for requires_name in requires_types:
        for requires in self.json_metadata.get(requires_name, []):
            # Skip windows-specific requirements.
            if 'win' in requires.get('environment', {}):
                continue
            extracted_requires.extend(requires['requires'])
    return extracted_requires
Extracts requires of given types from metadata file, filter windows specific requires.
def backup_restore(cls, block_id, impl, working_dir):
    """Restore state from a backup, given the virtualchain implementation
    module and block number.

    NOT THREAD SAFE.  DO NOT CALL WHILE INDEXING.

    :param block_id: block number whose backup should be restored
    :param impl: virtualchain implementation module
    :param working_dir: directory holding the state and backup files
    :return: True on success
    :raises IOError: if a backup file is missing
    """
    backup_dir = config.get_backups_directory(impl, working_dir)
    backup_paths = cls.get_backup_paths(block_id, impl, working_dir)

    # Verify every expected backup file exists before touching any state.
    # An explicit raise survives `python -O`, unlike the old `assert`.
    for p in backup_paths:
        if not os.path.exists(p):
            raise IOError("No such backup file: {}".format(p))

    for p in cls.get_state_paths(impl, working_dir):
        pbase = os.path.basename(p)
        backup_path = os.path.join(backup_dir, pbase + (".bak.{}".format(block_id)))
        log.debug("Restoring '{}' to '{}'".format(backup_path, p))
        shutil.copy(backup_path, p)

    return True
Restore from a backup, given the virtualchain implementation module and block number. NOT THREAD SAFE. DO NOT CALL WHILE INDEXING. Return True on success Raise exception on error, i.e. if a backup file is missing
def read_stack_qwords(self, count, offset=0):
    """Read QWORDs from the top of the stack.

    @type  count: int
    @param count: Number of QWORDs to read.

    @type  offset: int
    @param offset: Offset from the stack pointer to begin reading.

    @rtype:  tuple( int... )
    @return: Tuple of integers read from the stack.

    @raise WindowsError: Could not read the requested data.
    """
    raw = self.read_stack_data(count * 8, offset)
    # '<NQ' is equivalent to '<' + 'Q'*N: little-endian unsigned 64-bit.
    return struct.unpack('<%dQ' % count, raw)
Reads QWORDs from the top of the stack. @type count: int @param count: Number of QWORDs to read. @type offset: int @param offset: Offset from the stack pointer to begin reading. @rtype: tuple( int... ) @return: Tuple of integers read from the stack. @raise WindowsError: Could not read the requested data.
def merge_odd_even_csu_configurations(conf_odd, conf_even):
    """Merge CSU configurations using odd- and even-numbered values.

    The returned CSU configuration takes the odd-numbered bar values
    from `conf_odd` and the even-numbered values from `conf_even`.

    Parameters
    ----------
    conf_odd : CsuConfiguration instance
        CSU configuration corresponding to odd-numbered slitlets.
    conf_even : CsuConfiguration instance
        CSU configuration corresponding to even-numbered slitlets.

    Returns
    -------
    merged_conf : CsuConfiguration instance
        CSU configuration resulting from the merging process.
    """
    merged_conf = deepcopy(conf_odd)
    for i in range(EMIR_NBARS):
        if (i + 1) % 2 != 0:
            continue  # odd-numbered bars keep the conf_odd values
        merged_conf._csu_bar_left[i] = conf_even._csu_bar_left[i]
        merged_conf._csu_bar_right[i] = conf_even._csu_bar_right[i]
        merged_conf._csu_bar_slit_center[i] = conf_even._csu_bar_slit_center[i]
        merged_conf._csu_bar_slit_width[i] = conf_even._csu_bar_slit_width[i]
    return merged_conf
Merge CSU configuration using odd- and even-numbered values. The CSU returned CSU configuration include the odd-numbered values from 'conf_odd' and the even-numbered values from 'conf_even'. Parameters ---------- conf_odd : CsuConfiguration instance CSU configuration corresponding to odd-numbered slitlets. conf_even : CsuConfiguration instance CSU configuration corresponding to even-numbered slitlets. Returns ------- merged_conf : CsuConfiguration instance CSU configuration resulting from the merging process.
def handle_context_missing(self):
    """Called whenever there is no trace entity to access or mutate.

    Always logs the problem; additionally raises when the recorder is
    configured with context_missing == 'RUNTIME_ERROR'.
    """
    log.error(MISSING_SEGMENT_MSG)
    if self.context_missing == 'RUNTIME_ERROR':
        raise SegmentNotFoundException(MISSING_SEGMENT_MSG)
Called whenever there is no trace entity to access or mutate.
def session_ended(self, f):
    """Decorator that routes an Alexa SessionEndedRequest to *f*.

    The wrapped function is registered as the session_ended view
    function and renders the response for requests that end the
    session::

        @ask.session_ended
        def session_ended():
            return "{}", 200

    :param f: session_ended view function
    :return: *f* unchanged, so it can still be called directly.
    """
    self._session_ended_view_func = f
    # The previous implementation built (and immediately discarded) an
    # unused @wraps(f) closure here; registering f is all that's needed.
    return f
Decorator routes Alexa SessionEndedRequest to the wrapped view function to end the skill. @ask.session_ended def session_ended(): return "{}", 200 The wrapped function is registered as the session_ended view function and renders the response for requests to the end of the session. Arguments: f {function} -- session_ended view function
def delete_user(self, user_id):
    """Delete the specified user.

    :param str user_id: the ID of the user to delete (Required)
    :returns: void
    """
    admin_api = self._get_api(iam.AccountAdminApi)
    admin_api.delete_user(user_id)
Delete user specified user. :param str user_id: the ID of the user to delete (Required) :returns: void
def camel_to_snake(s: str) -> str:
    """Convert *s* from camel case to snake case."""
    with_underscores = CAMEL_CASE_RE.sub(r'_\1', s)
    return with_underscores.strip().lower()
Convert string from camel case to snake case.
def skip_if_empty(func):
    """Decorator for validation functions which makes them pass (return
    None) if the value passed in is the EMPTY sentinel value.
    """
    @partial_safe_wraps(func)
    def inner(value, *args, **kwargs):
        if value is not EMPTY:
            return func(value, *args, **kwargs)
    return inner
Decorator for validation functions which makes them pass if the value passed in is the EMPTY sentinel value.
def miniaturize(self):
    """Try to scrape the diminutive ('verkleinwoord') from vandale.nl.

    :return: list of matched diminutives; [''] when the 'NN' element
        exists but contains none; [None] when no 'NN' element was found.
    """
    element = self._first('NN')
    if not element:
        return [None]
    # Run the regex once instead of search() followed by findall().
    matches = re.findall(r'verkleinwoord: (\w+)', element, re.U)
    return matches if matches else ['']
Tries to scrape the diminutive form ('verkleinwoord') from vandale.nl.
def suspend_processes(self, as_group, scaling_processes=None):
    """Suspends Auto Scaling processes for an Auto Scaling group.

    :type as_group: string
    :param as_group: The auto scaling group to suspend processes on.

    :type scaling_processes: list
    :param scaling_processes: Processes you want to suspend. If omitted,
        all processes will be suspended.
    """
    request_params = {'AutoScalingGroupName': as_group}
    if scaling_processes:
        self.build_list_params(
            request_params, scaling_processes, 'ScalingProcesses')
    return self.get_status('SuspendProcesses', request_params)
Suspends Auto Scaling processes for an Auto Scaling group. :type as_group: string :param as_group: The auto scaling group to suspend processes on. :type scaling_processes: list :param scaling_processes: Processes you want to suspend. If omitted, all processes will be suspended.
def iter_factories():
    """Iterate through all factories identified by the factory entrypoint.

    Factories are yielded in decreasing `priority` order (factories
    without a `priority` attribute default to -1000).

    Yields:
        function: A function that accepts a :class:`.Specification` and
            returns a :class:`.PenaltyModel`.
    """
    def _priority(factory):
        return getattr(factory, 'priority', -1000)

    loaded = [entry.load() for entry in iter_entry_points(FACTORY_ENTRYPOINT)]
    for factory in sorted(loaded, key=_priority, reverse=True):
        yield factory
Iterate through all factories identified by the factory entrypoint. Yields: function: A function that accepts a :class:`.Specification` and returns a :class:`.PenaltyModel`.
def get_info(domain_name):
    """Returns information about the requested domain.

    Returns a dictionary of information about domain_name (or an empty
    list when the API request fails).

    domain_name
        string, domain name to get information about

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.get_info my-domain-name
    """
    opts = salt.utils.namecheap.get_opts('namecheap.domains.getinfo')
    opts['DomainName'] = domain_name

    response_xml = salt.utils.namecheap.get_request(opts)
    if response_xml is None:
        return []

    result_node = response_xml.getElementsByTagName("DomainGetInfoResult")[0]
    return salt.utils.namecheap.xml_to_dict(result_node)
Returns information about the requested domain returns a dictionary of information about the domain_name domain_name string Domain name to get information about CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains.get_info my-domain-name
def SPI_write(self, chip_select, data):
    """Writes data to the SPI device selected by the chip-select bit."""
    payload = [chip_select] + list(data)
    return self.bus.write_i2c_block(self.address, payload)
Writes data to SPI device selected by chipselect bit.
def add(self, cards, end=TOP):
    """Add the given cards to the chosen end of the Stack.

    :arg cards: The cards to add to the ``Stack``. Can be a single
        ``Card`` instance, or a ``list`` of cards.
    :arg str end: The end of the ``Stack`` to add the cards to. Can be
        ``TOP`` ("top") or ``BOTTOM`` ("bottom").
    """
    if end is TOP:
        try:
            self.cards += cards
        except TypeError:
            # `cards` is a single Card, not an iterable of cards.
            # (The old bare `except:` hid unrelated errors as well.)
            self.cards += [cards]
    elif end is BOTTOM:
        try:
            self.cards.extendleft(cards)
        except TypeError:
            self.cards.extendleft([cards])
Adds the given cards to the chosen end of the Stack. :arg cards: The cards to add to the ``Stack``. Can be a single ``Card`` instance, or a ``list`` of cards. :arg str end: The end of the ``Stack`` to add the cards to. Can be ``TOP`` ("top") or ``BOTTOM`` ("bottom").
def _split_date(self, time): if isinstance(time, str): month, day, year = [int(t) for t in re.split(r'-|/', time)] if year < 100: year += 2000 time = date(year, month, day) return time.strftime('%Y-%m-%dT%H:%M:%SZ')
Split apart a date string.
def nanrankdata(a, axis=-1, inplace=False):
    """Rank data for arrays containing NaN values.

    NaN entries keep their NaN value; all other entries are replaced by
    their rank within the slice.

    Parameters
    ----------
    a : array_like
        Array of values (floats; integer dtypes are rejected).
    axis : int, optional
        Axis value. Defaults to ``-1``.
    inplace : bool, optional
        Rank in place when possible. Defaults to ``False``.

    Returns
    -------
    array_like
        Ranked array.
    """
    from scipy.stats import rankdata

    if hasattr(a, "dtype") and issubdtype(a.dtype, integer):
        raise ValueError("Integer type is not supported.")

    if isinstance(a, (tuple, list)):
        if inplace:
            raise ValueError("Can't use `inplace=True` for {}.".format(type(a)))
        a = asarray(a, float)

    orig_shape = a.shape
    if a.ndim == 1:
        # Promote to 2-D so the axis-swap below is well-defined.
        a = a.reshape(orig_shape + (1,))

    if not inplace:
        a = a.copy()

    def _rank_column(column):
        valid = ~isnan(column)
        column[valid] = rankdata(column[valid])
        return column

    a = a.swapaxes(1, axis)
    a = apply_along_axis(_rank_column, 0, a)
    a = a.swapaxes(1, axis)

    return a.reshape(orig_shape)
Rank data for arrays containing NaN values. Parameters ---------- a : array_like Array of values. axis : int, optional Axis value. Defaults to `-1`. inplace : bool, optional Defaults to `False`. Returns ------- array_like Ranked array. Examples -------- .. doctest:: >>> from numpy_sugar import nanrankdata >>> from numpy import arange >>> >>> X = arange(15).reshape((5, 3)).astype(float) >>> print(nanrankdata(X)) [[1. 1. 1.] [2. 2. 2.] [3. 3. 3.] [4. 4. 4.] [5. 5. 5.]]
def move_parent_up(self):
    """Move the cursor up to the comment's parent.

    If the comment is top-level, jump to the previous top-level comment.
    """
    cursor = self.nav.absolute_index
    if cursor <= 0:
        # Already at the first item; nothing above to jump to.
        self.term.flash()
    else:
        # Treat top-level comments (level 0) as level 1 so the scan
        # below stops at the previous top-level comment.
        level = max(self.content.get(cursor)['level'], 1)
        while self.content.get(cursor - 1)['level'] >= level:
            self._move_cursor(-1)
            cursor -= 1
        self._move_cursor(-1)

    self.clear_input_queue()
Move the cursor up to the comment's parent. If the comment is top-level, jump to the previous top-level comment.
def _FetchServerCertificate(self):
    """Attempts to fetch the server cert.

    Returns:
      True if we succeed (or already have one).
    """
    if self.server_certificate:
        return True

    response = self.http_manager.OpenServerEndpoint(
        "server.pem", verify_cb=self.VerifyServerPEM)
    if not response.Success():
        # Back off; the endpoint isn't reachable/valid yet.
        self.timer.SlowPoll()
        return False

    self.server_certificate = response.data
    return True
Attempts to fetch the server cert. Returns: True if we succeed.
def _get_sorted_inputs(filename):
    """Read and sort lines from the file sorted by decreasing length.

    Args:
      filename: String name of file to read inputs from.

    Returns:
      Sorted list of inputs, and dictionary mapping original
      index->sorted index of each element.
    """
    with tf.gfile.Open(filename) as f:
        inputs = [line.strip() for line in f.read().split("\n")]
    # Drop a trailing empty line (file ending in a newline).
    if not inputs[-1]:
        inputs.pop()

    # Stable sort of the original indices by token count, longest first.
    order = sorted(
        range(len(inputs)),
        key=lambda idx: len(inputs[idx].split()),
        reverse=True)

    sorted_inputs = [inputs[idx] for idx in order]
    sorted_keys = {original: new for new, original in enumerate(order)}
    return sorted_inputs, sorted_keys
Read and sort lines from the file sorted by decreasing length. Args: filename: String name of file to read inputs from. Returns: Sorted list of inputs, and dictionary mapping original index->sorted index of each element.
def callback(func):
    """Turn *func* into a callback that Tkinter can use, even if *func*
    is an awaitable coroutine function.

    If calling *func* produces a coroutine, it is scheduled on the
    asyncio loop; otherwise the plain return value is passed through to
    Tkinter.

    :param func: plain function or coroutine function.
    :return: a sync wrapper suitable as a Tkinter callback.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        result = func(*args, **kwargs)
        if inspect.iscoroutine(result):
            aio_loop.create_task(result)
            return None
        # BUG FIX: the result used to be discarded unconditionally,
        # which broke sync handlers returning "break" to stop Tkinter
        # event propagation.
        return result
    return wrapped
This decorator turns `func` into a callback for Tkinter to be able to use, even if `func` is an awaitable coroutine.
def vrelg(v1, v2, ndim):
    """Return the relative difference between two vectors of general
    dimension.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrelg_c.html

    :param v1: First vector
    :type v1: Array of floats
    :param v2: Second vector
    :type v2: Array of floats
    :param ndim: Dimension of v1 and v2.
    :type ndim: int
    :return: the relative difference between v1 and v2.
    :rtype: float
    """
    first = stypes.toDoubleVector(v1)
    second = stypes.toDoubleVector(v2)
    dim = ctypes.c_int(ndim)
    return libspice.vrelg_c(first, second, dim)
Return the relative difference between two vectors of general dimension. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vrelg_c.html :param v1: First vector :type v1: Array of floats :param v2: Second vector :type v2: Array of floats :param ndim: Dimension of v1 and v2. :type ndim: int :return: the relative difference between v1 and v2. :rtype: float
def validate(self, **kwargs):
    """Validate each entry (passing the provided arguments down to
    them), after resolving all cross-references between the entries.
    """
    self.check_crossrefs()
    for entry in self.values():
        entry.validate(**kwargs)
Validates each entry (passing the provided arguments down to them and also tries to resolve all cross-references between the entries.
def set_weights(self, new_weights):
    """Sets the weights to new_weights.

    Note:
        Can set subsets of variables as well, by only passing in the
        variables you want to be set.

    Args:
        new_weights (Dict): Dictionary mapping variable names to their
            weights.
    """
    self._check_sess()

    assign_list = []
    feed_dict = {}
    for name, value in new_weights.items():
        if name in self.assignment_nodes:
            assign_list.append(self.assignment_nodes[name])
        if name in self.placeholders:
            feed_dict[self.placeholders[name]] = value

    assert assign_list, ("No variables in the input matched those in the "
                         "network. Possible cause: Two networks were "
                         "defined in the same TensorFlow graph. To fix "
                         "this, place each network definition in its own "
                         "tf.Graph.")
    self.sess.run(assign_list, feed_dict=feed_dict)
Sets the weights to new_weights. Note: Can set subsets of variables as well, by only passing in the variables you want to be set. Args: new_weights (Dict): Dictionary mapping variable names to their weights.
def run(self):
    """Set up the desired services and run the requested action."""
    self.addServices()
    self.catalogServers(self.hendrix)

    action_name = self.action
    fd_option = self.options['fd']

    if action_name.startswith('start'):
        chalk.blue(self._listening_message())
        getattr(self, action_name)(fd_option)
        try:
            self.reactor.run()
        finally:
            # Always clean up the PID directory, even after a crash.
            shutil.rmtree(PID_DIR, ignore_errors=True)
    elif action_name == 'restart':
        getattr(self, action_name)(fd=fd_option)
    else:
        getattr(self, action_name)()
sets up the desired services and runs the requested action
def to_dict(self):
    """Return a plain-dict snapshot of this node.

    Useful for providing arguments to templates.  Attributes that are
    callables are invoked with no arguments; the parent is serialized
    recursively.

    :API: public
    """
    keys = ('name', 'cmd', 'id', 'start_time', 'end_time', 'outcome',
            'start_time_string', 'start_delta_string')
    ret = {}
    for key in keys:
        val = getattr(self, key)
        # Idiom: callable() replaces the old hasattr(val, '__call__').
        ret[key] = val() if callable(val) else val
    ret['parent'] = self.parent.to_dict() if self.parent else None
    return ret
Useful for providing arguments to templates. :API: public
def clean_caches(path):
    """Removes all python cache files recursively on a path.

    Deletes ``*.pyc`` files and ``__pycache__`` directories.

    :param path: the path
    :return: None
    """
    for dirname, _subdirs, filelist in os.walk(path):
        for fname in filelist:
            # BUG FIX: match the '.pyc' extension, not any filename
            # that merely ends with the letters 'pyc' (e.g. 'mypyc').
            if fname.endswith('.pyc'):
                try:
                    os.remove(os.path.join(dirname, fname))
                except FileNotFoundError:
                    # Already gone (e.g. concurrent cleanup); ignore.
                    pass
        if dirname.endswith('__pycache__'):
            shutil.rmtree(dirname)
Removes all python cache files recursively on a path. :param path: the path :return: None
def getAngle(self, mode='deg'):
    """Return the bend (deflecting) angle.

    :param mode: 'deg' or 'rad'
    :return: deflecting angle in degrees when mode == 'deg', otherwise
        in radians; 0 when the bend flag is not set.
    """
    if self.refresh is True:
        self.getMatrix()
    try:
        if not self.mflag:
            return 0
        if mode == 'deg':
            return self.bangle / np.pi * 180
        return self.bangle
    except AttributeError:
        # bangle/mflag only exist after getMatrix() has run.
        print("Please execute getMatrix() first.")
return bend angle :param mode: 'deg' or 'rad' :return: deflecting angle in degrees when mode='deg', otherwise in radians
def log_normalize(a, axis=None):
    """Normalize *a* in place so that exp(a) sums to 1 along *axis*.

    Parameters
    ----------
    a : array
        Non-normalized input data in log space.  Modified **inplace**.
    axis : int, optional
        Dimension along which normalization is performed; ``None``
        normalizes over the whole array.
    """
    with np.errstate(under="ignore"):
        # BUG FIX: keepdims makes the subtraction broadcast correctly
        # for any axis.  The old `a_lse[:, np.newaxis]` only worked for
        # axis=1 on 2-D input and crashed for axis=None (scalar lse).
        a_lse = logsumexp(a, axis, keepdims=True)
    a -= a_lse
Normalizes the input array so that the exponentiated values sum to 1. Parameters ---------- a : array Non-normalized input data. axis : int Dimension along which normalization is performed. Notes ----- Modifies the input **inplace**.
def _verify_run(out, cmd=None):
    """Crash to the log if command execution was not successful.

    :param out: result dict with 'retcode' and 'stderr' keys.
    :param cmd: the command that was run, logged for context if given.
    :raises CommandExecutionError: when retcode is non-zero and stderr
        is non-empty.
    """
    retcode = out.get('retcode', 0)
    if retcode and out['stderr']:
        if cmd:
            log.debug('Command: \'%s\'', cmd)
        log.debug('Return code: %s', retcode)
        log.debug('Error output:\n%s', out.get('stderr', 'N/A'))
        raise CommandExecutionError(out['stderr'])
Crash to the log if command execution was not successful.
def send_facebook(self, token):
    """Tells the server which Facebook account this client uses.

    After sending, the server takes some time to get the data from
    Facebook.  Seems to be broken in recent versions of the game.
    """
    char_codes = [ord(ch) for ch in token]
    self.send_struct('<B%iB' % len(token), 81, *char_codes)
    self.facebook_token = token
Tells the server which Facebook account this client uses. After sending, the server takes some time to get the data from Facebook. Seems to be broken in recent versions of the game.