code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def end_of_line(event):
    """Move the cursor of the current buffer to the end of the line."""
    buff = event.current_buffer
    # get_end_of_line_position() returns a relative offset from the cursor.
    buff.cursor_position += buff.document.get_end_of_line_position()
Move to the end of the line.
def iterator(self):
    """Iterate the queryset, attaching the default language to every object."""
    # _default_language may be unset on a fresh queryset; propagate None then.
    default_language = getattr(self, '_default_language', None)
    for obj in super(MultilingualModelQuerySet, self).iterator():
        obj._default_language = default_language
        yield obj
Add the default language information to all returned objects.
def add_group(self, groupname, statements):
    """Add a host group with the given statements via OMAPI.

    :type groupname: bytes
    :type statements: str
    :raises OmapiError: if the server does not answer with OMAPI_OP_UPDATE
    """
    msg = OmapiMessage.open(b"group")
    # The "create" flag asks the server to create rather than look up the group.
    msg.message.append(("create", struct.pack("!I", 1)))
    msg.obj.append(("name", groupname))
    msg.obj.append(("statements", statements))
    response = self.query_server(msg)
    if response.opcode != OMAPI_OP_UPDATE:
        raise OmapiError("add group failed")
Adds a group @type groupname: bytes @type statements: str
def diagonal_line(xi=None, yi=None, *, ax=None, c=None, ls=None, lw=None, zorder=3):
    """Plot a y = x diagonal line over the overlap of the x and y ranges.

    Parameters
    ----------
    xi, yi : 1D array-like, optional
        The x / y axis points. If None, taken from the axis limits.
    ax : axis, optional
        Axis to plot on. If None, the current axis is used.
    c, ls, lw : optional
        Line color / style / width; default derive from rcParams grid settings.
    zorder : number, optional
        Matplotlib zorder. Default is 3.

    Returns
    -------
    list of matplotlib.lines.Line2D
        The plotted line.
    """
    if ax is None:
        ax = plt.gca()
    if xi is None:
        xi = ax.get_xlim()
    if yi is None:
        yi = ax.get_ylim()
    if c is None:
        c = matplotlib.rcParams["grid.color"]
    if ls is None:
        ls = matplotlib.rcParams["grid.linestyle"]
    if lw is None:
        lw = matplotlib.rcParams["grid.linewidth"]
    # (Removed a duplicated, dead `if ax is None` check that could never fire
    # after the identical check above.)
    # The diagonal only spans where the x and y ranges overlap.
    diag_min = max(min(xi), min(yi))
    diag_max = min(max(xi), max(yi))
    line = ax.plot([diag_min, diag_max], [diag_min, diag_max],
                   c=c, ls=ls, lw=lw, zorder=zorder)
    return line
Plot a diagonal line. Parameters ---------- xi : 1D array-like (optional) The x axis points. If None, taken from axis limits. Default is None. yi : 1D array-like The y axis points. If None, taken from axis limits. Default is None. ax : axis (optional) Axis to plot on. If none is supplied, the current axis is used. c : string (optional) Line color. Default derives from rcParams grid color. ls : string (optional) Line style. Default derives from rcParams linestyle. lw : float (optional) Line width. Default derives from rcParams linewidth. zorder : number (optional) Matplotlib zorder. Default is 3. Returns ------- matplotlib.lines.Line2D object The plotted line.
def decode(self, data, password=None):
    """Decode a GNTP message.

    :param string data: raw GNTP message text
    :param password: optional password used when validating the message
    """
    self.password = password
    self.raw = gntp.shim.u(data)
    # The header section is everything before the first blank line.
    parts = self.raw.split('\r\n\r\n')
    self.info = self._parse_info(self.raw)
    self.headers = self._parse_dict(parts[0])
Decode GNTP Message :param string data:
def as_dict(self, quiet: bool = False, infer_type_and_cast: bool = False):
    """Represent the parameters as a dict (e.g. to pass to PyTorch code).

    Parameters
    ----------
    quiet : bool, optional (default = False)
        If True, skip logging the parameters before returning them.
    infer_type_and_cast : bool, optional (default = False)
        If True, infer types and cast (e.g. things that look like floats
        to floats).
    """
    if infer_type_and_cast:
        params_as_dict = infer_and_cast(self.params)
    else:
        params_as_dict = self.params
    if quiet:
        return params_as_dict

    def log_recursively(parameters, history):
        # Walk nested dicts, logging leaves with a dotted key path.
        for key, value in parameters.items():
            if isinstance(value, dict):
                new_local_history = history + key + "."
                log_recursively(value, new_local_history)
            else:
                logger.info(history + key + " = " + str(value))

    logger.info("Converting Params object to dict; logging of default "
                "values will not occur when dictionary parameters are "
                "used subsequently.")
    logger.info("CURRENTLY DEFINED PARAMETERS: ")
    log_recursively(self.params, self.history)
    return params_as_dict
Sometimes we need to just represent the parameters as a dict, for instance when we pass them to PyTorch code. Parameters ---------- quiet: bool, optional (default = False) Whether to log the parameters before returning them as a dict. infer_type_and_cast : bool, optional (default = False) If True, we infer types and cast (e.g. things that look like floats to floats).
def _is_socket(cls, stream): try: fd = stream.fileno() except ValueError: return False sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW) try: sock.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE) except socket.error as ex: if ex.args[0] != errno.ENOTSOCK: return True else: return True
Check if the given stream is a socket.
def get_setting(*args, **kwargs):
    """Return the first Django setting found among *args*.

    If none is found and ``raise_error`` is truthy, raise
    ImproperlyConfigured; otherwise return ``default_value`` (or None).
    """
    for name in args:
        if hasattr(settings, name):
            return getattr(settings, name)
    if kwargs.get('raise_error', False):
        # NOTE(review): `url` is presumably a module-level docs-URL template
        # -- confirm it is defined at file scope.
        setting_url = url % args[0].lower().replace('_', '-')
        raise ImproperlyConfigured('Please make sure you specified at '
            'least one of these settings: %s \r\nDocumentation: %s'
            % (args, setting_url))
    return kwargs.get('default_value', None)
Get a setting and raise an appropriate user friendly error if the setting is not found.
def _response(self, pdu):
    """Route an incoming datagram to the actor for its source address."""
    if _debug: UDPDirector._debug("_response %r", pdu)
    addr = pdu.pduSource
    peer = self.peers.get(addr, None)
    if not peer:
        # First datagram from this address: create a new actor for it.
        peer = self.actorClass(self, addr)
    peer.response(pdu)
Incoming datagrams are routed through an actor.
def walk(self, oid, host, port, community):
    """Perform an SNMP walk on a given OID.

    Returns a dict mapping OID strings to pretty-printed values.
    """
    ret = {}
    if not isinstance(oid, tuple):
        oid = self._convert_to_oid(oid)
    host = socket.gethostbyname(host)
    snmpAuthData = cmdgen.CommunityData(
        'agent-{}'.format(community), community)
    snmpTransportData = cmdgen.UdpTransportTarget(
        (host, port),
        int(self.config['timeout']),
        int(self.config['retries']))
    resultTable = self.snmpCmdGen.nextCmd(snmpAuthData,
                                          snmpTransportData, oid)
    # nextCmd returns a 4-tuple; index 3 is the variable binding table.
    varBindTable = resultTable[3]
    for varBindTableRow in varBindTable:
        for o, v in varBindTableRow:
            ret[str(o)] = v.prettyPrint()
    return ret
Perform an SNMP walk on a given OID
def init_word_db(cls, name, text):
    """Initialize a database of words for the maker with the given name.

    Builds a Markov-style mapping from each adjacent word pair to the
    words that follow it in *text*.
    """
    cleaned = text.replace('\n', ' ').replace('\r', ' ')
    words = [token.strip() for token in cleaned.split(' ') if token.strip()]
    assert len(words) > 2, 'Database text sources must contain 3 or more words.'
    freqs = {}
    # zip over three staggered views yields every consecutive word triple.
    for first, second, follower in zip(words, words[1:], words[2:]):
        freqs.setdefault((first, second), []).append(follower)
    cls._dbs[name] = {
        'freqs': freqs,
        'words': words,
        'word_count': len(words) - 2,
    }
Initialize a database of words for the maker with the given name
def blt(f: List[SYM], x: List[SYM]) -> Dict[str, Any]: J = ca.jacobian(f, x) nblock, rowperm, colperm, rowblock, colblock, coarserow, coarsecol = J.sparsity().btf() return { 'J': J, 'nblock': nblock, 'rowperm': rowperm, 'colperm': colperm, 'rowblock': rowblock, 'colblock': colblock, 'coarserow': coarserow, 'coarsecol': coarsecol }
Sort equations by dependence
def _debugGraph(self):
    """Internal util: print the size and triples of the rdflib graph."""
    print("Len of graph: ", len(self.rdflib_graph))
    for x, y, z in self.rdflib_graph:
        print(x, y, z)
internal util to print out contents of graph
def parse_log_line(line):
    """Parse a bcbio-nextgen log line.

    :param line: str - a line from a bcbio-nextgen log
    :returns dict: the line plus its step ('error' for Tracebacks, the step
        name for "Timing:" lines with a parsed 'when' timestamp, else None)
    """
    matches = re.search(r'^\[([^\]]+)\] ([^:]+: .*)', line)
    error = re.search(r'Traceback', line)
    if error:
        return {'line': line, 'step': 'error'}
    if not matches:
        return {'line': line, 'step': None}
    tstamp = matches.group(1)
    msg = matches.group(2)
    # Only "Timing:" lines mark the start of a new step.
    if not msg.find('Timing: ') >= 0:
        return {'line': line, 'step': None}
    # assumes timestamps look like 2017-01-01T12:30Z -- TODO confirm format
    when = datetime.strptime(tstamp, '%Y-%m-%dT%H:%MZ').replace(
        tzinfo=pytz.timezone('UTC'))
    step = msg.split(":")[-1].strip()
    return {'line': line, 'step': step, 'when': when}
Parses a log line and returns it with more information :param line: str - A line from a bcbio-nextgen log :returns dict: A dictionary containing the line, whether it is a new step, a Traceback, or whether the analysis is finished
def draw(board, term, cells):
    # Draw a board to the terminal.
    # NOTE: Python 2 code (print statement, dict.iteritems) -- kept as-is.
    for (x, y), state in board.iteritems():
        with term.location(x, y):
            print cells[state],
Draw a board to the terminal.
def reverse(self):
    """Return the source sequence reversed (deferred execution).

    The whole source sequence is consumed once execution commences.

    Raises:
        ValueError: If the Queryable is closed().
    """
    if self.closed():
        raise ValueError("Attempt to call reverse() on a "
            "closed Queryable.")
    try:
        # Fast path: the iterable supports reversed() directly.
        r = reversed(self._iterable)
        return self._create(r)
    except TypeError:
        pass
    # Fallback: fully consume the source and reverse it lazily.
    return self._create(self._generate_reverse_result())
Returns the sequence reversed. Note: This method uses deferred execution, but the whole source sequence is consumed once execution commences. Returns: The source sequence in reverse order. Raises: ValueError: If the Queryable is closed().
def vector_str(p, decimal_places=2, print_zero=True):
    """Pretty-print the vector values as a bracketed, comma-separated string.

    Zero components render as a single space when print_zero is False.
    """
    fmt = '{0:.' + str(decimal_places) + 'f}'
    parts = []
    for component in p:
        if not print_zero and component == 0:
            parts.append(' ')
        else:
            parts.append(fmt.format(component))
    return '[{0}]'.format(", ".join(parts))
Pretty-print the vector values.
def _trigger(self):
    """Mark the future complete and invoke all registered callbacks.

    Called internally by the batch once the batch completes.
    """
    self._completed.set()
    for callback in self._callbacks:
        callback(self)
Trigger all callbacks registered to this Future. This method is called internally by the batch once the batch completes. Args: message_id (str): The message ID, as a string.
def nearest_subpackage(cls, package, all_packages):
    """Given a package, find its nearest parent in all_packages.

    Returns the candidate sharing the longest dotted prefix with
    *package*, or *package* itself when nothing shares a prefix.
    """
    def shared_prefix(candidate):
        # Collect dotted components until the first mismatch.
        common = []
        for ours, theirs in zip(package.split('.'), candidate.split('.')):
            if ours != theirs:
                break
            common.append(ours)
        return common

    prefixes = [prefix for prefix in map(shared_prefix, all_packages) if prefix]
    if not prefixes:
        return package
    return '.'.join(max(prefixes, key=len))
Given a package, find its nearest parent in all_packages.
def draw(self):
    """Draw all sprites in the system, preserving surrounding GL state."""
    glPushAttrib(GL_ALL_ATTRIB_BITS)
    self.draw_score()
    for sprite in self:
        sprite.draw()
    glPopAttrib()
Draw all the sprites in the system using their renderers. This method is convenient to call from your Pyglet window's on_draw handler to redraw particles when needed.
def remove_metadata_key(self, obj, key):
    """Remove *key* from the object's metadata.

    Setting the key to an empty string deletes it; if the key does not
    exist, nothing is done.
    """
    meta_dict = {key: ""}
    return self.set_metadata(obj, meta_dict)
Removes the specified key from the object's metadata. If the key does not exist in the metadata, nothing is done.
def predecessors(self, node, exclude_compressed=True):
    """Return the list of predecessors of *node*.

    :param node: the target node
    :param exclude_compressed: if True, nodes flagged 'compressed' are
        excluded from the result
    :returns: list of predecessor nodes
    """
    preds = super(Graph, self).predecessors(node)
    if exclude_compressed:
        return [n for n in preds
                if not self.node[n].get('compressed', False)]
    else:
        return preds
Returns the list of predecessors of a given node Parameters ---------- node : str The target node exclude_compressed : boolean If true, compressed nodes are excluded from the predecessors list Returns ------- list List of predecessors nodes
def normalize_result(result, default, threshold=0.2):
    """Interpret a chardet result, falling back to *default* when unusable.

    The default is returned for a missing result, a missing confidence,
    or a confidence below *threshold*.
    """
    confidence = result.get('confidence') if result is not None else None
    if confidence is None or confidence < threshold:
        return default
    return normalize_encoding(result.get('encoding'), default=default)
Interpret a chardet result.
def merge_tags(left, right, factory=Tags):
    """Merge two sets of tags into a new troposphere object.

    Args:
        left (Union[dict, troposphere.Tags]): tags merged with lower priority.
        right (Union[dict, troposphere.Tags]): tags merged with higher priority.
        factory (type): type constructed from the merged mapping; defaults
            to the troposphere Tags class.
    """
    if isinstance(left, Mapping):
        tags = dict(left)
    elif hasattr(left, 'tags'):
        tags = _tags_to_dict(left.tags)
    else:
        tags = _tags_to_dict(left)
    if isinstance(right, Mapping):
        tags.update(right)
    elif hasattr(right, 'tags'):
        # BUG FIX: this previously tested hasattr(left, 'tags'), so a
        # Tags-holding `right` was mis-dispatched whenever `left` happened
        # to lack a .tags attribute.
        tags.update(_tags_to_dict(right.tags))
    else:
        tags.update(_tags_to_dict(right))
    return factory(**tags)
Merge two sets of tags into a new troposphere object Args: left (Union[dict, troposphere.Tags]): dictionary or Tags object to be merged with lower priority right (Union[dict, troposphere.Tags]): dictionary or Tags object to be merged with higher priority factory (type): Type of object to create. Defaults to the troposphere Tags class.
def create_exclude_rules(args):
    """Build (and cache) the sorted list of (abspath, is_exclude) rules.

    Exclude paths map to True, include paths to False; the result is
    memoized in the module-level _cached_exclude_rules.
    """
    global _cached_exclude_rules
    if _cached_exclude_rules is not None:
        return _cached_exclude_rules
    rules = []
    for excl_path in args.exclude:
        abspath = os.path.abspath(os.path.join(args.root, excl_path))
        rules.append((abspath, True))
    for incl_path in args.include:
        abspath = os.path.abspath(os.path.join(args.root, incl_path))
        rules.append((abspath, False))
    # Sort rules by absolute path.
    _cached_exclude_rules = sorted(rules, key=lambda p: p[0])
    return _cached_exclude_rules
Creates the exclude rules
def difference(self, *others):
    """Return the difference of this and the other sets as a new NGram set."""
    return self.copy(super(NGram, self).difference(*others))
Return the difference of two or more sets as a new set. >>> from ngram import NGram >>> a = NGram(['spam', 'eggs']) >>> b = NGram(['spam', 'ham']) >>> list(a.difference(b)) ['eggs']
def hasKey(self, key, notNone=False):
    """Filter the table to entries where *key* is present.

    :param key: the dictionary key (or cascading list of keys) to locate
    :param notNone: if True, a key whose value is None counts as missing
    :returns: self
    """
    result = []
    result_tracker = []
    for counter, row in enumerate(self.table):
        (target, _, value) = internal.dict_crawl(row, key)
        if target:
            if notNone==False or not value is None:
                result.append(row)
                result_tracker.append(self.index_track[counter])
    self.table = result
    self.index_track = result_tracker
    return self
Return entries where the key is present. Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "wigs": 68 }, ... {"name": "Larry", "age": 18, "wigs": [3, 2, 9]}, ... {"name": "Joe", "age": 20, "income": None , "wigs": [1, 2, 3]}, ... {"name": "Bill", "age": 19, "income": 29000 }, ... ] >>> print PLOD(test).hasKey("income").returnString() [ {age: 18, income: 93000, name: 'Jim' , wigs: 68}, {age: 20, income: None , name: 'Joe' , wigs: [1, 2, 3]}, {age: 19, income: 29000, name: 'Bill', wigs: None } ] >>> print PLOD(test).hasKey("income", notNone=True).returnString() [ {age: 18, income: 93000, name: 'Jim' , wigs: 68}, {age: 19, income: 29000, name: 'Bill', wigs: None} ] .. versionadded:: 0.1.2 :param key: The dictionary key (or cascading list of keys) to locate. :param notNone: If True, then None is the equivalent of a missing key. Otherwise, a key with a value of None is NOT considered missing. :returns: self
def sub_tags(self, *tags, **kw):
    """Return list of direct sub-records matching any of the tag names.

    :param str tags: names of the sub-record tags
    :param kw: only recognized keyword is `follow` (default True), which
        resolves Pointer records to their referenced records
    :return: list of Records, possibly empty
    """
    records = [x for x in self.sub_records if x.tag in tags]
    if kw.get('follow', True):
        records = [rec.ref if isinstance(rec, Pointer) else rec
                   for rec in records]
    return records
Returns list of direct sub-records matching any tag name. Unlike :py:meth:`sub_tag` method this method does not support hierarchical paths and does not resolve pointers. :param str tags: Names of the sub-record tag :param kw: Keyword arguments, only recognized keyword is `follow` with the same meaning as in :py:meth:`sub_tag`. :return: List of `Records`, possibly empty.
def createSQL(self, sql, args=()):
    """Execute auto-committing DDL (CREATE TABLE / CREATE INDEX).

    Logs statements that take longer than two seconds.
    """
    before = time.time()
    self._execSQL(sql, args)
    after = time.time()
    if after - before > 2.0:
        log.msg('Extremely long CREATE: %s' % (after - before,))
        log.msg(sql)
For use with auto-committing statements such as CREATE TABLE or CREATE INDEX.
def get_dependants(project_name):
    """Yield names of installed distributions that depend on `project_name`."""
    for package in get_installed_distributions(user_only=ENABLE_USER_SITE):
        if is_dependant(package, project_name):
            yield package.project_name
Yield dependants of `project_name`.
def from_Suite(suite, maxwidth=80):
    """Convert a mingus.containers.Suite to an ASCII tablature string.

    Uses the Suite's title, subtitle, author, email and description
    attributes to build the headers.
    """
    # Fall back to "<N> Compositions" when no subtitle was set.
    subtitle = str(len(suite.compositions)) + ' Compositions' if suite.subtitle\
        == '' else suite.subtitle
    result = os.linesep.join(add_headers(
        maxwidth,
        suite.title,
        subtitle,
        suite.author,
        suite.email,
        suite.description,
    ))
    hr = maxwidth * '='
    n = os.linesep
    result = n + hr + n + result + n + hr + n + n
    for comp in suite:
        c = from_Composition(comp, maxwidth)
        result += c + n + hr + n + n
    return result
Convert a mingus.containers.Suite to an ASCII tablature string, complete with headers. This function makes use of the Suite's title, subtitle, author, email and description attributes.
def _create_trial_info(self, expr_dir):
    """Load trial meta from *expr_dir* and persist a TrialRecord.

    Args:
        expr_dir (str): Directory path of the experiment.
    """
    meta = self._build_trial_meta(expr_dir)
    self.logger.debug("Create trial for %s" % meta)
    trial_record = TrialRecord.from_json(meta)
    trial_record.save()
Create information for given trial. Meta file will be loaded if exists, and the trial information will be saved in db backend. Args: expr_dir (str): Directory path of the experiment.
def get_span_kind_as_int(self, span):
    """Map the span's `span.kind` tag to the backend integer.

    :param span: the span to search for the `span.kind` tag
    :return: 1 for entry kinds, 2 for exit kinds, 3 for anything else,
        or None when the tag is absent
    """
    kind = None
    if "span.kind" in span.tags:
        if span.tags["span.kind"] in self.entry_kind:
            kind = 1
        elif span.tags["span.kind"] in self.exit_kind:
            kind = 2
        else:
            kind = 3
    return kind
Will retrieve the `span.kind` tag and return the appropriate integer value for the Instana backend or None if the tag is set to something we don't recognize. :param span: The span to search for the `span.kind` tag :return: Integer
def render_filter(self, next_filter):
    """Coroutine: format each received row and push it downstream."""
    # Prime the downstream coroutine so it is ready to receive.
    next(next_filter)
    while True:
        data = (yield)
        res = [self.cell_format(access(data)) for access in self.accessors]
        next_filter.send(res)
Produce formatted output from the raw data stream.
def spectrogram(t_signal, frame_width=FRAME_WIDTH, overlap=FRAME_STRIDE):
    """Magnitude spectrogram (dB) of a single-channel time-domain signal.

    Each frame is Hann-windowed and transformed with a real FFT; frame
    size and stride are given in samples.
    """
    frame_width = min(t_signal.shape[0], frame_width)
    w = np.hanning(frame_width)
    num_components = frame_width // 2 + 1
    num_frames = 1 + (len(t_signal) - frame_width) // overlap
    f_signal = np.empty([num_frames, num_components], dtype=np.complex128)
    # Range end is +1 so exactly num_frames frames are produced.  The
    # original excluded the final frame start whenever (len - frame_width)
    # was a multiple of the stride, leaving the last row of np.empty as
    # uninitialized garbage.
    for i, t in enumerate(range(0, len(t_signal) - frame_width + 1, overlap)):
        f_signal[i] = rfft(w * t_signal[t:t + frame_width])
    return 20 * np.log10(1 + np.absolute(f_signal))
Calculate the magnitude spectrogram of a single-channel time-domain signal from the real frequency components of the STFT with a hanning window applied to each frame. The frame size and overlap between frames should be specified in number of samples.
def check_status_code(response, codes=None):
    """Validate that response.status_code is one of the accepted codes.

    :param response: requests response object
    :param codes: list of accepted codes (defaults to [200])
    :raises StatusCodeError: if the code is not accepted
    """
    accepted = codes if codes else [200]
    if response.status_code not in accepted:
        raise StatusCodeError(response.status_code)
Checks response.status_code is in codes. :param requests.request response: Requests response :param list codes: List of accepted codes or callable :raises: StatusCodeError if code invalid
def init_centers_widths(self, R):
    """Initialize prior of factor centers (k-means on R) and widths.

    Returns
    -------
    centers : 2D array, shape [K, n_dim]
        Prior of factors' centers.
    widths : 2D array, shape [K, 1]
        Prior of factors' widths (all set to the max sigma of R).
    """
    kmeans = KMeans(
        init='k-means++',
        n_clusters=self.K,
        n_init=10,
        random_state=100)
    kmeans.fit(R)
    centers = kmeans.cluster_centers_
    widths = self._get_max_sigma(R) * np.ones((self.K, 1))
    return centers, widths
Initialize prior of centers and widths Returns ------- centers : 2D array, with shape [K, n_dim] Prior of factors' centers. widths : 1D array, with shape [K, 1] Prior of factors' widths.
def recv_blocking(conn, msglen):
    """Receive exactly *msglen* bytes from *conn*, in chunks of at most 4096.

    Raises RuntimeError if the connection closes before enough data arrives.
    """
    msg = b''
    while len(msg) < msglen:
        chunk = conn.recv(min(msglen - len(msg), 4096))
        if not chunk:
            raise RuntimeError("socket connection broken")
        msg += chunk
        logging.debug("Msglen: %d of %d", len(msg), msglen)
        logging.debug("Message: %s", msg)
    return msg
Receive data until msglen bytes have been received.
def result(self, timeout=None):
    """Poll until the operation is done; return its response.

    Raises GaxError when the operation finished unsuccessfully.  _poll
    may itself raise if *timeout* seconds elapse first.
    """
    if not self._poll(timeout).HasField('response'):
        raise GaxError(self._operation.error.message)
    return _from_any(self._result_type, self._operation.response)
Enters polling loop on OperationsClient.get_operation, and once Operation.done is true, then returns Operation.response if successful or throws GaxError if not successful. This method will wait up to timeout seconds. If the call hasn't completed in timeout seconds, then a RetryError will be raised. timeout can be an int or float. If timeout is not specified or None, there is no limit to the wait time.
def list_active_vms(cwd=None):
    """Return names of running Vagrant VMs defined by the Vagrantfile at cwd.

    CLI Example: salt '*' vagrant.list_active_vms cwd=/projects/project_1
    """
    vms = []
    cmd = 'vagrant status'
    reply = __salt__['cmd.shell'](cmd, cwd=cwd)
    log.info('--->\n%s', reply)
    for line in reply.split('\n'):
        tokens = line.strip().split()
        # Status lines look like "<name> running (<provider>)".
        if len(tokens) > 1:
            if tokens[1] == 'running':
                vms.append(tokens[0])
    return vms
Return a list of machine names for active virtual machine on the host, which are defined in the Vagrantfile at the indicated path. CLI Example: .. code-block:: bash salt '*' vagrant.list_active_vms cwd=/projects/project_1
def upload(self, picture, resize=None, rotation=None, noexif=None,
           callback=None):
    """Wrap the module-level upload(), filling unset options from instance
    defaults.

    See the module-level upload function for parameter semantics
    (picture, resize, rotation, noexif, callback).
    """
    if not resize:
        resize = self._resize
    if not rotation:
        rotation = self._rotation
    if not noexif:
        noexif = self._noexif
    if not callback:
        callback = self._callback
    return upload(self._apikey, picture, resize, rotation, noexif, callback)
wraps upload function :param str/tuple/list picture: Path to picture as str or picture data. \ If data, a tuple or list with the file name as str \ and data as byte object in that order. :param str resize: A resolution in the following format: \ '80x80'(optional) :param str|degree rotation: The picture will be rotated by this value.\ Allowed values are 00, 90, 180, 270.(optional) :param boolean noexif: set to True when exif data should be purged.\ (optional) :param function callback: function will be called after every read. \ Needs to take one argument. You can use the len function to \ determine the body length and call bytes_read().
def replace_entity_resource(model, oldres, newres):
    """Replace one entity in the model with another with the same links.

    :param model: Versa model to be updated
    :param oldres: old/former resource IRI to be replaced
    :param newres: new/replacement resource IRI
    :return: None
    """
    oldrids = set()
    for rid, link in model:
        # BUG FIX: the original referenced o, r, t, a without ever unpacking
        # `link`, raising NameError on the first matching row.
        (o, r, t, a) = link
        if o == oldres or t == oldres or oldres in a.values():
            oldrids.add(rid)
            new_link = (
                newres if o == oldres else o,
                r,
                newres if t == oldres else t,
                dict((k, newres if v == oldres else v) for k, v in a.items()),
            )
            model.add(*new_link)
    model.delete(oldrids)
    return
Replace one entity in the model with another with the same links :param model: Versa model to be updated :param oldres: old/former resource IRI to be replaced :param newres: new/replacement resource IRI :return: None
def check_compatibility(self, other, check_edges=False, precision=1E-7):
    """Test whether two histograms are compatible.

    Compatibility means matching dimensionality, size and per-axis bin
    counts; with check_edges=True, bin edges must also agree within
    *precision*.

    Raises
    ------
    TypeError
        If the histogram dimensionalities do not match.
    ValueError
        If sizes, bin counts, or (optionally) bin edges do not match.
    """
    if self.GetDimension() != other.GetDimension():
        raise TypeError("histogram dimensionalities do not match")
    if len(self) != len(other):
        raise ValueError("histogram sizes do not match")
    for axis in range(self.GetDimension()):
        if self.nbins(axis=axis) != other.nbins(axis=axis):
            raise ValueError(
                "numbers of bins along axis {0:d} do not match".format(
                    axis))
    if check_edges:
        for axis in range(self.GetDimension()):
            if not all([abs(l - r) < precision
                        for l, r in zip(self._edges(axis),
                                        other._edges(axis))]):
                raise ValueError(
                    "edges do not match along axis {0:d}".format(axis))
Test whether two histograms are considered compatible by the number of dimensions, number of bins along each axis, and optionally the bin edges. Parameters ---------- other : histogram A rootpy histogram check_edges : bool, optional (default=False) If True then also check that the bin edges are equal within the specified precision. precision : float, optional (default=1E-7) The value below which differences between floats are treated as nil when comparing bin edges. Raises ------ TypeError If the histogram dimensionalities do not match ValueError If the histogram sizes, number of bins along an axis, or optionally the bin edges do not match
def get_single_file_info(self, rel_path):
    """Return info (including last change time) for the file at rel_path."""
    f_path = self.get_full_file_path(rel_path)
    # Delegates to the module-level helper of the same name.
    return get_single_file_info(f_path, rel_path)
Gets last change time for a single file
def fetch_objects(self, oids):
    """Fetch model instances for the given identifiers with one bulk query.

    Override to customize objects retrieval.

    :raises validators.ValidationError: listing any unknown identifiers
    """
    objects = self.model.objects.in_bulk(oids)
    if len(objects.keys()) != len(oids):
        non_existants = set(oids) - set(objects.keys())
        msg = _('Unknown identifiers: {identifiers}').format(
            identifiers=', '.join(str(ne) for ne in non_existants))
        raise validators.ValidationError(msg)
    # Preserve the caller's ordering of oids.
    return [objects[id] for id in oids]
This methods is used to fetch models from a list of identifiers. Default implementation performs a bulk query on identifiers. Override this method to customize the objects retrieval.
def set_file_path(self, filePath):
    """Set the path of the file that needs to be locked.

    :Parameters:
        #. filePath (None, path): the file to lock; None keeps the locker
           usable for its general purpose only.
    """
    if filePath is not None:
        # NOTE: Python 2 `basestring` -- kept as-is.
        assert isinstance(filePath, basestring), "filePath must be None or string"
        filePath = str(filePath)
    self.__filePath = filePath
Set the file path that needs to be locked. :Parameters: #. filePath (None, path): The file that needs to be locked. When given and a lock is acquired, the file will be automatically opened for writing or reading depending on the given mode. If None is given, the locker can always be used for its general purpose as shown in the examples.
def generate_cutD_genomic_CDR3_segs(self):
    """Append maximum palindromic insertions to germline D segments.

    Sets cutD_genomic_CDR3_segs so that delDl and delDr can index
    directly for the number of nucleotides to delete from a segment.
    """
    max_palindrome_L = self.max_delDl_palindrome
    max_palindrome_R = self.max_delDr_palindrome
    self.cutD_genomic_CDR3_segs = []
    for CDR3_D_seg in [x[1] for x in self.genD]:
        if len(CDR3_D_seg) < min(max_palindrome_L, max_palindrome_R):
            # Segment shorter than either max palindrome: cut with the
            # full segment length on both sides instead.
            self.cutD_genomic_CDR3_segs += [
                cutR_seq(cutL_seq(CDR3_D_seg, 0, len(CDR3_D_seg)),
                         0, len(CDR3_D_seg))]
        else:
            self.cutD_genomic_CDR3_segs += [
                cutR_seq(cutL_seq(CDR3_D_seg, 0, max_palindrome_L),
                         0, max_palindrome_R)]
Add palindromic inserted nucleotides to germline D sequences. The maximum number of palindromic insertions are appended to the germline D segments so that delDl and delDr can index directly for the number of nucleotides to delete from a segment. Sets the attribute cutD_genomic_CDR3_segs.
def get_secondary_count(context, default=0):
    """Return the number of secondary ARs of this AR.

    Returns *default* when context is not an AR or has no primary AR.
    """
    if not is_ar(context):
        return default
    primary = context.getPrimaryAnalysisRequest()
    if not primary:
        return default
    return len(primary.getSecondaryAnalysisRequests())
Returns the number of secondary ARs of this AR
def pad_to_size(data, shape, value=0.0):
    """Pad *data* with *value* so the result has the given final shape.

    Parameters
    ----------
    data : ndarray
        Numpy array of any dimension and type.
    shape : tuple
        Final shape of the padded array, length ``data.ndim``.  A ``-1``
        keeps the current size along that dimension.  Uneven padding puts
        the extra element at the end of the axis.
    value : data.dtype
        Value to pad with (anything valid for ``pdata[:] = value``).
    """
    shape = [data.shape[i] if shape[i] == -1 else shape[i]
             for i in range(len(shape))]
    new_data = np.empty(shape)
    new_data[:] = value
    # BUG FIX: index with a *tuple* of slices; a list of slices is
    # deprecated basic indexing and an error in modern NumPy.
    II = tuple(slice((shape[i] - data.shape[i]) // 2,
                     (shape[i] - data.shape[i]) // 2 + data.shape[i])
               for i in range(len(shape)))
    new_data[II] = data
    return new_data
This is similar to `pad`, except you specify the final shape of the array. Parameters ---------- data : ndarray Numpy array of any dimension and type. shape : tuple Final shape of padded array. Should be tuple of length ``data.ndim``. If it has to pad unevenly, it will pad one more at the end of the axis than at the beginning. If a dimension is specified as ``-1``, then it will remain its current size along that dimension. value : data.dtype The value with which to pad. Default is ``0.0``. This can even be an array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the size of the padded array. Examples -------- >>> import deepdish as dd >>> import numpy as np Pad an array with zeros. >>> x = np.ones((4, 2)) >>> dd.util.pad_to_size(x, (5, 5)) array([[ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 1., 1., 0., 0.], [ 0., 0., 0., 0., 0.]])
def _finalize_memory(jvm_opts): avoid_min = 32 avoid_max = 48 out_opts = [] for opt in jvm_opts: if opt.startswith("-Xmx"): spec = opt[4:] val = int(spec[:-1]) mod = spec[-1] if mod.upper() == "M": adjust = 1024 min_val = avoid_min * 1024 max_val = avoid_max * 1024 else: adjust = 1 min_val, max_val = avoid_min, avoid_max if val >= min_val and val < max_val: val = min_val - adjust opt = "%s%s%s" % (opt[:4], val, mod) out_opts.append(opt) return out_opts
GRIDSS does not recommend setting memory between 32 and 48Gb. https://github.com/PapenfussLab/gridss#memory-usage
def CreateTask(self, session_identifier):
    """Create a task and register it as queued.

    Args:
        session_identifier (str): the identifier of the session the task
            is part of.

    Returns:
        Task: task attribute container.
    """
    task = tasks.Task(session_identifier)
    logger.debug('Created task: {0:s}.'.format(task.identifier))
    with self._lock:
        self._tasks_queued[task.identifier] = task
        self._total_number_of_tasks += 1
        self.SampleTaskStatus(task, 'created')
    return task
Creates a task. Args: session_identifier (str): the identifier of the session the task is part of. Returns: Task: task attribute container.
def Runs(self):
    """Return {run_name: tags} for all runs in the EventMultiplexer."""
    with self._accumulators_mutex:
        # Snapshot the accumulators under the lock; Tags() runs outside it.
        items = list(six.iteritems(self._accumulators))
    return {run_name: accumulator.Tags()
            for run_name, accumulator in items}
Return all the run names in the `EventMultiplexer`. Returns: ``` {runName: { scalarValues: [tagA, tagB, tagC], graph: true, meta_graph: true}} ```
def write_sampler_metadata(self, sampler):
    """Write standard sampler metadata, plus the ntemps attribute."""
    super(MultiTemperedMetadataIO, self).write_sampler_metadata(sampler)
    self[self.sampler_group].attrs["ntemps"] = sampler.ntemps
Adds writing ntemps to file.
def permissions(self):
    """Permissions of the file: "r-", "-w", "rw" or "--".

    NOTE(review): write permission is derived from the DELETE flag,
    not a WRITE flag -- confirm this is intended.

    :rtype: str
    """
    can_read = self._info.file.permissions & lib.GP_FILE_PERM_READ
    can_write = self._info.file.permissions & lib.GP_FILE_PERM_DELETE
    return "{0}{1}".format("r" if can_read else "-",
                           "w" if can_write else "-")
Permissions of the file. Can be "r-" (read-only), "-w" (write-only), "rw" (read-write) or "--" (no rights). :rtype: str
def scan_django_settings(values, imports):
    """Recursively collect import-path strings from Django settings values.

    Matching strings, module file paths and nested dict/iterable contents
    are added to the *imports* set in place.
    """
    if isinstance(values, (str, bytes)):
        if utils.is_import_str(values):
            imports.add(values)
    elif isinstance(values, dict):
        # Both keys and values may hold import strings.
        for k, v in values.items():
            scan_django_settings(k, imports)
            scan_django_settings(v, imports)
    elif hasattr(values, '__file__') and getattr(values, '__file__'):
        # Module objects: record the import path derived from their file.
        imp, _ = utils.import_path_from_file(getattr(values, '__file__'))
        imports.add(imp)
    elif hasattr(values, '__iter__'):
        for item in values:
            scan_django_settings(item, imports)
Recursively scans Django settings for values that appear to be imported modules.
def _readline(sock, buf):
    """Read one "\\r\\n"-terminated line of text from the socket.

    Args:
        sock: Socket object, should be connected.
        buf: bytes left over from an earlier _readline/_readvalue call
            (pass b'' on the first call).

    Returns:
        (buf, line): the full line without the "\\r\\n", plus any trailing
        bytes read after the terminator.

    Raises:
        MemcacheUnexpectedCloseError: if the socket closes mid-line.
    """
    chunks = []
    last_char = b''
    while True:
        # Case 1: the "\r\n" was split across two reads -- the previous
        # chunk ended in "\r" and this buffer starts with "\n".
        if last_char == b'\r' and buf[0:1] == b'\n':
            chunks[-1] = chunks[-1][:-1]
            return buf[1:], b''.join(chunks)
        # Case 2: the terminator is wholly inside the current buffer.
        elif buf.find(b'\r\n') != -1:
            before, sep, after = buf.partition(b"\r\n")
            chunks.append(before)
            return after, b''.join(chunks)
        # No terminator yet: stash the buffer and read more.
        if buf:
            chunks.append(buf)
            last_char = buf[-1:]
        buf = _recv(sock, RECV_SIZE)
        if not buf:
            raise MemcacheUnexpectedCloseError()
Read line of text from the socket. Read a line of text (delimited by "\r\n") from the socket, and return that line along with any trailing characters read from the socket. Args: sock: Socket object, should be connected. buf: String, zero or more characters, returned from an earlier call to _readline or _readvalue (pass an empty string on the first call). Returns: A tuple of (buf, line) where line is the full line read from the socket (minus the "\r\n" characters) and buf is any trailing characters read after the "\r\n" was found (which may be an empty string).
def parameter_action(self, text, loc, par):
    """Semantic action executed after recognising a function parameter.

    Inserts the parameter into the symbol table and returns its index.
    """
    exshared.setpos(loc, text)
    if DEBUG > 0:
        print("PARAM:",par)
        if DEBUG == 2: self.symtab.display()
        if DEBUG > 2: return
    index = self.symtab.insert_parameter(par.name, par.type)
    self.shared.function_params += 1
    return index
Code executed after recognising a parameter
def median(self, **kwargs):
    """Return the median of each column or row as a new QueryCompiler."""
    if self._is_transposed:
        # Flip the axis (0<->1) and delegate to the transposed frame.
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().median(**kwargs)
    axis = kwargs.get("axis", 0)
    func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
    return self._full_axis_reduce(axis, func)
Returns median of each column or row. Returns: A new QueryCompiler object containing the median of each column or row.
def pollNextEvent(self, pEvent):
    """Fill *pEvent* with the next queued event, if any.

    uncbVREvent is the size in bytes of the VREvent_t struct.

    :returns: True when an event was dequeued, False when the queue is empty
    """
    fn = self.function_table.pollNextEvent
    result = fn(byref(pEvent), sizeof(VREvent_t))
    return result != 0
Returns true and fills the event with the next event on the queue if there is one. If there are no events this method returns false. uncbVREvent should be the size in bytes of the VREvent_t struct
def filter(self, displayed=False, enabled=False):
    """Filter elements by visibility and enabled status.

    :param displayed: whether to filter out invisible elements
    :param enabled: whether to filter out disabled elements
    :returns: an ElementSelector
    """
    if self.evaluated:
        # Already evaluated: filter the concrete element list now.
        result = self
        if displayed:
            result = ElementSelector(
                result.browser,
                elements=[e for e in result if e.is_displayed()]
            )
        if enabled:
            result = ElementSelector(
                result.browser,
                elements=[e for e in result if e.is_enabled()]
            )
    else:
        # Not yet evaluated: record the filters for lazy application.
        result = copy(self)
        if displayed:
            result.displayed = True
        if enabled:
            result.enabled = True
    return result
Filter elements by visibility and enabled status. :param displayed: whether to filter out invisible elements :param enabled: whether to filter out disabled elements Returns: an :class:`ElementSelector`
def update(self, friendly_name=None, description=None, query=None):
    """Selectively update View information.

    Any parameter that is None (the default) is not applied.

    Args:
        friendly_name: if not None, the new friendly name.
        description: if not None, the new description.
        query: if not None, a new query string (or Query object) for the View.
    """
    self._table._load_info()
    if query is not None:
        if isinstance(query, _query.Query):
            query = query.sql
        self._table._info['view'] = {'query': query}
    self._table.update(friendly_name=friendly_name, description=description)
Selectively updates View information. Any parameters that are None (the default) are not applied in the update. Args: friendly_name: if not None, the new friendly name. description: if not None, the new description. query: if not None, a new query string for the View.
def loc_to_index(self, loc):
    """Return the render-window index for a location.

    Parameters
    ----------
    loc : int, tuple, or list
        Index of the renderer, e.g. ``loc=2`` or ``loc=(1, 1)``.
        None returns the active renderer index.

    Returns
    -------
    idx : int
        Index of the render window.
    """
    if loc is None:
        return self._active_renderer_index
    elif isinstance(loc, int):
        return loc
    elif isinstance(loc, collections.abc.Iterable):
        # BUG FIX: collections.Iterable was removed in Python 3.10;
        # the ABC now lives in collections.abc.
        assert len(loc) == 2, '"loc" must contain two items'
        # NOTE(review): row stride uses shape[0]; confirm against the
        # window's (rows, cols) convention.
        return loc[0]*self.shape[0] + loc[1]
Return index of the render window given a location index. Parameters ---------- loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. Returns ------- idx : int Index of the render window.
def patch_func(replacement, target_mod, func_name):
    """Patch func_name in target_mod with replacement.

    Important - the original must be resolved by name here to avoid
    patching an already patched function.
    """
    original = getattr(target_mod, func_name)
    # Stash the original only once, so repeated patching keeps the first.
    vars(replacement).setdefault('unpatched', original)
    setattr(target_mod, func_name, replacement)
Patch func_name in target_mod with replacement Important - original must be resolved by name to avoid patching an already patched function.
def delete_project(self, id):
    """Delete a project from the Gitlab server.

    Gitlab returns Boolean True on deletion, so we return an empty dict.

    :param id: the ID of the project or NAMESPACE/PROJECT_NAME
    :return: dict
    :raise HttpError: if an invalid response is returned
    """
    url = '/projects/{id}'.format(id=id)
    response = self.delete(url)
    if response is True:
        return {}
    else:
        return response
Delete a project from the Gitlab server Gitlab currently returns a Boolean True if the deleted and as such we return an empty Dictionary :param id: The ID of the project or NAMESPACE/PROJECT_NAME :return: Dictionary :raise: HttpError: If invalid response returned
def update_parameters(parameters, grads, learning_rate=1.2):
    """Update parameters using the gradient descent update rule.

    Arguments:
    parameters -- dict with keys "W1", "b1", "W2", "b2"
    grads -- dict with matching gradient keys "dW1", "db1", "dW2", "db2"

    Returns:
    parameters -- dict containing the updated parameters
    """
    updated = {}
    for name in ("W1", "b1", "W2", "b2"):
        param = parameters[name]
        # In-place subtraction mirrors the original augmented assignment.
        param -= learning_rate * grads["d" + name]
        updated[name] = param
    return updated
Updates parameters using the gradient descent update rule given above Arguments: parameters -- python dictionary containing your parameters grads -- python dictionary containing your gradients Returns: parameters -- python dictionary containing your updated parameters
def _areadist(ax, v, xr, c, bins=100, by=None, alpha=1, label=None):
    """Plot the histogram distribution of ``v`` as a filled area on ``ax``.

    NaNs are dropped before binning. ``xr`` is accepted for interface
    compatibility but not used here. Returns the bin counts.
    """
    finite = v[~np.isnan(v)]
    counts, edges = np.histogram(finite, bins)
    # Use the left bin edges as x coordinates, like the original.
    lefts = edges[:-1]
    baseline = np.zeros((bins,)) if by is None else by
    ax.fill_between(lefts, counts, baseline, facecolor=c, alpha=alpha,
                    label=label)
    return counts
Plot the histogram distribution but as an area plot
def entity(self, entity_type, identifier=None):
    """Factory for an ACL Entity, reusing an identical known entity.

    :type entity_type: str
    :param entity_type: The type of entity (``user``, ``group``, etc.)

    :type identifier: str
    :param identifier: The ID of the entity (an ID or e-mail address),
                       if applicable.

    :rtype: :class:`_ACLEntity`
    :returns: A new Entity, or a reference to an existing identical one.
    """
    candidate = _ACLEntity(entity_type=entity_type, identifier=identifier)
    if not self.has_entity(candidate):
        # First time we see this entity: register and return it.
        self.add_entity(candidate)
        return candidate
    return self.get_entity(candidate)
Factory method for creating an Entity. If an entity with the same type and identifier already exists, this will return a reference to that entity. If not, it will create a new one and add it to the list of known entities for this ACL. :type entity_type: str :param entity_type: The type of entity to create (ie, ``user``, ``group``, etc) :type identifier: str :param identifier: The ID of the entity (if applicable). This can be either an ID or an e-mail address. :rtype: :class:`_ACLEntity` :returns: A new Entity or a reference to an existing identical entity.
def download_object(container_name, object_name, destination_path, profile,
                    overwrite_existing=False, delete_on_failure=True,
                    **libcloud_kwargs):
    """Download an object to the specified destination path.

    :param container_name: Container name
    :param object_name: Object name
    :param destination_path: Full path to a file or directory for the
        incoming file.
    :param profile: The profile key
    :param overwrite_existing: True to overwrite an existing file.
    :param delete_on_failure: True to delete a partially downloaded file
        if the download was not successful.
    :param libcloud_kwargs: Extra arguments for the driver's
        ``download_object`` method.
    :return: True if the object was downloaded successfully.
    """
    conn = _get_driver(profile=profile)
    # Strip salt-internal __kwargs before forwarding to libcloud.
    extra_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    target = conn.get_object(container_name, object_name)
    return conn.download_object(target, destination_path, overwrite_existing,
                                delete_on_failure, **extra_kwargs)
Download an object to the specified destination path. :param container_name: Container name :type container_name: ``str`` :param object_name: Object name :type object_name: ``str`` :param destination_path: Full path to a file or a directory where the incoming file will be saved. :type destination_path: ``str`` :param profile: The profile key :type profile: ``str`` :param overwrite_existing: True to overwrite an existing file, defaults to False. :type overwrite_existing: ``bool`` :param delete_on_failure: True to delete a partially downloaded file if the download was not successful (hash mismatch / file size). :type delete_on_failure: ``bool`` :param libcloud_kwargs: Extra arguments for the driver's download_object method :type libcloud_kwargs: ``dict`` :return: True if an object has been successfully downloaded, False otherwise. :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_storage.download_object MyFolder me.jpg /tmp/me.jpg profile1
def best_hits(self):
    """Return a dict mapping each query to its best mapped hit.

    Sorts by quality first so the first hit of each query group is the
    best one, then restores the reference ordering before returning.
    """
    self.quality_sort()
    best = {}
    for query, hits in groupby(self, lambda hit: hit.query):
        best[query] = next(hits)
    self.ref_sort()
    return best
returns a dict with query => best mapped position
def hlog_inv(y, b=500, r=_display_max, d=_l_mmax):
    """Inverse of the base-10 hyperlog transform."""
    scaled = 1. * d / r * y
    sgn = sign(y)
    if sgn.shape:
        # Array input: map zero entries to +1 so the power term is defined.
        sgn[sgn == 0] = 1
    elif sgn == 0:
        # Scalar zero input.
        sgn = 1
    return sgn * 10 ** (sgn * scaled) + b * scaled - sgn
Inverse of base 10 hyperlog transform.
def generate_tags_multiple_files(input_files, tag, ignore_tags, ns=None):
    """Chain the xmltag generators of every input file into one iterator."""
    per_file = (generate_xmltags(path, tag, ignore_tags, ns)
                for path in input_files)
    return itertools.chain.from_iterable(per_file)
Calls xmltag generator for multiple files.
def parse_function(fn):
    """Get the source of a function and return its AST.

    Raises ValueError when the source cannot be retrieved, e.g. for
    functions typed at the interpreter or backed by C extension modules.
    """
    try:
        source = inspect.getsource(fn)
        return parse_string(source)
    except (IOError, OSError) as e:
        raise ValueError(
            'Cannot differentiate function: %s. Tangent must be able to access the '
            'source code of the function. Functions defined in a Python '
            'interpreter and functions backed by C extension modules do not '
            'have accessible source code.' % e)
Get the source of a function and return its AST.
def get_crawldelay(self, useragent):
    """Look up the configured crawl delay for ``useragent``.

    @return: crawl delay in seconds of the first matching entry, else zero
    @rtype: integer >= 0
    """
    # Lazily scan entries; stop at the first one that applies.
    matching = (entry.crawldelay for entry in self.entries
                if entry.applies_to(useragent))
    return next(matching, 0)
Look for a configured crawl delay. @return: crawl delay in seconds or zero @rtype: integer >= 0
def parse(
        args: typing.List[str] = None,
        arg_parser: ArgumentParser = None
) -> dict:
    """Parse the command-line arguments for the cauldron server.

    Uses ``arg_parser`` when given, otherwise builds the default parser,
    and returns the parsed namespace as a plain dict.
    """
    active_parser = create_parser() if not arg_parser else arg_parser
    namespace = active_parser.parse_args(args)
    return vars(namespace)
Parses the arguments for the cauldron server
def _remove_dots(src):
    """Recursively replace dots with dashes in the keys of ``src``."""
    return {
        key.replace('.', '-'): _remove_dots(val) if isinstance(val, dict) else val
        for key, val in six.iteritems(src)
    }
Remove dots from the given data structure
def save(self, f):
    """Pickle the tagger's model (weights, tagdict, classes, clusters) to ``f``."""
    payload = (self.perceptron.weights, self.tagdict, self.classes,
               self.clusters)
    return pickle.dump(payload, f, protocol=pickle.HIGHEST_PROTOCOL)
Save pickled model to file.
def ae_core_density(self):
    """The all-electron radial density as a :class:`RadialFunction`."""
    # The parsed attributes are not needed for the radial function.
    mesh, values, _ = self._parse_radfunc("ae_core_density")
    return RadialFunction(mesh, values)
The all-electron radial density.
def content(self, **args):
    """Return the content of a gist.

    Accepts either ``name=<gist filename>`` (resolved to a gist id for the
    authenticated user) or ``id=<gist id>``. When fetched by id without a
    name, the content of the last file listed in the gist is returned,
    preserving the original order-dependent behavior.

    :raises Exception: if neither ``name`` nor ``id`` is given, or if no
        matching gist is found.
    """
    self.gist_name = ''
    if 'name' in args:
        self.gist_name = args['name']
        self.gist_id = self.getMyID(self.gist_name)
    elif 'id' in args:
        self.gist_id = args['id']
    else:
        raise Exception('Either provide authenticated user\'s Unambigious Gistname or any unique Gistid')
    if self.gist_id:
        r = requests.get(
            '%s' % BASE_URL + '/gists/%s' % self.gist_id,
            headers=self.gist.header
        )
        if r.status_code == 200:
            # Parse the response once instead of per-iteration; the unused
            # `r_text = json.loads(r.text)` of the original is dropped.
            files = r.json()['files']
            if self.gist_name != '':
                return files[self.gist_name]['content']
            # No name given: fall back to the last file in the mapping.
            # (.values() replaces py2-only .iteritems(); works on 2 and 3.)
            for meta in files.values():
                content = files[meta['filename']]['content']
            return content
    raise Exception('No such gist found')
Doesn't require manual fetching of a gist's ID: passing the gist name will return the content of that gist. If the name is ambiguous, provide the gist ID instead; otherwise the content of the most recent gist with that ambiguous name is returned.
def render_pdf_file_to_image_files(pdf_file_name, output_filename_root, program_to_use):
    """Render all pages of the PDF to images rooted at ``output_filename_root``.

    ``program_to_use`` selects the external renderer ("Ghostscript" or
    "pdftoppm"). Directories must already exist; the caller is responsible
    for cleaning up the produced files. The produced image type must be
    directly openable by PIL.
    """
    res_x = str(args.resX)
    res_y = str(args.resY)
    if program_to_use == "Ghostscript":
        # Ghostscript: BMP on Windows, PNG elsewhere.
        if ex.system_os == "Windows":
            renderer = ex.render_pdf_file_to_image_files__ghostscript_bmp
        else:
            renderer = ex.render_pdf_file_to_image_files__ghostscript_png
        renderer(pdf_file_name, output_filename_root, res_x, res_y)
    elif program_to_use == "pdftoppm":
        use_gray = False  # grayscale (pgm) output is currently disabled
        if use_gray:
            ex.render_pdf_file_to_image_files_pdftoppm_pgm(
                pdf_file_name, output_filename_root, res_x, res_y)
        else:
            ex.render_pdf_file_to_image_files_pdftoppm_ppm(
                pdf_file_name, output_filename_root, res_x, res_y)
    else:
        print("Error in renderPdfFileToImageFile: Unrecognized external program.",
              file=sys.stderr)
        ex.cleanup_and_exit(1)
Render all the pages of the PDF file at pdf_file_name to image files with path and filename prefix given by output_filename_root. Any directories must have already been created, and the calling program is responsible for deleting any directories or image files. The program program_to_use, currently either the string "pdftoppm" or the string "Ghostscript", will be called externally. The image type that the PDF is converted into must to be directly openable by PIL.
def _calc_b(w, aod700):
    """Calculate the b coefficient from precipitable water and AOD at 700 nm."""
    # Polynomial fits in aod700 for the log-slope and the intercept.
    slope = 0.00925 * aod700**2 + 0.0148 * aod700 - 0.0172
    intercept = -0.7565 * aod700**2 + 0.5057 * aod700 + 0.4557
    return slope * np.log(w) + intercept
Calculate the b coefficient.
def _mangle_prefix(res):
    """Normalize a prefix result: stringify counters and fix up 'expires'."""
    for field in ('total_addresses', 'used_addresses', 'free_addresses'):
        res[field] = unicode(res[field])
    # Make naive expiry timestamps timezone-aware (UTC).
    if res['expires'].tzinfo is None:
        res['expires'] = pytz.utc.localize(res['expires'])
    # datetime.max is the sentinel for "never expires".
    if res['expires'] == pytz.utc.localize(datetime.datetime.max):
        res['expires'] = None
    return res
Mangle prefix result
def list_spiders(self, project):
    """List all known spiders for a specific project.

    First class, maps to Scrapyd's list spiders endpoint.
    """
    endpoint = self._build_url(constants.LIST_SPIDERS_ENDPOINT)
    response = self.client.get(endpoint, params={'project': project},
                               timeout=self.timeout)
    return response['spiders']
Lists all known spiders for a specific project. First class, maps to Scrapyd's list spiders endpoint.
def get_genes_for_hgnc_id(self, hgnc_symbol):
    """Return the Ensembl gene IDs that correspond to an HGNC symbol."""
    headers = {"content-type": "application/json"}
    self.attempt = 0
    ext = "/xrefs/symbol/homo_sapiens/{}".format(hgnc_symbol)
    response = self.ensembl_request(ext, headers)
    # Keep only cross-references that are genes.
    return [entry["id"] for entry in json.loads(response)
            if entry["type"] == "gene"]
obtain the ensembl gene IDs that correspond to a HGNC symbol
def to_python(self, reply, propagate=True):
    """Extract the value out of the reply message.

    A success reply looks like ``{'ok': return_value, ...}`` and yields
    the return value. A failure reply looks like ``{'nok': [args], ...}``
    and either raises the reconstructed error (``propagate=True``) or
    returns it.
    """
    if 'ok' in reply:
        return reply['ok']
    error = self.Error(*(reply.get('nok') or ()))
    if propagate:
        raise error
    return error
Extracts the value out of the reply message. :param reply: In the case of a successful call the reply message will be:: {'ok': return_value, **default_fields} Therefore the method returns: return_value, **default_fields If the method raises an exception the reply message will be:: {'nok': [repr exc, str traceback], **default_fields} :keyword propagate - Propagate exceptions raised instead of returning a result representation of the error.
def set_ignore_interrupts(flag=True):
    """Toggle EINTR-raising from emulated syscalls on signal interruption.

    :param flag: True to turn EINTR exceptions off, False to turn them on
    :type flag: bool
    """
    coerced = bool(flag)
    log.info("setting ignore_interrupts to %r" % flag)
    state.ignore_interrupts = coerced
turn off EINTR-raising from emulated syscalls on interruption by signals due to the nature of greenhouse's system call emulation, ``signal.siginterrupt`` can't be made to work with it. specifically, greenhouse can't differentiate between different signals. so this function toggles whether to restart for *ALL* or *NO* signals. :param flag: whether to turn EINTR exceptions off (``True``) or on (``False``) :type flag: bool
def run_pip_install(upgrade=0):
    """Install the requirements file on the given server.

    Usage::

        fab <server> run_pip_install
        fab <server> run_pip_install:upgrade=1

    :param upgrade: If set to 1, run with the ``--upgrade`` flag.
    """
    requirements_path = settings.FAB_SETTING('SERVER_REQUIREMENTS_PATH')
    parts = ['pip install -r {0}'.format(requirements_path)]
    if upgrade:
        parts.append('--upgrade')
    run_workon(' '.join(parts))
Installs the requirement.txt file on the given server. Usage:: fab <server> run_pip_install fab <server> run_pip_install:upgrade=1 :param upgrade: If set to 1, the command will be executed with the ``--upgrade`` flag.
def values(self):
    """Read-only tuple of the series' float values, in chart order."""
    val = self._element.val
    if val is None:
        # No value element: an empty series.
        return tuple()
    return tuple(val.pt_v(idx) for idx in range(val.ptCount_val))
Read-only. A sequence containing the float values for this series, in the order they appear on the chart.
def check_enough_space(dataset_local_dir, remote_fname, local_fname,
                       max_disk_usage=0.9):
    """Check whether the local folder can hold the remote file.

    Parameters
    ----------
    dataset_local_dir : str
        Path to the local folder.
    remote_fname : str
        Path to the remote file.
    local_fname : str
        Unused; kept for interface compatibility.
    max_disk_usage : float
        Fraction of the folder's total space usable by the cache.

    Returns
    -------
    bool
        True if there is enough space to store the remote file.
    """
    needed = os.path.getsize(remote_fname)
    total, used = disk_usage(dataset_local_dir)
    return used + needed < total * max_disk_usage
Check if the given local folder has enough space. Check if the given local folder has enough space to store the specified remote file. Parameters ---------- remote_fname : str Path to the remote file remote_fname : str Path to the local folder max_disk_usage : float Fraction indicating how much of the total space in the local folder can be used before the local cache must stop adding to it. Returns ------- output : boolean True if there is enough space to store the remote file.
def spaceless_pdf_plot_maker(array, filename, vmax=None, dpi=DEFAULT_DPI):
    """Draw ``array`` as a whitespace-free heatmap and save it to ``filename``.

    Parameters
    ----------
    array : array_like
        The 2D array to draw.
    filename : file, str or pathlib.Path
        The output image to save the array into.
    vmax : float, optional
        Saturation threshold; if None, the array's
        DEFAULT_SATURATION_THRESHOLD percentile is used.
    dpi : int, optional
        Dots per inch of the output image (default DEFAULT_DPI).
    """
    if vmax is None:
        vmax = np.percentile(array, DEFAULT_SATURATION_THRESHOLD)
    # Strip axes, ticks, and margins so the image fills the canvas.
    plt.gca().set_axis_off()
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    # NOTE(review): plt.figure() creates a NEW current figure here, so the
    # axis/margin tweaks above applied to the previous figure — confirm
    # this ordering is intended.
    plt.figure()
    if SEABORN:
        sns.heatmap(array, vmax=vmax, cmap="Reds")
    else:
        plt.imshow(array, vmax=vmax, cmap="Reds", interpolation="none")
        plt.colorbar()
    plt.savefig(filename, bbox_inches="tight", pad_inches=0.0, dpi=dpi)
    plt.close()
Draw a pretty plot from an array A function that performs all the tedious matplotlib magic to draw a 2D array with as few parameters and as little whitespace as possible. Parameters ---------- array : array_like The input array to draw. filename : file, str or pathlib.Path The output image to save the array into. vmax : float, optional The default saturation threshold for the array. If set to None, the 80th percentile value of the array is chosen. Default is None. dpi : int, optional Dots per inch (DPI) of the output image. Default is 200.
def build(id=None, name=None, revision=None, temporary_build=False,
          timestamp_alignment=False, no_build_dependencies=False,
          keep_pod_on_failure=False, force_rebuild=False,
          rebuild_mode=common.REBUILD_MODES_DEFAULT):
    """Trigger a BuildConfiguration by name or ID and return formatted JSON."""
    data = build_raw(id, name, revision, temporary_build, timestamp_alignment,
                     no_build_dependencies, keep_pod_on_failure, force_rebuild,
                     rebuild_mode)
    return utils.format_json(data) if data else None
Trigger a BuildConfiguration by name or ID
def __populate_repositories_of_interest(self, username):
    """Collect the repositories used as the suggestion pool for ``username``.

    Gathers (1) repositories starred by the user and, when ``deep_dive``
    is enabled, (2) repositories starred by each account the user follows.
    """
    user = self.github.get_user(username)
    self.user_starred_repositories.extend(user.get_starred())
    if not self.deep_dive:
        return
    for followed in user.get_following():
        self.user_following_starred_repositories.extend(
            followed.get_starred()
        )
Method to populate repositories which will be used to suggest repositories for the user. For this purpose we use two kinds of repositories. 1. Repositories starred by user him/herself. 2. Repositories starred by the users followed by the user. :param username: Username for the user for whom repositories are being suggested for.
def estimateKronCovariances(phenos,K1r=None,K1c=None,K2r=None,K2c=None,covs=None,Acovs=None,covar_type='lowrank_diag',rank=1):
    """Estimate the background covariance model with a GP before testing.

    Adds K1r as a random effect and K2r as the noise random effect (when
    given), adds each (covs[i], Acovs[i]) pair as a fixed effect, then
    optimizes the variance decomposition and returns the fitted object.

    NOTE(review): K1c and K2c are accepted but never used below — confirm
    whether they should influence the model.
    """
    print(".. Training the backgrond covariance with a GP model")
    vc = VAR.CVarianceDecomposition(phenos)
    if K1r is not None:
        vc.addRandomEffect(K1r,covar_type=covar_type,rank=rank)
    if K2r is not None:
        # is_noise marks this term as the residual/noise covariance.
        vc.addRandomEffect(is_noise=True,K=K2r,covar_type=covar_type,rank=rank)
    # NOTE(review): the covs=None/Acovs=None defaults make len(Acovs) raise
    # TypeError; callers apparently must always pass both — verify.
    for ic in range(len(Acovs)):
        vc.addFixedEffect(covs[ic],Acovs[ic])
    start = time.time()
    conv = vc.findLocalOptimum(fast=True)
    assert conv, "CVariance Decomposition has not converged"
    time_el = time.time()-start
    print(("Background model trained in %.2f s" % time_el))
    return vc
estimates the background covariance model before testing Args: phenos: [N x P] SP.array of P phenotypes for N individuals K1r: [N x N] SP.array of LMM-covariance/kinship koefficients (optional) If not provided, then linear regression analysis is performed K1c: [P x P] SP.array of LMM-covariance/kinship koefficients (optional) If not provided, then linear regression analysis is performed K2r: [N x N] SP.array of LMM-covariance/kinship koefficients (optional) If not provided, then linear regression analysis is performed K2c: [P x P] SP.array of LMM-covariance/kinship koefficients (optional) If not provided, then linear regression analysis is performed covs: list of SP.arrays holding covariates. Each covs[i] has one corresponding Acovs[i] Acovs: list of SP.arrays holding the phenotype design matrices for covariates. Each covs[i] has one corresponding Acovs[i]. covar_type: type of covaraince to use. Default 'freeform'. possible values are 'freeform': free form optimization, 'fixed': use a fixed matrix specified in covar_K0, 'diag': optimize a diagonal matrix, 'lowrank': optimize a low rank matrix. The rank of the lowrank part is specified in the variable rank, 'lowrank_id': optimize a low rank matrix plus the weight of a constant diagonal matrix. The rank of the lowrank part is specified in the variable rank, 'lowrank_diag': optimize a low rank matrix plus a free diagonal matrix. The rank of the lowrank part is specified in the variable rank, 'block': optimize the weight of a constant P x P block matrix of ones, 'block_id': optimize the weight of a constant P x P block matrix of ones plus the weight of a constant diagonal matrix, 'block_diag': optimize the weight of a constant P x P block matrix of ones plus a free diagonal matrix, rank: rank of a possible lowrank component (default 1) Returns: CVarianceDecomposition object
def longest_run_1d(arr):
    """Return the length of the longest consecutive run of True values.

    Parameters
    ----------
    arr : bool array
        Input array.

    Returns
    -------
    int
        Length of the longest run.
    """
    values, run_lengths = rle_1d(arr)[:2]
    # Zero out run lengths of falsy values, then take the maximum.
    return np.where(values, run_lengths, 0).max()
Return the length of the longest consecutive run of identical values. Parameters ---------- arr : bool array Input array Returns ------- int Length of longest run.
def copy_contents_to(self, destination):
    """Copy the contents of this directory to ``destination``.

    Returns a Folder object that represents the destination directory.
    """
    logger.info("Copying contents of %s to %s" % (self, destination))
    destination_folder = Folder(destination)
    destination_folder.make()
    # Mirror the directory layout before copying files over.
    self._create_target_tree(destination_folder)
    dir_util.copy_tree(self.path, unicode(destination_folder))
    return destination_folder
Copies the contents of this directory to the given destination. Returns a Folder object that represents the moved directory.
def remove_label(self, label, relabel=False):
    """Remove a single label by delegating to ``remove_labels``.

    The removed label is assigned a value of zero (i.e., background).

    Parameters
    ----------
    label : int
        The label number to remove.
    relabel : bool, optional
        If `True`, relabel the segmentation image so the remaining labels
        are consecutive starting from 1.
    """
    # remove_labels handles a scalar label as well as a sequence.
    self.remove_labels(label, relabel=relabel)
Remove the label number. The removed label is assigned a value of zero (i.e., background). Parameters ---------- label : int The label number to remove. relabel : bool, optional If `True`, then the segmentation image will be relabeled such that the labels are in consecutive order starting from 1. Examples -------- >>> from photutils import SegmentationImage >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_label(label=5) >>> segm.data array([[1, 1, 0, 0, 4, 4], [0, 0, 0, 0, 0, 4], [0, 0, 3, 3, 0, 0], [7, 0, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0], [7, 7, 0, 0, 0, 0]]) >>> segm = SegmentationImage([[1, 1, 0, 0, 4, 4], ... [0, 0, 0, 0, 0, 4], ... [0, 0, 3, 3, 0, 0], ... [7, 0, 0, 0, 0, 5], ... [7, 7, 0, 5, 5, 5], ... [7, 7, 0, 0, 5, 5]]) >>> segm.remove_label(label=5, relabel=True) >>> segm.data array([[1, 1, 0, 0, 3, 3], [0, 0, 0, 0, 0, 3], [0, 0, 2, 2, 0, 0], [4, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0]])
def users(self):
    """Relationship to all users that have permissions for this resource."""
    options = dict(
        secondary="users_resources_permissions",
        passive_deletes=True,
        passive_updates=True,
    )
    return sa.orm.relationship("User", **options)
returns all users that have permissions for this resource
def queue_stats(self, queue, tags):
    """Collect queue-level metrics and submit them as gauges."""
    # Attribute-based metrics queried via queue.inquire().
    for name, pymqi_attr in iteritems(metrics.queue_metrics()):
        try:
            full_name = '{}.queue.{}'.format(self.METRIC_PREFIX, name)
            value = queue.inquire(pymqi_attr)
            self.gauge(full_name, value, tags=tags)
        except pymqi.Error as e:
            self.warning("Error getting queue stats for {}: {}".format(queue, e))
    # Function-based metrics computed from the queue object.
    for name, getter in iteritems(metrics.queue_metrics_functions()):
        try:
            full_name = '{}.queue.{}'.format(self.METRIC_PREFIX, name)
            value = getter(queue)
            self.gauge(full_name, value, tags=tags)
        except pymqi.Error as e:
            self.warning("Error getting queue stats for {}: {}".format(queue, e))
Grab stats from queues
def const_shuffle(arr, seed=23980):
    """Shuffle an array in-place with a fixed seed.

    The caller's global NumPy RNG state is preserved: the original code
    called ``np.random.seed()`` (which returns None and reseeds from
    entropy), so the "restore" never worked. ``get_state``/``set_state``
    actually save and restore the state, and ``finally`` guarantees the
    restore even if shuffle raises.

    Parameters
    ----------
    arr : ndarray
        Array shuffled in-place.
    seed : int, optional
        Fixed seed used for the shuffle (default 23980).
    """
    state = np.random.get_state()
    try:
        np.random.seed(seed)
        np.random.shuffle(arr)
    finally:
        np.random.set_state(state)
Shuffle an array in-place with a fixed seed.
def to_dict(self):
    """Return a dict of the important fields of this Session Token."""
    fields = ('access_key', 'secret_key', 'session_token',
              'expiration', 'request_id')
    return {name: getattr(self, name) for name in fields}
Return a Python dict containing the important information about this Session Token.
def add_user(self, username, email, directoryId=1, password=None, fullname=None, notify=False, active=True, ignore_existing=False, application_keys=None, ):
    """Create a new JIRA user via the REST API.

    Builds the JSON payload from the given fields and POSTs it to the
    latest user endpoint. Returns True on success; when ``ignore_existing``
    is set, an already-existing username is also treated as success.

    :raises JIRAError: on any server error other than the tolerated
        "username already exists" case.
    """
    # NOTE(review): directoryId and active are accepted but never added to
    # the payload below — confirm whether the endpoint should receive them.
    if not fullname:
        fullname = username
    url = self._options['server'] + '/rest/api/latest/user'
    # OrderedDict keeps the field order stable in the serialized payload.
    x = OrderedDict()
    x['displayName'] = fullname
    x['emailAddress'] = email
    x['name'] = username
    if password:
        x['password'] = password
    if notify:
        x['notification'] = 'True'
    if application_keys is not None:
        x['applicationKeys'] = application_keys
    payload = json.dumps(x)
    try:
        self._session.post(url, data=payload)
    except JIRAError as e:
        err = e.response.json()['errors']
        # Tolerate a duplicate-username error when the caller opted in.
        if 'username' in err and err['username'] == 'A user with that username already exists.' and ignore_existing:
            return True
        raise e
    return True
Create a new JIRA user. :param username: the username of the new user :type username: str :param email: email address of the new user :type email: str :param directoryId: The directory ID the new user should be a part of (Default: 1) :type directoryId: int :param password: Optional, the password for the new user :type password: Optional[str] :param fullname: Optional, the full name of the new user :type fullname: Optional[str] :param notify: Whether or not to send a notification to the new user. (Default: False) :type notify: bool :param active: Whether or not to make the new user active upon creation. (Default: True) :type active: bool :param ignore_existing: Whether or not to ignore an existing user. (Default: False) :type ignore_existing: bool :param application_keys: Keys of products user should have access to :type application_keys: Optional[list] :return: Whether or not the user creation was successful. :rtype: bool :raises JIRAError: If username already exists and `ignore_existing` has not been set to `True`.