Unnamed: 0
int64
0
389k
code
stringlengths
26
79.6k
docstring
stringlengths
1
46.9k
375,800
def _load_user_dn(self):
    """Populate ``self._user_dn`` with the distinguished name of our user.

    The DN either comes from the simple-bind template
    (AUTH_LDAP_USER_DN_TEMPLATE) or from an LDAP search; searched DNs
    are cached when a cache timeout is configured.
    """
    if self._using_simple_bind_mode():
        self._user_dn = self._construct_simple_user_dn()
        return
    # Search path: optionally memoize the looked-up DN in the cache.
    if self.settings.CACHE_TIMEOUT > 0:
        cache_key = valid_cache_key(
            "django_auth_ldap.user_dn.{}".format(self._username)
        )
        self._user_dn = cache.get_or_set(
            cache_key, self._search_for_user_dn, self.settings.CACHE_TIMEOUT
        )
    else:
        self._user_dn = self._search_for_user_dn()
Populates self._user_dn with the distinguished name of our user. This will either construct the DN from a template in AUTH_LDAP_USER_DN_TEMPLATE or connect to the server and search for it. If we have to search, we'll cache the DN.
375,801
def dict_of_lists_add(dictionary, key, value):
    """Append ``value`` to the list stored under ``key`` in ``dictionary``.

    The list is created on first use.

    Args:
        dictionary (dict): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to append to the list in dictionary

    Returns:
        None
    """
    # setdefault inserts an empty list on first use and returns the stored
    # list, so one dict lookup replaces the get/append/reassign dance.
    dictionary.setdefault(key, []).append(value)
Add value to a list in a dictionary by key Args: dictionary (DictUpperBound): Dictionary to which to add values key (Any): Key within dictionary value (Any): Value to add to list in dictionary Returns: None
375,802
def check(self, func=None, name=None): if func is None: return functools.partial(self.check, name=name) if name is None: name = func.__name__ self.logger.info(, name) @functools.wraps(func) def decorated_function(*args, **kwargs): self.logger.info(, name) return func(*args, **kwargs) self.checks[name] = decorated_function return decorated_function
A decorator to register a new Dockerflow check to be run when the /__heartbeat__ endpoint is called., e.g.:: from dockerflow.flask import checks @dockerflow.check def storage_reachable(): try: acme.storage.ping() except SlowConnectionException as exc: return [checks.Warning(exc.msg, id='acme.health.0002')] except StorageException as exc: return [checks.Error(exc.msg, id='acme.health.0001')] or using a custom name:: @dockerflow.check(name='acme-storage-check') def storage_reachable(): # ...
375,803
def generate_daterange(report):
    """Create a ``[begin, end]`` timestamp pair (YYYY-MM-DDTHH:MM:SS)
    from a report's metadata for easier parsing in Kibana."""
    metadata = report["report_metadata"]
    fmt = "%Y-%m-%dT%H:%M:%S"
    date_range = [
        human_timestamp_to_datetime(metadata[key]).strftime(fmt)
        for key in ("begin_date", "end_date")
    ]
    logger.debug("date_range is {}".format(date_range))
    return date_range
Creates a date_range timestamp pair with format YYYY-MM-DDTHH:MM:SS based on begin and end dates for easier parsing in Kibana. Move to utils to avoid duplication w/ elastic?
375,804
def metastable_sets(self):
    """Crisp clustering using PCCA (recommended for visualization only;
    actual coarse-grained kinetics require the fuzzy memberships).

    Returns
    -------
    list of np.ndarray
        One array per metastable state holding the microstate indexes
        assigned to it.
    """
    labels = self.metastable_assignment
    return [np.where(labels == state)[0] for state in range(self.m)]
Crisp clustering using PCCA. This is only recommended for visualization purposes. You *cannot* compute any actual quantity of the coarse-grained kinetics without employing the fuzzy memberships! Returns ------- A list of length equal to metastable states. Each element is an array with microstate indexes contained in it
375,805
async def dict(self, full):
    """Open a HiveDict at the given full path."""
    node = await self.open(full)
    hive_dict = await HiveDict.anit(self, node)
    return hive_dict
Open a HiveDict at the given full path.
375,806
def retry(self): self.state = STATE_STARTING self.loop.call_later(RETRY_TIMER, self.start) _LOGGER.debug(, RETRY_TIMER)
Retry to connect to deCONZ.
375,807
def get_histories_over_repetitions(self, exp, tags, aggregate): params = self.get_params(exp) if tags == : tags = self.get_history(exp, 0, ).keys() if not hasattr(tags, ): tags = [tags] results = {} for tag in tags: histories = zeros((params[], params[])) skipped = [] for i in range(params[]): try: histories[i, :] = self.get_history(exp, i, tag) except ValueError: h = self.get_history(exp, i, tag) if len(h) == 0: print(%(i, params[])) skipped.append(i) elif len(h) > params[]: print(%(i, len(h), params[])) h = h[:params[]] histories[i,:] = h elif len(h) < params[]: print(%(i, len(h), params[])) params[] = len(h) histories = histories[:,:params[]] histories[i, :] = h histories = delete(histories, skipped, axis=0) params[] -= len(skipped) aggregated = zeros(params[]) for i in range(params[]): aggregated[i] = aggregate(histories[:, i]) if len(tags) == 1: return aggregated else: results[tag] = aggregated return results
this function gets all histories of all repetitions using get_history() on the given tag(s), and then applies the function given by 'aggregate' to all corresponding values in each history over all iterations. Typical aggregate functions could be 'mean' or 'max'.
375,808
def parse(self): try: if not os.path.getsize(self.ns.pathname): self.job.LOG.warn("Ignoring 0-byte metafile " % (self.ns.pathname,)) return self.metadata = metafile.checked_open(self.ns.pathname) except EnvironmentError as exc: self.job.LOG.error("Can%s%s%s%s%s%s' already added to client" % (self.ns.info_hash, name)) return return True
Parse metafile and check pre-conditions.
375,809
def from_keras_log(csv_path, output_dir_path, **kwargs):
    """Plot accuracy and loss from a Keras CSV log.

    Args:
        csv_path: The path to the CSV log with the actual data.
        output_dir_path: The path to the directory where the resulting
            plots should end up.
    """
    import pandas as pd  # local import: only needed to load the CSV log
    # Bug fix: `data` was referenced but never defined; load it from the
    # CSV log that csv_path points at.
    data = pd.read_csv(csv_path)
    _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)
Plot accuracy and loss from a Keras CSV log. Args: csv_path: The path to the CSV log with the actual data. output_dir_path: The path to the directory where the resulting plots should end up.
375,810
def content_written(generator, content): url = "%s/%s" % (generator.settings.get(, ), content.url) make_posts(generator, content.metadata, url)
create a url and call make posts (which has less information)
375,811
def client_config(path, env_var=, defaults=None): /etc/salt/master if defaults is None: defaults = DEFAULT_MASTER_OPTS.copy() xdg_dir = salt.utils.xdg.xdg_config_dir() if os.path.isdir(xdg_dir): client_config_dir = xdg_dir saltrc_config_file = else: client_config_dir = os.path.expanduser() saltrc_config_file = opts = { : defaults.get( , os.path.join(client_config_dir, ) ) } if opts[] == : opts[] = if not in opts: opts[] = .format( ip=salt.utils.zeromq.ip_bracket(opts[]), port=opts[] ) _validate_opts(opts) return opts
Load Master configuration data Usage: .. code-block:: python import salt.config master_opts = salt.config.client_config('/etc/salt/master') Returns a dictionary of the Salt Master configuration file with necessary options needed to communicate with a locally-running Salt Master daemon. This function searches for client specific configurations and adds them to the data from the master configuration. This is useful for master-side operations like :py:class:`~salt.client.LocalClient`.
375,812
def find_or_create_role(self, name, **kwargs):
    """Return the role matching ``name``, creating it (with any extra
    keyword parameters) when no such role exists yet."""
    kwargs["name"] = name
    existing = self.find_role(name)
    if existing:
        return existing
    return self.create_role(**kwargs)
Returns a role matching the given name or creates it with any additionally provided parameters.
375,813
def _populate_alternate_kwargs(kwargs): resource_namespace = kwargs[] resource_type = kwargs.get(.format(kwargs[])) or kwargs[] resource_name = kwargs.get(.format(kwargs[])) or kwargs[] _get_parents_from_parts(kwargs) kwargs[] = resource_namespace kwargs[] = resource_type kwargs[] = resource_name return kwargs
Translates the parsed arguments into a format used by generic ARM commands such as the resource and lock commands.
375,814
def organizations(self, user, include=None): return self._query_zendesk(self.endpoint.organizations, , id=user, include=include)
Retrieve the organizations for this user. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id
375,815
def incoming(self, packet):
    """Callback for data received from the copter."""
    text = packet.data.decode()
    self.receivedChar.call(text)
Callback for data received from the copter.
375,816
async def info(self, token):
    """Query the policy of a given token.

    Parameters:
        token (ObjectID): Token ID
    Returns:
        ObjectMeta: where value is the token
    Raises:
        NotFound: when no token with that ID exists
    """
    token_id = extract_attr(token, keys=["ID"])
    response = await self._api.get("/v1/acl/info", token_id)
    meta = extract_meta(response.headers)
    try:
        decoded = decode_token(response.body[0])
    except IndexError:
        # empty body -> the token does not exist
        raise NotFound(response.body, meta=meta)
    return consul(decoded, meta=meta)
Queries the policy of a given token. Parameters: token (ObjectID): Token ID Returns: ObjectMeta: where value is token Raises: NotFound: It returns a body like this:: { "CreateIndex": 3, "ModifyIndex": 3, "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05", "Name": "Client Token", "Type": "client", "Rules": { "key": { "": { "policy": "read" }, "private/": { "policy": "deny" } } } }
375,817
def check_status(self, **kwargs):
    """Check the status of the works in self.

    Args:
        show: True to show the status of the flow.
        kwargs: keyword arguments passed to show_status.
    """
    for work in self:
        work.check_status()
    show = kwargs.pop("show", False)
    if show:
        self.show_status(**kwargs)
Check the status of the works in self. Args: show: True to show the status of the flow. kwargs: keyword arguments passed to show_status
375,818
def tags(self, resource_id=None): resource = self.copy() resource._request_entity = resource._request_uri = .format(resource._request_uri) if resource_id is not None: resource._request_uri = .format( resource._request_uri, self.tcex.safetag(resource_id) ) return resource
Tag endpoint for this resource with optional tag name. This method will set the resource endpoint for working with Tags. The HTTP GET method will return all tags applied to this resource or if a resource id (tag name) is provided it will return the provided tag if it has been applied, which could be useful to verify a tag is applied. The provided resource_id (tag) can be applied to this resource using the HTTP POST method. The HTTP DELETE method will remove the provided tag from this resource. **Example Endpoints URI's** +--------------+------------------------------------------------------------+ | HTTP Method | API Endpoint URI's | +==============+============================================================+ | GET | /v2/groups/{resourceType}/{uniqueId}/tags | +--------------+------------------------------------------------------------+ | GET | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId} | +--------------+------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{uniqueId}/tags | +--------------+------------------------------------------------------------+ | GET | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId} | +--------------+------------------------------------------------------------+ | DELETE | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId} | +--------------+------------------------------------------------------------+ | DELETE | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId} | +--------------+------------------------------------------------------------+ | POST | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId} | +--------------+------------------------------------------------------------+ | POST | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId} | +--------------+------------------------------------------------------------+ Args: resource_id (Optional [string]): The resource id (tag name).
375,819
def local_services(self):
    """Get a list of (id, name) pairs for all known synced services.

    Safe to call from outside the background event loop: a thread-safe
    mutex protects the local copies of supervisor data while iterating.

    Returns:
        list of (id, name) tuples sorted by id from low to high.
    """
    # Decide once whether we need the lock.  The original queried
    # inside_loop() again in the finally clause; if the answer changed
    # between the two calls the lock was either never released or
    # released without having been acquired.
    needs_lock = not self._loop.inside_loop()
    if needs_lock:
        self._state_lock.acquire()
    try:
        return sorted(self._name_map.items(), key=lambda element: element[0])
    finally:
        if needs_lock:
            self._state_lock.release()
Get a list of id, name pairs for all of the known synced services. This method is safe to call outside of the background event loop without any race condition. Internally it uses a thread-safe mutex to protect the local copies of supervisor data and ensure that it cannot change while this method is iterating over it. Returns: list (id, name): A list of tuples with id and service name sorted by id from low to high
375,820
def check(projects): log = logging.getLogger() log.info(.format(len(projects))) print() blockers = dependencies.blockers(projects) print() for line in message(blockers): print(line) print() for line in pprint_blockers(blockers): print(, line) return len(blockers) == 0
Check the specified projects for Python 3 compatibility.
375,821
def data_from_file(self, file, apple_fix=False):
    """Read iCal data from file.

    :param file: file to read
    :param apple_fix: fix wrong Apple tzdata in iCal
    :return: decoded (and fixed) iCal data
    :raises IOError: if the file is empty or unreadable
    """
    with open(file, mode="r") as f:
        content = f.read()
    if not content:
        # Bug fix: the message used %f (a float conversion) for the file
        # name, which raises TypeError instead of reporting the problem.
        raise IOError("File %s is not readable or is empty!" % file)
    return self.decode(content, apple_fix=apple_fix)
Read iCal data from file. :param file: file to read :param apple_fix: fix wrong Apple tzdata in iCal :return: decoded (and fixed) iCal data
375,822
def unwrap(self, dt):
    """Get the cached value.

    Returns
    -------
    value : object
        The cached value.

    Raises
    ------
    Expired
        Raised when `dt` is greater than self.expires.
    """
    expires = self._expires
    # De Morgan of the original check: still fresh only when the entry
    # is not marked always-expired and has not passed `dt`.
    if expires is not AlwaysExpired and not (expires < dt):
        return self._value
    raise Expired(self._expires)
Get the cached value. Returns ------- value : object The cached value. Raises ------ Expired Raised when `dt` is greater than self.expires.
375,823
def receive_message(self, msg): _LOGGER.debug() if hasattr(msg, ) and msg.isack: _LOGGER.debug() if self._send_msg_lock.locked(): self._send_msg_lock.release() callbacks = self._message_callbacks.get_callbacks_from_message(msg) _LOGGER.debug(, len(callbacks), msg) for callback in callbacks: _LOGGER.debug(, callback) self._plm.loop.call_soon(callback, msg) self._last_communication_received = datetime.datetime.now() _LOGGER.debug()
Receive a message sent to this device.
375,824
def good(txt): print("%s sys.stdout.flush()
Print, emphasized 'good', the given 'txt' message
375,825
def fitted(self, fid=0):
    """Test whether enough Levenberg-Marquardt loops have been done.

    Returns True when no further improvement is possible.

    :param fid: the id of the sub-fitter (numerical)
    """
    self._checkid(fid)
    fit = self._fitids[fid]["fit"]
    # Converged when the fit indicator sits in the (-0.001, 0] band.
    return -0.001 <= fit <= 0
Test if enough Levenberg-Marquardt loops have been done. It returns True if no improvement possible. :param fid: the id of the sub-fitter (numerical)
375,826
def update_policy(self,defaultHeaders): if self.inputs is not None: for k,v in defaultHeaders.items(): if k not in self.inputs: self.inputs[k] = v if k == : self.inputs[k] = self.inputs[k] + defaultHeaders[k] return self.inputs else: return self.inputs
rewrite update policy so that additional pins are added and not overwritten
375,827
def copyh5(inh5, outh5):
    """Recursively copy all hdf5 data from one group to another.

    Data from links is copied.

    Parameters
    ----------
    inh5: str, h5py.File, or h5py.Group
        The input hdf5 data. Either a file name or an hdf5 object.
    outh5: str, h5py.File, h5py.Group, or None
        The output hdf5 data. Either a file name or an hdf5 object.
        If set to `None`, a new hdf5 object is created in memory.

    Notes
    -----
    All data in outh5 are overridden by the inh5 data.
    """
    if not isinstance(inh5, h5py.Group):
        # a file name was given; open it read-only
        inh5 = h5py.File(inh5, mode="r")
    if outh5 is None:
        # create a new in-memory file (core driver, no backing store)
        h5kwargs = {"name": "qpimage{}.h5".format(QPImage._instances),
                    "driver": "core",
                    "backing_store": False,
                    "mode": "a"}
        outh5 = h5py.File(**h5kwargs)
        return_h5obj = True
        QPImage._instances += 1
    elif not isinstance(outh5, h5py.Group):
        # output file name given: open for writing; the caller gets the
        # file name back rather than the (closed) object
        outh5 = h5py.File(outh5, mode="w")
        return_h5obj = False
    else:
        return_h5obj = True
    for key in inh5:
        # inh5 data override anything already present in outh5
        if key in outh5:
            del outh5[key]
        if isinstance(inh5[key], h5py.Group):
            # recurse into sub-groups
            outh5.create_group(key)
            copyh5(inh5[key], outh5[key])
        else:
            # datasets are copied with their attributes
            dset = write_image_dataset(group=outh5,
                                       key=key,
                                       data=inh5[key][:],
                                       h5dtype=inh5[key].dtype)
            dset.attrs.update(inh5[key].attrs)
    outh5.attrs.update(inh5.attrs)
    if return_h5obj:
        return outh5
    else:
        # we opened this file ourselves: flush, close, return its path
        fn = outh5.filename
        outh5.flush()
        outh5.close()
        return fn
Recursively copy all hdf5 data from one group to another Data from links is copied. Parameters ---------- inh5: str, h5py.File, or h5py.Group The input hdf5 data. This can be either a file name or an hdf5 object. outh5: str, h5py.File, h5py.Group, or None The output hdf5 data. This can be either a file name or an hdf5 object. If set to `None`, a new hdf5 object is created in memory. Notes ----- All data in outh5 are overridden by the inh5 data.
375,828
def issue_type_by_name(self, name):
    """Look up an issue type by its name.

    :param name: Name of the issue type
    :type name: str
    :rtype: IssueType
    :raises KeyError: if no issue type with that name exists
    """
    issue_types = self.issue_types()
    try:
        issue_type = [it for it in issue_types if it.name == name][0]
    except IndexError:
        # Bug fix: the original message had no %s/%r placeholder, so the
        # formatting itself raised TypeError instead of KeyError.
        raise KeyError("Issue type %r is unknown." % name)
    return issue_type
:param name: Name of the issue type :type name: str :rtype: IssueType
375,829
def add_waveform(self, waveform):
    """Add a waveform to the plot.

    :param waveform: the waveform to be added
    :type waveform: :class:`~aeneas.plotter.PlotWaveform`
    :raises TypeError: if ``waveform`` is not a PlotWaveform instance
    """
    if isinstance(waveform, PlotWaveform):
        self.waveform = waveform
        self.log(u"Added waveform")
    else:
        self.log_exc(u"waveform must be an instance of PlotWaveform", None, True, TypeError)
Add a waveform to the plot. :param waveform: the waveform to be added :type waveform: :class:`~aeneas.plotter.PlotWaveform` :raises: TypeError: if ``waveform`` is not an instance of :class:`~aeneas.plotter.PlotWaveform`
375,830
def get_value(self, key): for title in _TITLES.get(key, ()) + (key,): try: value = [entry[][] for entry in self.data[] if entry[] == title][0] return value except IndexError: pass return None
Extract a value for a given key.
375,831
def tip_zscores(a):
    """Calculate the "target identification from profiles" (TIP) zscores
    from Cheng et al., Bioinformatics 27(23):3221-3227.

    :param a: NumPy array, where each row is the signal for a feature.
    """
    col_means = a.mean(axis=0)
    scores = (a * col_means).sum(axis=1)
    return (scores - scores.mean()) / scores.std()
Calculates the "target identification from profiles" (TIP) zscores from Cheng et al. 2001, Bioinformatics 27(23):3221-3227. :param a: NumPy array, where each row is the signal for a feature.
375,832
def mbar_W_nk(u_kn, N_k, f_k):
    """Calculate the MBAR weight matrix (Equation 9 in the JCP MBAR paper).

    Parameters
    ----------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
    N_k : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state
    f_k : np.ndarray, shape=(n_states), dtype='float'
        The reduced free energies of each state

    Returns
    -------
    W_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
        The normalized weights.
    """
    log_w_nk = mbar_log_W_nk(u_kn, N_k, f_k)
    return np.exp(log_w_nk)
Calculate the weight matrix. Parameters ---------- u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float' The reduced potential energies, i.e. -log unnormalized probabilities N_k : np.ndarray, shape=(n_states), dtype='int' The number of samples in each state f_k : np.ndarray, shape=(n_states), dtype='float' The reduced free energies of each state Returns ------- W_nk : np.ndarray, dtype='float', shape=(n_samples, n_states) The normalized weights. Notes ----- Equation (9) in JCP MBAR paper.
375,833
def current_line_num(self): if self.is_optimized_out(): return None f_trace = self.field() if long(f_trace) != 0: return self.f_lineno else: return self.co.addr2line(self.f_lasti)
Get current line number as an integer (1-based) Translated from PyFrame_GetLineNumber and PyCode_Addr2Line See Objects/lnotab_notes.txt
375,834
def run_object_query(client, base_object_query, start_record, limit_to, verbose=False):
    """Inline helper to take advantage of retry: run one paged object
    query, optionally printing timing information."""
    if verbose:
        print("[start: %d limit: %d]" % (start_record, limit_to))
    started_at = datetime.datetime.now()
    result = client.execute_object_query(
        object_query=base_object_query,
        start_record=start_record,
        limit_to=limit_to)
    finished_at = datetime.datetime.now()
    if verbose:
        print("[%s - %s]" % (started_at, finished_at))
    return result
inline method to take advantage of retry
375,835
def _raise_missing_antenna_errors(ant_uvw, max_err): problems = np.nonzero(np.add.reduce(np.isnan(ant_uvw), axis=2)) problem_str = [] for c, a in zip(*problems): problem_str.append("[chunk %d antenna %d]" % (c, a)) if len(problem_str) >= max_err: break if len(problem_str) == 0: return problem_str = ["Antenna were missing"] + problem_str raise AntennaMissingError(.join(problem_str))
Raises an informative error for missing antenna
375,836
def time_logger(name):
    """Log the wall-clock time spent in a code block (generator-based
    context helper: the wrapped block runs at the ``yield``)."""
    started = time.time()
    yield
    elapsed = time.time() - started
    logging.info("%s; time: %ss", name, elapsed)
This logs the time usage of a code block
375,837
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
                allow_missing=False, force_init=False, allow_extra=False):
    """Initializes parameters by delegating to the current module.

    Parameters
    ----------
    initializer : Initializer
    arg_params : dict
        Defaults to ``None``. Existing parameters. Higher priority than
        `initializer`.
    aux_params : dict
        Defaults to ``None``. Existing auxiliary states. Higher priority
        than `initializer`.
    allow_missing : bool
        Allow missing values in `arg_params` and `aux_params`; missing
        values are filled with `initializer`.
    force_init : bool
        Defaults to ``False``. Re-initialize even if already initialized.
    allow_extra : boolean, optional
        Whether to allow extra parameters not needed by the symbol.
    """
    if self.params_initialized and not force_init:
        return
    # Bug fix: the extraction-collapsed source fused the delegate call
    # into the assert as its message, so it only ran when the assertion
    # FAILED.  Assert and delegate must be separate statements.
    assert self.binded, "call bind before initializing the parameters"
    self._curr_module.init_params(initializer=initializer,
                                  arg_params=arg_params,
                                  aux_params=aux_params,
                                  allow_missing=allow_missing,
                                  force_init=force_init,
                                  allow_extra=allow_extra)
    self._params_dirty = False
    self.params_initialized = True
Initializes parameters. Parameters ---------- initializer : Initializer arg_params : dict Defaults to ``None``. Existing parameters. This has higher priority than `initializer`. aux_params : dict Defaults to ``None``. Existing auxiliary states. This has higher priority than `initializer`. allow_missing : bool Allow missing values in `arg_params` and `aux_params` (if not ``None``). In this case, missing values will be filled with `initializer`. force_init : bool Defaults to ``False``. allow_extra : boolean, optional Whether allow extra parameters that are not needed by symbol. If this is True, no error will be thrown when arg_params or aux_params contain extra parameters that is not needed by the executor.
375,838
def add_to_configs(self, configs):
    """Add one or more measurement configurations to the stored ones.

    Parameters
    ----------
    configs: list or numpy.ndarray
        list or array of configurations

    Returns
    -------
    configs: Kx4 numpy.ndarray
        array holding all configurations of this instance (None when
        nothing was added)
    """
    if len(configs) == 0:
        return None
    new_rows = np.atleast_2d(configs)
    if self.configs is None:
        self.configs = new_rows
    else:
        self.configs = np.vstack((self.configs, new_rows))
    return self.configs
Add one or more measurement configurations to the stored configurations Parameters ---------- configs: list or numpy.ndarray list or array of configurations Returns ------- configs: Kx4 numpy.ndarray array holding all configurations of this instance
375,839
def weight_statistics(self): all_weights = [d.get(, None) for u, v, d in self.graph.edges(data=True)] stats = describe(all_weights, nan_policy=) return { : all_weights, : stats.minmax[0], : stats.minmax[1], : stats.mean, : stats.variance }
Extract a statistical summary of edge weights present in the graph. :return: A dict with an 'all_weights' list, 'minimum', 'maximum', 'median', 'mean', 'std_dev'
375,840
def purge_module(self, module_name):
    """Remove a module (e.g. one that had an error) from any containers
    that reference it."""
    groups = self.config["py3_config"][".module_groups"]
    if module_name not in groups:
        return
    for container in set(groups[module_name]):
        try:
            self.modules[container].module_class.items.remove(module_name)
        except ValueError:
            # the container no longer lists this module -- nothing to do
            pass
A module has been removed e.g. a module that had an error. We need to find any containers and remove the module from them.
375,841
def generate(self, z_mu=None):
    """Generate data by sampling from latent space.

    If ``z_mu`` is None it is drawn from the prior; otherwise data for
    that point in latent space is generated.
    """
    if z_mu is None:
        z_mu = np.random.normal(size=self.network_architecture["n_z"])
    feed = {self.z: z_mu}
    return self.sess.run(self.x_reconstr_mean, feed_dict=feed)
Generate data by sampling from latent space. If z_mu is not None, data for this point in latent space is generated. Otherwise, z_mu is drawn from prior in latent space.
375,842
def choi_matrix(pauli_tm, basis):
    """Compute the Choi matrix for a quantum process from its Pauli
    Transfer Matrix.

    Agrees with Chow et al. (doi:10.1103/PhysRevLett.109.060501) up to an
    overall normalization; our normalization matches qutip's.

    :param numpy.ndarray pauli_tm: The Pauli Transfer Matrix as 2d-array.
    :param OperatorBasis basis: The operator basis, typically products of
        normalized Paulis.
    :return: The Choi matrix as qutip.Qobj.
    :rtype: qutip.Qobj
    """
    if not basis.is_orthonormal():
        raise ValueError("Need an orthonormal operator basis.")
    if not all(is_hermitian(op) for op in basis.ops):
        raise ValueError("Need an operator basis of hermitian operators.")
    sbasis = basis.super_basis()
    D = basis.dim
    terms = (pauli_tm[jj, kk] * sbasis.ops[jj + kk * D]
             for jj in range(D)
             for kk in range(D))
    choi = sum(terms)
    choi.superrep = CHOI
    return choi
Compute the Choi matrix for a quantum process from its Pauli Transfer Matrix. This agrees with the definition in `Chow et al. <https://doi.org/10.1103/PhysRevLett.109.060501>`_ except for a different overall normalization. Our normalization agrees with that of qutip. :param numpy.ndarray pauli_tm: The Pauli Transfer Matrix as 2d-array. :param OperatorBasis basis: The operator basis, typically products of normalized Paulis. :return: The Choi matrix as qutip.Qobj. :rtype: qutip.Qobj
375,843
def validate(self, data):
    """Validate ``data`` against the configured regex.

    :param data: data to be validated
    :return: the validated data
    :raises SchemaError: when the regex does not match or ``data`` is
        not string-like
    """
    err = self._error
    try:
        if not self._pattern.search(data):
            raise SchemaError("%r does not match %r" % (self, data), err)
        return data
    except TypeError:
        # search() rejects non string/buffer inputs with TypeError
        raise SchemaError("%r is not string nor buffer" % data, err)
Validated data using defined regex. :param data: data to be validated :return: return validated data.
375,844
def get_dataframe_from_data(data):
    """Return the long-format dataframe for the choice model.

    Parameters
    ----------
    data : string or pandas dataframe.
        If string, an absolute or relative path to a CSV file containing
        the long format data for this choice model (one row per available
        alternative per observation). If pandas dataframe, the long
        format data itself.

    Returns
    -------
    dataframe : pandas dataframe of the long format data for the choice
        model.
    """
    if isinstance(data, pd.DataFrame):
        return data
    if isinstance(data, str):
        if data.endswith(".csv"):
            return pd.read_csv(data)
        raise ValueError("data = {} is of unknown file type.".format(data)
                         + " Please pass path to csv.")
    raise TypeError("type(data) = {} is an invalid type.".format(type(data))
                    + " Please pass pandas dataframe or path to csv.")
Parameters ---------- data : string or pandas dataframe. If string, data should be an absolute or relative path to a CSV file containing the long format data for this choice model. Note long format has one row per available alternative for each observation. If pandas dataframe, the dataframe should be the long format data for the choice model. Returns ------- dataframe : pandas dataframe of the long format data for the choice model.
375,845
def _onArgument(self, name, annotation):
    """Memorize a function argument on the object currently being parsed."""
    current = self.objectsStack[-1]
    current.arguments.append(Argument(name, annotation))
Memorizes a function argument
375,846
def update_state(self, slots: Union[List[Tuple[str, Any]], Dict[str, Any]]) -> : pass
Updates dialogue state with new ``slots``, calculates features. Returns: Tracker: .
375,847
def set_data(self, data=None, **kwargs):
    """Set the line data.

    Parameters
    ----------
    data : array-like
        The data.
    **kwargs : dict
        Keyword arguments to pass to MarkerVisual and LineVisual.
    """
    if data is None:
        pos = None
    else:
        if isinstance(data, tuple):
            # tuple of coordinate arrays -> stack into an (N, ndim) array
            pos = np.array(data).T.astype(np.float32)
        else:
            pos = np.atleast_1d(data).astype(np.float32)
        if pos.ndim == 1:
            # a flat array is treated as a single column of values
            pos = pos[:, np.newaxis]
        elif pos.ndim > 2:
            raise ValueError()
        if pos.size == 0:
            # no points supplied: keep the current line positions, but
            # then at least one property kwarg must be present
            pos = self._line.pos
            if len(kwargs) == 0:
                raise TypeError("neither line points nor line properties"
                                "are provided")
        elif pos.shape[1] == 1:
            # single-column data: synthesize x coordinates 0..N-1
            x = np.arange(pos.shape[0], dtype=np.float32)[:, np.newaxis]
            pos = np.concatenate((x, pos), axis=1)
        elif pos.shape[1] > 3:
            raise TypeError("Too many coordinates given (%s; max is 3)."
                            % pos.shape[1])
    # route recognized kwargs to the line visual
    line_kwargs = {}
    for k in self._line_kwargs:
        if k in kwargs:
            k_ = self._kw_trans[k] if k in self._kw_trans else k
            # NOTE(review): pops the translated key but stores under the
            # original key -- the mirror image of the marker loop below;
            # confirm this asymmetry is intentional.
            line_kwargs[k] = kwargs.pop(k_)
    if pos is not None or len(line_kwargs) > 0:
        self._line.set_data(pos=pos, **line_kwargs)
    # route recognized kwargs to the marker visual
    marker_kwargs = {}
    for k in self._marker_kwargs:
        if k in kwargs:
            k_ = self._kw_trans[k] if k in self._kw_trans else k
            marker_kwargs[k_] = kwargs.pop(k)
    if pos is not None or len(marker_kwargs) > 0:
        self._markers.set_data(pos=pos, **marker_kwargs)
    if len(kwargs) > 0:
        # anything left over was recognized by neither visual
        raise TypeError("Invalid keyword arguments: %s" % kwargs.keys())
Set the line data Parameters ---------- data : array-like The data. **kwargs : dict Keywoard arguments to pass to MarkerVisual and LineVisal.
375,848
def _read(self, command, future): response = self._reader.gets() if response is not False: if isinstance(response, hiredis.ReplyError): if response.args[0].startswith(): self._on_cluster_data_moved(response.args[0], command, future) elif response.args[0].startswith(): self._on_read_only_error(command, future) else: future.set_exception(exceptions.RedisError(response)) elif command.callback is not None: future.set_result(command.callback(response)) elif command.expectation is not None: self._eval_expectation(command, response, future) else: future.set_result(response) else: def on_data(data): self._reader.feed(data) self._read(command, future) command.connection.read(on_data)
Invoked when a command is executed to read and parse its results. It will loop on the IOLoop until the response is complete and then set the value of the response in the execution future. :param command: The command that was being executed :type command: tredis.client.Command :param future: The execution future :type future: tornado.concurrent.Future
375,849
def _try_assign_utc_time(self, raw_time, time_base):
    """Try to assign a UTC time to this reading.

    A raw time with bit 31 set encodes seconds since the Y2K reference;
    otherwise ``time_base`` (if given) anchors the raw seconds offset.
    Returns None when no UTC time can be derived.
    """
    if raw_time != IOTileEvent.InvalidRawTime and (raw_time & (1 << 31)):
        # Bug fix: the offset must come from the raw_time argument, not
        # self.raw_time, which may differ from the value being converted.
        y2k_offset = raw_time ^ (1 << 31)
        return self._Y2KReference + datetime.timedelta(seconds=y2k_offset)
    if time_base is not None:
        return time_base + datetime.timedelta(seconds=raw_time)
    return None
Try to assign a UTC time to this reading.
375,850
def set_obs_angle(self, theta_rad):
    """Set the observer angle relative to the field.

    **Call signature**

    *theta_rad*
      The angle between the ray path and the local magnetic field,
      in radians.

    Returns *self* for convenience in chaining.
    """
    theta_deg = theta_rad * 180 / np.pi  # stored value is in degrees
    self.in_vals[IN_VAL_THETA] = theta_deg
    return self
Set the observer angle relative to the field. **Call signature** *theta_rad* The angle between the ray path and the local magnetic field, in radians. Returns *self* for convenience in chaining.
375,851
def _get_last_worker_died(self): for service_id in list(self._running_services.keys()): processes = list(self._running_services[service_id].items()) for process, worker_id in processes: if not process.is_alive(): self._run_hooks(, service_id, worker_id, process.exitcode) if process.exitcode < 0: sig = _utils.signal_to_name(process.exitcode) LOG.info(, dict(pid=process.pid, sig=sig)) else: LOG.info(, dict(pid=process.pid, code=process.exitcode)) del self._running_services[service_id][process] return service_id, worker_id
Return the last died worker information or None
375,852
def match_date(self, value, strict=False): value = stringify(value) try: parse(value) except Exception: self.shout(, strict, value)
if value is a date
375,853
def get_symlink_luid():
    """Get the LUID for the SeCreateSymbolicLinkPrivilege."""
    symlink_luid = privilege.LUID()
    res = privilege.LookupPrivilegeValue(
        None, "SeCreateSymbolicLinkPrivilege", symlink_luid)
    if res <= 0:
        raise RuntimeError("Couldn't lookup privilege value")
    return symlink_luid
Get the LUID for the SeCreateSymbolicLinkPrivilege
375,854
def safe_type(self, data, tree): if not isinstance(data, list): name = self.__class__.__name__ msg = "did not pass validation against callable: %s" % name reason = % safe_repr(data) raise Invalid(self.schema, tree, reason=reason, pair=, msg=msg)
Make sure that the incoming data complies with the class type we are expecting it to be. In this case, classes that inherit from this base class expect data to be of type ``list``.
375,855
def lstlti(x, n, array):
    """Given a number x and an array of non-decreasing ints, find the
    index of the largest array element less than x.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlti_c.html

    :param x: Value to search against
    :type x: int
    :param n: Number of elements in array
    :type n: int
    :param array: Array of possible lower bounds
    :type array: list
    :return: index of the last element of array that is less than x.
    :rtype: int
    """
    c_array = stypes.toIntVector(array)
    c_x = ctypes.c_int(x)
    c_n = ctypes.c_int(n)
    return libspice.lstlti_c(c_x, c_n, c_array)
Given a number x and an array of non-decreasing int, find the index of the largest array element less than x. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlti_c.html :param x: Value to search against :type x: int :param n: Number elements in array :type n: int :param array: Array of possible lower bounds :type array: list :return: index of the last element of array that is less than x. :rtype: int
375,856
def runningMedian(seq, M):
    """Compute medians over a sliding window of size M moved across seq.

    seq -- iterable of items for which running medians are computed
    M   -- window size (integer > 1)

    Returns a list of N - M + 1 medians.  For even M the two central
    window elements are averaged (not a strict median by definition).
    """
    it = iter(seq)
    mid = M // 2
    # window_sorted holds the window's values kept sorted; window keeps
    # them in arrival order so the oldest can be evicted.
    window_sorted = list(islice(it, M))
    window = deque(window_sorted)

    def current_median():
        if M & 1:
            return window_sorted[mid]
        return (window_sorted[mid - 1] + window_sorted[mid]) * 0.5

    window_sorted.sort()
    medians = [current_median()]
    for item in it:
        oldest = window.popleft()
        window.append(item)
        del window_sorted[bisect_left(window_sorted, oldest)]
        insort(window_sorted, item)
        medians.append(current_median())
    return medians
Purpose: Find the median for the points in a sliding window (odd number in size) as it is moved from left to right by one point at a time. Inputs: seq -- list containing items for which a running median (in a sliding window) is to be calculated M -- number of items in window (window size) -- must be an integer > 1 Otputs: medians -- list of medians with size N - M + 1 Note: 1. The median of a finite list of numbers is the "center" value when this list is sorted in ascending order. 2. If M is an even number the two elements in the window that are close to the center are averaged to give the median (this is not by definition)
375,857
def update_checkplot_objectinfo(cpf, fast_mode=False, findercmap=, finderconvolve=None, deredden_object=True, custom_bandpasses=None, gaia_submit_timeout=10.0, gaia_submit_tries=3, gaia_max_timeout=180.0, gaia_mirror=None, complete_query_later=True, lclistpkl=None, nbrradiusarcsec=60.0, maxnumneighbors=5, plotdpi=100, findercachedir=, verbose=True): s objectinfo dict. custom_bandpasses : dict This is a dict used to provide custom bandpass definitions for any magnitude measurements in the objectinfo dict that are not automatically recognized by the `varclass.starfeatures.color_features` function. See its docstring for details on the required format. gaia_submit_timeout : float Sets the timeout in seconds to use when submitting a request to look up the objects information. If `fast_mode` is set, this is ignored, and the services will be contacted only once (meaning that a failure to respond will be silently ignored and no GAIA data will be added to the checkplots information. Note that if `fast_mode` is set, this is ignored. gaia_mirror : str This sets the GAIA mirror to use. This is a key in the :py:data:`astrobase.services.gaia.GAIA_URLS` dict which defines the URLs to hit for each mirror. complete_query_later : bool If this is True, saves the state of GAIA queries that are not yet complete when `gaia_max_timeout` is reached while waiting for the GAIA service to respond to our request. 
A later call for GAIA info on the same object will attempt to pick up the results from the existing query if it cpd = _read_checkplot_picklefile(cpf) if cpd[][] is not None: objecttags = cpd[][][::] else: objecttags = None varinfo = deepcopy(cpd[]) if in cpd and cpd[] is not None: comments = cpd[][::] else: comments = None newcpd = _pkl_finder_objectinfo(cpd[], varinfo, findercmap, finderconvolve, cpd[], cpd[], cpd[], fast_mode=fast_mode, deredden_object=deredden_object, custom_bandpasses=custom_bandpasses, gaia_submit_timeout=gaia_submit_timeout, gaia_submit_tries=gaia_submit_tries, gaia_max_timeout=gaia_max_timeout, gaia_mirror=gaia_mirror, complete_query_later=complete_query_later, lclistpkl=lclistpkl, nbrradiusarcsec=nbrradiusarcsec, maxnumneighbors=maxnumneighbors, plotdpi=plotdpi, findercachedir=findercachedir, verbose=verbose) if (( in newcpd[][] or ( in newcpd[] and newcpd[][] is None)) and in cpd[][]): newcpd[][] = deepcopy( cpd[][] ) if in cpd[]: newcpd[][] = deepcopy( cpd[][] ) newcpd[][] = deepcopy( cpd[][] ) newcpd[][] = deepcopy( cpd[][] ) newcpd[][] = deepcopy( cpd[][] ) newcpd[][] = deepcopy( cpd[][] ) newcpd[][] = deepcopy( cpd[][] ) newcpd[][] = deepcopy( cpd[][] ) newcpd[][] = deepcopy( cpd[][] ) newcpd[][] = deepcopy( cpd[][] ) if (not np.isfinite(newcpd[][]) and np.isfinite(cpd[][])): newcpd[][] = deepcopy( cpd[][] ) if (not np.isfinite(newcpd[][]) and np.isfinite(cpd[][])): newcpd[][] = deepcopy( cpd[][] ) if (not np.isfinite(newcpd[][]) and np.isfinite(cpd[][])): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not 
None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) if (newcpd[][] is None and cpd[][] is not None): newcpd[][] = deepcopy( cpd[][] ) newcpf = _write_checkplot_picklefile(cpd, outfile=cpf) return newcpf
This updates a checkplot objectinfo dict. Useful in cases where a previous round of GAIA/finderchart/external catalog acquisition failed. This will preserve the following keys in the checkplot if they exist:: comments varinfo objectinfo.objecttags Parameters ---------- cpf : str The path to the checkplot pickle to update. fast_mode : bool or float This runs the external catalog operations in a "fast" mode, with short timeouts and not trying to hit external catalogs that take a long time to respond. See the docstring for :py:func:`astrobase.checkplot.pkl_utils._pkl_finder_objectinfo` for details on how this works. If this is True, will run in "fast" mode with default timeouts (5 seconds in most cases). If this is a float, will run in "fast" mode with the provided timeout value in seconds. findercmap : str or matplotlib.cm.ColorMap object The Colormap object to use for the finder chart image. finderconvolve : astropy.convolution.Kernel object or None If not None, the Kernel object to use for convolving the finder image. deredden_objects : bool If this is True, will use the 2MASS DUST service to get extinction coefficients in various bands, and then try to deredden the magnitudes and colors of the object already present in the checkplot's objectinfo dict. custom_bandpasses : dict This is a dict used to provide custom bandpass definitions for any magnitude measurements in the objectinfo dict that are not automatically recognized by the `varclass.starfeatures.color_features` function. See its docstring for details on the required format. gaia_submit_timeout : float Sets the timeout in seconds to use when submitting a request to look up the object's information to the GAIA service. Note that if `fast_mode` is set, this is ignored. gaia_submit_tries : int Sets the maximum number of times the GAIA services will be contacted to obtain this object's information. 
If `fast_mode` is set, this is ignored, and the services will be contacted only once (meaning that a failure to respond will be silently ignored and no GAIA data will be added to the checkplot's objectinfo dict). gaia_max_timeout : float Sets the timeout in seconds to use when waiting for the GAIA service to respond to our request for the object's information. Note that if `fast_mode` is set, this is ignored. gaia_mirror : str This sets the GAIA mirror to use. This is a key in the :py:data:`astrobase.services.gaia.GAIA_URLS` dict which defines the URLs to hit for each mirror. complete_query_later : bool If this is True, saves the state of GAIA queries that are not yet complete when `gaia_max_timeout` is reached while waiting for the GAIA service to respond to our request. A later call for GAIA info on the same object will attempt to pick up the results from the existing query if it's completed. If `fast_mode` is True, this is ignored. lclistpkl : dict or str If this is provided, must be a dict resulting from reading a catalog produced by the `lcproc.catalogs.make_lclist` function or a str path pointing to the pickle file produced by that function. This catalog is used to find neighbors of the current object in the current light curve collection. Looking at neighbors of the object within the radius specified by `nbrradiusarcsec` is useful for light curves produced by instruments that have a large pixel scale, so are susceptible to blending of variability and potential confusion of neighbor variability with that of the actual object being looked at. If this is None, no neighbor lookups will be performed. nbrradiusarcsec : float The radius in arcseconds to use for a search conducted around the coordinates of this object to look for any potential confusion and blending of variability amplitude caused by their proximity. 
maxnumneighbors : int The maximum number of neighbors that will have their light curves and magnitudes noted in this checkplot as potential blends with the target object. plotdpi : int The resolution in DPI of the plots to generate in this function (e.g. the finder chart, etc.) findercachedir : str The path to the astrobase cache directory for finder chart downloads from the NASA SkyView service. verbose : bool If True, will indicate progress and warn about potential problems. Returns ------- str Path to the updated checkplot pickle file.
375,858
def extract_links(bs4): unique_links = list(set([anchor[] for anchor in bs4.select() if anchor.has_attr()])) unique_links = [link for link in unique_links if link != ] return [convert_invalid_url(link) for link in unique_links]
Extracting links from BeautifulSoup object :param bs4: `BeautifulSoup` :return: `list` List of links
375,859
def filter_none(list_of_points): remove_elementnone = filter(lambda p: p is not None, list_of_points) remove_sublistnone = filter(lambda p: not contains_none(p), remove_elementnone) return list(remove_sublistnone)
:param list_of_points: :return: list_of_points with None's removed
375,860
def formatmany(self, sql, many_params): if isinstance(sql, unicode): string_type = unicode elif isinstance(sql, bytes): string_type = bytes sql = sql.decode(_BYTES_ENCODING) else: raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql)) if not isinstance(many_params, collections.Iterable) or isinstance(many_params, (unicode, bytes)): raise TypeError("many_params:{!r} is not iterable.".format(many_params)) names = self.match.findall(sql) name_set = set(names) many_ord_params = [] name_to_ords = {} name_to_len = {} repl_str = self.replace repl_tuple = (repl_str,) for i, params in enumerate(many_params): if self.named == : if isinstance(params, collections.Mapping): params = {string_type(idx): val for idx, val in iteritems(params)} elif isinstance(params, collections.Sequence) and not isinstance(params, (unicode, bytes)): params = {string_type(idx): val for idx, val in enumerate(params, 1)} if not isinstance(params, collections.Mapping): raise TypeError("many_params[{}]:{!r} is not a dict.".format(i, params)) if not i: for name in name_set: value = params[name] if isinstance(value, tuple): tuple_len = len(value) name_to_ords[name] = + .join(repl_tuple * tuple_len) + name_to_len[name] = tuple_len else: name_to_ords[name] = repl_str name_to_len[name] = None ord_params = [] for name in names: value = params[name] tuple_len = name_to_len[name] if tuple_len is not None: if not isinstance(value, tuple): raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, name, value)) elif len(value) != tuple_len: raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, name, value, tuple_len)) ord_params.extend(value) else: ord_params.append(value) many_ord_params.append(ord_params) sql = self.match.sub(lambda m: name_to_ords[m.group(1)], sql) if string_type is bytes: sql = sql.encode(_BYTES_ENCODING) return sql, many_ord_params
Formats the SQL query to use ordinal parameters instead of named parameters. *sql* (|string|) is the SQL query. *many_params* (|iterable|) contains each *params* to format. - *params* (|dict|) maps each named parameter (|string|) to value (|object|). If |self.named| is "numeric", then *params* can be simply a |sequence| of values mapped by index. Returns a 2-|tuple| containing: the formatted SQL query (|string|), and a |list| containing each ordinal parameters (|list|).
375,861
def register_hooks(self, field): for hook, subhooks in field.register_hooks(): self.hooks[hook].append(field) self.subhooks[hook] |= set(subhooks)
Register a field on its target hooks.
375,862
def pick_up_tip(self, location=None, presses=None, increment=None): if self.tip_attached: log.warning("There is already a tip attached to this pipette.") if not location: location = self.get_next_tip() self.current_tip(None) if location: placeable, _ = unpack_location(location) self.current_tip(placeable) presses = (self._pick_up_presses if not helpers.is_number(presses) else presses) increment = (self._pick_up_increment if not helpers.is_number(increment) else increment) def _pick_up_tip( self, location, presses, increment): self.instrument_actuator.set_active_current(self._plunger_current) self.robot.poses = self.instrument_actuator.move( self.robot.poses, x=self._get_plunger_position() ) self.current_volume = 0 self.move_to(self.current_tip().top(0)) for i in range(int(presses)): self.instrument_mover.push_speed() self.instrument_mover.push_active_current() self.instrument_mover.set_active_current(self._pick_up_current) self.instrument_mover.set_speed(self._pick_up_speed) dist = (-1 * self._pick_up_distance) + (-1 * increment * i) self.move_to( self.current_tip().top(dist), strategy=) self.instrument_mover.pop_active_current() self.instrument_mover.pop_speed() self.move_to( self.current_tip().top(0), strategy=) self._add_tip( length=self._tip_length ) if in self.quirks: self._shake_off_tips(location) self._shake_off_tips(location) self.previous_placeable = None self.robot.poses = self.instrument_mover.fast_home( self.robot.poses, self._pick_up_distance) return self do_publish(self.broker, commands.pick_up_tip, self.pick_up_tip, , None, None, self, location, presses, increment) _pick_up_tip( self, location=location, presses=presses, increment=increment) do_publish(self.broker, commands.pick_up_tip, self.pick_up_tip, , self, None, self, location, presses, increment) return self
Pick up a tip for the Pipette to run liquid-handling commands with Notes ----- A tip can be manually set by passing a `location`. If no location is passed, the Pipette will pick up the next available tip in it's `tip_racks` list (see :any:`Pipette`) Parameters ---------- location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`) The :any:`Placeable` (:any:`Well`) to perform the pick_up_tip. Can also be a tuple with first item :any:`Placeable`, second item relative :any:`Vector` presses : :any:int The number of times to lower and then raise the pipette when picking up a tip, to ensure a good seal (0 [zero] will result in the pipette hovering over the tip but not picking it up--generally not desireable, but could be used for dry-run). Default: 3 presses increment: :int The additional distance to travel on each successive press (e.g.: if presses=3 and increment=1, then the first press will travel down into the tip by 3.5mm, the second by 4.5mm, and the third by 5.5mm. Default: 1mm Returns ------- This instance of :class:`Pipette`. Examples -------- .. >>> from opentrons import instruments, labware, robot # doctest: +SKIP >>> robot.reset() # doctest: +SKIP >>> tiprack = labware.load('GEB-tiprack-300', '2') # doctest: +SKIP >>> p300 = instruments.P300_Single(mount='left', ... tip_racks=[tiprack]) # doctest: +SKIP >>> p300.pick_up_tip(tiprack[0]) # doctest: +SKIP >>> p300.return_tip() # doctest: +SKIP # `pick_up_tip` will automatically go to tiprack[1] >>> p300.pick_up_tip() # doctest: +SKIP >>> p300.return_tip() # doctest: +SKIP
375,863
def bulk_csv_import_mongo(csvfile, database_name, collection_name, delete_collection_before_import=False): l = [] response_dict = {} try: mongodb_client_url = getattr(settings, , ) mc = MongoClient(mongodb_client_url,document_class=OrderedDict) db = mconnection[database_name] collection = db[collection_name] if delete_collection_before_import: myobjectid = collection.remove({}) csvhandle = csv.reader(open(csvfile._get_path(), ), delimiter=) rowindex = 0 errors = 0 error_list = [] success = 0 for row in csvhandle: if rowindex == 0: column_headers = row cleaned_headers = [] for c in column_headers: c = c.replace(".", "") c = c.replace("$", "-") c = c.replace(" ", "_") cleaned_headers.append(c) else: record = OrderedDict(zip(cleaned_headers, row)) kwargs = OrderedDict() for k, v in record.items(): if v: if v.isdigit(): kwargs[k] = int(v) else: kwargs[k] = v try: myobjectid = collection.insert(kwargs) success += 1 except: error_message = "Error on row " + \ rowindex + ". " + str(sys.exc_info()) error_list.append(str(sys.exc_info())) rowindex += 1 if error_list: response_dict = {} response_dict[] = rowindex response_dict[] = len(error_list) response_dict[] = error_list response_dict[] = 400 response_dict[] = "Completed with errors" else: response_dict = {} response_dict[] = success response_dict[] = 200 response_dict[] = "Completed." return response_dict except: response_dict[] = 0 response_dict[] = 400 response_dict[] = "Error" response_dict[] = [] response_dict[] = str(sys.exc_info()) return response_dict
return a response_dict with a list of search results
375,864
def resolve_variable(var_name, var_def, provided_variable, blueprint_name): try: var_type = var_def["type"] except KeyError: raise VariableTypeRequired(blueprint_name, var_name) if provided_variable: if not provided_variable.resolved: raise UnresolvedVariable(blueprint_name, provided_variable) value = provided_variable.value else: try: value = var_def["default"] except KeyError: raise MissingVariable(blueprint_name, var_name) validator = var_def.get("validator", lambda v: v) try: value = validator(value) except Exception as exc: raise ValidatorError(var_name, validator.__name__, value, exc) value = validate_variable_type(var_name, var_type, value) allowed_values = var_def.get("allowed_values") if not validate_allowed_values(allowed_values, value): message = ( "Invalid value passed to in blueprint: %s. Got: , " "expected one of %s" ) % (var_name, blueprint_name, value, allowed_values) raise ValueError(message) return value
Resolve a provided variable value against the variable definition. Args: var_name (str): The name of the defined variable on a blueprint. var_def (dict): A dictionary representing the defined variables attributes. provided_variable (:class:`stacker.variables.Variable`): The variable value provided to the blueprint. blueprint_name (str): The name of the blueprint that the variable is being applied to. Returns: object: The resolved variable value, could be any python object. Raises: MissingVariable: Raised when a variable with no default is not provided a value. UnresolvedVariable: Raised when the provided variable is not already resolved. ValueError: Raised when the value is not the right type and cannot be cast as the correct type. Raised by :func:`stacker.blueprints.base.validate_variable_type` ValidatorError: Raised when a validator raises an exception. Wraps the original exception.
375,865
def extract(self, feature, remove_subfeatures=False): extracted = self[feature.start:feature.stop] for gap in feature.gaps: for i in range(*gap): extracted[i] = self._any_char if remove_subfeatures: extracted.features = [feature] for feature in extracted.features: feature.move(-feature.start) return extracted
Extract a feature from the sequence. This operation is complementary to the .excise() method. :param feature: Feature object. :type feature: coral.sequence.Feature :param remove_subfeatures: Remove all features in the extracted sequence aside from the input feature. :type remove_subfeatures: bool :returns: A subsequence from start to stop of the feature.
375,866
def run(self, module, options): logger.debug("Running maintainability harvester") return dict(self.harvester.results)
Run the operator. :param module: The target module path. :type module: ``str`` :param options: Any runtime options. :type options: ``dict`` :return: The operator results. :rtype: ``dict``
375,867
def _bin_update_items(self, items, replace_at_most_one, replacements, leftovers): for key, values in items: like_list_not_str = self._quacks_like_a_list_but_not_str(values) if not like_list_not_str or (like_list_not_str and not values): values = [values] for value in values: if value == []: replacements[key] = [] leftovers[:] = [l for l in leftovers if key != l[0]] continue if (key in self and (key not in replacements or (key in replacements and replacements[key] == []))): replacements[key] = [value] elif (key in self and not replace_at_most_one and len(replacements[key]) < len(self.values(key))): replacements[key].append(value) else: if replace_at_most_one: replacements[key] = [value] else: leftovers.append((key, value))
Subclassed from omdict._bin_update_items() to make update() and updateall() process lists of values as multiple values. <replacements and <leftovers> are modified directly, ala pass by reference.
375,868
def set_row(self, index, values): if self._sort: exists, i = sorted_exists(self._index, index) if not exists: self._insert_row(i, index) else: try: i = self._index.index(index) except ValueError: i = len(self._index) self._add_row(index) if isinstance(values, dict): if not (set(values.keys()).issubset(self._columns)): raise ValueError() for c, column in enumerate(self._columns): self._data[c][i] = values.get(column, self._data[c][i]) else: raise TypeError()
Sets the values of the columns in a single row. :param index: index value :param values: dict with the keys as the column names and the values what to set that column to :return: nothing
375,869
def _rename(self): newname = self.action[] try: newpath = self.fs.rename(self.fp,newname) except OSError: raise tornado.web.HTTPError(400) return newpath
Called during a PUT request where the action specifies a rename operation. Returns resource URI of the renamed file.
375,870
def fetch_attacks_data(self): if self.attacks_data_initialized: return self.submissions.init_from_datastore() self.dataset_batches.init_from_datastore() self.adv_batches.init_from_datastore() if not os.path.exists(LOCAL_DATASET_DIR): os.makedirs(LOCAL_DATASET_DIR) eval_lib.download_dataset(self.storage_client, self.dataset_batches, LOCAL_DATASET_DIR, os.path.join(LOCAL_DATASET_COPY, self.dataset_name, )) self.read_dataset_metadata() self.attacks_data_initialized = True
Initializes data necessary to execute attacks. This method could be called multiple times, only first call does initialization, subsequent calls are noop.
375,871
def sorted_for_ner(crf_classes): def key(cls): if len(cls) > 2 and cls[1] == : return cls.split(, 1)[1], cls return , cls return sorted(crf_classes, key=key)
Return labels sorted in a default order suitable for NER tasks: >>> sorted_for_ner(['B-ORG', 'B-PER', 'O', 'I-PER']) ['O', 'B-ORG', 'B-PER', 'I-PER']
375,872
def save(self): for form in self._forms: if isinstance(form, BaseForm): form.save(commit=False) self.instance.save() for form in self.forms: if isinstance(form, BaseForm): if hasattr(form, ): form.save_m2m() if hasattr(form, ): form.save_related() for form in self._forms: if isinstance(form, BaseFormSet): form.save(commit=True) return self.instance
Save the changes to the instance and any related objects.
375,873
def get_credentials(username=None, password=None, netrc=None, use_keyring=False): if netrc: path = None if netrc is True else netrc return authenticate_through_netrc(path) if not username: raise CredentialsError( ) if not password and use_keyring: password = keyring.get_password(KEYRING_SERVICE_NAME, username) if not password: password = getpass.getpass(.format(username)) if use_keyring: keyring.set_password(KEYRING_SERVICE_NAME, username, password) return username, password
Return valid username, password tuple. Raises CredentialsError if username or password is missing.
375,874
def dataframe(self): if self._away_points is None and self._home_points is None: return None fields_to_include = { : self.attendance, : self.away_first_downs, : self.away_fourth_down_attempts, : self.away_fourth_down_conversions, : self.away_fumbles, : self.away_fumbles_lost, : self.away_interceptions, : self.away_net_pass_yards, : self.away_pass_attempts, : self.away_pass_completions, : self.away_pass_touchdowns, : self.away_pass_yards, : self.away_penalties, : self.away_points, : self.away_rush_attempts, : self.away_rush_touchdowns, : self.away_rush_yards, : self.away_third_down_attempts, : self.away_third_down_conversions, : self.away_time_of_possession, : self.away_times_sacked, : self.away_total_yards, : self.away_turnovers, : self.away_yards_from_penalties, : self.away_yards_lost_from_sacks, : self.date, : self.duration, : self.home_first_downs, : self.home_fourth_down_attempts, : self.home_fourth_down_conversions, : self.home_fumbles, : self.home_fumbles_lost, : self.home_interceptions, : self.home_net_pass_yards, : self.home_pass_attempts, : self.home_pass_completions, : self.home_pass_touchdowns, : self.home_pass_yards, : self.home_penalties, : self.home_points, : self.home_rush_attempts, : self.home_rush_touchdowns, : self.home_rush_yards, : self.home_third_down_attempts, : self.home_third_down_conversions, : self.home_time_of_possession, : self.home_times_sacked, : self.home_total_yards, : self.home_turnovers, : self.home_yards_from_penalties, : self.home_yards_lost_from_sacks, : self.losing_abbr, : self.losing_name, : self.stadium, : self.time, : self.winner, : self.winning_abbr, : self.winning_name } return pd.DataFrame([fields_to_include], index=[self._uri])
Returns a pandas DataFrame containing all other class properties and values. The index for the DataFrame is the string URI that is used to instantiate the class, such as '201802040nwe'.
375,875
def get_layout(self, page): if type(page) == int: page = self.get_page(page) self.interpreter.process_page(page) layout = self.device.get_result() layout = self._add_annots(layout, page.annots) return layout
Get PDFMiner Layout object for given page object or page number.
375,876
def set(self, key, value, expire=0, noreply=None): if noreply is None: noreply = self.default_noreply return self._store_cmd(b, {key: value}, expire, noreply)[key]
The memcached "set" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: If no exception is raised, always returns True. If an exception is raised, the set may or may not have occurred. If noreply is True, then a successful return does not guarantee a successful set.
375,877
def get(cls, uni_char): uni_char = unicod(uni_char) return unicod(unicodedata.category(uni_char))
Return the general category code (as Unicode string) for the given Unicode character
375,878
def request_get_variable_json(self, py_db, request, thread_id): py_db.post_method_as_internal_command( thread_id, internal_get_variable_json, request)
:param VariablesRequest request:
375,879
def list_functions(*args, **kwargs): ****sys.list_**module.specific_function if not args: for func in __salt__: if func.startswith(moduledot): names.add(func) return sorted(names)
List the functions for all modules. Optionally, specify a module or modules from which to list. CLI Example: .. code-block:: bash salt '*' sys.list_functions salt '*' sys.list_functions sys salt '*' sys.list_functions sys user Function names can be specified as globs. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' sys.list_functions 'sys.list_*' .. versionadded:: ? .. code-block:: bash salt '*' sys.list_functions 'module.specific_function'
375,880
def get_access_token(self, code=None, **params): if self._access_token is None: if code is None: raise ValueError(_()) self.access_token_dict = self._get_access_token(code, **params) try: self._access_token = self.access_token_dict[] except KeyError, e: raise OAuthError("Credentials could not be validated, the provider returned no access token.") return self._access_token
Return the memoized access token or go out and fetch one.
375,881
def get_recurrence(self, config): model = MFD_MAP[config[]]() model.setUp(config) model.get_mmax(config, self.msr, self.rake, self.area) model.mmax = model.mmax + (self.msr_sigma * model.mmax_sigma) if in config[]: if not self.disp_length_ratio: self.disp_length_ratio = 1.25E-5 min_mag, bin_width, occur_rates = model.get_mfd( self.slip, self.area, self.shear_modulus, self.disp_length_ratio) else: min_mag, bin_width, occur_rates = model.get_mfd(self.slip, self.area, self.shear_modulus) self.recurrence = IncrementalMFD(min_mag, bin_width, occur_rates) self.magnitudes = min_mag + np.cumsum( bin_width * np.ones(len(occur_rates), dtype=float)) - bin_width self.max_mag = np.max(self.magnitudes)
Calculates the recurrence model for the given settings as an instance of the openquake.hmtk.models.IncrementalMFD :param dict config: Configuration settings of the magnitude frequency distribution.
375,882
def _GetContainingRange(self, partition_key): for keyrange in self.partition_map.keys(): if keyrange.Contains(partition_key): return keyrange return None
Gets the containing range based on the partition key.
375,883
def to_latlon(easting, northing, zone_number, zone_letter=None, northern=None, strict=True): if not zone_letter and northern is None: raise ValueError() elif zone_letter and northern is not None: raise ValueError() if strict: if not in_bounds(easting, 100000, 1000000, upper_strict=True): raise OutOfRangeError() if not in_bounds(northing, 0, 10000000): raise OutOfRangeError() check_valid_zone(zone_number, zone_letter) if zone_letter: zone_letter = zone_letter.upper() northern = (zone_letter >= ) x = easting - 500000 y = northing if not northern: y -= 10000000 m = y / K0 mu = m / (R * M1) p_rad = (mu + P2 * mathlib.sin(2 * mu) + P3 * mathlib.sin(4 * mu) + P4 * mathlib.sin(6 * mu) + P5 * mathlib.sin(8 * mu)) p_sin = mathlib.sin(p_rad) p_sin2 = p_sin * p_sin p_cos = mathlib.cos(p_rad) p_tan = p_sin / p_cos p_tan2 = p_tan * p_tan p_tan4 = p_tan2 * p_tan2 ep_sin = 1 - E * p_sin2 ep_sin_sqrt = mathlib.sqrt(1 - E * p_sin2) n = R / ep_sin_sqrt r = (1 - E) / ep_sin c = _E * p_cos**2 c2 = c * c d = x / (n * K0) d2 = d * d d3 = d2 * d d4 = d3 * d d5 = d4 * d d6 = d5 * d latitude = (p_rad - (p_tan / r) * (d2 / 2 - d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) + d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2)) longitude = (d - d3 / 6 * (1 + 2 * p_tan2 + c) + d5 / 120 * (5 - 2 * c + 28 * p_tan2 - 3 * c2 + 8 * E_P2 + 24 * p_tan4)) / p_cos return (mathlib.degrees(latitude), mathlib.degrees(longitude) + zone_number_to_central_longitude(zone_number))
This function convert an UTM coordinate into Latitude and Longitude Parameters ---------- easting: int Easting value of UTM coordinate northing: int Northing value of UTM coordinate zone number: int Zone Number is represented with global map numbers of an UTM Zone Numbers Map. More information see utmzones [1]_ zone_letter: str Zone Letter can be represented as string values. Where UTM Zone Designators can be accessed in [1]_ northern: bool You can set True or False to set this parameter. Default is None .. _[1]: http://www.jaworski.ca/utmzones.htm
375,884
def normalize_pdf(mu, pofmu): if min(pofmu) < 0: raise ValueError("Probabilities cannot be negative, dont ask me to " "normalize a function over a negative domain!") dp = integral_element(mu, pofmu) return mu, pofmu/sum(dp)
Takes a function pofmu defined at rate sample values mu and normalizes it to be a suitable pdf. Both mu and pofmu must be arrays or lists of the same length.
375,885
def create_ui(self): super(GtkShapesCanvasView, self).create_ui() self.widget.set_events(gtk.gdk.BUTTON_PRESS | gtk.gdk.BUTTON_RELEASE | gtk.gdk.BUTTON_MOTION_MASK | gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK | gtk.gdk.POINTER_MOTION_HINT_MASK) self._dirty_check_timeout_id = gtk.timeout_add(30, self.check_dirty) self.resize = Debounce(self._resize, wait=250) debounced_on_expose_event = Debounce(self._on_expose_event, wait=250, leading=True, trailing=True) self.widget.connect(, debounced_on_expose_event)
.. versionchanged:: 0.20 Debounce window expose and resize handlers to improve responsiveness. .. versionchanged:: X.X.X Call debounced `_on_expose_event` handler on _leading_ edge to make UI update more responsive when, e.g., changing window focus. Decrease debounce time to 250 ms.
375,886
def split_namespace(clarkName):
    """Split a Clark-notation property name into ``(namespace, localname)``.

    The namespace defaults to the empty string when the name is not in
    Clark notation.

    Example:
        '{DAV:}foo'  -> ('DAV:', 'foo')
        'bar'        -> ('', 'bar')
    """
    if clarkName.startswith("{") and "}" in clarkName:
        # Drop the leading '{' and split at the first closing brace.
        namespace, _, local = clarkName[1:].partition("}")
        return (namespace, local)
    return ("", clarkName)
Return (namespace, localname) tuple for a property name in Clark Notation. Namespace defaults to ''. Example: '{DAV:}foo' -> ('DAV:', 'foo') 'bar' -> ('', 'bar')
375,887
def merge(self, other):
    """Merge *other* into this value in place, narrowing the bounds.

    Raises on incomparable domains or contradictory ranges; returns self.
    """
    other = self.coerce(other)
    if list_diff(self.domain, other.domain) != []:
        raise Exception("Incomparable orderings. Different domains")
    # Equal, or other adds no information: keep current bounds.
    if self.is_equal(other) or other.is_entailed_by(self):
        return self
    if self.is_entailed_by(other):
        # Other is strictly tighter: adopt its bounds wholesale.
        self.low, self.high = other.low, other.high
        return self
    if self.is_contradictory(other):
        raise Contradiction("Cannot merge %s and %s" % (self, other))
    # Overlapping ranges: intersect by taking the tighter bound on each side.
    index_of = self.to_i
    self.low = self.domain[max(index_of(self.low), index_of(other.low))]
    self.high = self.domain[min(index_of(self.high), index_of(other.high))]
    return self
Merges the two values
375,888
def _set_key(self):
    """Set ``self.final_key`` for the current record.

    When ``self.roll`` is true the key is stamped with a UTC date derived
    from ``self.start_time`` using ``self.date_format``; otherwise the plain
    ``self.key`` is used.
    """
    if self.roll:
        self.date = time.strftime(self.date_format,
                                  time.gmtime(self.start_time))
        # NOTE(review): the format-string literal was lost in the source;
        # '_' as separator is an assumption -- confirm against the original
        # key naming scheme.
        self.final_key = '{}_{}'.format(self.key, self.date)
    else:
        self.final_key = self.key
sets the final key to be used currently
375,889
def unpin_chat_message(self, *args, **kwargs):
    """See :func:`unpin_chat_message`."""
    # Merge instance-level overrides into the keyword arguments, then
    # delegate to the module-level request builder and execute it.
    overrides = self._merge_overrides(**kwargs)
    request = unpin_chat_message(*args, **overrides)
    return request.run()
See :func:`unpin_chat_message`
375,890
def parse_bitcode(bitcode, context=None):
    """Create Module from a LLVM *bitcode* (a bytes object).

    Uses the global context when *context* is not given. Raises
    RuntimeError when the bitcode cannot be parsed.
    """
    if context is None:
        context = get_global_context()
    data = c_char_p(bitcode)
    length = len(bitcode)
    with ffi.OutputString() as errmsg:
        raw = ffi.lib.LLVMPY_ParseBitcode(context, data, length, errmsg)
        mod = ModuleRef(raw, context)
        if errmsg:
            # Release the (possibly partial) module before reporting.
            mod.close()
            raise RuntimeError(
                "LLVM bitcode parsing error\n{0}".format(errmsg))
    return mod
Create Module from a LLVM *bitcode* (a bytes object).
375,891
def unsubscribe(self):
    """Unsubscribe this subscriber from the associated list.

    Issues a POST to the list's unsubscribe endpoint with this
    subscriber's email address. The response body is intentionally
    ignored (the original bound it to an unused local).
    """
    body = {"EmailAddress": self.email_address}
    self._post("/subscribers/%s/unsubscribe.json" % self.list_id,
               json.dumps(body))
Unsubscribes this subscriber from the associated list.
375,892
def add(self, tipo_opcao, nome_opcao):
    """Insert a new Option Pool and return its data.

    :param tipo_opcao: Type. String with a maximum of 50 characters,
        matching [a-zA-Z_-].
    :param nome_opcao: Option name. String with a maximum of 50
        characters, matching [a-zA-Z_-].
    :return: dict like ``{'id': <id>, 'type': <type>, 'name': <name>}``.
    """
    # NOTE(review): the endpoint literal and the first dict key were lost in
    # the source; both are reconstructed ('type' per the documented return
    # shape) -- confirm against the networkapi REST routes.
    url = 'api/pools/options/save/'
    return self.post(url, {'type': tipo_opcao, "name": nome_opcao})
Inserts a new Option Pool and returns its identifier. :param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-] :param nome_opcao: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-] :return: Following dictionary: :: {'id': < id > , 'type':<type>, 'name':<name>} :raise InvalidParameterError: The value of tipo_opcao or nome_opcao is invalid. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
375,893
def write_short(self, number):
    """Write *number* to the underlying output as a 2-byte signed integer.

    The byte order prefix is taken from ``self.byte_order``.
    """
    encoded = pack(self.byte_order + "h", number)
    self.write(encoded)
Writes a short integer to the underlying output file as a 2-byte value.
375,894
def find_ctrlpts_surface(t_u, t_v, surf, **kwargs):
    """Find the control points involved in evaluating the surface point at
    the parameter pair ``(t_u, t_v)``.

    This uses a modified version of algorithm *A3.5 SurfacePoint* from
    The NURBS Book by Piegl & Tiller.

    Keyword Arguments:
        * ``find_span_func``: span-finding function
          (default: ``helpers.find_span_linear``)

    :param t_u: parameter on the u-direction
    :type t_u: float
    :param t_v: parameter on the v-direction
    :type t_v: float
    :param surf: input surface
    :type surf: abstract.Surface
    :return: 2-dimensional control points array
    :rtype: list
    """
    # NOTE(review): the kwarg-name literal was missing in the source;
    # 'find_span_func' matches geomdl's convention. Resolve the default
    # lazily so 'helpers' is only touched when no function is supplied.
    span_func = kwargs.get('find_span_func')
    if span_func is None:
        span_func = helpers.find_span_linear
    span_u = span_func(surf.degree_u, surf.knotvector_u, surf.ctrlpts_size_u, t_u)
    span_v = span_func(surf.degree_v, surf.knotvector_v, surf.ctrlpts_size_v, t_v)
    # Index of the first control point affecting this parameter pair.
    idx_u = span_u - surf.degree_u
    idx_v = span_v - surf.degree_v
    # Slice the (degree_u + 1) x (degree_v + 1) window of control points.
    return [
        [surf.ctrlpts2d[idx_u + k][idx_v + j] for j in range(surf.degree_v + 1)]
        for k in range(surf.degree_u + 1)
    ]
Finds the control points involved in the evaluation of the surface point defined by the input parameter pair. This function uses a modified version of the algorithm *A3.5 SurfacePoint* from The NURBS Book by Piegl & Tiller. :param t_u: parameter on the u-direction :type t_u: float :param t_v: parameter on the v-direction :type t_v: float :param surf: input surface :type surf: abstract.Surface :return: 2-dimensional control points array :rtype: list
375,895
def format_npm_command_for_logging(command):
    """Convert an npm command list to a single string for display to the user.

    :param command: the command as a list of arguments
    :return: a shell-style string representation
    """
    # NOTE(review): the string literals in this function were lost in the
    # source and are reconstructed -- confirm against upstream.
    if platform.system().lower() == 'windows':
        # 'npx.cmd -c "<script>"' must be re-quoted as one unit on Windows.
        if command[0] == 'npx.cmd' and command[1] == '-c':
            return "npx.cmd -c \"%s\"" % " ".join(command[2:])
        return " ".join(command)
    # POSIX shells: drop the single quotes kept around arguments.
    return " ".join(command).replace("'", '')
Convert npm command list to string for display to user.
375,896
def update_unit(self, unit_id, unit_dict):
    """Update an existing unit via a PUT request.

    :param unit_id: the unit id
    :param unit_dict: dict with the fields to update
    :return: dict with the updated unit data
    """
    request_kwargs = {
        'resource': UNITS,
        'billomat_id': unit_id,
        'send_data': unit_dict,
    }
    return self._create_put_request(**request_kwargs)
Updates an unit :param unit_id: the unit id :param unit_dict: dict :return: dict
375,897
def scopusRecordParser(record, header=None):
    """Parse one CSV line of a Scopus export into a field dictionary.

    Scopus quotes strings (e.g. abstracts) with double quotes and escapes an
    embedded double quote as two in a row (``""``); escaped quotes are left
    as-is, but the outer quotes are stripped and comma-split quoted values
    are stitched back together.

    # Parameters

    _record_ : `str`
    > string ending with a newline containing the record's entry

    _header_ : `list`, optional
    > column names; defaults to the module-level ``scopusHeader``

    # Returns

    `dict`
    > A dictionary of the key-value pairs in the entry
    """
    if header is None:
        header = scopusHeader
    # NOTE(review): the delimiter and quote literals were lost in the source
    # and are restored here (comma-separated, double-quoted CSV).
    splitRecord = record[:-1].split(',')
    tagDict = {}
    # Walk the columns right-to-left so quoted, comma-containing values can
    # be reassembled from the tail of splitRecord.
    for key in reversed(header):
        currentVal = splitRecord.pop()
        if currentVal == '':
            # Empty fields are omitted from the result entirely.
            continue
        if currentVal[-1] == '"':
            if re.match(firstQuotingRegex, currentVal) is None:
                # Quoted value that contained commas: stitch the split
                # pieces back together right-to-left.
                valString = ',' + currentVal[:-1]
                currentVal = splitRecord.pop()
                while re.match(innerQuotingRegex, currentVal) is None:
                    valString = ',' + currentVal + valString
                    currentVal = splitRecord.pop()
                valString = currentVal[1:] + valString
            else:
                # Whole quoted value fit in one piece; strip outer quotes.
                # (The original wrapped this in a try/except with identical
                # bodies -- dead code, removed.)
                valString = currentVal[1:-1]
            tagDict[key] = valString
        else:
            tagDict[key] = currentVal
    return tagDict
The parser that [ScopusRecords](../classes/ScopusRecord.html#metaknowledge.scopus.ScopusRecord) use. This takes a line from [scopusParser()](#metaknowledge.scopus.scopusHandlers.scopusParser) and parses it as a part of the creation of a `ScopusRecord`. **Note** this is for csv files downloaded from scopus _not_ the text records as those are less complete. Also, Scopus uses double quotes (`"`) to quote strings, such as abstracts, in the csv so double quotes in the string must be escaped. For reasons not fully understandable by mortals they chose to use two double quotes in a row (`""`) to represent an escaped double quote. This parser does not unescape these quotes, but it does correctly handle their interaction with the outer double quotes. # Parameters _record_ : `str` > string ending with a newline containing the record's entry # Returns `dict` > A dictionary of the key-value pairs in the entry
375,898
def run(self, host='localhost', port=1234):
    """Launch the server.

    Will run forever accepting connections until interrupted.

    :param host: the host to listen on (NOTE(review): the default literal
        was lost in the source; 'localhost' is an assumption -- confirm)
    :param port: the port to listen on
    """
    loop = asyncio.get_event_loop()
    coro = asyncio.start_server(self._handle, host, port, loop=loop)
    try:
        server = loop.run_until_complete(coro)
    except Exception as e:
        # NOTE(review): both log-message literals were lost in the source;
        # the wording here is reconstructed.
        self._logger.error('Could not start server: {}'.format(e))
        return
    self._logger.info('Serving on {}'.format(
        server.sockets[0].getsockname()))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Fall through to an orderly shutdown on Ctrl-C.
        pass
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
Launch the server. Will run forever accepting connections until interrupted. Parameters: * host: The host to listen on * port: The port to listen on
375,899
def has_delete_permission(self, request, obj=None):
    """Return True if *request* has permission to delete model instances.

    The default implementation does not examine *obj*; subclasses may
    override it to make per-object decisions. When *obj* is None it should
    answer whether the request may delete *any* object of this type.
    """
    opts = self.opts
    # Permission codenames are namespaced by app label, e.g.
    # 'app.delete_model' -- the '.' separator was missing in the source.
    return request.user.has_perm(
        opts.app_label + '.' + opts.get_delete_permission(), obj)
Returns True if the given request has permission to delete the given Django model instance; the default implementation doesn't examine the `obj` parameter. Can be overridden by the user in subclasses. In such a case it should return True if the given request has permission to delete the `obj` model instance. If `obj` is None, this should return True if the given request has permission to delete *any* object of the given type.