code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _on_process_error(self, error):
    """Log a warning describing a QProcess error code.

    Codes missing from ``PROCESS_ERROR_STRING`` are mapped to -1 (the
    generic entry) before lookup.  Nothing is logged when
    ``self._prevent_logs`` is set.
    """
    if self is None:  # defensive: callback may fire after deletion
        return
    # Fall back to the generic message for unrecognised codes.
    if error not in PROCESS_ERROR_STRING:
        error = -1
    if self._prevent_logs:
        return
    _logger().warning(PROCESS_ERROR_STRING[error])
Logs process error
def _is_ipv4_like(s): parts = s.split('.') if len(parts) != 4: return False for part in parts: try: int(part) except ValueError: return False return True
Find if a string superficially looks like an IPv4 address. AWS documentation plays it fast and loose with this; in other regions, it seems like even non-valid IPv4 addresses (in particular, ones that possess decimal numbers out of range for IPv4) are rejected.
def has_object_permission(self, request, view, obj):
    """Check per-object filter permissions for the requesting user.

    Superusers and anonymous users are always allowed.  Other users are
    allowed only if *obj* satisfies the filter query stored for the user
    and the object's content type, or if no such filter is configured.
    """
    user = request.user
    if not user.is_superuser and not user.is_anonymous():
        valid = False
        try:
            ct = ContentType.objects.get_for_model(obj)
            fpm = FilterPermissionModel.objects.get(user=user, content_type=ct)
            # Deserialize the stored (base64) filter into a Q object.
            myq = QSerializer(base64=True).loads(fpm.filter)
            try:
                # Allowed only when obj itself matches the filter query.
                myobj = obj.__class__.objects.filter(myq).distinct().get(pk=obj.pk)
                if myobj:
                    valid = True
            except ObjectDoesNotExist:
                valid = False
        except ObjectDoesNotExist:
            # No filter configured for this user/content type: allow.
            valid = True
        finally:
            # NOTE(review): returning from ``finally`` swallows any other
            # exception raised above -- confirm this is intentional.
            return valid
    else:
        return True
check filter permissions
def sort_untl(self, sort_structure):
    """Sort ``self.children`` in place by each child's tag position in
    the pre-ordered *sort_structure* list."""
    tag_rank = lambda child: sort_structure.index(child.tag)
    self.children.sort(key=tag_rank)
Sort the UNTL Python object by the index of a sort structure pre-ordered list.
def normalise_rows(matrix):
    """Scale every row of *matrix* to unit (L2) length.

    Zero-length rows cannot be normalised; a divisor of 1 is substituted
    for their zero norm, which leaves them unchanged.
    """
    norms = np.apply_along_axis(np.linalg.norm, 1, matrix)
    # Avoid division by zero: zero rows divide by 1 and stay all-zero.
    norms[norms == 0] = 1
    return matrix / norms[:, np.newaxis]
Scales all rows to length 1. Fails when row is 0-length, so it leaves these unchanged
def upload(self, src_file_path, dst_file_name=None):
    """Upload *src_file_path* to the server, optionally stored under
    *dst_file_name*.

    Requires an active session.  Returns the server response payload; the
    raw status code from the REST layer is discarded.
    """
    self._check_session()
    _status, payload = self._rest.upload_file(
        'files', src_file_path, dst_file_name)
    return payload
Upload the specified file to the server.
def get_dep(self, name: str) -> dict:
    """Return the dependency entry whose ``"model"`` field equals *name*.

    Note: despite older docs describing the return as a UUID string, the
    function returns the full dependency dict taken from
    ``self.meta["dependencies"]`` (the annotation is corrected accordingly).

    :param name: model name to look up
    :return: the matching dependency dictionary
    :raises KeyError: if no dependency matches *name*
    """
    deps = self.meta["dependencies"]
    for dep in deps:
        if dep["model"] == name:
            return dep
    raise KeyError("%s not found in %s." % (name, deps))
Return the uuid of the dependency identified with "name". :param name: :return: UUID
def returnIndexList(self, limit=False):
    """Return the list of original-list indices tracked by this object.

    :param limit: maximum number of entries to return; ``False`` (the
        default) returns the full index list.  Entries are only included
        while a corresponding row exists in ``self.table``.
    :return: a list of integers referencing the original list of dicts
    """
    # Must be an identity check: ``0 == False`` is True in Python, so the
    # previous ``limit == False`` returned the *full* list for limit=0
    # instead of an empty one.
    if limit is False:
        return self.index_track
    result = []
    for i in range(limit):
        if len(self.table) > i:
            result.append(self.index_track[i])
    return result
Return a list of integers that are list-index references to the original list of dictionaries." Example of use: >>> test = [ ... {"name": "Jim", "age": 18, "income": 93000, "order": 2}, ... {"name": "Larry", "age": 18, "order": 3}, ... {"name": "Joe", "age": 20, "income": 15000, "order": 1}, ... {"name": "Bill", "age": 19, "income": 29000, "order": 4}, ... ] >>> print PLOD(test).returnIndexList() [0, 1, 2, 3] >>> print PLOD(test).sort("name").returnIndexList() [3, 0, 2, 1] :param limit: A number limiting the quantity of entries to return. Defaults to False, which means that the full list is returned. :return: A list of integers representing the original indices.
def fetch_import_ref_restriction(self,):
    """Fetch whether importing the reference is restricted.

    Importing is restricted unless the status is LOADED or UNLOADED, or
    when the refobj interface restricts the 'import_reference' action.

    :returns: True, if importing the reference is restricted
    :rtype: :class:`bool`
    :raises: None
    """
    interface = self.get_refobjinter()
    status_blocks_import = self.status() not in (self.LOADED, self.UNLOADED)
    return status_blocks_import or interface.fetch_action_restriction(
        self, 'import_reference')
Fetch whether importing the reference is restricted :returns: True, if importing the reference is restricted :rtype: :class:`bool` :raises: None
def reverse_timezone(self, query, timeout=DEFAULT_SENTINEL):
    """Find the timezone for the point in *query* via the GeoNames API.

    :param query: coordinates (Point, (lat, lon) pair, or "lat, lon" string)
    :param int timeout: per-call override of the geocoder timeout
    :rtype: :class:`geopy.timezone.Timezone`
    :raises ValueError: if *query* cannot be coerced to a coordinate pair
    """
    ensure_pytz_is_installed()
    try:
        lat, lng = self._coerce_point_to_string(query).split(',')
    except ValueError:
        raise ValueError("Must be a coordinate pair or Point")
    params = {
        "lat": lat,
        "lng": lng,
        "username": self.username,
    }
    url = "?".join((self.api_timezone, urlencode(params)))
    logger.debug("%s.reverse_timezone: %s", self.__class__.__name__, url)
    return self._parse_json_timezone(
        self._call_geocoder(url, timeout=timeout)
    )
Find the timezone for a point in `query`. GeoNames always returns a timezone: if the point being queried doesn't have an assigned Olson timezone id, a ``pytz.FixedOffset`` timezone is used to produce the :class:`geopy.timezone.Timezone`. .. versionadded:: 1.18.0 :param query: The coordinates for which you want a timezone. :type query: :class:`geopy.point.Point`, list or tuple of (latitude, longitude), or string as "%(latitude)s, %(longitude)s" :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: :class:`geopy.timezone.Timezone`
def _compute(self): newstate = self._implicit_solver() adjustment = {} tendencies = {} for name, var in self.state.items(): adjustment[name] = newstate[name] - var tendencies[name] = adjustment[name] / self.timestep self.adjustment = adjustment self._update_diagnostics(newstate) return tendencies
Computes the state variable tendencies in time for implicit processes. To calculate the new state the :func:`_implicit_solver()` method is called for daughter classes. This however returns the new state of the variables, not just the tendencies. Therefore, the adjustment is calculated which is the difference between the new and the old state and stored in the object's attribute adjustment. Calculating the new model states through solving the matrix problem already includes the multiplication with the timestep. The derived adjustment is divided by the timestep to calculate the implicit subprocess tendencies, which can be handeled by the :func:`~climlab.process.time_dependent_process.TimeDependentProcess.compute` method of the parent :class:`~climlab.process.time_dependent_process.TimeDependentProcess` class. :ivar dict adjustment: holding all state variables' adjustments of the implicit process which are the differences between the new states (which have been solved through matrix inversion) and the old states.
def decompose_nfkd(text):
    """Apply Unicode NFKD compatibility decomposition to *text*.

    Normalises compatibility forms and splits characters from their
    diacritics into separate codepoints.  ``None`` passes through
    unchanged.  The ICU transliterator is created once and cached on the
    function object.
    """
    if text is None:
        return None
    transliterator = getattr(decompose_nfkd, '_tr', None)
    if transliterator is None:
        transliterator = Transliterator.createInstance('Any-NFKD')
        decompose_nfkd._tr = transliterator
    return transliterator.transliterate(text)
Perform unicode compatibility decomposition. This will replace some non-standard value representations in unicode and normalise them, while also separating characters and their diacritics into two separate codepoints.
def scheduled(wait=False):
    """Run all crawlers that are due.

    With ``wait=True`` this never returns: it keeps re-running the
    scheduler, sleeping ``SCHEDULER_INTERVAL`` seconds between passes.
    """
    manager.run_scheduled()
    while wait:
        manager.run_scheduled()
        time.sleep(settings.SCHEDULER_INTERVAL)
Run crawlers that are due.
def logp_partial_gradient(self, variable, calculation_set=None):
    """Calculate the partial gradient of this node's log-posterior with
    respect to *variable*.

    Returns 0 when self is not in *calculation_set*, and an all-zero array
    for non-continuous variables.
    """
    if (calculation_set is None) or (self in calculation_set):
        # Discrete variables have no meaningful gradient.
        if not datatypes.is_continuous(variable):
            return zeros(shape(variable.value))
        if variable is self:
            # Gradient of this node's own logp w.r.t. its value.
            try:
                gradient_func = self._logp_partial_gradients['value']
            except KeyError:
                raise NotImplementedError(
                    repr(
                        self) + " has no gradient function for 'value'")
            gradient = np.reshape(
                gradient_func.get(
                ), np.shape(
                    variable.value))
        else:
            # Sum the gradient contributions through each parent parameter.
            gradient = builtins.sum(
                [self._pgradient(variable, parameter, value)
                 for parameter, value in six.iteritems(self.parents)])
        return gradient
    else:
        return 0
Calculates the partial gradient of the posterior of self with respect to variable. Returns zero if self is not in calculation_set.
def pay(self, predecessor):
    """Credit *predecessor* with this match set's expected future payoff.

    No-op when *predecessor* is None (i.e. this is the first situation in
    the scenario).  Otherwise *predecessor* must be a MatchSet whose
    selected action led directly to this match set's situation.
    """
    assert predecessor is None or isinstance(predecessor, MatchSet)
    if predecessor is None:
        return
    predecessor.payoff += self._algorithm.get_future_expectation(self)
If the predecessor is not None, gives the appropriate amount of payoff to the predecessor in payment for its contribution to this match set's expected future payoff. The predecessor argument should be either None or a MatchSet instance whose selected action led directly to this match set's situation. Usage: match_set = model.match(situation) match_set.pay(previous_match_set) Arguments: predecessor: The MatchSet instance which was produced by the same classifier set in response to the immediately preceding situation, or None if this is the first situation in the scenario. Return: None
def register_factory(self, key, factory=_sentinel, scope=NoneScope,
                     allow_overwrite=False):
    """Create and register a provider for *key* built from *factory*.

    When called without *factory*, returns a partial application so the
    method can be used as a decorator.

    :param key: Provider key
    :param factory: Factory callable
    :param scope: Scope key, factory, or instance
    :return: the factory (or a decorator when *factory* is omitted)
    :raises KeyError: if *key* is already registered and overwriting is
        not allowed
    """
    if factory is _sentinel:
        # Decorator usage: @container.register_factory('key')
        return functools.partial(self.register_factory, key,
                                 scope=scope, allow_overwrite=allow_overwrite)
    if key in self._providers and not allow_overwrite:
        raise KeyError("Key %s already exists" % key)
    self._providers[key] = self.provider(factory, scope)
    return factory
Creates and registers a provider using the given key, factory, and scope. Can also be used as a decorator. :param key: Provider key :type key: object :param factory: Factory callable :type factory: callable :param scope: Scope key, factory, or instance :type scope: object or callable :return: Factory (or None if we're creating a provider without a factory) :rtype: callable or None
def source(self, fields=None, **kwargs):
    """Selectively control how the ``_source`` field is returned.

    :arg fields: wildcard string, array of wildcards, or dict of
        includes/excludes.  ``None`` plus keyword args updates the current
        include/exclude dict instead (a ``None`` value removes the key).
    :raises ValueError: if both *fields* and keyword args are given
    """
    s = self._clone()
    if fields and kwargs:
        raise ValueError("You cannot specify fields and kwargs at the same time.")
    if fields is not None:
        s._source = fields
        return s
    if kwargs and not isinstance(s._source, dict):
        # Start a fresh include/exclude dict before applying kwargs.
        s._source = {}
    for key, value in kwargs.items():
        if value is None:
            # Remove a previously-set include/exclude entry, if any.
            s._source.pop(key, None)
        else:
            s._source[key] = value
    return s
Selectively control how the _source field is returned. :arg fields: wildcard string, array of wildcards, or dictionary of includes and excludes If ``fields`` is None, the entire document will be returned for each hit. If fields is a dictionary with keys of 'include' and/or 'exclude' the fields will be either included or excluded appropriately. Calling this multiple times with the same named parameter will override the previous values with the new ones. Example:: s = Search() s = s.source(include=['obj1.*'], exclude=["*.description"]) s = Search() s = s.source(include=['obj1.*']).source(exclude=["*.description"])
def getMemoryStats(self):
    """Return JVM memory utilisation stats for the Apache Tomcat server.

    Lazily fetches the status XML on first use.

    @return: Dictionary of memory utilization stats.
    """
    if self._statusxml is None:
        self.initStats()
    stats = {}
    node = self._statusxml.find('jvm/memory')
    if node is not None:
        for key, raw in node.items():
            stats[key] = util.parse_value(raw)
    return stats
Return JVM Memory Stats for Apache Tomcat Server. @return: Dictionary of memory utilization stats.
def generate_csv(src, out):
    """Walk through *src* and write German-origin cables to the CSV *out*.

    Columns: Reference ID, Created, Origin, Subject (title-cased).
    """
    # Context manager ensures the output file is flushed and closed even if
    # iteration raises; the original leaked the file handle.
    with open(out, 'wb') as fh:
        writer = UnicodeWriter(fh, delimiter=';')
        writer.writerow(('Reference ID', 'Created', 'Origin', 'Subject'))
        cables = cables_from_source(
            src, predicate=pred.origin_filter(pred.origin_germany))
        for cable in cables:
            writer.writerow((cable.reference_id, cable.created,
                             cable.origin, titlefy(cable.subject)))
Walks through `src` and generates the CSV file `out`.
def before_request(request, tracer=None):
    """Extract a tracing context from an incoming request and start a
    server-side span.

    If no tracing context is present in the headers, or it cannot be
    parsed, a new root span is started instead.

    :param request: HTTP request exposing dict-like ``.headers`` plus
        ``full_url``, ``remote_ip``, ``caller_name``, ``remote_port`` and
        ``operation`` attributes
    :param tracer: optional tracer instance; defaults to the global
        ``opentracing.tracer``
    :return: a new, already-started span
    """
    if tracer is None:
        tracer = opentracing.tracer
    tags_dict = {
        tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
        tags.HTTP_URL: request.full_url,
    }
    remote_ip = request.remote_ip
    if remote_ip:
        tags_dict[tags.PEER_HOST_IPV4] = remote_ip
    caller_name = request.caller_name
    if caller_name:
        tags_dict[tags.PEER_SERVICE] = caller_name
    remote_port = request.remote_port
    if remote_port:
        tags_dict[tags.PEER_PORT] = remote_port
    operation = request.operation
    try:
        # Copy headers into a plain dict carrier for the extractor.
        carrier = {}
        for key, value in six.iteritems(request.headers):
            carrier[key] = value
        parent_ctx = tracer.extract(
            format=Format.HTTP_HEADERS, carrier=carrier
        )
    except Exception as e:
        logging.exception('trace extract failed: %s' % e)
        # Fall back to starting a new root span.
        parent_ctx = None
    span = tracer.start_span(
        operation_name=operation, child_of=parent_ctx, tags=tags_dict)
    return span
Attempts to extract a tracing span from incoming request. If no tracing context is passed in the headers, or the data cannot be parsed, a new root span is started. :param request: HTTP request with `.headers` property exposed that satisfies a regular dictionary interface :param tracer: optional tracer instance to use. If not specified the global opentracing.tracer will be used. :return: returns a new, already started span.
def _sections_to_variance_sections(self, sections_over_time): variance_sections = [] for i in range(len(sections_over_time[0])): time_sections = [sections[i] for sections in sections_over_time] variance = np.var(time_sections, axis=0) variance_sections.append(variance) return variance_sections
Computes the variance of corresponding sections over time. Returns: a list of np arrays.
def add_locations(self, locations):
    """Add extra locations to this AstralGeocoder.

    Accepts either a (possibly newline-separated) string of locations or a
    list/tuple of strings or of lists/tuples that are forwarded to the
    :class:`Location` constructor.  Other types are silently ignored.
    """
    if isinstance(locations, (str, ustr)):
        self._add_from_str(locations)
    elif isinstance(locations, (list, tuple)):
        self._add_from_list(locations)
Add extra locations to AstralGeocoder. Extra locations can be * A single string containing one or more locations separated by a newline. * A list of strings * A list of lists/tuples that are passed to a :class:`Location` constructor
def copy_node(node):
    """Deep-copy *node* (or a list of nodes) while keeping annotations.

    The annotation dict is shallow-copied so the copy does not share its
    annotation mapping with the original.
    """
    if isinstance(node, gast.AST):
        duplicate = copy.deepcopy(node)
        annotations = getattr(node, anno.ANNOTATION_FIELD, {}).copy()
        setattr(duplicate, anno.ANNOTATION_FIELD, annotations)
        return duplicate
    return [copy_node(child) for child in node]
Copy a node but keep its annotations intact.
def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES):
    """Factory for a mutation batcher bound to this table.

    :type flush_count: int
    :param flush_count: (Optional) max rows per batch before flushing.
    :type max_row_bytes: int
    :param max_row_bytes: (Optional) max mutation bytes before flushing.
    """
    return MutationsBatcher(self, flush_count, max_row_bytes)
Factory to create a mutation batcher associated with this instance. For example: .. literalinclude:: snippets_table.py :start-after: [START bigtable_mutations_batcher] :end-before: [END bigtable_mutations_batcher] :type table: class :param table: class:`~google.cloud.bigtable.table.Table`. :type flush_count: int :param flush_count: (Optional) Maximum number of rows per batch. If it reaches the max number of rows it calls finish_batch() to mutate the current row batch. Default is FLUSH_COUNT (1000 rows). :type max_row_bytes: int :param max_row_bytes: (Optional) Max number of row mutations size to flush. If it reaches the max number of row mutations size it calls finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES (5 MB).
def create_node_rating_counts_settings(sender, **kwargs):
    """Signal handler: create rating-count and participation-settings rows
    for a newly created node (asynchronously, via delayed tasks)."""
    created = kwargs['created']
    node = kwargs['instance']
    if not created:
        return
    create_related_object.delay(NodeRatingCount, {'node': node})
    create_related_object.delay(NodeParticipationSettings, {'node': node})
create node rating count and settings
def G(self, ID, lat, lon):
    """Create a generic entry for an object.

    'ra'/'decl' are the object's equatorial coordinates; 'raZ'/'declZ' are
    the zero-latitude equivalents (reused directly when lat == 0).
    """
    eq = utils.eqCoords(lon, lat)
    eq_zero = eq if lat == 0 else utils.eqCoords(lon, 0)
    return {
        'id': ID,
        'lat': lat,
        'lon': lon,
        'ra': eq[0],
        'decl': eq[1],
        'raZ': eq_zero[0],
        'declZ': eq_zero[1],
    }
Creates a generic entry for an object.
def _cb_inform_sensor_status(self, msg):
    """Handle a sensor-status inform by updating each reported sensor.

    Message layout: [timestamp, count, then (name, status, value) triples].
    """
    timestamp = msg.arguments[0]
    num_sensors = int(msg.arguments[1])
    assert len(msg.arguments) == 2 + num_sensors * 3
    for n in xrange(num_sensors):
        base = 2 + n * 3
        name = msg.arguments[base]
        status = msg.arguments[base + 1]
        value = msg.arguments[base + 2]
        self.update_sensor(name, timestamp, status, value)
Update received for a sensor.
def get_fd(file_or_fd, default=None):
    """Resolve *file_or_fd* to a file descriptor.

    ``None`` falls back to *default*; objects exposing ``fileno()`` are
    unwrapped; bare integers (or anything else) pass through unchanged.
    """
    candidate = default if file_or_fd is None else file_or_fd
    if hasattr(candidate, "fileno"):
        candidate = candidate.fileno()
    return candidate
Helper function for getting a file descriptor.
def add_widget(self, widget, column=0):
    """Add a widget to this Layout.

    :param widget: The widget to be added.
    :param column: The column for this widget.  Defaults to zero.
    :raises RuntimeError: if the Layout is not yet attached to a Frame.
    """
    if self._frame is None:
        raise RuntimeError("You must add the Layout to the Frame before you can add a Widget.")
    self._columns[column].append(widget)
    widget.register_frame(self._frame)
    # Restore any persisted value for this widget from the frame's data.
    if widget.name in self._frame.data:
        widget.value = self._frame.data[widget.name]
Add a widget to this Layout. If you are adding this Widget to the Layout dynamically after starting to play the Scene, don't forget to ensure that the value is explicitly set before the next update. :param widget: The widget to be added. :param column: The column within the widget for this widget. Defaults to zero.
def generate_source_image(source_file, processor_options, generators=None, fail_silently=True):
    """Process a source ``File`` through source generators, stopping once a
    generator returns an image.

    :param source_file: the source ``File``; if it cannot be re-opened or
        rewound it is passed to generators as ``None``
    :param processor_options: options coerced into ``ThumbnailOptions``
    :param generators: generator callables; defaults to those named in
        ``settings.THUMBNAIL_SOURCE_GENERATORS``
    :param fail_silently: when False, a single generator's exception is
        re-raised directly; with multiple generators the collected
        exceptions are raised wrapped in ``NoSourceGenerator``
    :return: the generated image, or ``None`` if no generator produced one
    """
    processor_options = ThumbnailOptions(processor_options)
    # Remember the original closed state so it can be restored on exit.
    was_closed = getattr(source_file, 'closed', False)
    if generators is None:
        generators = [
            utils.dynamic_import(name)
            for name in settings.THUMBNAIL_SOURCE_GENERATORS]
    exceptions = []
    try:
        for generator in generators:
            source = source_file
            try:
                # Re-open (or at least rewind) the file for each generator.
                source.open()
            except Exception:
                try:
                    source.seek(0)
                except Exception:
                    source = None
            try:
                image = generator(source, **processor_options)
            except Exception as e:
                if not fail_silently:
                    if len(generators) == 1:
                        raise
                exceptions.append(e)
                image = None
            if image:
                return image
    finally:
        if was_closed:
            try:
                source_file.close()
            except Exception:
                pass
    if exceptions and not fail_silently:
        raise NoSourceGenerator(*exceptions)
Processes a source ``File`` through a series of source generators, stopping once a generator returns an image. The return value is this image instance or ``None`` if no generators return an image. If the source file cannot be opened, it will be set to ``None`` and still passed to the generators.
def set_element(self, row, col, value):
    """Set the float value of the matrix cell at (*row*, *col*).

    :param row: the 0-based index of the row
    :param col: the 0-based index of the column
    :param value: the float value for that cell
    """
    javabridge.call(self.jobject, "setElement", "(IID)V", row, col, value)
Sets the float value at the specified location. :param row: the 0-based index of the row :type row: int :param col: the 0-based index of the column :type col: int :param value: the float value for that cell :type value: float
def update(self, **kwargs):
    """Override of ``update`` that concatenates streamed ``data`` onto the
    buffer (up to the defined length) before delegating to the parent.
    """
    data = kwargs.get('data')
    if data is not None:
        # If incoming DataFrame columns don't line up with the buffer's,
        # assume the index must be restored as a column before concat.
        if (util.pd and isinstance(data, util.pd.DataFrame) and
                list(data.columns) != list(self.data.columns) and
                self._index):
            data = data.reset_index()
        self.verify(data)
        kwargs['data'] = self._concat(data)
        self._count += 1
    super(Buffer, self).update(**kwargs)
Overrides update to concatenate streamed data up to defined length.
def _set_autocommit(connection): if hasattr(connection.connection, "autocommit"): if callable(connection.connection.autocommit): connection.connection.autocommit(True) else: connection.connection.autocommit = True elif hasattr(connection.connection, "set_isolation_level"): connection.connection.set_isolation_level(0)
Make sure a connection is in autocommit mode.
def string_to_datetime(self, obj):
    """Decode an ISO-ish datetime/date string into a datetime object.

    The format is chosen by string length: 19 chars -> "%Y-%m-%dT%H:%M:%S",
    more than 19 -> with microseconds, 10 chars -> date only.  Anything
    that does not parse (or is not a string) is returned unchanged.
    """
    if isinstance(obj, six.string_types):
        length = len(obj)
        if length == 19:
            fmt = "%Y-%m-%dT%H:%M:%S"
        elif length > 19:
            fmt = "%Y-%m-%dT%H:%M:%S.%f"
        elif length == 10:
            fmt = "%Y-%m-%d"
        else:
            fmt = None
        if fmt is not None:
            try:
                return datetime.strptime(obj, fmt)
            except ValueError:
                pass
    return obj
Decode a datetime string to a datetime object
async def get_wallet_record(wallet_handle: int, type_: str, id: str, options_json: str) -> str:
    """Get a wallet record by id.

    :param wallet_handle: wallet handle (created by open_wallet)
    :param type_: record-type namespace (separates record collections)
    :param id: the id of the record
    :param options_json: retrieval options (retrieveType / retrieveValue /
        retrieveTags flags)
    :return: wallet record json
    """
    logger = logging.getLogger(__name__)
    logger.debug("get_wallet_record: >>> wallet_handle: %r, type_: %r, id: %r, options_json: %r", wallet_handle, type_, id, options_json)
    # Lazily create (and cache) the C callback handed to libindy.
    if not hasattr(get_wallet_record, "cb"):
        logger.debug("get_wallet_record: Creating callback")
        get_wallet_record.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
    c_wallet_handle = c_int32(wallet_handle)
    c_type = c_char_p(type_.encode('utf-8'))
    c_id = c_char_p(id.encode('utf-8'))
    c_options_json = c_char_p(options_json.encode('utf-8'))
    wallet_record = await do_call('indy_get_wallet_record',
                                  c_wallet_handle,
                                  c_type,
                                  c_id,
                                  c_options_json,
                                  get_wallet_record.cb)
    res = wallet_record.decode()
    logger.debug("get_wallet_record: <<< res: %r", res)
    return res
Get an wallet record by id :param wallet_handle: wallet handler (created by open_wallet). :param type_: allows to separate different record types collections :param id: the id of record :param options_json: //TODO: FIXME: Think about replacing by bitmask { retrieveType: (optional, false by default) Retrieve record type, retrieveValue: (optional, true by default) Retrieve record value, retrieveTags: (optional, true by default) Retrieve record tags } :return: wallet record json: { id: "Some id", type: "Some type", // present only if retrieveType set to true value: "Some value", // present only if retrieveValue set to true tags: <tags json>, // present only if retrieveTags set to true }
def _handle_exists(self, node, scope, ctxt, stream):
    """Handle the ``exists`` unary operator.

    Returns an ``Int`` field set to 1 when evaluating the operand
    succeeds, and 0 when it raises AttributeError (field not present).
    """
    result = fields.Int()
    try:
        self._handle_node(node.expr, scope, ctxt, stream)
    except AttributeError:
        result._pfp__set_value(0)
    else:
        result._pfp__set_value(1)
    return result
Handle the exists unary operator :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO
def query_echo(cls, request, foo: (Ptypes.query, String('A query parameter'))) -> [
        (200, 'Ok', String)]:
    """Echo the query parameter *foo*.

    Implemented as a generator handler: it yields for a random number of
    iterations before responding 200 with the echoed value.
    # NOTE(review): presumably the yields let the framework interleave work
    # and Respond() terminates the handler -- confirm against framework docs.
    """
    log.info('Echoing query param, value is: {}'.format(foo))
    for i in range(randint(0, MAX_LOOP_DURATION)):
        yield
    msg = 'The value sent was: {}'.format(foo)
    Respond(200, msg)
Echo the query parameter.
def validate_args(args):
    """Basic option validation.

    :param args: the parsed command line options
    :return: False if the options are not valid, True otherwise
    """
    def fail(*messages):
        # Print every diagnostic line, then signal invalid options.
        for message in messages:
            print(message)
        return False

    if not args.minutes and not args.start_time:
        return fail("Error: missing --minutes or --start-time")
    if args.minutes and args.start_time:
        return fail("Error: --minutes shouldn't be specified if --start-time is used")
    if args.end_time and not args.start_time:
        return fail("Error: --end-time can't be used without --start-time")
    if args.minutes and args.minutes <= 0:
        return fail("Error: --minutes must be > 0")
    if args.start_time and not TIME_FORMAT_REGEX.match(args.start_time):
        return fail("Error: --start-time format is not valid",
                    "Example format: '2015-11-26 11:00:00'")
    if args.end_time and not TIME_FORMAT_REGEX.match(args.end_time):
        return fail("Error: --end-time format is not valid",
                    "Example format: '2015-11-26 11:00:00'")
    if args.batch_size <= 0:
        return fail("Error: --batch-size must be > 0")
    return True
Basic option validation. Returns False if the options are not valid, True otherwise. :param args: the command line options :type args: map :param brokers_num: the number of brokers
def use_comparative_composition_view(self):
    """Pass through to each provider session's
    ``use_comparative_composition_view``; sessions lacking the method are
    skipped."""
    self._object_views['composition'] = COMPARATIVE
    for session in self._get_provider_sessions():
        try:
            session.use_comparative_composition_view()
        except AttributeError:
            # Provider session does not support this view; ignore.
            pass
Pass through to provider CompositionLookupSession.use_comparative_composition_view
def detect_terminal(_environ=os.environ):
    """Detect the "terminal" you are using.

    Checks for tmux, byobu and screen first; otherwise falls back to
    $COLORTERM (e.g. "gnome-terminal") and finally $TERM.
    """
    if _environ.get('TMUX'):
        return 'tmux'
    if subdict_by_key_prefix(_environ, 'BYOBU'):
        return 'byobu'
    # Default to '' so a missing $TERM doesn't crash on None.startswith.
    if _environ.get('TERM', '').startswith('screen'):
        return _environ['TERM']
    if _environ.get('COLORTERM'):
        return _environ['COLORTERM']
    return _environ.get('TERM')
Detect "terminal" you are using. First, this function checks if you are in tmux, byobu, or screen. If not it uses $COLORTERM [#]_ if defined and fallbacks to $TERM. .. [#] So, if you are in Gnome Terminal you have "gnome-terminal" instead of "xterm-color"".
def get_agent(msg):
    """Return the agent from a message body.

    Legacy messages stored 'agent' as a single-element list; those are
    unwrapped to their first element.
    """
    agent = msg['msg']['agent']
    return agent[0] if isinstance(agent, list) else agent
Handy hack to handle legacy messages where 'agent' was a list.
def getManagedObjects(self, objectPath):
    """Build the reply dict for
    org.freedesktop.DBus.ObjectManager.GetManagedObjects.

    Maps each exported path strictly below *objectPath* to a dict of
    {interface name: all interface properties}.
    """
    managed = {}
    for path in sorted(self.exports.keys()):
        # Skip the root itself and anything outside its subtree.
        if path == objectPath or not path.startswith(objectPath):
            continue
        exported = self.exports[path]
        managed[path] = {
            iface.name: exported.getAllProperties(iface.name)
            for iface in exported.getInterfaces()
        }
    return managed
Returns a Python dictionary containing the reply content for org.freedesktop.DBus.ObjectManager.GetManagedObjects
def get_docstring(obj):
    """Extract *obj*'s docstring as a list of cleaned-up lines.

    Inherited docstrings are considered.  Objects without any docstring
    are reported via a warning and documented as 'Undocumented'.
    """
    docstring = getdoc(obj, allow_inherited=True)
    if docstring is None:
        getLogger(__name__).warning("Object %s doesn't have a docstring.", obj)
        docstring = 'Undocumented'
    return prepare_docstring(docstring, ignore=1)
Extract the docstring from an object as individual lines. Parameters ---------- obj : object The Python object (class, function or method) to extract docstrings from. Returns ------- lines : `list` of `str` Individual docstring lines with common indentation removed, and newline characters stripped. Notes ----- If the object does not have a docstring, a docstring with the content ``"Undocumented."`` is created.
def _normalize_overlap(overlap, window, nfft, samp, method='welch'):
    """Convert an overlap given in physical units into a sample count.

    Bartlett averaging always uses zero overlap.  When *overlap* is None
    and a named window is given, that window's recommended overlap is
    used; otherwise None means no overlap.
    """
    if method == 'bartlett':
        return 0
    if overlap is None:
        if isinstance(window, string_types):
            return recommended_overlap(window, nfft)
        return 0
    return seconds_to_samples(overlap, samp)
Normalise an overlap in physical units to a number of samples Parameters ---------- overlap : `float`, `Quantity`, `None` the overlap in some physical unit (seconds) window : `str` the name of the window function that will be used, only used if `overlap=None` is given nfft : `int` the number of samples that will be used in the fast Fourier transform samp : `Quantity` the sampling rate (Hz) of the data that will be transformed method : `str` the name of the averaging method, default: `'welch'`, only used to return `0` for `'bartlett'` averaging Returns ------- noverlap : `int` the number of samples to be be used for the overlap
def make_label(self, path):
    """Create an empty key (with creation-time metadata) at *path*.

    NOTE: this borrows too much from the internals of ofs -- maybe expose
    different parts of its api?
    """
    from datetime import datetime
    from StringIO import StringIO
    bucket_name, label = path.lstrip("/").split("/", 1)
    bucket = self.ofs._require_bucket(bucket_name)
    key = self.ofs._get_key(bucket, label)
    if key is None:
        key = bucket.new_key(label)
    self.ofs._update_key_metadata(key, {
        '_creation_time': str(datetime.utcnow())
    })
    key.set_contents_from_file(StringIO(''))
    key.close()
this borrows too much from the internals of ofs maybe expose different parts of the api?
def setup_arrow_buttons(self):
    """Create the up/down arrow buttons placed at the top and bottom of
    the scrollarea.

    Both buttons are flat and sized to match the scrollbar's arrow height.

    :return: (up_button, down_button)
    """
    vsb = self.scrollarea.verticalScrollBar()
    opt = QStyleOptionSlider()
    vsb.initStyleOption(opt)
    arrow_rect = vsb.style().subControlRect(
        QStyle.CC_ScrollBar, opt, QStyle.SC_ScrollBarAddLine, self)
    arrow_height = arrow_rect.size().height()

    up_btn = QPushButton(icon=ima.icon('last_edit_location'))
    up_btn.setFlat(True)
    up_btn.setFixedHeight(arrow_height)
    up_btn.clicked.connect(self.go_up)

    down_btn = QPushButton(icon=ima.icon('folding.arrow_down_on'))
    down_btn.setFlat(True)
    down_btn.setFixedHeight(arrow_height)
    down_btn.clicked.connect(self.go_down)

    return up_btn, down_btn
Setup the up and down arrow buttons that are placed at the top and bottom of the scrollarea.
def last_week_of_year(cls, year):
    """Return the last week (number 52 or 53) of the given year.

    Equivalent to ``Week(year + 1, 0)``, but also works for the maximum
    representable year, where that expression would overflow.
    """
    return cls.max if year == cls.max.year else cls(year + 1, 0)
Return the last week of the given year. This week with either have week-number 52 or 53. This will be the same as Week(year+1, 0), but will even work for year 9999 where this expression would overflow. The first week of a given year is simply Week(year, 1), so there is no dedicated classmethod for that.
def relative_humidity_from_dewpoint(temperature, dewpt):
    r"""Calculate the relative humidity.

    Relative humidity is the ratio of the vapor pressure at the dewpoint
    to the saturation vapor pressure at the air temperature.

    Parameters
    ----------
    temperature : `pint.Quantity`
        The temperature
    dewpt : `pint.Quantity`
        The dew point temperature

    Returns
    -------
    `pint.Quantity`
        The relative humidity

    See Also
    --------
    saturation_vapor_pressure
    """
    vapor_pressure = saturation_vapor_pressure(dewpt)
    saturation_pressure = saturation_vapor_pressure(temperature)
    return vapor_pressure / saturation_pressure
r"""Calculate the relative humidity. Uses temperature and dewpoint in celsius to calculate relative humidity using the ratio of vapor pressure to saturation vapor pressures. Parameters ---------- temperature : `pint.Quantity` The temperature dew point : `pint.Quantity` The dew point temperature Returns ------- `pint.Quantity` The relative humidity See Also -------- saturation_vapor_pressure
def cleanup(self):
    """Ensure the child process is stopped before packing up.

    Sends TERM, waits up to ~1 second for the process to exit, then sends
    KILL if it is still running.
    """
    if self.process is None:
        return
    if self.process.poll() is None:
        log.info("Sending TERM to %d", self.process.pid)
        self.process.terminate()
        # time.clock() was removed in Python 3.8; use the monotonic clock
        # for the grace-period wait instead.
        deadline = time.monotonic() + 1.0
        while time.monotonic() < deadline:
            time.sleep(0.05)
            if self.process.poll() is not None:
                break
        else:
            log.info("Sending KILL to %d", self.process.pid)
            self.process.kill()
    assert self.process.poll() is not None
Clean up, making sure the process is stopped before we pack up and go home.
def _stream_search(self, query):
    """Iterate over Solr search results for *query*, normalising the
    configured unique key field to ``_id`` on each document."""
    for document in self.solr.search(query, rows=100000000):
        if self.unique_key != "_id":
            document["_id"] = document.pop(self.unique_key)
        yield document
Helper method for iterating over Solr search results.
def qsize(self):
    """Return the number of items currently in the queue.

    :return: Integer containing size of the queue
    :exception: ConnectionError if queue is not connected
    """
    if not self.connected:
        raise QueueNotConnectedError("Queue is not Connected")
    try:
        return self.__db.llen(self._key)
    except redis.ConnectionError as e:
        # Re-wrap so the caller sees a description of the failure.
        raise redis.ConnectionError(repr(e))
Returns the number of items currently in the queue :return: Integer containing size of the queue :exception: ConnectionError if queue is not connected
def p(i, sample_size, weights):
    """Probability that element *i* of a weighted set appears in a
    weighted sample (without replacement) of *sample_size* picks.

    Created to test the output of the `SomeOf` maker class.
    """
    weight_i = weights[i]
    total_weight = sum(weights)
    other_weights = [w for j, w in enumerate(weights) if j != i]

    probability_of_i = 0
    for picks_before_i in range(sample_size):
        # Every ordered way the other elements could be drawn first...
        for permutation in itertools.permutations(other_weights, picks_before_i):
            factors = []
            remaining = total_weight
            for weight in permutation:
                factors.append(weight / remaining)
                remaining -= weight
            # ...followed by element i drawn from what remains.
            factors.append(weight_i / remaining)
            probability_of_i += reduce(lambda x, y: x * y, factors)
    return probability_of_i
Given a weighted set and sample size return the probabilty that the weight `i` will be present in the sample. Created to test the output of the `SomeOf` maker class. The math was provided by Andy Blackshaw - thank you dad :)
def _order_by_is_valid_or_none(self, params):
    """Validate that a given order_by has proper syntax.

    :param params: Query params.
    :return: True if either no order_by is present, or the order_by is
        well-formed (note: an order_by additionally requires a group_by).
    """
    if not "order_by" in params or not params["order_by"]:
        return True

    def _order_by_dict_is_not_well_formed(d):
        # Each entry must be a dict with a non-empty "property_name", an
        # optionally valid "direction", and no other keys.
        if not isinstance(d, dict):
            return True
        if "property_name" in d and d["property_name"]:
            if "direction" in d and not direction.is_valid_direction(d["direction"]):
                return True
            for k in d:
                if k != "property_name" and k != "direction":
                    return True
            return False
        return True

    order_by_list = json.loads(params["order_by"])
    for order_by in order_by_list:
        if _order_by_dict_is_not_well_formed(order_by):
            return False
    # order_by is only allowed in combination with group_by.
    if not "group_by" in params or not params["group_by"]:
        return False
    return True
Validates that a given order_by has proper syntax. :param params: Query params. :return: Returns True if either no order_by is present, or if the order_by is well-formed.
def after_websocket(self, func: Callable, name: AppOrBlueprintKey=None) -> Callable:
    """Add an after websocket function (usable as a decorator).

    The function is coerced to a coroutine and appended to the handlers
    registered under *name* (None targets the app itself).

    Arguments:
        func: The after websocket function itself.
        name: Optional blueprint key name.
    """
    self.after_websocket_funcs[name].append(ensure_coroutine(func))
    return func
Add an after websocket function. This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_websocket def func(response): return response Arguments: func: The after websocket function itself. name: Optional blueprint key name.
def get_words(data):
    """Extract the words (``\\w+`` runs) from the given string.

    :param data: Data to extract words from.
    :return: list of words
    """
    matches = re.findall(r"\w+", data)
    LOGGER.debug("> Words: '{0}'".format(", ".join(matches)))
    return matches
Extracts the words from given string. Usage:: >>> get_words("Users are: John Doe, Jane Doe, Z6PO.") [u'Users', u'are', u'John', u'Doe', u'Jane', u'Doe', u'Z6PO'] :param data: Data to extract words from. :type data: unicode :return: Words. :rtype: list
def cache_key_exist(self, key):
    """Return a JSON response stating whether *key* exists in the cache.

    HTTP 200 when present, 404 when absent.
    """
    exists = bool(cache.get(key))
    return json_success(json.dumps({'key_exist': exists}),
                        status=200 if exists else 404)
Return whether the given key exists in the cache.
def cached_read(self, kind):
    """Return the cached value for *kind* to avoid hammering the API.

    Refreshes via ``pull_stats`` when the entry is missing or older than
    ``self.cache_timeout`` seconds.
    """
    if kind not in self.cache:
        self.pull_stats(kind)
    age = self.epochnow() - self.cache[kind]['lastcall']
    if age > self.cache_timeout:
        self.pull_stats(kind)
    return self.cache[kind]['lastvalue']
Cache stats calls to prevent hammering the API
async def post(self, url_path: str, params: dict = None, rtype: str = RESPONSE_JSON, schema: dict = None) -> Any:
    """POST request on self.endpoint + url_path.

    :param url_path: Url encoded path following the endpoint
    :param params: Url query string parameters dictionary
    :param rtype: Response type (aiohttp response, text, or json)
    :param schema: Json Schema to validate response (optional, default None)
    :return: the response in the requested form
    """
    if params is None:
        params = dict()
    client = API(self.endpoint.conn_handler(self.session, self.proxy))
    response = await client.requests_post(url_path, **params)
    if schema is not None:
        # Validate before returning the response in any form.
        await parse_response(response, schema)
    if rtype == RESPONSE_AIOHTTP:
        return response
    elif rtype == RESPONSE_TEXT:
        return await response.text()
    elif rtype == RESPONSE_JSON:
        return await response.json()
POST request on self.endpoint + url_path :param url_path: Url encoded path following the endpoint :param params: Url query string parameters dictionary :param rtype: Response type :param schema: Json Schema to validate response (optional, default None) :return:
def _prep_fields_param(fields): store_samples = False if fields is None: return True, None if isinstance(fields, str): fields = [fields] else: fields = list(fields) if 'samples' in fields: fields.remove('samples') store_samples = True elif '*' in fields: store_samples = True return store_samples, fields
Prepare the `fields` parameter, and determine whether or not to store samples.
def generous_parse_uri(uri):
    """Return a urlparse.ParseResult object with the results of parsing the
    given URI.

    A bare relative path is converted to an absolute ``file://`` URI
    (Windows paths are normalised to unix form) before parsing.
    """
    result = urlparse(uri)
    if result.scheme != '':
        return result
    abspath = os.path.abspath(result.path)
    if IS_WINDOWS:
        abspath = windows_to_unix_path(abspath)
    return urlparse("file://{}".format(abspath))
Return a urlparse.ParseResult object with the results of parsing the given URI. This has the same properties as the result of parse_uri. When passed a relative path, it determines the absolute path, sets the scheme to file, the netloc to localhost and returns a parse of the result.
def main(args=None):
    """Main function for usage of psyplot from the command line.

    Parses the command line to :func:`make_plot`, or — when the
    ``psyplot_gui`` module is importable — to
    :func:`psyplot_gui.start_app`.
    """
    try:
        from psyplot_gui import get_parser as _get_parser
    except ImportError:
        logger.debug('Failed to import gui', exc_info=True)
        # No GUI available: require an output file and plot directly.
        cli_parser = get_parser(create=False)
        cli_parser.update_arg('output', required=True)
        cli_parser.create_arguments()
        cli_parser.parse2func(args)
    else:
        gui_parser = _get_parser(create=False)
        gui_parser.create_arguments()
        gui_parser.parse_known2func(args)
Main function for usage of psyplot from the command line This function creates a parser that parses command lines to the :func:`make_plot` functions or (if the ``psyplot_gui`` module is present, to the :func:`psyplot_gui.start_app` function) Returns ------- psyplot.parser.FuncArgParser The parser that has been used from the command line
def remove_user(self, group, username):
    """Remove a user from the specified LDAP group.

    Args:
        group: Name of group to update
        username: Username of user to remove

    Raises:
        ldap_tools.exceptions.InvalidResult: the group lookup failed.
    """
    try:
        self.lookup_id(group)
    except ldap_tools.exceptions.InvalidResult as err:
        # Re-raise without chaining the original exception context.
        raise err from None
    changes = {'memberUid': [(ldap3.MODIFY_DELETE, [username])]}
    self.client.modify(self.__distinguished_name(group), changes)
Remove a user from the specified LDAP group. Args: group: Name of group to update username: Username of user to remove Raises: ldap_tools.exceptions.InvalidResult: Results of the query were invalid. The actual exception raised inherits from InvalidResult. See #lookup_id for more info.
def create_new_values(self):
    """Create model instances for the values entered by the user.

    Returns the created instances as a queryset.
    """
    model = self.queryset.model
    extra_kwargs = self.extra_create_kwargs()
    created_pks = []
    for value in self._new_values:
        # extra kwargs deliberately take precedence over the create field.
        kwargs = {self.create_field: value}
        kwargs.update(extra_kwargs)
        created_pks.append(self.create_item(**kwargs).pk)
    return model.objects.filter(pk__in=created_pks)
Create values created by the user input. Return the model instances QS.
def get_temperature_from_humidity(self):
    """Returns the temperature in Celsius from the humidity sensor."""
    self._init_humidity()
    data = self._humidity.humidityRead()
    # data[2] flags whether the temperature reading is valid; fall back
    # to 0 when it is not.
    return data[3] if data[2] else 0
Returns the temperature in Celsius from the humidity sensor
def count_base_units(units):
    """Returns a dict mapping names of base units to how many times they
    appear in the given iterable of units.

    Effectively this counts how many length units you have, how many
    time units, and so forth.
    """
    counts = {}
    for unit in units:
        _, base_unit = get_conversion_factor(unit)
        counts[base_unit] = counts.get(base_unit, 0) + 1
    return counts
Returns a dict mapping names of base units to how many times they appear in the given iterable of units. Effectively this counts how many length units you have, how many time units, and so forth.
def read(self, file_path):
    """Read a zoneinfo structure from the given path.

    :param file_path: The path of a zoneinfo file.
    :raises InvalidZoneinfoFile: if the file does not exist.
    """
    if not os.path.exists(file_path):
        raise InvalidZoneinfoFile("The tzinfo file does not exist")
    with open(file_path, "rb") as zone_file:
        return self._parse(zone_file)
Read a zoneinfo structure from the given path. :param file_path: The path of a zoneinfo file.
def install_hooks():
    """This function installs the future.standard_library import hook
    into sys.meta_path.
    """
    if PY3:
        # Python 3 needs no rename hook; only register the aliases path.
        return
    install_aliases()
    flog.debug('sys.meta_path was: {0}'.format(sys.meta_path))
    flog.debug('Installing hooks ...')
    rename_hook = RenameImport(RENAMES)
    if not detect_hooks():
        # Avoid registering the hook twice.
        sys.meta_path.append(rename_hook)
    flog.debug('sys.meta_path is now: {0}'.format(sys.meta_path))
This function installs the future.standard_library import hook into sys.meta_path.
def generate(self, output_dir, work, ngrams, labels, minus_ngrams):
    """Generates HTML reports for each witness to `work`, showing its text
    with the n-grams in `ngrams` highlighted. Any n-grams in
    `minus_ngrams` have any highlighting of them (or subsets of them)
    removed.

    :param output_dir: directory to write report to
    :type output_dir: `str`
    :param work: name of work to highlight
    :type work: `str`
    :param ngrams: groups of n-grams to highlight
    :type ngrams: `list` of `list` of `str`
    :param labels: labels for the groups of n-grams
    :type labels: `list` of `str`
    :param minus_ngrams: n-grams to remove highlighting from
    :type minus_ngrams: `list` of `str`
    """
    template = self._get_template()
    colours = generate_colours(len(ngrams))
    for siglum in self._corpus.get_sigla(work):
        content = self._generate_base(work, siglum)
        # Highlight every group, then strip highlighting for the
        # excluded n-grams.
        for ngram_group in ngrams:
            content = self._highlight(content, ngram_group, True)
        content = self._highlight(content, minus_ngrams, False)
        self._ngrams_count = 1
        content = self._format_content(content)
        report_name = '{}-{}.html'.format(work, siglum)
        self._write(work, siglum, content, output_dir, report_name,
                    template, ngram_data=zip(labels, ngrams),
                    minus_ngrams=minus_ngrams, colours=colours)
Generates HTML reports for each witness to `work`, showing its text with the n-grams in `ngrams` highlighted. Any n-grams in `minus_ngrams` have any highlighting of them (or subsets of them) removed. :param output_dir: directory to write report to :type output_dir: `str` :param work: name of work to highlight :type work: `str` :param ngrams: groups of n-grams to highlight :type ngrams: `list` of `list` of `str` :param labels: labels for the groups of n-grams :type labels: `list` of `str` :param minus_ngrams: n-grams to remove highlighting from :type minus_ngrams: `list` of `str` :rtype: `str`
def is_archlinux():
    """Return True if the current system is Arch Linux.

    Arch is detected as a Linux host that reports no distribution via
    ``platform.linux_distribution()`` and carries the
    ``/etc/arch-release`` marker file.

    Note: ``platform.linux_distribution()`` was removed in Python 3.8;
    its absence is treated like an empty result so this no longer raises
    AttributeError on modern interpreters.
    """
    if platform.system().lower() != 'linux':
        return False
    distro = getattr(platform, 'linux_distribution', lambda: ('', '', ''))()
    if distro == ('', '', ''):
        return os.path.exists('/etc/arch-release')
    return False
Return True if the current distribution is Arch Linux (a Linux system with no reported distribution and an /etc/arch-release marker file).
def _less_or_close(a, value, **kwargs): r return (a < value) | np.isclose(a, value, **kwargs)
r"""Compare values for less or close to boolean masks. Returns a boolean mask for values less than or equal to a target within a specified absolute or relative tolerance (as in :func:`numpy.isclose`). Parameters ---------- a : array-like Array of values to be compared value : float Comparison value Returns ------- array-like Boolean array where values are less than or nearly equal to value.
def get_parameter_definitions(self):
    """Get the parameter definitions to submit to CloudFormation.

    Any variable definition whose `type` is an instance of `CFNType`
    will be returned as a CloudFormation Parameter.

    Returns:
        dict: parameter definitions. Keys are parameter names, values
            are dicts containing key/values for various parameter
            properties.
    """
    definitions = {}
    for var_name, attrs in self.defined_variables().items():
        var_type = attrs.get("type")
        if not isinstance(var_type, CFNType):
            continue
        # Deep-copy so the original variable definition is not mutated.
        cfn_attrs = copy.deepcopy(attrs)
        cfn_attrs["type"] = var_type.parameter_type
        definitions[var_name] = cfn_attrs
    return definitions
Get the parameter definitions to submit to CloudFormation. Any variable definition whose `type` is an instance of `CFNType` will be returned as a CloudFormation Parameter. Returns: dict: parameter definitions. Keys are parameter names, the values are dicts containing key/values for various parameter properties.
def create_worker_build(self, **kwargs):
    """Create a worker build.

    Pass-through method to create_prod_build with the following
    modifications:

    - platform, release and arrangement_version params are required
    - arrangement_version is used to select which worker_inner:n.json
      template to use
    - inner/outer templates and the customize configuration default to
      the worker variants when not set

    :return: BuildResponse instance
    :raises ValueError: if a required parameter is missing or the
        unwanted ``platforms`` parameter is supplied
    :raises OsbsValidationException: if no inner template exists for the
        given arrangement_version
    """
    missing = set()
    for required in ('platform', 'release', 'arrangement_version'):
        if not kwargs.get(required):
            missing.add(required)
    if missing:
        raise ValueError("Worker build missing required parameters: %s" % missing)
    # Worker builds are single-platform; 'platforms' belongs to the
    # orchestrator build.
    if kwargs.get('platforms'):
        raise ValueError("Worker build called with unwanted platforms param")
    arrangement_version = kwargs['arrangement_version']
    kwargs.setdefault('inner_template', WORKER_INNER_TEMPLATE.format(
        arrangement_version=arrangement_version))
    kwargs.setdefault('outer_template', WORKER_OUTER_TEMPLATE)
    kwargs.setdefault('customize_conf', WORKER_CUSTOMIZE_CONF)
    kwargs['build_type'] = BUILD_TYPE_WORKER
    try:
        return self._do_create_prod_build(**kwargs)
    except IOError as ex:
        # A missing inner template file means the requested
        # arrangement_version has no corresponding worker_inner:n.json.
        if os.path.basename(ex.filename) == kwargs['inner_template']:
            raise OsbsValidationException("worker invalid arrangement_version %s" % arrangement_version)
        raise
Create a worker build Pass through method to create_prod_build with the following modifications: - platform param is required - release param is required - arrangement_version param is required, which is used to select which worker_inner:n.json template to use - inner template set to worker_inner:n.json if not set - outer template set to worker.json if not set - customize configuration set to worker_customize.json if not set :return: BuildResponse instance
def calculate_md5(fileobject, size=2**16):
    """Utility function to calculate md5 hashes while being light on
    memory usage.

    By reading the fileobject piece by piece, we are able to process
    content that is larger than available memory.

    :param fileobject: seekable file-like object; read from position 0
        and rewound to position 0 afterwards.
    :param size: chunk size in bytes for each read.
    :return: hex digest string of the MD5 hash.
    """
    fileobject.seek(0)
    digest = hashlib.md5()
    while True:
        chunk = fileobject.read(size)
        # Text-mode files yield '' at EOF, which would never match a
        # b'' sentinel, so stop explicitly on any empty chunk.
        if not chunk:
            break
        if isinstance(chunk, six.text_type):
            chunk = chunk.encode('utf-8')
        digest.update(chunk)
    fileobject.seek(0)
    return digest.hexdigest()
Utility function to calculate md5 hashes while being light on memory usage. By reading the fileobject piece by piece, we are able to process content that is larger than available memory
def write(self, path):
    """Write RSS content to file.

    :param path: destination file path.
    """
    with open(path, 'wb') as out_file:
        out_file.write(self.getXML())
Write RSS content to file.
def namedb_get_all_namespace_ids( cur ):
    """Get a list of all READY namespace IDs."""
    query = "SELECT namespace_id FROM namespaces WHERE op = ?;"
    namespace_rows = namedb_query_execute( cur, query, (NAMESPACE_READY,) )
    return [row['namespace_id'] for row in namespace_rows]
Get a list of all READY namespace IDs.
def equities_sids_for_country_code(self, country_code):
    """Return all of the sids for a given country.

    Parameters
    ----------
    country_code : str
        An ISO 3166 alpha-2 country code.

    Returns
    -------
    tuple[int]
        The sids whose exchanges are in this country.
    """
    lifetimes = self._compute_asset_lifetimes([country_code])
    return tuple(lifetimes.sid.tolist())
Return all of the sids for a given country. Parameters ---------- country_code : str An ISO 3166 alpha-2 country code. Returns ------- tuple[int] The sids whose exchanges are in this country.
def _iter_indented_subactions(self, action): try: get_subactions = action._get_subactions except AttributeError: pass else: self._indent() if isinstance(action, argparse._SubParsersAction): for subaction in sorted( get_subactions(), key=lambda x: x.dest): yield subaction else: for subaction in get_subactions(): yield subaction self._dedent()
Sort the subcommands alphabetically
async def prepare_decrypter(client, cdn_client, cdn_redirect):
    """Prepares a new CDN decrypter.

    :param client: a TelegramClient connected to the main servers.
    :param cdn_client: a new client connected to the CDN.
    :param cdn_redirect: the redirect file object that caused this call.
    :return: (CdnDecrypter, first chunk file data)
    """
    # AES-CTR IV: the first 12 bytes come from the redirect; the low 4
    # zero bytes act as the initial block counter.
    cdn_aes = AESModeCTR(
        key=cdn_redirect.encryption_key,
        iv=cdn_redirect.encryption_iv[:12] + bytes(4)
    )
    decrypter = CdnDecrypter(
        cdn_client, cdn_redirect.file_token,
        cdn_aes, cdn_redirect.cdn_file_hashes
    )
    # Fetch the first chunk, sized by the first advertised file hash.
    cdn_file = await cdn_client(GetCdnFileRequest(
        file_token=cdn_redirect.file_token,
        offset=cdn_redirect.cdn_file_hashes[0].offset,
        limit=cdn_redirect.cdn_file_hashes[0].limit
    ))
    if isinstance(cdn_file, CdnFileReuploadNeeded):
        # The CDN lost the file: ask the main DC to re-upload it, then
        # retry the first chunk through the decrypter itself.
        await client(ReuploadCdnFileRequest(
            file_token=cdn_redirect.file_token,
            request_token=cdn_file.request_token
        ))
        cdn_file = decrypter.get_file()
    else:
        # In CTR mode encryption and decryption are the same operation,
        # so 'encrypt' here actually decrypts the chunk.
        cdn_file.bytes = decrypter.cdn_aes.encrypt(cdn_file.bytes)
        # Verify the chunk against (and consume) the first file hash.
        cdn_hash = decrypter.cdn_file_hashes.pop(0)
        decrypter.check(cdn_file.bytes, cdn_hash)
    return decrypter, cdn_file
Prepares a new CDN decrypter. :param client: a TelegramClient connected to the main servers. :param cdn_client: a new client connected to the CDN. :param cdn_redirect: the redirect file object that caused this call. :return: (CdnDecrypter, first chunk file data)
def _get_cached_mounted_points():
    """! Get the volumes present on the system
    @return List of mount points and their associated volume string
    Ex. [{ 'mount_point': 'D:', 'volume_string': 'xxxx'}, ...]
    """
    mounts = []
    try:
        mounted_devices_key = winreg.OpenKey(
            winreg.HKEY_LOCAL_MACHINE, "SYSTEM\\MountedDevices"
        )
        for entry in _iter_vals(mounted_devices_key):
            name, raw_value = entry[0], entry[1]
            if "DosDevices" not in name:
                continue
            volume_string = raw_value.decode("utf-16le", "ignore")
            if not _is_mbed_volume(volume_string):
                continue
            # Only keep entries that end in a drive letter (e.g. "D:").
            match = re.match(".*\\\\(.:)$", name)
            if not match:
                logger.debug("Invalid disk pattern for entry %s, skipping", name)
                continue
            mounts.append(
                {"mount_point": match.group(1), "volume_string": volume_string}
            )
    except OSError:
        logger.error('Failed to open "MountedDevices" in registry')
    return mounts
! Get the volumes present on the system @return List of mount points and their associated target id Ex. [{ 'mount_point': 'D:', 'target_id_usb_id': 'xxxx'}, ...]
def devices(self):
    """Return a list of configured devices, or an empty list when no
    filesystem is attached."""
    # The original used the discouraged `cond and [...] or [...]` idiom,
    # which only worked because an empty device list is falsy either way.
    if self.fs is None:
        return []
    return [JFSDevice(d, self, parentpath=self.rootpath)
            for d in self.fs.devices.iterchildren()]
Return a list of configured devices (empty when no filesystem is attached).
def normalize_index(index):
    """Normalize a numpy index.

    Converts *index* to an integer ``ndarray``: boolean masks become the
    indices of their True entries; integer arrays pass through unchanged.

    :raises ValueError: if the index is neither boolean nor integer.
    """
    index = np.asarray(index)
    if len(index) == 0:
        return index.astype('int')
    if index.dtype == bool:
        return index.nonzero()[0]
    # Accept any integer width: the original `dtype == 'int'` comparison
    # only matched the platform default and wrongly rejected e.g. int32
    # arrays on 64-bit systems.
    if np.issubdtype(index.dtype, np.integer):
        return index
    raise ValueError('Index should be either integer or bool')
normalize numpy index
def user(self):
    """Returns the user record selected by the current context.

    Falls back to an empty dict when the context names no known user.
    """
    active_context = self.contexts[self.current_context]
    return self.users.get(active_context.get("user", ""), {})
Returns the current user set by current context
def add_ip_address(list_name, item_name):
    """Add an IP address to an IP address list.

    list_name(str): The name of the specific policy IP address list to
        append to.
    item_name(str): The IP address to append to the list.

    CLI Example:

    .. code-block:: bash

        salt '*' bluecoat_sslv.add_ip_address MyIPAddressList 10.0.0.0/24
    """
    payload = {
        "jsonrpc": "2.0",
        "id": "ID0",
        "method": "add_policy_ip_addresses",
        "params": [list_name, {"item_name": item_name}],
    }
    response = __proxy__['bluecoat_sslv.call'](payload, True)
    return _validate_change_result(response)
Add an IP address to an IP address list. list_name(str): The name of the specific policy IP address list to append to. item_name(str): The IP address to append to the list. CLI Example: .. code-block:: bash salt '*' bluecoat_sslv.add_ip_address MyIPAddressList 10.0.0.0/24
def list_tag(self, limit=500, offset=0):
    """List all the tags for this Thing.

    Returns a list of tag strings. Raises IOTException if the
    infrastructure detects a problem, or LinkException on a
    communications failure.

    `limit` (optional) (integer) Return at most this many tags
    `offset` (optional) (integer) Return tags starting at this offset
    """
    event = self._client._request_entity_tag_list(
        self.__lid, limit=limit, offset=offset
    )
    self._client._wait_and_except_if_failed(event)
    return event.payload['tags']
List `all` the tags for this Thing Returns lists of tags, as below #!python [ "mytag1", "mytag2" "ein_name", "nochein_name" ] - OR... Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException) containing the error if the infrastructure detects a problem Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException) if there is a communications problem between you and the infrastructure `limit` (optional) (integer) Return at most this many tags `offset` (optional) (integer) Return tags starting at this offset
def report(self, name, ok, msg=None, deltat=20):
    """Report a sensor state change.

    Announcements for sensor *name* are rate-limited to one per *deltat*
    seconds; within that window only the stored state is updated.
    """
    entry = self.reports[name]
    if time.time() < entry.last_report + deltat:
        # Too soon to announce again; just record the latest state.
        entry.ok = ok
        return
    entry.last_report = time.time()
    if ok and not entry.ok:
        # Recovered since the last announcement.
        self.say("%s OK" % name)
    entry.ok = ok
    if not entry.ok:
        self.say(msg)
report a sensor error
def drop_index(self, table, column):
    """Drop an index from a table."""
    statement = 'ALTER TABLE {0} DROP INDEX {1}'.format(wrap(table), column)
    self.execute(statement)
    self._printer('\tDropped index from column {0}'.format(column))
Drop an index from a table.
def get_gene_id(gene_name):
    """Retrieve systematic yeast gene name from the common name.

    :param gene_name: Common name for yeast gene (e.g. ADE2).
    :type gene_name: str
    :returns: Systematic name for yeast gene (e.g. YOR128C).
    :rtype: str
    """
    from intermine.webservice import Service
    service = Service('http://yeastmine.yeastgenome.org/yeastmine/service')
    query = service.new_query('Gene')
    query.add_view('primaryIdentifier', 'secondaryIdentifier', 'symbol',
                   'name', 'sgdAlias', 'crossReferences.identifier',
                   'crossReferences.source.name')
    query.add_constraint('organism.shortName', '=', 'S. cerevisiae', code='B')
    query.add_constraint('Gene', 'LOOKUP', gene_name, code='A')
    # Only the first matching row is needed.
    for row in query.rows():
        return row['secondaryIdentifier']
Retrieve systematic yeast gene name from the common name. :param gene_name: Common name for yeast gene (e.g. ADE2). :type gene_name: str :returns: Systematic name for yeast gene (e.g. YOR128C). :rtype: str
def _recode_for_categories(codes, old_categories, new_categories):
    """Convert a set of codes to a new set of categories.

    Parameters
    ----------
    codes : array
    old_categories, new_categories : Index

    Returns
    -------
    new_codes : array

    Examples
    --------
    >>> old_cat = pd.Index(['b', 'a', 'c'])
    >>> new_cat = pd.Index(['a', 'b'])
    >>> codes = np.array([0, 1, 1, 2])
    >>> _recode_for_categories(codes, old_cat, new_cat)
    array([ 1, 0, 0, -1])
    """
    from pandas.core.algorithms import take_1d
    if len(old_categories) == 0:
        # All codes are already -1 (no categories to remap).
        return codes.copy()
    if new_categories.equals(old_categories):
        # Identical categories: codes are unchanged.
        return codes.copy()
    indexer = coerce_indexer_dtype(
        new_categories.get_indexer(old_categories), new_categories
    )
    # Codes absent from the new categories map to -1.
    return take_1d(indexer, codes.copy(), fill_value=-1)
Convert a set of codes for to a new set of categories Parameters ---------- codes : array old_categories, new_categories : Index Returns ------- new_codes : array Examples -------- >>> old_cat = pd.Index(['b', 'a', 'c']) >>> new_cat = pd.Index(['a', 'b']) >>> codes = np.array([0, 1, 1, 2]) >>> _recode_for_categories(codes, old_cat, new_cat) array([ 1, 0, 0, -1])
def from_record(cls, record, crs, schema=None):
    """Create GeoFeature from a record."""
    properties = cls._to_properties(record, schema)
    vector = GeoVector(shape(record['geometry']), crs)
    raster = record.get('raster')
    if raster:
        # Raster entries become 'visual' raster assets.
        assets = {
            key: dict(type=RASTER_TYPE, product='visual', **value)
            for key, value in raster.items()
        }
    else:
        assets = record.get('assets', {})
    return cls(vector, properties, assets)
Create GeoFeature from a record.
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
    """Produce a fake list of files spanning a year."""
    span = pds.date_range(pysat.datetime(2017, 12, 1),
                          pysat.datetime(2018, 12, 1))
    file_names = [data_path + day.strftime('%Y-%m-%d') + '.nofile'
                  for day in span]
    return pysat.Series(file_names, index=span)
Produce a fake list of files spanning a year
def upload(self, sys_id, file_path, name=None, multipart=False):
    """Attaches a new file to the provided record.

    :param sys_id: the sys_id of the record to attach the file to
    :param file_path: local absolute path of the file to upload
    :param name: custom name for the uploaded file (instead of basename)
    :param multipart: whether or not to use multipart
    :return: the inserted record
    :raises InvalidUsage: if multipart is not a bool
    """
    if not isinstance(multipart, bool):
        raise InvalidUsage('Multipart must be of type bool')
    resource = self.resource
    if name is None:
        name = os.path.basename(file_path)
    resource.parameters.add_custom({
        'table_name': self.table_name,
        'table_sys_id': sys_id,
        'file_name': name
    })
    # Read via a context manager so the handle is always closed
    # (the original `open(...).read()` leaked the file handle).
    with open(file_path, 'rb') as attachment:
        data = attachment.read()
    if multipart:
        headers = {"Content-Type": "multipart/form-data"}
        path_append = '/upload'
    else:
        headers = {"Content-Type": "text/plain"}
        path_append = '/file'
    return resource.request(method='POST', data=data, headers=headers,
                            path_append=path_append)
Attaches a new file to the provided record :param sys_id: the sys_id of the record to attach the file to :param file_path: local absolute path of the file to upload :param name: custom name for the uploaded file (instead of basename) :param multipart: whether or not to use multipart :return: the inserted record
def get_raw(self):
    """Get a list with information about the file.

    The returned list contains name, size, last_modified and location.
    """
    return [getattr(self, attr)
            for attr in ('name', 'size', 'last_modified', 'location')]
Get a list with information about the file. The returned list contains name, size, last_modified and location.
def unregister_dependent_on(self, tree):
    """Unregister a tree that this node depends on (no-op if absent)."""
    if tree not in self.dependent_on:
        return
    self.dependent_on.remove(tree)
Unregister a tree that this node is dependent on (no-op if it is not registered).
def _resolved_pid(self):
    """Resolve self.pid if it is a fetched pid."""
    if isinstance(self.pid, PersistentIdentifier):
        # Already a resolved identifier.
        return self.pid
    return resolve_pid(self.pid)
Resolve self.pid if it is a fetched pid.
def _fingerprint_dict_with_files(self, option_val):
    """Returns a fingerprint of the given dictionary containing file paths.

    Any value which is a file path that exists on disk is fingerprinted
    by that file's contents rather than by its path. Keys are assumed to
    be strings.
    """
    expanded = {key: self._expand_possible_file_value(value)
                for key, value in option_val.items()}
    return stable_option_fingerprint(expanded)
Returns a fingerprint of the given dictionary containing file paths. Any value which is a file path which exists on disk will be fingerprinted by that file's contents rather than by its path. This assumes the files are small enough to be read into memory. NB: The keys of the dict are assumed to be strings -- if they are not, the dict should be converted to encode its keys with `stable_option_fingerprint()`, as is done in the `fingerprint()` method.
def unregister(self, plugin=None, name=None):
    """Unregister a plugin object and all its contained hook
    implementations from internal data structures.

    Either *plugin* or *name* must be given; the other is derived.
    Returns the unregistered plugin object.
    """
    if name is None:
        assert plugin is not None, "one of name or plugin needs to be specified"
        name = self.get_name(plugin)
    if plugin is None:
        plugin = self.get_plugin(name)
    if self._name2plugin.get(name):
        # Only drop real registrations (blocked names map to a falsy value).
        self._name2plugin.pop(name)
    hookcallers = self._plugin2hookcallers.pop(plugin, [])
    for hookcaller in hookcallers:
        hookcaller._remove_plugin(plugin)
    return plugin
unregister a plugin object and all its contained hook implementations from internal data structures.
def device_message(device, code, ts=None, origin=None, type=None,
                   severity=None, title=None, description=None, hint=None,
                   **metaData):
    """This quickly builds a time-stamped message.

    If `ts` is None, the current time is used.
    """
    payload = MessagePayload(device=device)
    payload.messages.append(Message(
        code=code,
        ts=ts if ts is not None else local_now(),
        origin=origin,
        type=type,
        severity=severity,
        title=title,
        description=description,
        hint=hint,
        **metaData))
    return dumps(payload)
This quickly builds a time-stamped message. If `ts` is None, the current time is used.
def CreateAFF4Object(stat_response, client_id_urn, mutation_pool, token=None):
    """This creates a File or a Directory from a stat response."""
    urn = stat_response.pathspec.AFF4Path(client_id_urn)
    # Directories and files are stored as different AFF4 types.
    if stat.S_ISDIR(stat_response.st_mode):
        aff4_type = standard.VFSDirectory
    else:
        aff4_type = aff4_grr.VFSFile
    with aff4.FACTORY.Create(
        urn, aff4_type, mode="w", mutation_pool=mutation_pool,
        token=token) as fd:
        fd.Set(fd.Schema.STAT(stat_response))
        fd.Set(fd.Schema.PATHSPEC(stat_response.pathspec))
This creates a File or a Directory from a stat response.
def addHeader(self, name, value, must_understand=False):
    """Sets a persistent header to send with each request.

    @param name: Header name.
    @param value: Header value.
    @param must_understand: Whether the receiver is required to process
        the header.
    """
    headers = self.headers
    headers[name] = value
    headers.set_required(name, must_understand)
Sets a persistent header to send with each request. @param name: Header name.
def get_facet_serializer_class(self):
    """Return the class to use for serializing facets.

    Defaults to using ``self.facet_serializer_class``; raises
    AttributeError when it is unset.
    """
    serializer_class = self.facet_serializer_class
    if serializer_class is None:
        raise AttributeError(
            "%(cls)s should either include a `facet_serializer_class` attribute, "
            "or override %(cls)s.get_facet_serializer_class() method."
            % {"cls": self.__class__.__name__}
        )
    return serializer_class
Return the class to use for serializing facets. Defaults to using ``self.facet_serializer_class``.