code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def on_service_add(self, service):
    """Launch a worker thread that periodically runs the checks for the
    newly added *service*."""
    worker = self.check_loop
    self.launch_thread(service.name, worker, service)
When a new service is added, a worker thread is launched to periodically run the checks for that service.
def make_server( server_class, handler_class, authorizer_class,
                 filesystem_class, host_port, file_access_user=None,
                 **handler_options):
    """Build and return a server instance.

    :param host_port: ``(host, port)`` tuple the server binds to.
    :param file_access_user: user name passed to the authorizer.
    :param handler_options: extra attributes set on the handler class,
        e.g. ``timeout``, ``passive_ports``, ``masquerade_address``,
        ``certfile``, ``keyfile``.
    """
    from . import compat
    # Each of the three classes may be supplied as a dotted-path string.
    if isinstance(handler_class, compat.string_type):
        handler_class = import_class(handler_class)
    if isinstance(authorizer_class, compat.string_type):
        authorizer_class = import_class(authorizer_class)
    if isinstance(filesystem_class, compat.string_type):
        filesystem_class = import_class(filesystem_class)
    authorizer = authorizer_class(file_access_user)
    handler = handler_class
    # NOTE(review): options are set on the handler *class* itself, so they
    # are shared by every server built from the same class — confirm this
    # is intended.
    for key, value in handler_options.items():
        setattr(handler, key, value)
    handler.authorizer = authorizer
    if filesystem_class is not None:
        handler.abstracted_fs = filesystem_class
    return server_class(host_port, handler)
make server instance :host_port: (host, port) :file_access_user: 'spam' handler_options: * timeout * passive_ports * masquerade_address * certfile * keyfile
def get_playlist_songs(self, playlist_id, limit=1000):
    """Return all songs of a playlist as a list of ``Song`` objects.

    :param playlist_id: playlist id.
    :param limit: maximum number of results returned by the weapi endpoint.
    :return: a list of Song objects.
    """
    url = 'http://music.163.com/weapi/v3/playlist/detail?csrf_token='
    csrf = ''
    params = {'id': playlist_id, 'offset': 0, 'total': True,
              'limit': limit, 'n': 1000, 'csrf_token': csrf}
    result = self.post_request(url, params)
    songs = result['playlist']['tracks']
    songs = [Song(song['id'], song['name']) for song in songs]
    return songs
Get all of a playlist's songs. :param playlist_id: playlist id. :param limit: length of result returned by weapi. :return: a list of Song objects.
def check_or(state, *tests):
    """Pass if at least one of the sub-SCTs passes.

    Every test is executed (they may mutate state via side effects); if
    all fail, the feedback of the *first* failing test is reported.

    Args:
        state: State instance describing student and solution code.
        tests: one or more sub-SCTs to run.
    """
    success = False
    first_feedback = None
    for test in iter_tests(tests):
        try:
            multi(state, test)
            success = True
        except TestFail as e:
            # Keep only the first failure's feedback for reporting.
            if not first_feedback:
                first_feedback = e.feedback
    if success:
        return state
    state.report(first_feedback)
Test whether at least one SCT passes. If all of the tests fail, the feedback of the first test will be presented to the student. Args: state: State instance describing student and solution code, can be omitted if used with Ex() tests: one or more sub-SCTs to run :Example: The SCT below tests that the student typed either 'SELECT' or 'WHERE' (or both).. :: Ex().check_or( has_code('SELECT'), has_code('WHERE') ) The SCT below checks that a SELECT statement has at least a WHERE c or LIMIT clause.. :: Ex().check_node('SelectStmt', 0).check_or( check_edge('where_clause'), check_edge('limit_clause') )
def publish(idx=None):
    """Publish packaged distributions to a PyPI index.

    :param idx: optional repository name forwarded as ``-r`` to both
        ``setup.py register`` and ``twine upload``; default index if None.
    """
    if idx is None:
        idx = ''
    else:
        idx = '-r ' + idx
    run('python setup.py register {}'.format(idx))
    run('twine upload {} dist/*.whl dist/*.egg dist/*.tar.gz'.format(idx))
Publish packaged distributions to pypi index
def is_on_curve(self, point):
    """Check whether a point satisfies the curve equation modulo P.

    Args:
        point (AffinePoint): Point to be checked.

    Returns:
        bool: True if the point is on the curve, False otherwise.
    """
    x, y = point.X, point.Y
    residue = pow(y, 2, self.P) - pow(x, 3, self.P) - self.a * x - self.b
    return residue % self.P == 0
Checks whether a point is on the curve. Args: point (AffinePoint): Point to be checked. Returns: bool: True if point is on the curve, False otherwise.
def picard_fixmate(picard, align_bam):
    """Run Picard's FixMateInformation, generating a coordinate-sorted BAM.

    Work is skipped when the output already exists, and performed inside a
    transactional temp dir so partial files are never left behind.
    """
    base, ext = os.path.splitext(align_bam)
    out_file = "%s-sort%s" % (base, ext)
    if not file_exists(out_file):
        with tx_tmpdir(picard._config) as tmp_dir:
            with file_transaction(picard._config, out_file) as tx_out_file:
                opts = [("INPUT", align_bam),
                        ("OUTPUT", tx_out_file),
                        ("TMP_DIR", tmp_dir),
                        ("SORT_ORDER", "coordinate")]
                picard.run("FixMateInformation", opts)
    return out_file
Run Picard's FixMateInformation generating an aligned output file.
def parse(self):
    """Reset all recipe sections, read the recipe file into ``self.lines``
    when one is set, and hand off to the subclass ``_parse`` if present."""
    self.cmd = None
    self.entrypoint = None
    self.test = None
    self.comments = []
    self.environ = []
    self.files = []
    self.install = []
    self.labels = []
    self.ports = []
    self.volumes = []
    if self.recipe:
        self.lines = read_file(self.recipe)
    if hasattr(self, '_parse'):
        self._parse()
parse is the base function for parsing the recipe, whether it be a Dockerfile or Singularity recipe. The recipe is read in as lines, and saved to a list if needed for the future. If the client has it, the recipe type specific _parse function is called. Instructions for making a client subparser: It should have a main function _parse that parses a list of lines from some recipe text file into the appropriate sections, e.g., self.fromHeader self.environ self.labels self.install self.files self.test self.entrypoint
def install(module):
    """Install a Perl module from CPAN.

    CLI Example:

    .. code-block:: bash

        salt '*' cpan.install Template::Alloy
    """
    ret = {
        'old': None,
        'new': None,
    }
    old_info = show(module)
    cmd = 'cpan -i {0}'.format(module)
    out = __salt__['cmd.run'](cmd)
    # cpan emits this phrase when it cannot resolve the module name.
    if "don't know what it is" in out:
        ret['error'] = 'CPAN cannot identify this package'
        return ret
    new_info = show(module)
    ret['old'] = old_info.get('installed version', None)
    ret['new'] = new_info['installed version']
    return ret
Install a Perl module from CPAN CLI Example: .. code-block:: bash salt '*' cpan.install Template::Alloy
def persistant_error(request, message, extra_tags='', fail_silently=False,
                     *args, **kwargs):
    """Add a persistent message with the ``ERROR`` level.

    NOTE(review): the historical misspelling ("persistant") is kept in the
    name because callers depend on it.
    """
    add_message(request, ERROR_PERSISTENT, message, extra_tags=extra_tags,
                fail_silently=fail_silently, *args, **kwargs)
Adds a persistent message with the ``ERROR`` level.
def get_port_binding_level(filters):
    """Return PortBindingLevel rows matching *filters*, ordered by level."""
    session = db.get_reader_session()
    with session.begin():
        return (session.query(ml2_models.PortBindingLevel).
                filter_by(**filters).
                order_by(ml2_models.PortBindingLevel.level).
                all())
Returns entries from PortBindingLevel based on the specified filters.
def list(self):
    """Display the versions under the current model as a table with
    name, deploymentUri and createTime columns.

    Raises:
        Exception if called in a non-IPython environment.
    """
    import IPython
    # NOTE(review): split() (whitespace) keeps the last token of the
    # version name — presumably extracting the short name; confirm the
    # separator against the API's name format.
    data = [{'name': version['name'].split()[-1],
             'deploymentUri': version['deploymentUri'],
             'createTime': version['createTime']}
            for version in self.get_iterator()]
    IPython.display.display(
        datalab.utils.commands.render_dictionary(
            data, ['name', 'deploymentUri', 'createTime']))
List versions under the current model in a table view. Raises: Exception if it is called in a non-IPython environment.
def wgs84_to_pixel(lng, lat, transform, utm_epsg=None, truncate=True):
    """Convert WGS84 lng/lat to (row, column) pixel image coordinates.

    The point is first projected to UTM (CRS derived automatically when
    *utm_epsg* is None), then mapped through the georeferencing
    *transform*.

    :param lng: longitude of point
    :param lat: latitude of point
    :param transform: georeferencing transform of the image
    :param utm_epsg: UTM CRS constant, or None to auto-detect
    :param truncate: whether to truncate pixel coordinates
    :return: row and column pixel coordinates
    """
    easting, northing = wgs84_to_utm(lng, lat, utm_epsg)
    return utm_to_pixel(easting, northing, transform, truncate=truncate)
Convert WGS84 coordinates to pixel image coordinates given transform and UTM CRS. If no CRS is given it will be calculated it automatically. :param lng: longitude of point :type lng: float :param lat: latitude of point :type lat: float :param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)` :type transform: tuple or list :param utm_epsg: UTM coordinate reference system enum constants :type utm_epsg: constants.CRS or None :param truncate: Whether to truncate pixel coordinates. Default is ``True`` :type truncate: bool :return: row and column pixel image coordinates :rtype: float, float or int, int
def _get_broadcast_shape(shape1, shape2): if shape1 == shape2: return shape1 length1 = len(shape1) length2 = len(shape2) if length1 > length2: shape = list(shape1) else: shape = list(shape2) i = max(length1, length2) - 1 for a, b in zip(shape1[::-1], shape2[::-1]): if a != 1 and b != 1 and a != b: raise ValueError('shape1=%s is not broadcastable to shape2=%s' % (shape1, shape2)) shape[i] = max(a, b) i -= 1 return tuple(shape)
Given two shapes that are not identical, find the shape that both input shapes can broadcast to.
def _calculate(self, field): base_offset = 0 if self.base_field is not None: base_offset = self.base_field.offset target_offset = self._field.offset if (target_offset is None) or (base_offset is None): return 0 return target_offset - base_offset
If the offset is unknown, return 0
def list(self, mask=None):
    """List existing placement groups.

    Calls SoftLayer_Account::getPlacementGroups.
    """
    if mask is None:
        mask = ("mask[id, name, createDate, rule, guestCount, "
                "backendRouter[id, hostname]]")
    return self.client.call('Account', 'getPlacementGroups', mask=mask,
                            iter=True)
List existing placement groups Calls SoftLayer_Account::getPlacementGroups
def run(self, code: str) -> Output:
    """Run some code in the managed Spark session.

    Echoes any textual output when ``self.echo`` is set, raises on failed
    status when ``self.check`` is set, and returns the Output object.

    :param code: The code to run.
    """
    result = self._execute(code)
    if self.echo and result.text:
        print(result.text)
    if self.check:
        result.raise_for_status()
    return result
Run some code in the managed Spark session. :param code: The code to run.
def _decompress_into_buffer(self, out_buffer):
    """Decompress available input into *out_buffer*.

    Returns True when the data in the output buffer should be emitted.
    """
    zresult = lib.ZSTD_decompressStream(self._decompressor._dctx,
                                        out_buffer, self._in_buffer)
    # Input fully consumed: reset the input buffer and drop the source
    # buffer reference so it can be collected / refilled.
    if self._in_buffer.pos == self._in_buffer.size:
        self._in_buffer.src = ffi.NULL
        self._in_buffer.pos = 0
        self._in_buffer.size = 0
        self._source_buffer = None
        # A source without read() is a one-shot buffer: no more input.
        if not hasattr(self._source, 'read'):
            self._finished_input = True
    if lib.ZSTD_isError(zresult):
        raise ZstdError('zstd decompress error: %s' % _zstd_error(zresult))
    # Emit when something was written and either the buffer is full or a
    # frame ended (zresult == 0) while not reading across frames.
    return (out_buffer.pos and
            (out_buffer.pos == out_buffer.size or
             zresult == 0 and not self._read_across_frames))
Decompress available input into an output buffer. Returns True if data in output buffer should be emitted.
def click_exists(self, timeout=0):
    """Wait for the element and click it when found.

    Args:
        timeout (float): how long to wait for the element.

    Returns:
        bool: True when the element was found and clicked.
    """
    element = self.get(timeout=timeout, raise_error=False)
    if element is None:
        return False
    element.click()
    return True
Wait element and perform click Args: timeout (float): timeout for wait Returns: bool: if successfully clicked
def string(s):
    """Return a parser that matches the literal string *s*.

    On failure, the Value records how far the input matched, which gives
    more precise error positions.
    """
    @Parser
    def string_parser(text, index=0):
        slen, tlen = len(s), len(text)
        if text[index:index + slen] == s:
            return Value.success(index + slen, s)
        else:
            # Count the longest matching prefix for the failure position.
            matched = 0
            while matched < slen and index + matched < tlen and text[index + matched] == s[matched]:
                matched = matched + 1
            return Value.failure(index + matched, s)
    return string_parser
Parse a string.
def tags(self):
    """Return the associated tags.

    :return: the list of Tag objects
    :rtype: list
    """
    # getTags returns a Tag[] array (the result is read with array
    # accessors below), so the JNI signature must declare an array return
    # type; the previous "()Lweka/core/Tag;]" was malformed.
    a = javabridge.call(self.jobject, "getTags", "()[Lweka/core/Tag;")
    env = javabridge.get_env()
    length = env.get_array_length(a)
    wrapped = env.get_object_array_elements(a)
    result = [Tag(env.get_string(wrapped[i])) for i in range(length)]
    return result
Returns the associated tags. :return: the list of Tag objects :rtype: list
def handle_template(self, template, subdir):
    """Use yacms's project template by default.

    The way the default directory is picked is copied from Django's
    TemplateCommand.
    """
    if template is None:
        return six.text_type(os.path.join(yacms.__path__[0], subdir))
    return super(Command, self).handle_template(template, subdir)
Use yacms's project template by default. The method of picking the default directory is copied from Django's TemplateCommand.
def import_gwf_library(library, package=__package__):
    """Import the requested ``timeseries.io.gwf`` frame API module.

    Thin wrapper around :meth:`importlib.import_module` that rewrites the
    error message into something more helpful on failure.
    """
    module_name = '.%s' % library
    try:
        return importlib.import_module(module_name, package=package)
    except ImportError as exc:
        message = 'Cannot import %s frame API: %s' % (library, str(exc))
        exc.args = (message,)
        raise
Utility method to import the relevant timeseries.io.gwf frame API This is just a wrapper around :meth:`importlib.import_module` with a slightly nicer error message
def teetext(table, source=None, encoding=None, errors='strict', template=None,
            prologue=None, epilogue=None):
    """Return a table that writes rows to a text file as they are iterated
    over.

    ``template`` is mandatory and formats each row written to ``source``.
    """
    # NOTE(review): assert is stripped under `python -O`; this validation
    # disappears in optimized runs.
    assert template is not None, 'template is required'
    return TeeTextView(table, source=source, encoding=encoding, errors=errors,
                       template=template, prologue=prologue,
                       epilogue=epilogue)
Return a table that writes rows to a text file as they are iterated over.
def log_error(self, message, *args, **kwargs):
    """Forward *message* to the owning service's logger at ERROR level."""
    service = self._service
    service.log(logging.ERROR, message, *args, **kwargs)
Log server error
def init_with_instance(self, instance):
    """Initialize this wrapper from a content *instance* object."""
    self._uid = api.get_uid(instance)
    # Brain is resolved lazily elsewhere; reset it for the new instance.
    self._brain = None
    self._catalog = self.get_catalog_for(instance)
    self._instance = instance
Initialize with an instance object
def string_to_identity(identity_str):
    """Parse *identity_str* into an Identity dictionary.

    Matches against the module-level ``_identity_regexp`` and drops groups
    that captured nothing.
    """
    m = _identity_regexp.match(identity_str)
    result = m.groupdict()
    log.debug('parsed identity: %s', result)
    return {k: v for k, v in result.items() if v}
Parse string into Identity dictionary.
def object_properties_count(self, o):
    """Return the number of user-browsable properties of *o*.

    Containers report their length, scalars/callables/modules report 0,
    and arbitrary objects report the number of public ``__dict__`` entries
    whose values are not modules, methods or functions.
    """
    if isinstance(o, (dict, list, tuple, set)):
        return len(o)
    if isinstance(o, (type(None), bool, float, str, int, bytes,
                      types.ModuleType, types.MethodType,
                      types.FunctionType)):
        return 0
    try:
        attributes = vars(o)
    except TypeError:
        # Objects without __dict__ (e.g. slotted) expose no properties;
        # the original's bare `except:` is narrowed to the real failure.
        return 0
    skipped = (types.ModuleType, types.MethodType, types.FunctionType)
    return sum(1 for name, value in attributes.items()
               if not name.startswith('__') and type(value) not in skipped)
returns the number of user browsable properties of an object.
def dir_list(directory):
    """Return the list of all entries in *directory*.

    On failure, prints a diagnostic and returns None (preserving the
    original best-effort behaviour).
    """
    try:
        return listdir(directory)
    except OSError as err:
        # WindowsError only exists on Windows (NameError elsewhere);
        # OSError is its portable base class.
        print("Directory error: " + str(err))
Returns the list of all files in the directory.
def find_field(item_list, cond, comparator, target_field):
    """Return *target_field* from the first dict in *item_list* that
    satisfies ``comparator(item, cond)`` and contains the field.

    Args:
        item_list: list of dict objects.
        cond: parameter defining the condition.
        comparator: function checking whether a dict satisfies the
            condition.
        target_field: name of the field whose value is returned.

    Returns:
        The target value, or None if no item satisfies the condition.
    """
    matches = (item[target_field] for item in item_list
               if comparator(item, cond) and target_field in item)
    return next(matches, None)
Finds the value of a field in a dict object that satisfies certain conditions. Args: item_list: A list of dict objects. cond: A param that defines the condition. comparator: A function that checks if an dict satisfies the condition. target_field: Name of the field whose value to be returned if an item satisfies the condition. Returns: Target value or None if no item satisfies the condition.
def create_insert_func(self, wb_url,
                       wb_prefix, host_prefix,
                       top_url, env,
                       is_framed, coll='', include_ts=True,
                       **kwargs):
    """Create the function used to render the head-insert template for the
    current request.

    :param wb_url: WbUrl for the request this template is rendered for
    :param str wb_prefix: URL prefix pywb serves content under
    :param str host_prefix: host URL prefix pywb runs on
    :param str top_url: full URL of this request
    :param dict env: WSGI environment dictionary for this request
    :param bool is_framed: whether framed mode is in use
    :param str coll: collection name for this request
    :param bool include_ts: include a timestamp in the rendered template
    :param kwargs: extra values forwarded to the template render
    :return: callable rendering the head insert for a (rule, cdx) pair
    :rtype: callable
    """
    params = kwargs
    params['host_prefix'] = host_prefix
    params['wb_prefix'] = wb_prefix
    params['wb_url'] = wb_url
    params['top_url'] = top_url
    params['coll'] = coll
    params['is_framed'] = is_framed

    def make_head_insert(rule, cdx):
        # Per-record values are filled in at render time from the cdx
        # entry (note: the shared `params` dict is mutated on each call).
        params['wombat_ts'] = cdx['timestamp'] if include_ts else ''
        params['wombat_sec'] = timestamp_to_sec(cdx['timestamp'])
        params['is_live'] = cdx.get('is_live')
        if self.banner_view:
            banner_html = self.banner_view.render_to_string(env, cdx=cdx,
                                                            **params)
            params['banner_html'] = banner_html
        return self.render_to_string(env, cdx=cdx, **params)
    return make_head_insert
Create the function used to render the header insert template for the current request. :param rewrite.wburl.WbUrl wb_url: The WbUrl for the request this template is being rendered for :param str wb_prefix: The URL prefix pywb is serving the content using (e.g. http://localhost:8080/live/) :param str host_prefix: The host URL prefix pywb is running on (e.g. http://localhost:8080) :param str top_url: The full URL for this request (e.g. http://localhost:8080/live/http://example.com) :param dict env: The WSGI environment dictionary for this request :param bool is_framed: Is pywb or a specific collection running in framed mode :param str coll: The name of the collection this request is associated with :param bool include_ts: Should a timestamp be included in the rendered template :param kwargs: Additional keyword arguments to be supplied to the Jninja template render method :return: A function to be used to render the header insert for the request this template is being rendered for :rtype: callable
def _finish(self):
    """Close stdin and wait for the subprocess to exit."""
    # Only finalize while the process is still running.
    if self._process.returncode is None:
        self._process.stdin.flush()
        self._process.stdin.close()
        self._process.wait()
    self.closed = True
Closes and waits for subprocess to exit.
def collect_single_file(self, file_path):
    """Collect AnchorHub tags and auto-generated anchors from one file.

    Applies the collector's strategies line by line, honoring on/off
    switches, and records duplicate tags with their 1-based line numbers.

    :param file_path: string file path of the file to examine
    :return: (dict mapping tags to anchors,
              list of (tag, line_no, existing_anchor) duplicates)
    """
    lines = FileToList.to_list(file_path)
    file_anchors = {}
    file_duplicates = []
    for i in range(len(lines)):
        # Switches can suspend collection over regions of the file.
        self._try_switches(lines, i)
        if self._no_switches_on():
            for s in self._strategies:
                if s.test(lines, i):
                    tag, convert_me = s.get(lines, i)
                    if tag in file_anchors:
                        # Duplicate: record the tag, its 1-based line,
                        # and the anchor already assigned to the tag.
                        file_duplicates.append((tag, i + 1,
                                                file_anchors[tag]))
                    else:
                        anchor = self._converter(convert_me, file_anchors)
                        file_anchors[tag] = anchor
        self._arm_switches()
    return file_anchors, file_duplicates
Takes in a list of strings, usually the lines in a text file, and collects the AnchorHub tags and auto-generated anchors for the file according to the Collector's converter, strategies, and switches :param file_path: string file path of file to examine :return: A dictionary mapping AnchorHub tags to auto-generated anchors, and a list of containing an entry for each duplicate tag found on the page.
def _can_for_object(self, func_name, object_id, method_name):
    """Check whether the agent can perform *func_name* for the object.

    Falls back to override authorizations scoped to the catalogs that
    contain the object when the plain session check fails.
    """
    can_for_session = self._can(func_name)
    if (can_for_session or
            self._object_catalog_session is None or
            self._override_lookup_session is None):
        return can_for_session
    override_auths = self._override_lookup_session.get_authorizations_for_agent_and_function(
        self.get_effective_agent_id(),
        self._get_function_id(func_name))
    if not override_auths.available():
        return False
    # NOTE(review): this check is always true at this point — the None
    # case already returned above.
    if self._object_catalog_session is not None:
        catalog_ids = list(getattr(self._object_catalog_session,
                                   method_name)(object_id))
        # Authorized when any override's qualifier matches one of the
        # object's catalogs.
        for auth in override_auths:
            if auth.get_qualifier_id() in catalog_ids:
                return True
    return False
Checks if agent can perform function for object
def bootstrap_methods(self) -> BootstrapMethod:
    """Return the table from the BootstrapMethods attribute.

    The attribute is created first when the class file does not yet have
    one.

    :returns: Table of `BootstrapMethod` objects.
    """
    bootstrap = self.attributes.find_one(name='BootstrapMethods')
    if bootstrap is None:
        bootstrap = self.attributes.create(
            ATTRIBUTE_CLASSES['BootstrapMethods']
        )
    return bootstrap.table
Returns the bootstrap methods table from the BootstrapMethods attribute, if one exists. If it does not, one will be created. :returns: Table of `BootstrapMethod` objects.
def uniquify_list(L):
    """Return a new list with duplicates removed, preserving first-seen
    order.

    Uses a seen-set for O(n) behaviour instead of the original quadratic
    ``L.index`` scan; falls back to the equality scan for unhashable
    elements so the accepted inputs are unchanged.
    """
    try:
        seen = set()
        out = []
        for e in L:
            if e not in seen:
                seen.add(e)
                out.append(e)
        return out
    except TypeError:
        # Unhashable elements (e.g. lists): original O(n^2) scan.
        return [e for i, e in enumerate(L) if L.index(e) == i]
Same order unique list using only a list compression.
def drop(self, ex):
    "helper for apply_sql in DropX case"
    # Unknown table: IF EXISTS makes the drop a no-op, otherwise error.
    if ex.name not in self:
        if ex.ifexists:
            return
        raise KeyError(ex.name)
    table_ = self[ex.name]
    parent = table_.parent_table
    if table_.child_tables:
        # Dropping a parent with children requires an explicit CASCADE.
        if not ex.cascade:
            raise table.IntegrityError('delete_parent_without_cascade', ex.name)
        self.cascade_delete(ex.name)
    else:
        del self[ex.name]
    # NOTE(review): reconstructed from a collapsed one-liner — the parent
    # unlink is assumed to run for both branches; confirm against history.
    if parent:
        parent.child_tables.remove(table_)
helper for apply_sql in DropX case
def on_trial_complete(self, trial_id, result=None, error=False,
                      early_terminated=False):
    """Pass the result to skopt unless the trial errored or terminated
    early.

    The reward is negated because skopt minimizes while we maximize
    ``self._reward_attr``.
    """
    # Always drop the live-trial bookkeeping, even on error.
    skopt_trial_info = self._live_trial_mapping.pop(trial_id)
    if result:
        self._skopt_opt.tell(skopt_trial_info, -result[self._reward_attr])
Passes the result to skopt unless early terminated or errored. The result is internally negated when interacting with Skopt so that Skopt Optimizers can "maximize" this value, as it minimizes on default.
def kill(self) -> None:
    """Kill the ffmpeg job."""
    self._proc.kill()
    # Reap the process off the event loop so kill() does not block;
    # NOTE(review): the returned future is intentionally not awaited.
    self._loop.run_in_executor(None, self._proc.communicate)
Kill ffmpeg job.
def set_result(self, result):
    """Complete all traversed tasks, and this future itself, with
    *result*."""
    for future in self.traverse():
        future.set_result(result)
    # NOTE(review): guard suggests traverse() may have completed this
    # future already; only set again while still pending — confirm.
    if not self.done():
        super().set_result(result)
Complete all tasks.
def ecdsa_sign_compact(msg32, seckey):
    """Sign *msg32* with *seckey* and return a 65-byte compact signature.

    Takes the same message and seckey as ``_ecdsa_sign_recoverable``.
    Layout: 64-byte compact (r, s) serialization followed by a single
    recovery-id byte.
    """
    output64 = ffi.new("unsigned char[65]")
    recid = ffi.new("int *")
    lib.secp256k1_ecdsa_recoverable_signature_serialize_compact(
        ctx, output64, recid, _ecdsa_sign_recoverable(msg32, seckey)
    )
    # 64 bytes of signature data plus the recovery id as a trailing byte.
    r = ffi.buffer(output64)[:64] + struct.pack("B", recid[0])
    assert len(r) == 65, len(r)
    return r
Takes the same message and seckey as _ecdsa_sign_recoverable Returns an unsigned char array of length 65 containing the signed message
def nz(value, none_value, strict=True):
    """Return *none_value* when *value* is "missing", else *value*.

    Named after the old VBA ``Nz`` function.  With ``strict`` (the
    default) only ``None`` counts as missing; with ``strict=False`` any
    value for which ``is_not_null`` is falsy (e.g. the empty string) is
    replaced as well.

    Example::

        nz(None, "hello")        -> "hello"
        nz("", "hello")          -> ""
        nz("", "hello", False)   -> "hello"
    """
    # The original carried an always-False local debug flag and dead
    # print branches; that machinery is removed.
    if strict:
        return none_value if value is None else value
    if not is_not_null(value):
        return none_value
    return value
This function is named after an old VBA function. It returns a default value if the passed in value is None. If strict is False it will treat an empty string as None as well. example: x = None nz(x,"hello") --> "hello" nz(x,"") --> "" y = "" nz(y,"hello") --> "" nz(y,"hello", False) --> "hello"
def contrail_error_handler(f):
    """Decorator translating HttpError responses from the API server into
    readable messages."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except HttpError as e:
            # Contrail carries the useful text in `details`; swap it into
            # `message` before formatting the final args.
            if e.details:
                e.message, e.details = e.details, e.message
            e.args = ("%s (HTTP %s)" % (e.message, e.http_status),)
            raise
    return wrapper
Handle HTTP errors returned by the API server
def load_eidos_curation_table():
    """Return a pandas table of Eidos curation data.

    Fetches rule_summary.tsv from the Eidos GitHub repository and drops
    the trailing row (presumably a totals/summary line — confirm against
    the TSV).
    """
    url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
        'src/main/resources/org/clulab/wm/eidos/english/confidence/' + \
        'rule_summary.tsv'
    res = StringIO(requests.get(url).text)
    # read_table is deprecated; read_csv with sep='\t' is the documented
    # equivalent.
    table = pandas.read_csv(res, sep='\t')
    # iloc[:-1] drops the last row, matching drop(table.index[len-1]).
    table = table.iloc[:-1]
    return table
Return a pandas table of Eidos curation data.
def _hexencode(bytestring, insert_spaces = False):
    """Convert a byte string to a hex encoded string.

    For example 'J' will return '4A', and ``'\\x04'`` will return '04'.

    Args:
        bytestring (str): Can be for example ``'A\\x01B\\x45'``.
        insert_spaces (bool): Insert a space between character pairs to
            increase readability.

    Returns:
        A string of twice the length (longer with spaces inserted).

    Raises:
        TypeError, ValueError
    """
    _checkString(bytestring, description='byte string')
    separator = '' if not insert_spaces else ' '
    byte_representions = []
    # ord() per element: this expects a Python-2-style str of 1-char
    # elements, not a Python-3 bytes object (iterating bytes yields ints).
    for c in bytestring:
        byte_representions.append( '{0:02X}'.format(ord(c)) )
    return separator.join(byte_representions).strip()
Convert a byte string to a hex encoded string. For example 'J' will return '4A', and ``'\\x04'`` will return '04'. Args: bytestring (str): Can be for example ``'A\\x01B\\x45'``. insert_spaces (bool): Insert space characters between pair of characters to increase readability. Returns: A string of twice the length, with characters in the range '0' to '9' and 'A' to 'F'. The string will be longer if spaces are inserted. Raises: TypeError, ValueError
def get_param_arg(param, idx, klass, arg, attr='id'):
    """Resolve *arg* to a plain value for the fabric API.

    Instances of *klass* are reduced to their *attr* attribute; ints and
    strings pass through unchanged; anything else raises TypeError.
    """
    if isinstance(arg, klass):
        return getattr(arg, attr)
    if isinstance(arg, (int, str)):
        return arg
    raise TypeError(
        "%s[%d] must be int, str, or %s, not %s" % (
            param, idx, klass.__name__, type(arg).__name__))
Return the correct value for a fabric from `arg`.
def unpackb(packed, **kwargs):
    """Unpack a single object from *packed*.

    Raises `UnpackValueError` when the buffer is truncated and
    `ExtraData` when `packed` contains extra bytes.
    See :class:`Unpacker` for options.
    """
    unpacker = Unpacker(None, **kwargs)
    unpacker.feed(packed)
    try:
        ret = unpacker._unpack()
    except OutOfData:
        raise UnpackValueError("Data is not enough.")
    if unpacker._got_extradata():
        raise ExtraData(ret, unpacker._get_extradata())
    return ret
Unpack an object from `packed`. Raises `ExtraData` when `packed` contains extra bytes. See :class:`Unpacker` for options.
def checksum(digits):
    """Calculate the checksum digit of an Estonian personal identity code.

    Modulo-11 method: first with the level I scale; if the remainder is
    10, retried with the level II scale, where a remainder of 10 maps to
    checksum 0.  See https://et.wikipedia.org/wiki/Isikukood
    """
    sum_mod11 = sum(map(operator.mul, digits, Provider.scale1)) % 11
    if sum_mod11 < 10:
        return sum_mod11
    # Level I remainder was 10: retry with the level II scale.
    sum_mod11 = sum(map(operator.mul, digits, Provider.scale2)) % 11
    return 0 if sum_mod11 == 10 else sum_mod11
Calculate checksum of Estonian personal identity code. Checksum is calculated with "Modulo 11" method using level I or II scale: Level I scale: 1 2 3 4 5 6 7 8 9 1 Level II scale: 3 4 5 6 7 8 9 1 2 3 The digits of the personal code are multiplied by level I scale and summed; if remainder of modulo 11 of the sum is less than 10, checksum is the remainder. If remainder is 10, then level II scale is used; checksum is remainder if remainder < 10 or 0 if remainder is 10. See also https://et.wikipedia.org/wiki/Isikukood
def get_temperature_from_pressure(self):
    """Return the temperature in Celsius from the pressure sensor, or 0
    when the sensor reports no reading.

    data[2] gates the result (presumably a validity flag — confirm with
    the sensor driver); data[3] carries the temperature value.
    """
    self._init_pressure()
    data = self._pressure.pressureRead()
    return data[3] if data[2] else 0
Returns the temperature in Celsius from the pressure sensor
def compute_actor_handle_id_non_forked(actor_handle_id, current_task_id):
    """Deterministically compute an actor handle ID in the non-forked case.

    Used whenever an actor handle is pickled and unpickled (e.g. a remote
    function closing over a handle): the new ID is a deterministic
    function of the previous handle ID and the unpickling task's ID.

    Args:
        actor_handle_id: The original actor handle ID.
        current_task_id: The ID of the task that is unpickling the handle.

    Returns:
        An ID for the new actor handle.
    """
    assert isinstance(actor_handle_id, ActorHandleID)
    assert isinstance(current_task_id, TaskID)
    handle_id_hash = hashlib.sha1()
    handle_id_hash.update(actor_handle_id.binary())
    handle_id_hash.update(current_task_id.binary())
    handle_id = handle_id_hash.digest()
    return ActorHandleID(handle_id)
Deterministically compute an actor handle ID in the non-forked case. This code path is used whenever an actor handle is pickled and unpickled (for example, if a remote function closes over an actor handle). Then, whenever the actor handle is used, a new actor handle ID will be generated on the fly as a deterministic function of the actor ID, the previous actor handle ID and the current task ID. TODO(rkn): It may be possible to cause problems by closing over multiple actor handles in a remote function, which then get unpickled and give rise to the same actor handle IDs. Args: actor_handle_id: The original actor handle ID. current_task_id: The ID of the task that is unpickling the handle. Returns: An ID for the new actor handle.
def list_vmss_sub(access_token, subscription_id):
    """List VM Scale Sets in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response with a JSON body of VM scale sets.
    """
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/providers/Microsoft.Compute/virtualMachineScaleSets'
        + '?api-version=' + COMP_API)
    return do_get_next(endpoint, access_token)
List VM Scale Sets in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of VM scale sets.
def unpack_bytes(self, obj_bytes, encoding=None):
    """Unpack a byte stream into a dictionary.

    Prefers the configured bytes deserializer; otherwise decodes with
    *encoding* (or the default encoding) and uses the string deserializer.
    """
    assert self.bytes_to_dict or self.string_to_dict
    encoding = encoding or self.default_encoding
    LOGGER.debug('%r decoding %d bytes with encoding of %s',
                 self, len(obj_bytes), encoding)
    if self.bytes_to_dict:
        # Recursively coerce byte keys/values to unicode strings.
        return escape.recursive_unicode(self.bytes_to_dict(obj_bytes))
    return self.string_to_dict(obj_bytes.decode(encoding))
Unpack a byte stream into a dictionary.
def load(fnames, tag=None, sat_id=None, fake_daily_files_from_monthly=False,
         flatten_twod=True):
    """Load NASA CDAWeb CDF files into a pysat (data, meta) pair.

    Intended for use by pysat instrument modules supporting a particular
    NASA CDAWeb dataset.

    Parameters
    ----------
    fnames : pandas.Series
        Series of filenames.
    tag : str or NoneType
        tag or None (default=None)
    sat_id : str or NoneType
        satellite id or None (default=None)
    fake_daily_files_from_monthly : bool
        When True, filenames carry a daily date suffix appended by the
        list_files routine for monthly files; that date is used here to
        slice a single day of data out of the monthly file.
    flatten_twod : bool
        Passed through to ``cdf.to_pysat``.

    Returns
    -------
    data : pandas.DataFrame
    meta : pysat.Meta
    """
    import pysatCDF
    if len(fnames) <= 0:
        return pysat.DataFrame(None), None
    if fake_daily_files_from_monthly:
        # Strip the '_YYYY-MM-DD' suffix added by list_files and parse it.
        fname = fnames[0][0:-11]
        date = pysat.datetime.strptime(fnames[0][-10:], '%Y-%m-%d')
        with pysatCDF.CDF(fname) as cdf:
            data, meta = cdf.to_pysat(flatten_twod=flatten_twod)
            # DataFrame.ix was removed from pandas; .loc performs the
            # same inclusive label-based slice over one day.
            data = data.loc[date:date + pds.DateOffset(days=1)
                            - pds.DateOffset(microseconds=1), :]
            return data, meta
    with pysatCDF.CDF(fnames[0]) as cdf:
        return cdf.to_pysat(flatten_twod=flatten_twod)
Load NASA CDAWeb CDF files. This routine is intended to be used by pysat instrument modules supporting a particular NASA CDAWeb dataset. Parameters ------------ fnames : (pandas.Series) Series of filenames tag : (str or NoneType) tag or None (default=None) sat_id : (str or NoneType) satellite id or None (default=None) fake_daily_files_from_monthly : bool Some CDAWeb instrument data files are stored by month, interfering with pysat's functionality of loading by day. This flag, when true, parses of daily dates to monthly files that were added internally by the list_files routine, when flagged. These dates are used here to provide data by day. Returns --------- data : (pandas.DataFrame) Object containing satellite data meta : (pysat.Meta) Object containing metadata such as column names and units Examples -------- :: # within the new instrument module, at the top level define # a new variable named load, and set it equal to this load method # code below taken from cnofs_ivm.py. # support load routine # use the default CDAWeb method load = cdw.load
def main(dbfile, pidfile, mode):
    """Main analyzer routine: reuse the existing snapshot and take a new
    snapshot in *mode*."""
    Inspector(dbfile, pidfile).reuse_snapshot().snapshot(mode)
Main analyzer routine.
def schedule(ident, cron=None, minute='*', hour='*', day_of_week='*',
             day_of_month='*', month_of_year='*'):
    """Schedule harvesting of a source on a crontab.

    Either pass a full five-field *cron* string or the individual fields;
    the cron string overrides the individual fields when both are given.
    """
    source = get_source(ident)
    if cron:
        # Standard cron field order: minute hour day-of-month month dow.
        minute, hour, day_of_month, month_of_year, day_of_week = cron.split()
    crontab = PeriodicTask.Crontab(
        minute=str(minute),
        hour=str(hour),
        day_of_week=str(day_of_week),
        day_of_month=str(day_of_month),
        month_of_year=str(month_of_year)
    )
    if source.periodic_task:
        source.periodic_task.modify(crontab=crontab)
    else:
        source.modify(periodic_task=PeriodicTask.objects.create(
            task='harvest',
            name='Harvest {0}'.format(source.name),
            description='Periodic Harvesting',
            enabled=True,
            args=[str(source.id)],
            crontab=crontab,
        ))
    signals.harvest_source_scheduled.send(source)
    return source
Schedule an harvesting on a source given a crontab
def stop(self):
    """Stop the internal color pattern that is currently playing.

    Returns the device write result, or '' when no device is attached.
    """
    # Identity check instead of `== None`; stray semicolon removed.
    if self.dev is None:
        return ''
    # 'p' is the command byte for stopping pattern playback.
    buf = [REPORT_ID, ord('p'), 0, 0, 0, 0, 0, 0, 0]
    return self.write(buf)
Stop internal color pattern playing
def _get_column_ends(self): ends = collections.Counter() for line in self.text.splitlines(): for matchobj in re.finditer('\s{2,}', line.lstrip()): ends[matchobj.end()] += 1 return ends
Guess where the ends of the columns lie.
def _print_routes(api_provider, host, port):
    """Print the APIs that will be mounted, grouped by function + path.

    Purely for display.  Example output:
        Mounting Product at http://127.0.0.1:3000/path1/bar [GET, POST]

    :param api_provider: API Provider that can return a list of APIs
    :param string host: host name where the service is running
    :param int port: port number where the service is running
    :returns list(string): lines printed to the console (helps testing)
    """
    grouped_api_configs = {}
    for api in api_provider.get_all():
        key = "{}-{}".format(api.function_name, api.path)
        # setdefault builds the group entry once, replacing the manual
        # get/reassign dance of the original.
        config = grouped_api_configs.setdefault(
            key, {"function_name": api.function_name,
                  "path": api.path,
                  "methods": []})
        config["methods"].append(api.method)

    print_lines = []
    # Only the values matter; .items() with a discarded key is avoided.
    for config in grouped_api_configs.values():
        methods_str = "[{}]".format(', '.join(config["methods"]))
        output = "Mounting {} at http://{}:{}{} {}".format(
            config["function_name"], host, port, config["path"], methods_str)
        print_lines.append(output)
        LOG.info(output)
    return print_lines
Helper method to print the APIs that will be mounted. This method is purely for printing purposes. This method takes in a list of Route Configurations and prints out the Routes grouped by path. Grouping routes by Function Name + Path is the bulk of the logic. Example output: Mounting Product at http://127.0.0.1:3000/path1/bar [GET, POST, DELETE] Mounting Product at http://127.0.0.1:3000/path2/bar [HEAD] :param samcli.commands.local.lib.provider.ApiProvider api_provider: API Provider that can return a list of APIs :param string host: Host name where the service is running :param int port: Port number where the service is running :returns list(string): List of lines that were printed to the console. Helps with testing
def get_driver(secret_key=config.DEFAULT_SECRET_KEY,
               userid=config.DEFAULT_USERID,
               provider=config.DEFAULT_PROVIDER):
    """Return an authenticated libcloud driver.

    A driver represents successful authentication.  Drivers become stale,
    so obtain them as late as possible and do not cache them.  A
    ``get_driver`` hook on the config module takes precedence over the
    provider lookup.
    """
    if hasattr(config, 'get_driver'):
        logger.debug('get_driver %s' % config.get_driver)
        return config.get_driver()
    else:
        logger.debug('get_driver {0}@{1}'.format(userid, provider))
        return libcloud.compute.providers.get_driver(
            config.PROVIDERS[provider])(userid, secret_key)
A driver represents successful authentication. They become stale, so obtain them as late as possible, and don't cache them.
def _sanitize_url_components(comp_list, field): if not comp_list: return '' elif comp_list[0].startswith('{0}='.format(field)): ret = '{0}=XXXXXXXXXX&'.format(field) comp_list.remove(comp_list[0]) return ret + _sanitize_url_components(comp_list, field) else: ret = '{0}&'.format(comp_list[0]) comp_list.remove(comp_list[0]) return ret + _sanitize_url_components(comp_list, field)
Recursive function to sanitize each component of the url.
def register(self, src, trg, trg_mask=None, src_mask=None):
    """Pair-wise registration using thunder-registration.

    Estimates a 2D translation aligning *src* to *trg* by maximizing the
    correlation of the images' Fourier transforms.  See
    https://github.com/thunder-project/thunder-registration for details
    on the model estimation.

    :param src: 2D single-channel source (moving) image
    :param trg: 2D single-channel target (reference) image
    :param src_mask: mask of the source image; not used in this method
    :param trg_mask: mask of the target image; not used in this method
    :return: estimated 2x3 transformation matrix
    """
    ccreg = registration.CrossCorr()
    model = ccreg.fit(src, reference=trg)
    # NOTE(review): signs are flipped — presumably converting the model's
    # shift convention to the warp direction; confirm against thunder.
    translation = [-x for x in model.toarray().tolist()[0]]
    # Identity rotation with the (col, row) translation in the last
    # column of the affine warp matrix.
    warp_matrix = np.eye(2, 3)
    warp_matrix[0, 2] = translation[1]
    warp_matrix[1, 2] = translation[0]
    return warp_matrix
Implementation of pair-wise registration using thunder-registration For more information on the model estimation, refer to https://github.com/thunder-project/thunder-registration This function takes two 2D single channel images and estimates a 2D translation that best aligns the pair. The estimation is done by maximising the correlation of the Fourier transforms of the images. Once, the translation is estimated, it is applied to the (multi-channel) image to warp and, possibly, ot hte ground-truth. Different interpolations schemes could be more suitable for images and ground-truth values (or masks). :param src: 2D single channel source moving image :param trg: 2D single channel target reference image :param src_mask: Mask of source image. Not used in this method. :param trg_mask: Mask of target image. Not used in this method. :return: Estimated 2D transformation matrix of shape 2x3
def update_ports(self, ports, id_or_uri):
    """Update the switch ports.

    Only ports under the management of OneView and unlinked ports are
    supported for update.  Available for API version 300 or later.

    Args:
        ports: List of Switch Ports.
        id_or_uri: Either the switch id or the switch uri.

    Returns:
        dict: Switch
    """
    # Ensure every port entry carries its resource type.
    ports = merge_default_values(ports, {'type': 'port'})
    uri = self._client.build_uri(id_or_uri) + "/update-ports"
    return self._client.update(uri=uri, resource=ports)
Updates the switch ports. Only the ports under the management of OneView and those that are unlinked are supported for update. Note: This method is available for API version 300 or later. Args: ports: List of Switch Ports. id_or_uri: Can be either the switch id or the switch uri. Returns: dict: Switch
def interpolate_xml_array(data, low_res_coords, shape, chunks):
    """Interpolate an arbitrary-size dataset to a full-sized grid using
    linear interpolation over the given low-resolution coordinates."""
    xpoints, ypoints = low_res_coords
    return interpolate_xarray_linear(xpoints, ypoints, data, shape,
                                     chunks=chunks)
Interpolate arbitrary size dataset to a full sized grid.
def stop(self):
    """Stop logging and close the file, then rewrite the BLF file header
    with the final sizes, object count and timestamps."""
    self._flush()
    filesize = self.file.tell()
    super(BLFWriter, self).stop()
    # Fixed header fields (magic, header size, application id, version
    # numbers) followed by final size/count/timestamp fields.
    header = [b"LOGG", FILE_HEADER_SIZE, APPLICATION_ID, 0, 0, 0, 2, 6, 8, 1]
    header.extend([filesize, self.uncompressed_size, self.count_of_objects, 0])
    header.extend(timestamp_to_systemtime(self.start_timestamp))
    header.extend(timestamp_to_systemtime(self.stop_timestamp))
    # Re-open the (now closed) file to patch the header in place.
    with open(self.file.name, "r+b") as f:
        f.write(FILE_HEADER_STRUCT.pack(*header))
Stops logging and closes the file.
def error_leader(self, infile=None, lineno=None):
    """Emit a C-compiler-like, Emacs-friendly error-message leader."""
    # Fall back to the lexer's current position when not given explicitly.
    file_name = self.infile if infile is None else infile
    line_number = self.lineno if lineno is None else lineno
    return "\"%s\", line %d: " % (file_name, line_number)
Emit a C-compiler-like, Emacs-friendly error-message leader.
def run(self):
    """Send heartbeats to the Discord gateway while the thread should run.

    Posts an OP 1 (heartbeat) payload every ``self.interval`` seconds so
    the websocket connection stays open, sleeping in short
    ``TICK_INTERVAL`` steps so a stop request is noticed promptly.
    """
    while self.should_run:
        try:
            # BUG FIX: the original concatenated the module-level
            # `last_sequence` directly onto the message, which raises
            # TypeError for integer sequence numbers. Use the logger's
            # lazy %-formatting instead.
            self.logger.debug('Sending heartbeat, seq %s', last_sequence)
            self.ws.send(json.dumps({
                'op': 1,
                'd': last_sequence
            }))
        except Exception as e:
            self.logger.error(f'Got error in heartbeat: {str(e)}')
        finally:
            # Sleep in small ticks so shutdown (should_run=False) is
            # responsive even with long heartbeat intervals.
            elapsed = 0.0
            while elapsed < self.interval and self.should_run:
                time.sleep(self.TICK_INTERVAL)
                elapsed += self.TICK_INTERVAL
Runs the thread This method handles sending the heartbeat to the Discord websocket server, so the connection can remain open and the bot remain online for those commands that require it to be. Args: None
def drawing_end(self):
    """Finish a line-drawing session on the map.

    Hands the accumulated line to the registered callback, clears the
    callback, restores the default popup and removes the drawing layer.
    """
    from MAVProxy.modules.mavproxy_map import mp_slipmap
    # Nothing to do when no drawing session is active.
    if self.draw_callback is None:
        return
    self.draw_callback(self.draw_line)
    self.draw_callback = None
    slip_map = self.map
    slip_map.add_object(
        mp_slipmap.SlipDefaultPopup(self.default_popup, combine=True))
    slip_map.add_object(mp_slipmap.SlipClearLayer('Drawing'))
end line drawing
def readrows(self):
    """Yield rows of the log forever (or until ``self.max_rows``).

    Each row from the underlying BroLogReader has its timestamp
    replaced; rows are emitted at the EPS rate driven by
    ``self.eps_timer``, and the log is replayed in a loop.
    """
    emitted = 0
    while True:
        for row in self.log_reader.readrows():
            yield self.replace_timestamp(row)
            # Throttle emission to the configured events-per-second rate.
            time.sleep(next(self.eps_timer))
            emitted += 1
            if self.max_rows and (emitted >= self.max_rows):
                return
Using the BroLogReader this method yields each row of the log file replacing timestamps, looping and emitting rows based on EPS rate
def plot_f(self, plot_limits=None, fixed_inputs=None, resolution=None, apply_link=False, which_data_ycols='all', which_data_rows='all', visible_dims=None, levels=20, samples=0, lower=2.5, upper=97.5, plot_density=False, plot_data=True, plot_inducing=True, projection='2d', legend=True, predict_kw=None, **kwargs):
    """Convenience wrapper plotting the latent-function fit of the GP.

    Same as ``plot`` but for the latent function: the literal ``True``
    positional argument below selects the latent function and the
    literal ``0`` zeroes one of the sample counts -- NOTE(review): the
    meaning of these two positionals is inferred from argument order;
    confirm against the ``plot`` signature.

    :param plot_limits: [xmin, xmax] (1D) or [[xmin, ymin], [xmax, ymax]]
        (2D); defaults to the data limits.
    :param fixed_inputs: list of (dim, value) tuples fixing input dims.
    :param dict predict_kw: keyword arguments forwarded to prediction
        (put Y_metadata here if needed).
    """
    # Argument order must match plot() exactly; do not reorder.
    return plot(self, plot_limits, fixed_inputs, resolution, True, apply_link, which_data_ycols, which_data_rows, visible_dims, levels, samples, 0, lower, upper, plot_data, plot_inducing, plot_density, predict_kw, projection, legend, **kwargs)
Convenience function for plotting the fit of a GP. This is the same as plot, except it plots the latent function fit of the GP! If you want fine-grained control use the specific plotting functions supplied in the model. You can deactivate the legend for this one plot by supplying None to label. Give the Y_metadata in the predict_kw if you need it. :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits :type plot_limits: np.array :param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying that input dimension i should be set to value v. :type fixed_inputs: a list of tuples :param int resolution: The resolution of the prediction [default:200] :param bool apply_link: whether to apply the link function of the GP to the raw prediction. :param which_data_ycols: when the data has several columns (independent outputs), only plot these :type which_data_ycols: 'all' or a list of integers :param which_data_rows: which of the training data to plot (default all) :type which_data_rows: 'all' or a slice object to slice self.X, self.Y :param array-like visible_dims: an array specifying the input dimensions to plot (maximum two) :param int levels: the number of levels in the density (number bigger than 1, where 35 is smooth and 1 is the same as plot_confidence). You can go higher than 50 if the result is not smooth enough for you. :param int samples: the number of samples to draw from the GP and plot into the plot. This will always be samples from the latent function. :param float lower: the lower percentile to plot :param float upper: the upper percentile to plot :param bool plot_data: plot the data into the plot? :param bool plot_inducing: plot inducing inputs? :param bool plot_density: plot density instead of the confidence interval? :param dict predict_kw: the keyword arguments for the prediction.
If you want to plot a specific kernel give dict(kern=<specific kernel>) in here :param dict error_kwargs: kwargs for the error plot for the plotting library you are using :param kwargs plot_kwargs: kwargs for the data plot for the plotting library you are using
def get(self, key, default):
    """Return a copy of the list stored at ``key``, or ``default``.

    The copy is taken under the lock so callers can mutate the result
    without affecting the shared mapping.
    """
    with self._lock:
        if key not in self._dict:
            return default
        return self._dict[key].copy()
If the key is set, return a copy of the list stored at key. Otherwise return default.
def _parent_tile(tiles):
    """Find the common parent tile for a sequence of tiles.

    Returns None for an empty sequence.
    """
    parent = None
    for tile in tiles:
        # Seed with the first tile, then fold in the rest.
        parent = tile if parent is None else common_parent(parent, tile)
    return parent
Find the common parent tile for a sequence of tiles.
def get_outcome_group(self, group):
    """Return the details of the global Outcome Group with the given id.

    :calls: `GET /api/v1/global/outcome_groups/:id`
    :param group: outcome group object or its integer id.
    :returns: :class:`canvasapi.outcome.OutcomeGroup`
    """
    from canvasapi.outcome import OutcomeGroup
    group_id = obj_or_id(group, "group", (OutcomeGroup,))
    endpoint = 'global/outcome_groups/{}'.format(group_id)
    response = self.__requester.request('GET', endpoint)
    return OutcomeGroup(self.__requester, response.json())
Returns the details of the Outcome Group with the given id. :calls: `GET /api/v1/global/outcome_groups/:id \ <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_ :param group: The outcome group object or ID to return. :type group: :class:`canvasapi.outcome.OutcomeGroup` or int :returns: An outcome group object. :rtype: :class:`canvasapi.outcome.OutcomeGroup`
def compile_msg_payload(self, invite):
    """Build the outbound message payload for an invite.

    Determines the recipient address, message content and metadata,
    falling back to defaults where the invite does not specify them.
    If any default was applied, the invite record is updated and saved
    so subsequent sends reuse the same payload.

    Returns:
        dict with "to_addr", "content" and "metadata" keys, ready to be
        POSTed to the message sender.
    """
    self.l.info("Compiling the outbound message payload")
    # (field, default factory) pairs replace the three copy-pasted
    # if/else blocks of the original; defaults are computed lazily so
    # e.g. the identity lookup only happens when actually needed.
    defaults = (
        ("to_addr", lambda: get_identity_address(invite.identity)),
        ("content", lambda: settings.INVITE_TEXT),
        ("metadata", dict),
    )
    msg_payload = {}
    update_invite = False
    for field, default in defaults:
        if field in invite.invite:
            msg_payload[field] = invite.invite[field]
        else:
            update_invite = True
            msg_payload[field] = default()
    if update_invite:
        # Persist the applied defaults on the invite record.
        self.l.info("Updating the invite.invite field")
        invite.invite = msg_payload
        invite.save()
    self.l.info("Compiled the outbound message payload")
    return msg_payload
Determine recipient, message content, return it as a dict that can be Posted to the message sender
def star_expr_check(self, original, loc, tokens):
    """Check for Python 3.5 star unpacking."""
    message = "star unpacking (add 'match' to front to produce universal code)"
    return self.check_py("35", message, original, loc, tokens)
Check for Python 3.5 star unpacking.
def convex_conj(self):
    r"""The convex conjugate functional of the quadratic form.

    For :math:`f(x) = <x, Ax> + <b, x> + c` the conjugate is
    :math:`f^*(x) = <(x - b), A^{-1}(x - b)> - c`. If the quadratic
    part is absent (``self.operator is None``) the conjugate is a
    translated indicator function on zero.
    """
    if self.operator is None:
        # Purely linear form: conjugate is an indicator on zero,
        # translated by the linear term's vector if present.
        tmp = IndicatorZero(space=self.domain, constant=-self.constant)
        if self.vector is None:
            return tmp
        else:
            return tmp.translated(self.vector)
    if self.vector is None:
        # Pure quadratic part: invert the operator, negate the constant.
        return QuadraticForm(operator=self.operator.inverse, constant=-self.constant)
    else:
        opinv = self.operator.inverse
        # Expanded conjugate: vector -A^{-*}b - A^{-1}b and
        # constant <b, A^{-1}b> - c (see class docstring derivation).
        vector = -opinv.adjoint(self.vector) - opinv(self.vector)
        constant = self.vector.inner(opinv(self.vector)) - self.constant
        return QuadraticForm(operator=opinv, vector=vector, constant=constant)
r"""The convex conjugate functional of the quadratic form. Notes ----- The convex conjugate of the quadratic form :math:`<x, Ax> + <b, x> + c` is given by .. math:: (<x, Ax> + <b, x> + c)^* (x) = <(x - b), A^-1 (x - b)> - c = <x , A^-1 x> - <x, A^-* b> - <x, A^-1 b> + <b, A^-1 b> - c. If the quadratic part of the functional is zero it is instead given by a translated indicator function on zero, i.e., if .. math:: f(x) = <b, x> + c, then .. math:: f^*(x^*) = \begin{cases} -c & \text{if } x^* = b \\ \infty & \text{else.} \end{cases} See Also -------- IndicatorZero
def _get_app_path(url): app_path = urlparse(url).path.rstrip("/") if not app_path.startswith("/"): app_path = "/" + app_path return app_path
Extract the app path from a Bokeh server URL Args: url (str) : Returns: str
def sysinit(systype, conf, project):
    """Output configuration for the system initialization subsystem."""
    conf_module = ConfModule(conf)
    rendered = get_config(
        systype,
        conf=conf_module.configurations[0],
        conf_path=conf,
        project_name=project,
    )
    click.secho(rendered)
Outputs configuration for system initialization subsystem.
def _RunInTransaction(self, function, readonly=False):
    """Runs ``function`` within a (possibly retried) transaction.

    Allocates a connection from the pool, begins a transaction on it and
    passes the connection to ``function``. On success the transaction is
    committed (unless readonly). On a retryable MySQL operational error
    the transaction is rolled back and retried with exponential backoff.

    Args:
        function: callable accepting a single MySQLdb connection.
        readonly: start a consistent-snapshot, read-only transaction.

    Returns:
        The value returned by the last call to function.

    Raises:
        MySQLdb.OperationalError: if the error is not retryable or the
            retry budget is exhausted.
    """
    start_query = "START TRANSACTION;"
    if readonly:
        start_query = "START TRANSACTION WITH CONSISTENT SNAPSHOT, READ ONLY;"
    for retry_count in range(_MAX_RETRY_COUNT):
        with contextlib.closing(self.pool.get()) as connection:
            try:
                with contextlib.closing(connection.cursor()) as cursor:
                    cursor.execute(start_query)
                ret = function(connection)
                if not readonly:
                    connection.commit()
                return ret
            except MySQLdb.OperationalError as e:
                connection.rollback()
                # BUG FIX: retry_count only reaches _MAX_RETRY_COUNT - 1
                # inside range(_MAX_RETRY_COUNT), so the original
                # `retry_count >= _MAX_RETRY_COUNT` test was dead and a
                # retryable error on the final attempt was swallowed and
                # replaced by the generic Exception below. Re-raise the
                # real error on the final attempt instead.
                if retry_count >= _MAX_RETRY_COUNT - 1 or not _IsRetryable(e):
                    raise
                # Exponential backoff with jitter between retries.
                time.sleep(random.uniform(1.0, 2.0) * math.pow(1.5, retry_count))
    # Unreachable: every loop iteration either returns or raises.
    raise Exception("Looped ended early - last exception swallowed.")
Runs function within a transaction. Allocates a connection, begins a transaction on it and passes the connection to function. If function finishes without raising, the transaction is committed. If function raises, the transaction will be rolled back, if a retryable database error is raised, the operation may be repeated. Args: function: A function to be run, must accept a single MySQLdb.connection parameter. readonly: Indicates that only a readonly (snapshot) transaction is required. Returns: The value returned by the last call to function. Raises: Any exception raised by function.
def set_dry_run(xml_root, value=True):
    """Set the dry-run property so records are not updated on import.

    With dry-run enabled only a log file is produced. Raises
    Dump2PolarionException for unrecognized root elements.
    """
    value_str = str(value).lower()
    assert value_str in ("true", "false")
    root_tag = xml_root.tag
    # The property name differs between the two importer XML formats.
    if root_tag == "testsuites":
        _set_property(xml_root, "polarion-dry-run", value_str)
    elif root_tag in ("testcases", "requirements"):
        _set_property(xml_root, "dry-run", value_str)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
Sets dry-run so records are not updated, only log file is produced.
def crit_met(self):
    """Determine if the stopping criterion has been met.

    Requires every arm to have at least 3 pulls before the configured
    criterion is even consulted.

    Returns
    -------
    bool
    """
    # Any arm with fewer than 3 pulls blocks stopping outright.
    needs_more_pulls = True in (self.pulls < 3)
    if needs_more_pulls:
        return False
    return self.criteria[self.criterion](self.stop_value)
Determine if stopping criterion has been met. Returns ------- bool
def _bfs_from_cluster_tree(tree, bfs_root): result = [] to_process = [bfs_root] while to_process: result.extend(to_process) to_process = tree['child'][np.in1d(tree['parent'], to_process)].tolist() return result
Perform a breadth first search on a tree in condensed tree format
def read_inquiry_mode(sock):
    """Read the HCI inquiry mode of a Bluetooth adapter.

    Returns the current mode, or -1 on failure.
    """
    # Save the current socket filter so it can be restored afterwards.
    old_filter = sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
    # Listen only for the CMD_COMPLETE event of Read_Inquiry_Mode.
    flt = bluez.hci_filter_new()
    opcode = bluez.cmd_opcode_pack(bluez.OGF_HOST_CTL, bluez.OCF_READ_INQUIRY_MODE)
    bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
    bluez.hci_filter_set_event(flt, bluez.EVT_CMD_COMPLETE);
    bluez.hci_filter_set_opcode(flt, opcode)
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
    bluez.hci_send_cmd(sock, bluez.OGF_HOST_CTL, bluez.OCF_READ_INQUIRY_MODE )
    # Skip 6 header bytes, then read status and mode bytes --
    # NOTE(review): the 6-byte skip is assumed to match the HCI
    # CMD_COMPLETE event layout; confirm against the HCI spec.
    pkt = sock.recv(255)
    status,mode = struct.unpack("xxxxxxBB", pkt)
    if status != 0: mode = -1
    # Restore the original filter before returning.
    sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
    return mode
returns the current mode, or -1 on failure
def wait_while_exceptions( predicate, timeout_seconds=120, sleep_seconds=1, noisy=False):
    """Wait for ``predicate`` to return without raising, ignoring exceptions.

    Any exception from the predicate triggers a sleep and retry; the
    first non-exception result is returned.

    Args:
        predicate: zero-argument callable to poll.
        timeout_seconds: overall deadline in seconds.
        sleep_seconds: pause between attempts.
        noisy: log swallowed exceptions and print spinner progress.

    Raises:
        TimeoutExpired: when the deadline passes without a success.
    """
    start_time = time_module.time()
    timeout = Deadline.create_deadline(timeout_seconds)
    while True:
        try:
            result = predicate()
            return result
        except Exception as e:
            # Deliberately broad: the whole point of this helper is to
            # retry until the predicate stops raising.
            if noisy:
                logger.exception("Ignoring error during wait.")
            if timeout.is_expired():
                funname = __stringify_predicate(predicate)
                raise TimeoutExpired(timeout_seconds, funname)
            if noisy:
                # Progress header like ">>[elapsed/total]".
                header = '{}[{}/{}]'.format(
                    shakedown.cli.helpers.fchr('>>'),
                    pretty_duration(time_module.time() - start_time),
                    pretty_duration(timeout_seconds)
                )
                print('{} spinning...'.format(header))
            time_module.sleep(sleep_seconds)
waits for a predicate, ignoring exceptions, returning the result. Predicate is a function. Exceptions will trigger the sleep and retry; any non-exception result will be returned. A timeout will throw a TimeoutExpired Exception.
def _validate_type_scalar(self, value):
    """Validate that ``value`` is a scalar (not a list or a dict).

    Returns True for scalar types; implicitly returns None otherwise,
    matching the validator-framework convention.
    """
    scalar_types = _int_types + (_str_type, float, date, datetime, bool)
    if isinstance(value, scalar_types):
        return True
Is not a list or a dict
def _new_temp_file(self, hint='warcrecsess'):
    """Return a new temp file in this session's temp directory."""
    temp_dir = self._temp_dir
    return wpull.body.new_temp_file(directory=temp_dir, hint=hint)
Return new temp file.
def cmd(self, cmd_name):
    """Return the tarantool queue command name for the current tube."""
    queue_name = self.queue.lua_queue_name
    return f"{queue_name}.tube.{self.name}:{cmd_name}"
Returns tarantool queue command name for current tube.
def edges_to_path(edges):
    """Connect edges into a path via topological sort of the edge graph.

    Returns None when no edges are given.
    """
    if not edges:
        return None
    graph = edges_to_graph(edges)
    return nx.topological_sort(graph)
Connect edges and return a path.
def zoomset_cb(self, setting, value, chviewer, info):
    """Callback invoked when a channel window is zoomed.

    Propagates the zoom to the channel via ``zoomset``.
    """
    chinfo = info.chinfo
    return self.zoomset(chviewer, chinfo)
This callback is called when a channel window is zoomed.
def go_to(self, x, y, z, yaw, duration_s, relative=False, group_mask=ALL_GROUPS):
    """Go to an absolute or relative position.

    :param x: x (m)
    :param y: y (m)
    :param z: z (m)
    :param yaw: yaw (radians)
    :param duration_s: time the motion should take (s)
    :param relative: True if x, y, z are relative to the current position
    :param group_mask: mask selecting which CFs this applies to
    """
    # <BBBfffff: command id, group mask, relative flag, x, y, z, yaw, duration.
    payload = struct.pack('<BBBfffff', self.COMMAND_GO_TO, group_mask,
                          relative, x, y, z, yaw, duration_s)
    self._send_packet(payload)
Go to an absolute or relative position :param x: x (m) :param y: y (m) :param z: z (m) :param yaw: yaw (radians) :param duration_s: time it should take to reach the position (s) :param relative: True if x, y, z is relative to the current position :param group_mask: mask for which CFs this should apply to
def create_db_instance(self, params):
    """Create a MySQL db.t1.micro RDS instance.

    Args:
        params: dict with 'id', 'size', 'username', 'password' and
            'dbname' keys.

    Returns:
        bool: True on success, False if the connection or the creation
        failed.
    """
    if not self.connect_to_aws_rds():
        return False
    try:
        # Return value is unused; success is reported as a boolean.
        self.rdsc.create_dbinstance(
            id=params['id'],
            allocated_storage=params['size'],
            instance_class='db.t1.micro',
            engine='MySQL',
            master_username=params['username'],
            master_password=params['password'],
            db_name=params['dbname'],
            multi_az=False
        )
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which also swallowed
        # SystemExit / KeyboardInterrupt. Still best-effort by design.
        return False
    return True
Create db instance
def inject_closure_values(func, **kwargs):
    """Return a new function acting as though the globals named in
    ``kwargs`` were closed over with the given values.

    Works on properties, class/static methods and plain functions.
    Useful for mocking and other nefarious activities.
    """
    wrapped_by = None
    if isinstance(func, property):
        # Rebuild each accessor of the property individually.
        fget, fset, fdel = func.fget, func.fset, func.fdel
        if fget: fget = fix_func(fget, **kwargs)
        if fset: fset = fix_func(fset, **kwargs)
        if fdel: fdel = fix_func(fdel, **kwargs)
        wrapped_by = type(func)
        return wrapped_by(fget, fset, fdel)
    elif isinstance(func, (staticmethod, classmethod)):
        # BUG FIX: capture the wrapper type *before* unwrapping.
        # The original did `func = func.__func__` first, so wrapped_by
        # became the plain function type and re-wrapping the result as
        # a staticmethod/classmethod below could never work.
        wrapped_by = type(func)
        func = func.__func__
    newfunc = _inject_closure_values(func, **kwargs)
    if wrapped_by:
        newfunc = wrapped_by(newfunc)
    return newfunc
Returns a new function identical to the previous one except that it acts as though global variables named in `kwargs` have been closed over with the values specified in the `kwargs` dictionary. Works on properties, class/static methods and functions. This can be useful for mocking and other nefarious activities.
def iterateBlocksBackFrom(block):
    """Generator yielding QTextBlocks from ``block`` back toward the
    start of the document, capped at MAX_SEARCH_OFFSET_LINES blocks.
    """
    yielded = 0
    current = block
    while current.isValid() and yielded < MAX_SEARCH_OFFSET_LINES:
        yield current
        current = current.previous()
        yielded += 1
Generator which iterates QTextBlocks backwards from ``block`` to the start of the document, but yields no more than MAX_SEARCH_OFFSET_LINES blocks.
def _cron_profile():
    """Create ~/.cron_profile if it does not already exist.

    The file sources virtualenvwrapper and exports the PYCI_XML path
    taken from the current environment.
    """
    from os import path
    cronpath = path.expanduser("~/.cron_profile")
    if path.isfile(cronpath):
        return
    from os import getenv
    xmlpath = getenv("PYCI_XML")
    lines = [
        'source /usr/local/bin/virtualenvwrapper.sh',
        'export PYCI_XML="{}"'.format(xmlpath),
    ]
    with open(cronpath, 'w') as f:
        f.write('\n'.join(lines))
Sets up the .cron_profile file if it does not already exist.
def save(self, filename, strip_prefix=''):
    """Save parameters to file.

    Parameters
    ----------
    filename : str
        Path to parameter file.
    strip_prefix : str, default ''
        Strip prefix from parameter names before saving.

    Raises
    ------
    ValueError
        If a parameter's name does not start with ``strip_prefix``.
    """
    arg_dict = {}
    for param in self.values():
        # Gather the parameter's weight (reduced across contexts).
        weight = param._reduce()
        if not param.name.startswith(strip_prefix):
            raise ValueError(
                "Prefix '%s' is to be striped before saving, but Parameter's "
                "name '%s' does not start with '%s'. "
                "this may be due to your Block shares parameters from other "
                "Blocks or you forgot to use 'with name_scope()' when creating "
                "child blocks. For more info on naming, please see "
                "http://mxnet.incubator.apache.org/tutorials/basic/naming.html"%(
                strip_prefix, param.name, strip_prefix))
        # Store under the name with the prefix removed.
        arg_dict[param.name[len(strip_prefix):]] = weight
    ndarray.save(filename, arg_dict)
Save parameters to file. Parameters ---------- filename : str Path to parameter file. strip_prefix : str, default '' Strip prefix from parameter names before saving.
def models_of_config(config):
    """Return the list of models from all resources in ``config``.

    Resources that are not themselves model classes (no ``__table__``)
    but expose a ``model`` attribute contribute that model instead.
    """
    def _to_model(resource):
        # Wrapper resources carry their model on `.model`.
        if not hasattr(resource, '__table__') and hasattr(resource, 'model'):
            return resource.model
        return resource
    return [_to_model(resource) for resource in resources_of_config(config)]
Return list of models from all resources in config.
def _deduplicate_items(cls, items):
    """Deduplicates assigned paths by incrementing numbering."""
    # Count every prefix of every path; if the total equals the number
    # of distinct prefixes, all paths are already unique.
    counter = Counter([path[:i] for path, _ in items for i in range(1, len(path)+1)])
    if sum(counter.values()) == len(counter):
        return items
    new_items = []
    counts = defaultdict(lambda: 0)
    for i, (path, item) in enumerate(items):
        if counter[path] > 1:
            # Duplicated path: append a roman-numeral counter element.
            path = path + (util.int_to_roman(counts[path]+1),)
        elif counts[path]:
            # Already-extended path seen before: bump its last element
            # -- NOTE(review): this branch's interplay with the one
            # above depends on iteration order; confirm with a test.
            path = path[:-1] + (util.int_to_roman(counts[path]+1),)
        new_items.append((path, item))
        counts[path] += 1
    return new_items
Deduplicates assigned paths by incrementing numbering
def get_day(self):
    """Return the day from the database in the format expected by the URL.

    Combines year, month and day from the parent archive view into a
    date and renders it with ``get_day_format``.
    """
    # Explicit-class super() calls match the surrounding codebase style.
    year = super(BuildableDayArchiveView, self).get_year()
    month = super(BuildableDayArchiveView, self).get_month()
    day = super(BuildableDayArchiveView, self).get_day()
    fmt = self.get_day_format()
    dt = date(int(year), int(month), int(day))
    return dt.strftime(fmt)
Return the day from the database in the format expected by the URL.
def path_fraction_id_offset(points, fraction, relative_offset=False):
    """Find the segment at ``fraction`` of the path length along the
    piecewise linear curve constructed from ``points``.

    Args:
        points: iterable of indexable objects whose indices 0, 1, 2
            are 3D cartesian coordinates.
        fraction: path-length fraction (0.0 <= fraction <= 1.0).
        relative_offset: return the offset relative to the segment
            length instead of as an absolute distance.

    Returns:
        (segment ID, segment offset) pair.

    Raises:
        ValueError: if fraction is outside [0, 1].
    """
    if not (0. <= fraction <= 1.0):
        raise ValueError("Invalid fraction: %.3f" % fraction)
    pts = np.array(points)[:, COLS.XYZ]
    # Per-segment Euclidean lengths and their running total.
    lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1)
    cum_lengths = np.cumsum(lengths)
    # Target arc-length distance from the start of the path.
    offset = cum_lengths[-1] * fraction
    # argmin over a boolean array returns the index of the first False,
    # i.e. the first segment whose cumulative length reaches the offset.
    seg_id = np.argmin(cum_lengths < offset)
    if seg_id > 0:
        # Convert path-distance into distance within the found segment.
        offset -= cum_lengths[seg_id - 1]
    if relative_offset:
        offset /= lengths[seg_id]
    return seg_id, offset
Find the segment which corresponds to the fraction of the path length along the piecewise linear curve which is constructed from the set of points. Args: points: an iterable of indexable objects with indices 0, 1, 2 correspoding to 3D cartesian coordinates fraction: path length fraction (0.0 <= fraction <= 1.0) relative_offset: return absolute or relative segment distance Returns: (segment ID, segment offset) pair.
def ginput(self, data_set=0, **kwargs):
    """Pop up the figure for the specified data set and call pylab.ginput().

    kwargs are forwarded to pylab.ginput(); its return value is returned.
    """
    import warnings
    import matplotlib.cbook
    # ginput emits mplDeprecation warnings that are just noise here.
    warnings.filterwarnings("ignore",
                            category=matplotlib.cbook.mplDeprecation)
    figure_number = data_set + self['first_figure']
    _s.tweaks.raise_figure_window(figure_number)
    return _p.ginput(**kwargs)
Pops up the figure for the specified data set. Returns value from pylab.ginput(). kwargs are sent to pylab.ginput()
def read_count(self, space, start, end):
    """Retrieve the normalized read count in the provided region."""
    # Count the reads overlapping the region without storing them.
    total_reads = sum(1 for _ in self._bam.fetch(space, start, end))
    return self._normalize(total_reads, self._total)
Retrieve the normalized read count in the provided region.