code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def pretty_echo(cls, message):
    """Display *message* with pretty-print formatting when attached to a tty."""
    if not cls.intty():
        return
    if message:
        from pprint import pprint
        pprint(message)
Display message using pretty print formatting.
def max_id_length(self):
    """Return the maximum length of a todo ID, used for formatting purposes."""
    if config().identifiers() == "text":
        # NOTE(review): this calls a (module-level) max_id_length helper with
        # the todo count -- presumably a text-ID width lookup; confirm this is
        # not accidental recursion on this method.
        return max_id_length(len(self._todos))
    else:
        try:
            # Number of decimal digits needed to print the highest numeric ID.
            return math.ceil(math.log(len(self._todos), 10))
        except ValueError:
            # math.log(0) raises ValueError for an empty todo list.
            return 0
Returns the maximum length of a todo ID, used for formatting purposes.
def new(cls, settings, *args, **kwargs):
    """Create a new Cloud instance based on the Settings."""
    logger.debug('Initializing new "%s" Instance object' % settings['CLOUD'])
    cloud = settings['CLOUD']
    # Dispatch table instead of an if/elif chain.
    instance_classes = {
        'bare': BareInstance,
        'aws': AWSInstance,
        'gcp': GCPInstance,
    }
    if cloud not in instance_classes:
        raise DSBException('Cloud "%s" not supported' % cloud)
    return instance_classes[cloud](settings=settings, *args, **kwargs)
Create a new Cloud instance based on the Settings
def connected(func):
    """Decorator: require an active connection; report ZooKeeper errors as output.

    The wrapped command runs only when self.connected is truthy; known
    kazoo/ZooKeeper exceptions are translated into user-facing messages.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # args[0] is the bound command object (self).
        self = args[0]
        if not self.connected:
            self.show_output("Not connected.")
        else:
            try:
                return func(*args, **kwargs)
            except APIError:
                self.show_output("ZooKeeper internal error.")
            except AuthFailedError:
                self.show_output("Authentication failed.")
            except NoAuthError:
                self.show_output("Not authenticated.")
            except BadVersionError:
                self.show_output("Bad version.")
            except ConnectionLoss:
                self.show_output("Connection loss.")
            except NotReadOnlyCallError:
                self.show_output("Not a read-only operation.")
            except BadArgumentsError:
                self.show_output("Bad arguments.")
            except SessionExpiredError:
                self.show_output("Session expired.")
            except UnimplementedError as ex:
                self.show_output("Not implemented by the server: %s." % str(ex))
            except ZookeeperError as ex:
                # Catch-all for any other ZooKeeper-level failure.
                self.show_output("Unknown ZooKeeper error: %s" % str(ex))
    return wrapper
check connected, fails otherwise
def could_collide_hor(self, vpos, adsb_pkt):
    """Return True if the vehicle could come within filter_dist_xy meters
    of the ADSB vehicle within filter_time seconds."""
    margin = self.asterix_settings.filter_dist_xy
    timeout = self.asterix_settings.filter_time
    # ADSB lat/lon arrive as 1e7-scaled integers; velocity in cm/s.
    adsb_lat = adsb_pkt.lat * 1.0e-7
    adsb_lon = adsb_pkt.lon * 1.0e-7
    adsb_speed = adsb_pkt.hor_velocity * 0.01
    own_speed = sqrt(vpos.vx**2 + vpos.vy**2)
    # Worst case: both vehicles close head-on for the full timeout.
    separation = mp_util.gps_distance(vpos.lat, vpos.lon, adsb_lat, adsb_lon)
    separation -= (adsb_speed + own_speed) * timeout
    return separation <= margin
return true if vehicle could come within filter_dist_xy meters of adsb vehicle in timeout seconds
def create_dhcprelay_ipv6(self):
    """Get an instance of the DHCPRelayIPv6 services facade."""
    return DHCPRelayIPv6(
        self.networkapi_url,
        self.user,
        self.password,
        self.user_ldap)
Get an instance of DHCPRelayIPv6 services facade.
def add_image_history(self, data):
    """Add an arbitrary (JSON-serializable) value to the ImageHistory tag."""
    # Stored as a JSON string inside the 0th IFD of the EXIF structure.
    self._ef['0th'][piexif.ImageIFD.ImageHistory] = json.dumps(data)
Add arbitrary string to ImageHistory tag.
def clean_up_datetime(obj_map):
    """Convert datetime objects (recursively) to dictionaries for storage."""
    clean_map = {}
    for key, value in obj_map.items():
        if isinstance(value, datetime.datetime):
            # Explode the datetime into its storable components.
            clean_map[key] = {
                'year': value.year,
                'month': value.month,
                'day': value.day,
                'hour': value.hour,
                'minute': value.minute,
                'second': value.second,
                'microsecond': value.microsecond,
                'tzinfo': value.tzinfo,
            }
        elif isinstance(value, dict):
            clean_map[key] = clean_up_datetime(value)
        elif isinstance(value, list):
            if value:
                clean_map[key] = [
                    clean_up_datetime(item) if isinstance(item, dict) else item
                    for item in value
                ]
            else:
                clean_map[key] = value
        else:
            clean_map[key] = value
    return clean_map
convert datetime objects to dictionaries for storage
def exclude_samples(in_file, out_file, to_exclude, ref_file, config, filters=None):
    """Exclude specific samples from an input VCF file via `bcftools view`.

    Returns *in_file* unchanged when nothing needs excluding; otherwise
    writes *out_file* (once) and returns it.
    NOTE(review): ref_file is unused in this function.
    """
    include, exclude = _get_exclude_samples(in_file, to_exclude)
    if len(exclude) == 0:
        # Nothing to exclude -- reuse the input file as-is.
        out_file = in_file
    elif not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            bcftools = config_utils.get_program("bcftools", config)
            # Compressed output when the target name asks for it.
            output_type = "z" if out_file.endswith(".gz") else "v"
            include_str = ",".join(include)
            filter_str = "-f %s" % filters if filters is not None else ""
            cmd = "{bcftools} view -O {output_type} -s {include_str} {filter_str} {in_file} > {tx_out_file}"
            do.run(cmd.format(**locals()), "Exclude samples: {}".format(to_exclude))
    return out_file
Exclude specific samples from an input VCF file.
def extract_xyz_matrix_from_chain(self, chain_id, atoms_of_interest = []):
    """Create a pandas coordinates dataframe from the lines in the specified chain.

    Fix: removed the unused `chains` list comprehension (dead code).
    Note: the mutable default is kept for interface compatibility; it is
    only passed through, never mutated here.
    """
    # Column 21 of a PDB line holds the chain identifier.
    chain_lines = [l for l in self.structure_lines if len(l) > 21 and l[21] == chain_id]
    return PDB.extract_xyz_matrix_from_pdb(chain_lines, atoms_of_interest = atoms_of_interest, include_all_columns = True)
Create a pandas coordinates dataframe from the lines in the specified chain.
def init_state(self):
    """Set the initial state of the state machine: outside every section."""
    self.in_warc_response = self.in_http_response = self.in_payload = False
Sets the initial state of the state machine.
def _from_any(cls, spec):
    """Generic creation method for all types accepted as ``spec``.

    Accepts a file path (str), a dict, or an existing *cls* instance.
    """
    if isinstance(spec, str):
        spec = cls.from_file(spec)
    elif isinstance(spec, dict):
        spec = cls.from_dict(spec)
    elif not isinstance(spec, cls):
        raise context.TypeError("spec must be either an ApplicationSpec, "
                                "path, or dict, got "
                                "%s" % type(spec).__name__)
    return spec
Generic creation method for all types accepted as ``spec``
def _massage_data(self): self._xdata_massaged, self._ydata_massaged, self._eydata_massaged = self.get_processed_data() return self
Processes the data and stores it.
def _get_json_response(self, resp): if resp is not None and resp.text is not None: try: text = resp.text.strip('\n') if len(text) > 0: return json.loads(text) except ValueError as e: if self.debug: print("Could not decode JSON response: \"%s\"" % resp.text) raise e
Parse a JSON response
def _get_id2upper(id2upper, item_id, item_obj): if item_id in id2upper: return id2upper[item_id] upper_ids = set() for upper_obj in item_obj.get_goterms_upper(): upper_id = upper_obj.item_id upper_ids.add(upper_id) upper_ids |= _get_id2upper(id2upper, upper_id, upper_obj) id2upper[item_id] = upper_ids return upper_ids
Add the parent item IDs for one item object and their upper.
def scroll_to_item(self, index):
    """Scroll the ThumbnailScrollBar so the item at *index* is centered."""
    spacing_between_items = self.scene.verticalSpacing()
    height_view = self.scrollarea.viewport().height()
    height_item = self.scene.itemAt(index).sizeHint().height()
    # Vertical slack remaining once the item itself is fully visible.
    height_view_excluding_item = max(0, height_view - height_item)
    # Accumulate the height of everything above the target item.
    height_of_top_items = spacing_between_items
    for i in range(index):
        item = self.scene.itemAt(i)
        height_of_top_items += item.sizeHint().height()
        height_of_top_items += spacing_between_items
    # Subtract half the slack to center the item in the viewport.
    pos_scroll = height_of_top_items - height_view_excluding_item // 2
    vsb = self.scrollarea.verticalScrollBar()
    vsb.setValue(pos_scroll)
Scroll to the selected item of ThumbnailScrollBar.
def _send_to_destination(
    self, destination, header, payload, transport_kwargs, add_path_step=True
):
    """Helper function to send a message to a specific recipe destination.

    Sends to the destination's queue and/or broadcasts to its topic,
    tagging the header with 'workflows-recipe'.
    """
    if header:
        # Copy so the caller's header dict is not mutated.
        header = header.copy()
        header["workflows-recipe"] = True
    else:
        header = {"workflows-recipe": True}
    dest_kwargs = transport_kwargs.copy()
    # A per-destination delay applies only when the caller set none.
    if (
        "transport-delay" in self.recipe[destination]
        and "delay" not in transport_kwargs
    ):
        dest_kwargs["delay"] = self.recipe[destination]["transport-delay"]
    if self.recipe[destination].get("queue"):
        self.transport.send(
            self.recipe[destination]["queue"],
            self._generate_full_recipe_message(destination, payload, add_path_step),
            headers=header,
            **dest_kwargs
        )
    if self.recipe[destination].get("topic"):
        self.transport.broadcast(
            self.recipe[destination]["topic"],
            self._generate_full_recipe_message(destination, payload, add_path_step),
            headers=header,
            **dest_kwargs
        )
Helper function to send a message to a specific recipe destination.
def hacked_pep257(to_lint):
    """Check for the presence of docstrings, but disable several pep257 checks.

    Blank-line, trailing-period, one-liner and imperative-mood checks are
    silenced; return-type checking is restricted to private helpers.
    """
    def ignore(*args, **kwargs):
        pass
    # Monkey-patch away the checks we do not care about.
    pep257.check_blank_before_after_class = ignore
    pep257.check_blank_after_last_paragraph = ignore
    pep257.check_blank_after_summary = ignore
    pep257.check_ends_with_period = ignore
    pep257.check_one_liners = ignore
    pep257.check_imperative_mood = ignore
    original_check_return_type = pep257.check_return_type
    def better_check_return_type(def_docstring, context, is_script):
        # Only enforce return-type docs on `_private` (non-dunder) functions.
        def_name = context.split()[1]
        if def_name.startswith('_') and not def_name.endswith('__'):
            original_check_return_type(def_docstring, context, is_script)
    pep257.check_return_type = better_check_return_type
    errors = []
    for filename in to_lint:
        with open(filename) as f:
            source = f.read()
        if source:
            errors.extend(pep257.check_source(source, filename))
    return '\n'.join([str(error) for error in sorted(errors)])
Check for the presence of docstrings, but ignore some of the options
def trionyx(request):
    """Add Trionyx branding settings and menu items to the template context."""
    return {
        'TX_APP_NAME': settings.TX_APP_NAME,
        'TX_LOGO_NAME_START': settings.TX_LOGO_NAME_START,
        'TX_LOGO_NAME_END': settings.TX_LOGO_NAME_END,
        'TX_LOGO_NAME_SMALL_START': settings.TX_LOGO_NAME_SMALL_START,
        'TX_LOGO_NAME_SMALL_END': settings.TX_LOGO_NAME_SMALL_END,
        'trionyx_menu_items': app_menu.get_menu_items(),
    }
Add trionyx context data
def invalidate_model_cache(self):
    """Invalidate the cache for this model's table and all related tables."""
    logger.info('Invalidating cache for table {0}'.format(self.model._meta.db_table))
    if django.VERSION >= (1, 8):
        # Django >= 1.8: unified get_fields() relation API.
        related_tables = set(
            [f.related_model._meta.db_table for f in self.model._meta.get_fields()
             if ((f.one_to_many or f.one_to_one) and f.auto_created)
             or f.many_to_one or (f.many_to_many and not f.auto_created)])
    else:
        # Older Django: deprecated related-objects API plus explicit FK scan.
        related_tables = set(
            [rel.model._meta.db_table
             for rel in self.model._meta.get_all_related_objects()])
        related_tables |= set(
            [field.rel.to._meta.db_table for field in self.model._meta.fields
             if issubclass(type(field), RelatedField)])
    logger.debug('Related tables of model {0} are {1}'.format(
        self.model, related_tables))
    update_model_cache(self.model._meta.db_table)
    for related_table in related_tables:
        update_model_cache(related_table)
Invalidate model cache by generating new key for the model.
def increment(self):
    """Increment our value; return the value from before the increment."""
    previous = self.value
    self.value = previous + self.increment_size
    return previous
Increment our value, return the previous value.
def bytes(self, *args):
    """Return a raw value for the given arguments.

    A single BTree.Cursor argument is used directly; otherwise the args are
    packed into a key and looked up in the btree. Returns None when no
    record is found. (Note: shadows the builtin `bytes` inside this class.)
    """
    if len(args) == 1 and isinstance(args[0], BTree.Cursor):
        cur = args[0]
    else:
        cur = self.btree.find('eq', self.makekey(*args))
    if cur:
        return cur.getval()
return a raw value for the given arguments
def call_operation(self, operation, **kwargs):
    """Generic entry point: invoke any operation the Lambda handler supports."""
    payload = {'operation': operation, **kwargs}
    return self.invoke(payload)
A generic method to call any operation supported by the Lambda handler
def add_empty_etd_ms_fields(etd_ms_dict):
    """Add empty values for ETD_MS fields that don't have values.

    Tries progressively simpler constructor signatures for each element
    (content+qualifier, content only, no args) to discover which the
    dispatch class supports, then fills in a matching empty placeholder.
    NOTE(review): the bare `except:` clauses also swallow unrelated errors;
    consider narrowing to TypeError.
    """
    for element in ETD_MS_ORDER:
        if element not in etd_ms_dict:
            try:
                py_object = ETD_MS_CONVERSION_DISPATCH[element](
                    content='',
                    qualifier='',
                )
            except:
                try:
                    py_object = ETD_MS_CONVERSION_DISPATCH[element](content='')
                except:
                    try:
                        py_object = ETD_MS_CONVERSION_DISPATCH[element]()
                    except:
                        raise PyuntlException(
                            'Could not add empty element field.'
                        )
                    else:
                        # No-arg constructor: nested content placeholder.
                        etd_ms_dict[element] = [{'content': {}}]
                else:
                    if not py_object.contained_children:
                        etd_ms_dict[element] = [{'content': ''}]
                    else:
                        etd_ms_dict[element] = [{'content': {}}]
            else:
                if py_object:
                    if not py_object.contained_children:
                        etd_ms_dict[element] = [{'content': '', 'qualifier': ''}]
                    else:
                        etd_ms_dict[element] = [{'content': {}, 'qualifier': ''}]
            if py_object:
                # Seed an empty entry for each expected child element.
                for child in py_object.contained_children:
                    etd_ms_dict[element][0].setdefault('content', {})
                    etd_ms_dict[element][0]['content'][child] = ''
    return etd_ms_dict
Add empty values for ETD_MS fields that don't have values.
def insert(self, thread):
    """Process a thread and insert its comments into the DB, ordered by id."""
    thread_id, title = thread['id'], thread['title']
    self.db.threads.new(thread_id, title)
    comments = sorted(
        (self._build_comment(raw) for raw in thread['comments']),
        key=lambda comment: comment['id'],
    )
    self.count += len(comments)
    for comment in comments:
        self.db.comments.add(thread_id, comment)
Process a thread and insert its comments in the DB.
def _get_relationship_value(self, obj, column): if column['__col__'].uselist: value = self._get_to_many_relationship_value(obj, column) else: value = self._get_to_one_relationship_value(obj, column) return value
Compute datas produced for a given relationship
def long_press(self, locator, duration=1000):
    """Long-press the element found by *locator* for *duration* milliseconds."""
    driver = self._current_application()
    element = self._element_find(locator, True, True)
    action = TouchAction(driver)
    # press -> hold for `duration` -> release, executed as one gesture.
    action.press(element).wait(duration).release().perform()
Long press the element with optional duration
def worker(job):
    """Run a single download job; return True only on full success."""
    ret = False
    try:
        if job.full_url is not None:
            req = requests.get(job.full_url, stream=True)
            ret = save_and_check(req, job.local_file, job.expected_checksum)
            if not ret:
                # Download or checksum failed: stop before symlinking.
                return ret
            ret = create_symlink(job.local_file, job.symlink_path)
    except KeyboardInterrupt:
        # Treat an interrupt as a failed job instead of propagating it.
        logging.debug("Ignoring keyboard interrupt.")
    return ret
Run a single download job.
def _get_loader_for_url(self, url):
    """Determine the loading method based on the URI.

    Supports 'profile+scheme://...' URLs; defaults to 'file' when no scheme
    is present. Loader instances are cached per full type string.
    """
    parts = url.split('://', 1)
    if len(parts) < 2:
        type_ = 'file'
    else:
        type_ = parts[0]
    if '+' in type_:
        # 'profile+scheme' form: split off the profile name and rebuild
        # the url with the bare scheme.
        profile_name, scheme = type_.split('+', 1)
        if len(parts) == 2:
            url = scheme + '://' + parts[1]
    else:
        profile_name = ''
        scheme = type_
    loader = self.cached.get(type_)
    if loader:
        return loader, url
    loader_cls = self._get_loader_class_for_type(scheme)
    if not loader_cls:
        raise IOError('No Loader for type: ' + scheme)
    # Profile kwargs come from the optional profile_loader hook.
    profile = self.kwargs
    if self.profile_loader:
        profile = self.profile_loader(profile_name, scheme)
    loader = loader_cls(**profile)
    self.cached[type_] = loader
    return loader, url
Determine loading method based on uri
def cummin(x):
    """In-place cumulative minimum of *x* (a Python port of R's cummin)."""
    for i in range(1, len(x)):
        x[i] = min(x[i], x[i - 1])
    return x
A python implementation of the cummin function in R
def savez_two_column(matrix, row_offset, file_name, append=False):
    """Save *matrix* (rows of item lists) to *file_name* as a compressed
    two-column [row, item] .npz array."""
    logging.info("Saving obj to file in two column .npz format %s.", file_name)
    pairs = [
        [row_offset + row, item]
        for row, items in enumerate(matrix)
        for item in items
    ]
    np.savez_compressed(file_name, np.asarray(pairs))
    logging.info("Done saving to file %s.", file_name)
Savez_compressed obj to file_name.
def cos_r(self, N=None):
    """Return the squared cosines for each row (first N factors)."""
    if not hasattr(self, 'F') or self.F.shape[1] < self.rank:
        # Make sure the row factor scores exist at full rank first.
        self.fs_r(N=self.rank)
    # Squared row norms of the factor-score matrix.
    self.dr = norm(self.F, axis=1)**2
    # Squared scores scaled column-wise by the squared row norms.
    return apply_along_axis(lambda _: _/self.dr, 0, self.F[:, :N]**2)
Return the squared cosines for each row.
def signal_catcher(callback):
    """Install SIGINT/SIGQUIT/SIGTERM handlers that invoke *callback*.

    NOTE(review): this is a generator (it yields once) -- presumably meant
    to be used as a context manager via @contextmanager applied elsewhere;
    confirm at the usage site.
    """
    def _catch_exit_signal(sig_num, _frame):
        print('Received signal {:d} invoking callback...'.format(sig_num))
        callback()
    signal.signal(signal.SIGINT, _catch_exit_signal)
    signal.signal(signal.SIGQUIT, _catch_exit_signal)
    signal.signal(signal.SIGTERM, _catch_exit_signal)
    yield
Catch signals and invoke the callback method
def on_open(self, ws):
    """Websocket on_open event handler: start a keep-alive ping thread."""
    def keep_alive(interval):
        # Ping forever at the configured interval.
        while True:
            time.sleep(interval)
            self.ping()
    start_new_thread(keep_alive, (self.keep_alive_interval, ))
Websocket on_open event handler
def generate_password(mode, length):
    """Generate a random password and emit it according to *mode*.

    ECHO prints it styled, COPY puts it on the clipboard (if pyperclip is
    installed), RAW prints it unstyled.
    """
    # SystemRandom uses the OS CSPRNG -- suitable for passwords.
    r = random.SystemRandom()
    length = length or RANDOM_PASSWORD_DEFAULT_LENGTH
    password = "".join(r.choice(RANDOM_PASSWORD_ALPHABET) for _ in range(length))
    if mode == Mode.ECHO:
        click.echo(style_password(password))
    elif mode == Mode.COPY:
        try:
            # pyperclip is optional; degrade gracefully when missing.
            import pyperclip
            pyperclip.copy(password)
            result = style_success("*** PASSWORD COPIED TO CLIPBOARD ***")
        except ImportError:
            result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
        click.echo(result)
    elif mode == Mode.RAW:
        click.echo(password)
generate a random password
def remove_argument(self, name):
    """Remove the first argument matching *name*; returns self for chaining."""
    matches = [i for i, arg in enumerate(self.args) if arg.name == name]
    if matches:
        del self.args[matches[0]]
    return self
Remove the argument matching the given name.
def run(self):
    """Set up all subsystems in order, then enter the main loop."""
    for step in (self._setup_kafka, self._load_plugins,
                 self._setup_stats, self._main_loop):
        step()
Set up and run
def dict_pick(dictionary, allowed_keys):
    """Return a new dictionary containing only the keys found in *allowed_keys*."""
    # Modernized: dict.items() replaces the py2-compat viewitems() helper
    # (identical semantics on Python 3).
    return {key: value for key, value in dictionary.items() if key in allowed_keys}
Return a dictionary only with keys found in `allowed_keys`
def UTF8ToUTF16BE(instr, setbom=True):
    "Converts UTF-8 strings to UTF16-BE."
    outstr = "".encode()
    if (setbom):
        # Prepend the UTF-16BE byte-order mark.
        outstr += "\xFE\xFF".encode("latin1")
    # NOTE(review): `unicode` and `PY3K` are module-level py2/3 shims
    # defined elsewhere in this file.
    if not isinstance(instr, unicode):
        instr = instr.decode('UTF-8')
    outstr += instr.encode('UTF-16BE')
    if PY3K:
        # Round-trip through latin1 so the result is a str on Python 3.
        outstr = outstr.decode("latin1")
    return outstr
Converts UTF-8 strings to UTF16-BE.
def user_to_request(handler):
    """Decorator: attach the current user to the request before handling it."""
    @wraps(handler)
    async def decorator(*args):
        request = _get_request(args)
        # Resolve and stash the user under the configured request key.
        request[cfg.REQUEST_USER_KEY] = await get_cur_user(request)
        return await handler(*args)
    return decorator
Add user to request if user logged in
def add(self, resource, replace=False):
    """Add a single resource, checking for duplicates by URI.

    If a resource with the same URI already exists: replace it when
    *replace* is True, otherwise raise ResourceListDupeError.
    """
    uri = resource.uri
    for index, existing in enumerate(self):
        if uri == existing.uri:
            if replace:
                # Bug fix: the original assigned to the loop variable,
                # which never modified the list; write through the index.
                self[index] = resource
                return
            else:
                raise ResourceListDupeError(
                    "Attempt to add resource already in resource_list")
    self.append(resource)
Add a single resource, check for dupes.
def add_output_arg(self, out):
    """Add an output file as a command-line argument and register it as an output."""
    self.add_arg(out._dax_repr())
    self._add_output(out)
Add an output as an argument
def string_to_datetime(self, obj):
    """Decode a datetime string to a datetime object; pass through on failure.

    Format is chosen by length: 19 chars -> "%Y-%m-%dT%H:%M:%S",
    longer -> with microseconds, 10 chars -> date only.
    """
    if isinstance(obj, six.string_types):
        size = len(obj)
        if size == 19:
            fmt = "%Y-%m-%dT%H:%M:%S"
        elif size > 19:
            fmt = "%Y-%m-%dT%H:%M:%S.%f"
        elif size == 10:
            fmt = "%Y-%m-%d"
        else:
            fmt = None
        if fmt is not None:
            try:
                return datetime.strptime(obj, fmt)
            except ValueError:
                pass
    return obj
Decode a datetime string to a datetime object
def _LoadAuditEvents(handlers, get_report_args, actions=None, token=None,
                     transformers=None):
    """Returns AuditEvents for given handlers, actions, and timerange
    (newest first)."""
    if transformers is None:
        transformers = {}
    if data_store.RelationalDBEnabled():
        # Relational path: filter server-side by handler method names.
        entries = data_store.REL_DB.ReadAPIAuditEntries(
            min_timestamp=get_report_args.start_time,
            max_timestamp=get_report_args.start_time + get_report_args.duration,
            router_method_names=list(handlers.keys()))
        rows = [_EntryToEvent(entry, handlers, transformers) for entry in entries]
    else:
        # Legacy store path.
        # NOTE(review): passing the window length as 'offset' looks odd;
        # confirm the legacy API's parameter semantics.
        entries = report_utils.GetAuditLogEntries(
            offset=get_report_args.duration,
            now=get_report_args.start_time + get_report_args.duration,
            token=token)
        if actions is None:
            actions = set(handlers.values())
        rows = [entry for entry in entries if entry.action in actions]
    rows.sort(key=lambda row: row.timestamp, reverse=True)
    return rows
Returns AuditEvents for given handlers, actions, and timerange.
def main(output):
    """Generate a c7n-org subscriptions config file (YAML) into *output*."""
    client = SubscriptionClient(Session().get_credentials())
    subs = [sub.serialize(True) for sub in client.subscriptions.list()]
    results = []
    for sub in subs:
        # Keep only the fields c7n-org needs.
        sub_info = {
            'subscription_id': sub['subscriptionId'],
            'name': sub['displayName']
        }
        results.append(sub_info)
    print(
        yaml.safe_dump(
            {'subscriptions': results}, default_flow_style=False),
        file=output)
Generate a c7n-org subscriptions config file
def async_refresh(self, *args, **kwargs):
    """Trigger an asynchronous job to refresh the cache, falling back to a
    synchronous refresh if enqueueing fails."""
    try:
        enqueue_task(
            dict(
                klass_str=self.class_path,
                obj_args=self.get_init_args(),
                obj_kwargs=self.get_init_kwargs(),
                call_args=args,
                call_kwargs=kwargs
            ),
            task_options=self.task_options
        )
    except Exception:
        logger.error("Unable to trigger task asynchronously - failing "
                     "over to synchronous refresh", exc_info=True)
        try:
            return self.refresh(*args, **kwargs)
        except Exception as e:
            logger.error("Unable to refresh data synchronously: %s", e,
                         exc_info=True)
        else:
            # NOTE(review): unreachable -- the try block returns on success,
            # so this else clause can never run; confirm intent.
            logger.debug("Failover synchronous refresh completed successfully")
Trigger an asynchronous job to refresh the cache
def _handle_config(self, data):
    """Handle the initial config push and subsequent config changes."""
    # Merge into the room's config and forward the raw payload to listeners.
    self.room.config.update(data)
    self.conn.enqueue_data("config", data)
Handle initial config push and config changes
def security_code_date(self):
    """Column: date of the user's last security code update.

    Both the Python-side default and the server default use the sentinel
    date 2000-01-01.
    """
    return sa.Column(
        sa.TIMESTAMP(timezone=False),
        default=datetime(2000, 1, 1),
        server_default="2000-01-01 01:01",
    )
Date of user's security code update
def find_env_paths_in_basedirs(base_dirs):
    """Return all potential env directories found directly under *base_dirs*."""
    # The trailing empty component ('*', '') restricts glob to directories.
    return [
        path
        for base_dir in base_dirs
        for path in glob.glob(os.path.join(os.path.expanduser(base_dir), '*', ''))
    ]
Returns all potential envs in a basedir
def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None,
                      likelihood=None, **predict_kwargs):
    """Sample the posterior GP at the points X.

    Equivalent to posterior_samples_f here since no likelihood is applied;
    Y_metadata and likelihood are accepted for interface compatibility only.
    """
    return self.posterior_samples_f(X, size, full_cov=full_cov, **predict_kwargs)
Samples the posterior GP at the points X, equivalent to posterior_samples_f due to the absence of a likelihood.
def ttl(self, key, **opts):
    """Get the time-to-live (seconds) of a given key; None if not set."""
    key, store = self._expand_opts(key, opts)
    # Delegate when the backing store implements ttl natively.
    if hasattr(store, 'ttl'):
        return store.ttl(key)
    data = store.get(key)
    if data is None:
        return None
    expiry = data[EXPIRY_INDEX]
    if expiry is not None:
        # Clamp at 0; an exactly-expired key reports None via `or None`.
        return max(0, expiry - time()) or None
Get the time-to-live of a given key; None if not set.
def alert_policy_condition_path(cls, project, alert_policy, condition):
    """Return a fully-qualified alert_policy_condition resource string."""
    return google.api_core.path_template.expand(
        "projects/{project}/alertPolicies/{alert_policy}/conditions/{condition}",
        project=project,
        alert_policy=alert_policy,
        condition=condition,
    )
Return a fully-qualified alert_policy_condition string.
def _recv_nack(self, method_frame):
    """Receive a nack from the broker and notify the registered listener."""
    if self._nack_listener:
        delivery_tag = method_frame.args.read_longlong()
        multiple, requeue = method_frame.args.read_bits(2)
        if multiple:
            # Nack every outstanding tag up to and including delivery_tag.
            while self._last_ack_id < delivery_tag:
                self._last_ack_id += 1
                self._nack_listener(self._last_ack_id, requeue)
        else:
            self._last_ack_id = delivery_tag
            self._nack_listener(self._last_ack_id, requeue)
Receive a nack from the broker.
async def iterStormPodes(self, text, opts=None, user=None):
    """Yield packed node tuples (podes) for the given storm query text."""
    if user is None:
        user = self.user
    dorepr = False
    dopath = False
    self.core._logStormQuery(text, user)
    if opts is not None:
        dorepr = opts.get('repr', False)
        dopath = opts.get('path', False)
    async for node, path in self.storm(text, opts=opts, user=user):
        pode = node.pack(dorepr=dorepr)
        # Attach the packed path info to the pode's property dict.
        pode[1]['path'] = path.pack(path=dopath)
        yield pode
Yield packed node tuples for the given storm query text.
def get(self, cid1, cid2, annotator_id):
    """Retrieve a relation label from the store; None when absent."""
    t = (cid1, cid2, annotator_id)
    # Exact-key scan (start == end): return the first (only) match.
    for k, v in self.kvl.scan(self.TABLE, (t, t)):
        return self._label_from_kvlayer(k, v)
Retrieve a relation label from the store.
def map_legacy_frequencies(form, field):
    """Map legacy frequency values on *field* to their new equivalents."""
    try:
        field.data = LEGACY_FREQUENCIES[field.data]
    except KeyError:
        # Not a legacy value: leave the field untouched.
        pass
Map legacy frequencies to new ones
def build_payload(self, payload):
    """Build the payload of the message by packing every segment into it."""
    commit = self.autocommit
    for segment in self.segments:
        segment.pack(payload, commit=commit)
Build payload of message.
def wrap_all(self, rows: Iterable[Union[Mapping[str, Any], Sequence[Any]]]):
    """Lazily yield the wrapped row tuple for each row in *rows*."""
    for row in rows:
        yield self.wrap(row)
Return row tuple for each row in rows.
def _fix_json_agents(ag_obj): if isinstance(ag_obj, str): logger.info("Fixing string agent: %s." % ag_obj) ret = {'name': ag_obj, 'db_refs': {'TEXT': ag_obj}} elif isinstance(ag_obj, list): ret = [_fix_json_agents(ag) for ag in ag_obj] elif isinstance(ag_obj, dict) and 'TEXT' in ag_obj.keys(): ret = deepcopy(ag_obj) text = ret.pop('TEXT') ret['db_refs']['TEXT'] = text else: ret = ag_obj return ret
Fix the json representation of an agent.
def layer_norm_vars(filters):
    """Create the scale/bias variables for layer normalization."""
    # Scale initialized to ones, bias to zeros (identity transform).
    scale = tf.get_variable(
        "layer_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "layer_norm_bias", [filters], initializer=tf.zeros_initializer())
    return scale, bias
Create Variables for layer norm.
def create_cbz(directory):
    """Create or update a CBZ archive from the image files in *directory*.

    Prefers an 'inorder' subdirectory as the image source when it exists.
    """
    if not os.path.isdir(directory):
        print("ERROR: Directory", directory, "not found.")
        return
    base = os.path.basename(directory.rstrip(os.path.sep))
    zipname = '%s.cbz' % base
    zipname = os.path.join(directory, zipname)
    inorder = os.path.join(directory, 'inorder')
    if os.path.isdir(inorder):
        directory = inorder
    if os.path.exists(zipname):
        os.remove(zipname)
    with zipfile.ZipFile(zipname, 'w') as myzip:
        # Bug fix: the original always listed the 'inorder' path, crashing
        # when that subdirectory did not exist; list the chosen source dir.
        for filename in sorted(os.listdir(directory)):
            fullname = os.path.join(directory, filename)
            if is_image(fullname):
                myzip.write(fullname)
        myzip.comment = get_cbz_comment()
    print("INFO: Created", zipname)
Creates or updates a CBZ from files in the given comic directory.
def normalize_std_array(vector):
    """Apply a unit-mean and unit-variance normalization to an arrayset."""
    length = 1
    n_samples = len(vector)
    mean = numpy.zeros((length,), 'float64')
    std = numpy.zeros((length,), 'float64')
    # Accumulate first and second moments over the samples.
    for sample in vector:
        as_float = sample.astype('float64')
        mean += as_float
        std += as_float ** 2
    mean /= n_samples
    # Var = E[x^2] - E[x]^2; std is its square root.
    std = (std / n_samples - mean ** 2) ** 0.5
    normalized = numpy.ndarray(shape=(n_samples, mean.shape[0]),
                               dtype=numpy.float64)
    for i in range(n_samples):
        normalized[i, :] = (vector[i] - mean) / std
    return normalized
Applies a unit mean and variance normalization to an arrayset
def placeholder_plugin_filter(self, request, queryset):
    """Restrict the queryset to the active global language (django-cms placeholders)."""
    if not request:
        return queryset
    if not GLL.is_active:
        return queryset
    return queryset.filter(language=GLL.language_code)
This is only used on models which use placeholders from the django-cms
def close(self):
    """Close the connection to the sqlite database, if one is open."""
    connection = getattr(self, '_connection', None)
    if connection:
        logger.debug('Closing sqlite connection.')
        connection.close()
        self._connection = None
Closes connection to sqlite database.
def _get_voltage_magnitude_var(self, buses, generators):
    """Returns the voltage magnitude variable set."""
    Vm = array([b.v_magnitude for b in buses])
    # Generator-controlled buses start at the generator's setpoint.
    for g in generators:
        Vm[g.bus._i] = g.v_magnitude
    Vmin = array([b.v_min for b in buses])
    Vmax = array([b.v_max for b in buses])
    return Variable("Vm", len(buses), Vm, Vmin, Vmax)
Returns the voltage magnitude variable set.
def container_running(self, id=None, name=None):
    """Check whether the container identified by *id* or *name* is running."""
    ref = id if id else name
    if not ref:
        # Neither identifier given: report not running.
        return False
    return self.inspect_container(ref)['State']['Running']
Checks if container is running
def upcaseTokens(s, l, t):
    """Helper parse action to convert matched tokens to upper case."""
    return [_ustr(token).upper() for token in t]
Helper parse action to convert tokens to upper case.
def app_score(self):
    """Compute the area under the precision vs. pct-predicted-positive curve.

    Trapezoid-style integration: each segment contributes its mean
    precision times its width in pct-predicted-positive.
    Fix: removed the dead locals (taus and the unused `total` accumulator).
    """
    precisions, pct_pred_pos, _taus = self.precision_pct_pred_pos_curve(interval=False)
    app = 0
    for k in range(len(precisions) - 1):
        mid_prec = (precisions[k] + precisions[k + 1]) / 2.0
        width_pp = np.abs(pct_pred_pos[k + 1] - pct_pred_pos[k])
        app += mid_prec * width_pp
    return app
Computes the area under the app curve.
def apply_patch(document, patch):
    """Apply a single Patch object to *document* and return the result."""
    op = patch.op
    parent, idx = resolve_path(document, patch.path)
    # copy/move additionally resolve the source path.
    if op in ("copy", "move"):
        sparent, sidx = resolve_path(document, patch.src)
        handler = copy if op == "copy" else move
        return handler(sparent, sidx, parent, idx)
    if op == "add":
        return add(parent, idx, patch.value)
    if op == "remove":
        return remove(parent, idx)
    if op == "replace":
        return replace(parent, idx, patch.value, patch.src)
    if op == "merge":
        return merge(parent, idx, patch.value)
    if op == "test":
        return test(parent, idx, patch.value)
    if op == "setremove":
        return set_remove(parent, idx, patch.value)
    if op == "setadd":
        return set_add(parent, idx, patch.value)
    raise JSONPatchError("Invalid operator")
Apply a Patch object to a document.
def sync(self):
    """Overwrite local customer profile data with remote data."""
    output = get_profile(self.profile_id)
    output['response'].raise_if_error()
    for payment_profile in output['payment_profiles']:
        # Create-or-fetch the local row, then sync its fields from remote.
        instance, created = CustomerPaymentProfile.objects.get_or_create(
            customer_profile=self,
            payment_profile_id=payment_profile['payment_profile_id']
        )
        instance.sync(payment_profile)
Overwrite local customer profile data with remote data
def lookup_ips(ips):
    """Return the set of host names that the given IPs resolve to."""
    hosts = set()
    for ip in ips:
        try:
            name, _aliases, _addresses = socket.gethostbyaddr(ip)
            hosts.add(name)
        except socket.error:
            # Unresolvable address: keep the raw IP string.
            hosts.add(ip)
    return hosts
Return set of host names that resolve to given ips.
def prettyXml(elem):
    """Return a pretty-printed XML string for the ElementTree Element."""
    raw = ET.tostring(elem, "utf-8")
    return minidom.parseString(raw).toprettyxml(indent=" ")
Return a pretty-printed XML string for the ElementTree Element.
def axes(self):
    """Dimensions in which an actual resizing is performed."""
    mismatched = []
    for axis in range(self.domain.ndim):
        if self.domain.shape[axis] != self.range.shape[axis]:
            mismatched.append(axis)
    return tuple(mismatched)
Dimensions in which an actual resizing is performed.
def noop(*layers):
    """Transform a sequence of layers into a null operation."""
    def begin_update(X, drop=0.0):
        # Identity forward pass; the backward pass is also the identity.
        def finish_update(D, *args, **kwargs):
            return D
        return X, finish_update
    return begin_update
Transform a sequences of layers into a null operation.
def tcp_reassembly(packet, *, count=NotImplemented):
    """Store data for TCP reassembly.

    Returns (True, data) for TCP packets, (False, None) otherwise.
    """
    if 'TCP' in packet:
        ip = packet['IP'] if 'IP' in packet else packet['IPv6']
        tcp = packet['TCP']
        data = dict(
            # Buffer id: the 4-tuple identifying the flow.
            bufid=(
                ipaddress.ip_address(ip.src),
                ipaddress.ip_address(ip.dst),
                tcp.sport,
                tcp.dport,
            ),
            num=count,
            ack=tcp.ack,
            dsn=tcp.seq,
            syn=bool(tcp.flags.S),
            fin=bool(tcp.flags.F),
            rst=bool(tcp.flags.R),
            payload=bytearray(bytes(tcp.payload)),
        )
        raw_len = len(tcp.payload)
        # Sequence-number span covered by this segment.
        data['first'] = tcp.seq
        data['last'] = tcp.seq + raw_len
        data['len'] = raw_len
        return True, data
    return False, None
Store data for TCP reassembly.
def save(self, bulk=False, id=None, parent=None, routing=None, force=False):
    """Save the object (index the document) and return its id."""
    meta = self._meta
    conn = meta['connection']
    # Explicit arguments win over values stored in meta.
    id = id or meta.get("id", None)
    parent = parent or meta.get('parent', None)
    routing = routing or meta.get('routing', None)
    qargs = None
    if routing:
        qargs = {'routing': routing}
    version = meta.get('version', None)
    if force:
        # Forced insert bypasses optimistic version checking.
        version = None
    res = conn.index(self, meta.index, meta.type, id, parent=parent,
                     bulk=bulk, version=version, force_insert=force,
                     querystring_args=qargs)
    if not bulk:
        # Record the server-assigned id/version on the document.
        self._meta.id = res._id
        self._meta.version = res._version
        return res._id
    return id
Save the object and returns id
def bucket(things, key):
    """Return a map of key(thing) -> list of things sharing that key."""
    grouped = defaultdict(list)
    for item in things:
        grouped[key(item)].append(item)
    return grouped
Return a map of key -> list of things.
def shuffle_data(X, y=None, sample_weight=None):
    """Shuffle X, y, and sample_weight together (in unison).

    Inputs are returned unchanged when X has fewer than two samples.
    None values for y / sample_weight pass through as None.
    """
    if len(X) <= 1:
        return X, y, sample_weight
    # Fix: np.int was removed in NumPy 1.24; use the builtin int dtype.
    indices = np.arange(len(X), dtype=int)
    np.random.shuffle(indices)
    shuffled_X = X[indices]
    shuffled_y = y if y is None else y[indices]
    shuffled_sw = sample_weight if sample_weight is None else sample_weight[indices]
    return shuffled_X, shuffled_y, shuffled_sw
Shuffles indices X, y, and sample_weight together
def fetch_all_data(self, limit=50000):
    """Fetch data for all entities into self._master_df, then parse it."""
    # NOTE(review): the SQL body appears to have been lost -- text() is
    # called with no argument here; confirm the intended query string.
    query = text(
    )
    try:
        print("Querying the database, this could take a while")
        response = self.perform_query(query, limit=limit)
        master_df = pd.DataFrame(response.fetchall())
        print("master_df created successfully.")
        # Keep a defensive copy before downstream parsing mutates it.
        self._master_df = master_df.copy()
        self.parse_all_data()
    except:
        # NOTE(review): bare except masks the root cause; consider
        # `except Exception as e: raise ValueError(...) from e`.
        raise ValueError("Error querying the database.")
Fetch data for all entities.
def show_tree(self):
    """Populate the tree with profiler data and display it."""
    # Reset the widget before re-populating.
    self.initialize_view()
    self.setItemsExpandable(True)
    # Disable sorting while inserting so rows keep insertion order.
    self.setSortingEnabled(False)
    rootkey = self.find_root()
    if rootkey:
        self.populate_tree(self, self.find_callees(rootkey))
        self.resizeColumnToContents(0)
        self.setSortingEnabled(True)
        self.sortItems(1, Qt.AscendingOrder)
        self.change_view(1)
Populate the tree with profiler data and display it.
def documentation(default=None, api_version=None, api=None, **kwargs):
    """Return documentation for the current api (None when no api is given)."""
    version = default if default else api_version
    if not api:
        return None
    return api.http.documentation(base_url="", api_version=version)
returns documentation for the current api
def expand(sequence):
    """Recursively expand a tree of sequences into a single, flat sequence."""
    flat = []
    for point in sequence:
        if 'sequence' in point:
            # Nested sequence: flatten it and splice the members in.
            flat.extend(expand(point['sequence']))
        else:
            flat.append(point)
    # Preserve the container type of the input.
    return sequence.__class__(flat)
expands a tree of sequences into a single, flat sequence, recursively.
def _from_dict(cls, _dict):
    """Initialize an IndexCapacity object from a json dictionary."""
    args = {}
    # Each optional section is deserialized by its own model class.
    if 'documents' in _dict:
        args['documents'] = EnvironmentDocuments._from_dict(
            _dict.get('documents'))
    if 'disk_usage' in _dict:
        args['disk_usage'] = DiskUsage._from_dict(_dict.get('disk_usage'))
    if 'collections' in _dict:
        args['collections'] = CollectionUsage._from_dict(
            _dict.get('collections'))
    return cls(**args)
Initialize a IndexCapacity object from a json dictionary.
def _check_and_install_python(ret, python, default=False, user=None):
    """Verify that *python* is installed via pyenv, installing it if unavailable."""
    ret = _python_installed(ret, python, user=user)
    if not ret['result']:
        if __salt__['pyenv.install_python'](python, runas=user):
            ret['result'] = True
            ret['changes'][python] = 'Installed'
            ret['comment'] = 'Successfully installed python'
            ret['default'] = default
        else:
            ret['result'] = False
            ret['comment'] = 'Could not install python.'
            # Bail out: cannot set a default for a failed install.
            return ret
    if default:
        __salt__['pyenv.default'](python, runas=user)
    return ret
Verify that python is installed, install if unavailable
def spots_at(self, x, y):
    """Iterate over spots that collide with the given point."""
    return (spot for spot in self.spot.values() if spot.collide_point(x, y))
Iterate over spots that collide the given point.
def ednde(self, x, params=None):
    """Evaluate E times the differential flux at *x*."""
    # Fall back to the model's own parameters when none are supplied.
    params = self.params if params is None else params
    return np.squeeze(self.eval_ednde(x, params, self.scale, self.extra_params))
Evaluate E times differential flux.
def put(self, ch):
    """Put a character at the current cursor position."""
    decoded = self._decode(ch) if isinstance(ch, bytes) else ch
    self.put_abs(self.cur_r, self.cur_c, decoded)
This puts a characters at the current cursor position.
def parsebool(el):
    """Parse a ``BeautifulSoup`` element as a bool ("OUI"/"NON", else numeric)."""
    up = text(el).upper()
    if up == "OUI":
        return True
    if up == "NON":
        return False
    # Fall back to the numeric interpretation.
    return bool(parseint(el))
Parse a ``BeautifulSoup`` element as a bool
def editText(self, y, x, w, record=True, **kwargs):
    """Wrap global editText with `preedit` and `postedit` hooks.

    When `record` is True and the `preedit` hook supplies a value, that
    value is used instead of opening the interactive editor.
    """
    hooked = self.callHook('preedit') if record else None
    if hooked and hooked[0] is not None:
        value = hooked[0]
    else:
        with EnableCursor():
            value = editText(self.scr, y, x, w, **kwargs)
    if kwargs.get('display', True):
        status('"%s"' % value)
    if record:
        self.callHook('postedit', value)
    return value
Wrap global editText with `preedit` and `postedit` hooks.
def acquisition_function(self, x):
    """Takes an acquisition and weights it so the domain and cost are
    taken into account: the raw acquisition is masked by the domain's
    constraint indicator, divided by the evaluation cost, and negated
    (for use with minimizers).
    """
    raw = self._compute_acq(x)
    cost, _ = self.cost_withGradients(x)
    constrained = raw * self.space.indicator_constraints(x)
    return -constrained / cost
Takes an acquisition and weights it so the domain and cost are taken into account.
def all_mouse_sprites(self):
    """Return a flat iterator over the sprite tree for simplified iteration.

    NOTE(review): children are only traversed under visible sprites here;
    confirm against the original's indentation that invisible sprites are
    meant to hide their subtree.
    """
    def walk(sprites):
        if not sprites:
            return
        for sprite in sprites:
            if sprite.visible:
                yield sprite
                for descendant in walk(sprite.get_mouse_sprites()):
                    yield descendant

    return walk(self.get_mouse_sprites())
Returns a flat iterator over the sprite tree for simplified iteration.
def _load_file(self, log_file, message_name_filter_list):
    """Load and parse a ULog file into memory.

    :param log_file: path string, or an already-open file object
        (assumed to be opened in binary mode -- TODO confirm)
    :param message_name_filter_list: message names to parse (None keeps all)
    """
    if isinstance(log_file, str):
        self._file_handle = open(log_file, "rb")
    else:
        self._file_handle = log_file

    self._read_file_header()
    self._last_timestamp = self._start_timestamp
    self._read_file_definitions()

    if self.has_data_appended and len(self._appended_offsets) > 0:
        if self._debug:
            print('This file has data appended')
        # Parse each appended segment separately: read up to the next
        # segment's offset, then seek there before continuing.
        for offset in self._appended_offsets:
            self._read_file_data(message_name_filter_list, read_until=offset)
            self._file_handle.seek(offset)

    # Read the remaining (or only) data section through to end of file.
    self._read_file_data(message_name_filter_list)
    self._file_handle.close()
    # drop the handle so the object does not keep a closed file around
    del self._file_handle
load and parse an ULog file into memory
def cache_return(func):
    """Cache the return value of a zero-argument function.

    The wrapped function is invoked at most once; every subsequent call
    returns the memoized result (even when that result is falsy or None,
    since presence in the cache list -- not truthiness -- is checked).
    """
    from functools import wraps

    _cache = []

    @wraps(func)  # preserve the wrapped function's name and docstring
    def wrap():
        if not _cache:
            _cache.append(func())
        return _cache[0]
    return wrap
Cache the return value of a function without arguments
def __allocateBits(self):
    """Allocate this margin's bit range, avoiding bits already claimed
    by the other margins of the editor.

    Sets self._bitRange to an inclusive (first, last) tuple, or returns
    without allocating when zero bits are requested.
    """
    if self._bit_count < 0:
        raise Exception( "A margin cannot request negative number of bits" )
    if self._bit_count == 0:
        return

    margins = self._qpart.getMargins()

    # Collect the bit ranges already taken by other margins, keeping the
    # list sorted by start bit via insertion.
    occupiedRanges = []
    for margin in margins:
        bitRange = margin.getBitRange()
        if bitRange is not None:
            added = False
            for index in range( len( occupiedRanges ) ):
                r = occupiedRanges[ index ]
                if bitRange[ 1 ] < r[ 0 ]:
                    occupiedRanges.insert(index, bitRange)
                    added = True
                    break
            if not added:
                occupiedRanges.append(bitRange)

    # Take the first gap between occupied ranges that is wide enough;
    # otherwise allocate just past the last occupied range.
    vacant = 0
    for r in occupiedRanges:
        if r[ 0 ] - vacant >= self._bit_count:
            self._bitRange = (vacant, vacant + self._bit_count - 1)
            return
        vacant = r[ 1 ] + 1
    self._bitRange = (vacant, vacant + self._bit_count - 1)
Allocates the bit range depending on the required bit count
def assert_page_source_contains(self, expected_value, failure_message='Expected page source to contain: "{}"'):
    """Assert that the page source contains `expected_value`.

    :param expected_value: substring expected to appear in the page source
    :param failure_message: format string; receives expected_value via .format()
    """
    # NOTE(review): `unicode` is Python 2 only -- this will NameError on
    # Python 3; confirm the project's target interpreter before porting.
    assertion = lambda: expected_value in self.driver_wrapper.page_source()
    self.webdriver_assert(assertion, unicode(failure_message).format(expected_value))
Asserts that the page source contains the string passed in expected_value
def author_list(self):
    """The list of authors as text, for the admin submission list overview.

    The submitter is listed first, followed by all other authors,
    joined with ",\n".
    """
    co_authors = self.authors.all().exclude(pk=self.submitter.pk)
    everyone = [self.submitter] + list(co_authors)
    return ",\n".join(person.get_full_name() for person in everyone)
The list of authors as text, for the admin submission list overview.
def _disable_module(module):
    """Disable the specified module in Apache via a2dismod.

    Failures are logged at ERROR level rather than raised.
    """
    try:
        # Use check_output (with stderr folded into stdout) so that
        # e.output is actually populated on failure: CalledProcessError
        # raised by check_call always carries output=None, which made the
        # "Output is: %s" part of the log message useless.
        subprocess.check_output(['a2dismod', module], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        log('Error occurred disabling module %s. '
            'Output is: %s' % (module, e.output), level=ERROR)
Disables the specified module in Apache.
def view_for(self, action='view'):
    """Return the classview viewhandler that handles the specified action."""
    app_obj = current_app._get_current_object()
    view_class, handler_attr = self.view_for_endpoints[app_obj][action]
    return getattr(view_class(self), handler_attr)
Return the classview viewhandler that handles the specified action
def checklink (form=None, env=os.environ):
    """Validate the CGI form and check the given links, yielding encoded
    HTML fragments as results become available.

    NOTE(review): the mutable default `env=os.environ` is shared across
    calls; it appears to be only read here, but confirm before refactoring.
    """
    if form is None:
        form = {}
    try:
        checkform(form, env)
    except LCFormError as errmsg:
        # invalid form: log it and emit a single error page, then stop
        log(env, errmsg)
        yield encode(format_error(errmsg))
        return
    out = ThreadsafeIO()
    config = get_configuration(form, out)
    url = strformat.stripurl(formvalue(form, "url"))
    aggregate = director.get_aggregate(config)
    # seed the queue with the root URL; extern=(0, 0) presumably marks it
    # as non-external -- confirm against checker.get_url_from
    url_data = checker.get_url_from(url, 0, aggregate, extern=(0, 0))
    aggregate.urlqueue.put(url_data)
    # stream results out as the check produces them
    for html_str in start_check(aggregate, out):
        yield encode(html_str)
    out.close()
Validates the CGI form and checks the given links.
def parse_address(address: str) -> str:
    """Parse an email address, falling back to the raw string given.

    Extracts the addr-spec from forms like ``Name <user@host>``; when
    nothing parseable is found, the original input is returned unchanged.
    """
    _, addr_spec = email.utils.parseaddr(address)
    if addr_spec:
        return addr_spec
    return address
Parse an email address, falling back to the raw string given.