code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def check_player_collision(self): player_tiles = r.TileMapManager.active_map.grab_collisions(self.char.coords) enemy_tiles = r.TileMapManager.active_map.grab_collisions(self.coords) for ptile in player_tiles: for etile in enemy_tiles: if r.TileMapManager.active_map.pixels_to_tiles(ptile.coords) == r.TileMapManager.active_map.pixels_to_tiles(etile.coords): return True return False
Check to see if we are colliding with the player.
def _hijack_target(self):
    """Replace the target method on the target object with this proxy method.

    Three hijack strategies, depending on the target:
      * class/module targets: plain setattr on the object itself;
      * properties: install a ProxyProperty on the instance's class and stash
        this proxy in the instance dict under a mangled (double) name;
      * ordinary instance methods: shadow the method via the instance __dict__.
    """
    if self._target.is_class_or_module():
        setattr(self._target.obj, self._method_name, self)
    elif self._attr.kind == 'property':
        proxy_property = ProxyProperty(
            double_name(self._method_name),
            self._original_method,
        )
        setattr(self._target.obj.__class__, self._method_name, proxy_property)
        self._target.obj.__dict__[double_name(self._method_name)] = self
    else:
        self._target.obj.__dict__[self._method_name] = self
    # Python looks these dunders up on the type, not the instance, so the
    # target must also patch them at the class level.
    if self._method_name in ['__call__', '__enter__', '__exit__']:
        self._target.hijack_attr(self._method_name)
Replaces the target method on the target object with the proxy method.
def scoped_session_decorator(func):
    """Manage contexts and add debugging to DB sessions.

    Wraps *func* so each call runs inside a scoped DB session.
    NOTE(review): relies on module-level `session`, `sessions_scope`, and
    `logger` — confirm they are defined at import time.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        with sessions_scope(session):
            logger.debug("Running worker %s in scoped DB session", func.__name__)
            return func(*args, **kwargs)
    return wrapper
Manage contexts and add debugging to db sessions.
def gps_message_arrived(self, m):
    """Adjust the time base from a GPS message.

    Recomputes the peak per-message-type rate observed since the previous
    GPS fix, then resets the per-type counters.
    """
    # Older logs use Week/TimeMS; newer ones use GWk/GMS.
    gps_week = getattr(m, 'Week', None)
    gps_timems = getattr(m, 'TimeMS', None)
    if gps_week is None:
        gps_week = getattr(m, 'GWk', None)
        gps_timems = getattr(m, 'GMS', None)
        if gps_week is None:
            # presumably a log format carrying GPSTime instead of a week
            # number — no usable timebase info, so bail out. TODO confirm.
            if getattr(m, 'GPSTime', None) is not None:
                return
    t = self._gpsTimeToTime(gps_week, gps_timems)
    deltat = t - self.timebase
    if deltat <= 0:
        # non-monotonic or duplicate fix: ignore
        return
    # `type` shadows the builtin; kept byte-identical on purpose.
    for type in self.counts_since_gps:
        rate = self.counts_since_gps[type] / deltat
        if rate > self.msg_rate.get(type, 0):
            self.msg_rate[type] = rate
    # IMU rate is pinned to an assumed fixed 50 Hz.
    self.msg_rate['IMU'] = 50.0
    self.timebase = t
    self.counts_since_gps = {}
adjust time base from GPS message
def benchmark_command(cmd, progress):
    """Benchmark one command execution (Python 2 module: `print` statement).

    Wraps *cmd* with /usr/bin/time and parses "user-seconds max-rss-kb"
    from its stderr; returns (user_time, max_rss).

    Raises:
        Exception: when the /usr/bin/time output cannot be parsed.
    """
    full_cmd = '/usr/bin/time --format="%U %M" {0}'.format(cmd)
    print '{0:6.2f}% Running {1}'.format(100.0 * progress, full_cmd)
    # /usr/bin/time writes its measurements to stderr.
    (_, err) = subprocess.Popen(
        ['/bin/sh', '-c', full_cmd],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    ).communicate('')
    values = err.strip().split(' ')
    if len(values) == 2:
        try:
            return (float(values[0]), float(values[1]))
        except:  # noqa: E722 -- falls through to the error report below
            pass
    print err
    raise Exception('Error during benchmarking')
Benchmark one command execution
def _unpaginate(self, domain, initial_op, *, on_properties): request = initial_op(domain) while request is not None: result = self._retry_on_reset(request, request.execute) for on_property in on_properties: items = result.get(on_property) if items is not None: yield on_property, items request = domain.list_next(request, result)
Iterate through the request pages until all items have been processed.
def failed_extra_capabilities(self):
    """Check whether the instance passes its `extra_capability_checks`.

    Each value in ``extra_capability_checks`` names a zero-argument check
    method on this instance; the returned list holds the capability names
    whose check came back falsy.
    """
    return [
        capability
        for capability, checker_name in self.extra_capability_checks.items()
        if not getattr(self, checker_name)()
    ]
Check to see if instance passes its `extra_capability_checks`.
def interpolation_change_cb(self, setting, value): canvas_img = self.get_canvas_image() canvas_img.interpolation = value canvas_img.reset_optimize() self.redraw(whence=0)
Handle callback related to changes in interpolation.
def _sim_texture(r1, r2): return sum([min(a, b) for a, b in zip(r1["hist_t"], r2["hist_t"])])
calculate the sum of histogram intersection of texture
def check_nonstandard_section_name(self): std_sections = ['.text', '.bss', '.rdata', '.data', '.rsrc', '.edata', '.idata', '.pdata', '.debug', '.reloc', '.stab', '.stabstr', '.tls', '.crt', '.gnu_deb', '.eh_fram', '.exptbl', '.rodata'] for i in range(200): std_sections.append('/'+str(i)) non_std_sections = [] for section in self.pefile_handle.sections: name = convert_to_ascii_null_term(section.Name).lower() if (name not in std_sections): non_std_sections.append(name) if non_std_sections: return{'description': 'Section(s) with a non-standard name, tamper indication', 'severity': 3, 'category': 'MALFORMED', 'attributes': non_std_sections} return None
Check for a non-standard section name.
def inputtemplate(self, template_id):
    """Return the inputtemplate with the specified ID.

    Resolves an inputtemplate ID to an InputTemplate object instance by
    scanning every profile's input templates in order.

    Raises:
        Exception: when no template with that ID exists in any profile.
    """
    all_templates = (
        tmpl
        for profile in self.profiles
        for tmpl in profile.input
    )
    for tmpl in all_templates:
        if tmpl.id == template_id:
            return tmpl
    raise Exception("No such input template: " + repr(template_id))
Return the inputtemplate with the specified ID. This is used to resolve an inputtemplate ID to an InputTemplate object instance.
def create_graph(): with tf.gfile.FastGFile(os.path.join( FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) _ = tf.import_graph_def(graph_def, name='')
Creates a graph from saved GraphDef file and returns a saver.
def write(self, data): m = self._convert_structure(data) self._file.write(self._format_mol(*m)) self._file.write('M END\n') for k, v in data.meta.items(): self._file.write(f'> <{k}>\n{v}\n') self._file.write('$$$$\n')
write single molecule into file
def link(source_path): if not os.path.isfile(source_path): raise SourceNotFound(source_path) with open(source_path, 'r') as f: content = f.read() block_map = BlockMap() all_block = convert_lines_to_block( content.splitlines(), block_map, LinkStack(source_path), source_path) return all_block, block_map.get_variables()
Link the content found at source_path, returning a Block that represents the content together with its variables.
def _ExportFileContent(self, aff4_object, result): if self.options.export_files_contents: try: result.content = aff4_object.Read(self.MAX_CONTENT_SIZE) result.content_sha256 = hashlib.sha256(result.content).hexdigest() except (IOError, AttributeError) as e: logging.warning("Can't read content of %s: %s", aff4_object.urn, e)
Add file content from aff4_object to result.
def polish_model(model): onnx.checker.check_model(model) onnx.helper.strip_doc_string(model) model = onnx.shape_inference.infer_shapes(model) model = onnx.optimizer.optimize(model) onnx.checker.check_model(model) return model
This function combines several useful utility functions together.
def _get_insert_commands(self, rows, cols):
    """Retrieve a dict of insert statements to be executed, keyed by table.

    NOTE: consumes (pops) entries from *rows* and *cols* as it goes, as the
    original implementation did.
    """
    # Snapshot the keys once: the original rebuilt list(rows.keys()) three
    # times per call; a snapshot is required anyway because `rows` is
    # mutated (popped) inside the loop.
    tables = list(rows)
    insert_queries = {}
    for table in tqdm(tables, total=len(tables), desc='Getting insert rows queries'):
        insert_queries[table] = {}
        _rows = rows.pop(table)
        _cols = cols.pop(table)
        if len(_rows) > 1:
            insert_queries[table]['insert_many'] = self.insert_many(table, _cols, _rows, execute=False)
        elif len(_rows) == 1:
            # single row: plain INSERT instead of a bulk statement
            insert_queries[table]['insert'] = self.insert(table, _cols, _rows, execute=False)
    return insert_queries
Retrieve dictionary of insert statements to be executed.
def gdbgui(): interpreter = "lldb" if app.config["LLDB"] else "gdb" gdbpid = request.args.get("gdbpid", 0) initial_gdb_user_command = request.args.get("initial_gdb_user_command", "") add_csrf_token_to_session() THEMES = ["monokai", "light"] initial_data = { "csrf_token": session["csrf_token"], "gdbgui_version": __version__, "gdbpid": gdbpid, "initial_gdb_user_command": initial_gdb_user_command, "interpreter": interpreter, "initial_binary_and_args": app.config["initial_binary_and_args"], "p": pbkdf2_hex(str(app.config.get("l")), "Feo8CJol") if app.config.get("l") else "", "project_home": app.config["project_home"], "remap_sources": app.config["remap_sources"], "rr": app.config["rr"], "show_gdbgui_upgrades": app.config["show_gdbgui_upgrades"], "themes": THEMES, "signals": SIGNAL_NAME_TO_OBJ, "using_windows": USING_WINDOWS, } return render_template( "gdbgui.html", version=__version__, debug=app.debug, interpreter=interpreter, initial_data=initial_data, themes=THEMES, )
Render the main gdbgui interface
def start_write(self, frame, node=None): if frame.buffer is None: self.writeline('yield ', node) else: self.writeline('%s.append(' % frame.buffer, node)
Yield or write into the frame buffer.
def delete(self, id): resp = self.client.accounts.delete(id) self.display(resp)
Delete a tenant by id.
def action(self, *action_names): def action_wrapper(decorated): @functools.wraps(decorated) def wrapper(argv): kwargs = dict(arg.split('=') for arg in argv) try: return decorated(**kwargs) except TypeError as e: if decorated.__doc__: e.args += (decorated.__doc__,) raise self.register_action(decorated.__name__, wrapper) if '_' in decorated.__name__: self.register_action( decorated.__name__.replace('_', '-'), wrapper) return wrapper return action_wrapper
Decorator that registers the wrapped functions as named actions.
def datetime_at_loc(self, loc): return pd.Timestamp(self._zdt_to_nanos(self._jdt_index.dateTimeAtLoc(loc)))
Returns the timestamp at the given integer location as a Pandas Timestamp.
def _check_patch_type_mismatch(self, patched_item, existing_item):
    """Enforce that each patch has a corresponding, already-defined data type.

    Raises:
        InvalidSpec: when the pre-existing type's kind (struct / union /
            closed union) does not match the patch's kind.
        AssertionError: for an unrecognized patch object type.
    """
    def raise_mismatch_error(patched_item, existing_item, data_type_name):
        # Error points at both the patch site and the original definition.
        error_msg = ('Type mismatch. Patch {} corresponds to pre-existing '
                     'data_type {} ({}:{}) that has type other than {}.')
        raise InvalidSpec(error_msg.format(
            quote(patched_item.name),
            quote(existing_item.name),
            existing_item.path,
            existing_item.lineno,
            quote(data_type_name)),
            patched_item.lineno,
            patched_item.path)
    if isinstance(patched_item, AstStructPatch):
        if not isinstance(existing_item, AstStructDef):
            raise_mismatch_error(patched_item, existing_item, 'struct')
    elif isinstance(patched_item, AstUnionPatch):
        if not isinstance(existing_item, AstUnionDef):
            raise_mismatch_error(patched_item, existing_item, 'union')
        else:
            # Both are unions: their closed/open flavor must also agree.
            if existing_item.closed != patched_item.closed:
                raise_mismatch_error(
                    patched_item, existing_item,
                    'union_closed' if existing_item.closed else 'union')
    else:
        raise AssertionError(
            'Unknown Patch Object Type {}'.format(patched_item.__class__.__name__))
Enforces that each patch has a corresponding, already-defined data type.
def chunker(ensemble_list, ncpu):
    """Generate successive chunks of *ensemble_list*, sized for *ncpu* workers.

    Chunk size is ``len(ensemble_list) // ncpu``, guarded to a minimum of 1:
    previously ``ncpu`` larger than the list length computed a chunk size of
    0 and crashed with ``ValueError: range() arg 3 must not be zero``.
    """
    length = max(1, int(len(ensemble_list) / ncpu))
    for i in range(0, len(ensemble_list), length):
        yield ensemble_list[i:i + length]
Generate successive chunks of ensemble_list.
def _post_init(self): self._led_type_code = self.manager.get_typecode('LED') self.device_path = os.path.realpath(os.path.join(self.path, 'device')) if '::' in self.name: chardev, code_name = self.name.split('::') if code_name in self.manager.codes['LED_type_codes']: self.code = self.manager.codes['LED_type_codes'][code_name] try: event_number = chardev.split('input')[1] except IndexError: print("Failed with", self.name) raise else: self._character_device_path = '/dev/input/event' + event_number self._match_device()
Set up the device path and type code.
def restore_type(self, type): mapping = { ARRAY: 'array', sa.Boolean: 'boolean', sa.Date: 'date', sa.DateTime: 'datetime', sa.Float: 'number', sa.Integer: 'integer', JSONB: 'object', JSON: 'object', sa.Numeric: 'number', sa.Text: 'string', sa.Time: 'time', sa.VARCHAR: 'string', UUID: 'string', } field_type = None for key, value in mapping.items(): if isinstance(type, key): field_type = value if field_type is None: message = 'Type "%s" is not supported' raise tableschema.exceptions.StorageError(message % type) return field_type
Restore type from SQL
def ls(dataset_uri): dataset = dtoolcore.DataSet.from_uri(dataset_uri) for overlay_name in dataset.list_overlay_names(): click.secho(overlay_name)
List the overlays in the dataset.
def backend_to_retrieve(self, namespace, stream): if namespace not in self.namespaces: raise NamespaceMissing('`{}` namespace is not configured' .format(namespace)) stream_prefix = self.get_matching_prefix(namespace, stream) read_backend = self.prefix_read_backends[namespace][stream_prefix] return (read_backend, self.prefix_confs[namespace][stream_prefix][read_backend])
Return backend enabled for reading for `stream`.
def find_postaggs_for(postagg_names, metrics_dict): postagg_metrics = [ metrics_dict[name] for name in postagg_names if metrics_dict[name].metric_type == POST_AGG_TYPE ] for postagg in postagg_metrics: postagg_names.remove(postagg.metric_name) return postagg_metrics
Return a list of metrics that are post aggregations
def list_custom_images(call=None):
    """Return a dict of all custom VM images on the cloud provider.

    CLI: must be invoked as a function (``-f`` / ``--function``).
    """
    if call != 'function':
        raise SaltCloudSystemExit(
            # Fixed copy/paste error: the message previously named list_vlans.
            'The list_custom_images function must be called with -f or --function.'
        )
    ret = {}
    conn = get_conn('SoftLayer_Account')
    response = conn.getBlockDeviceTemplateGroups()
    for image in response:
        # Only template groups with a globalIdentifier are usable images.
        if 'globalIdentifier' not in image:
            continue
        ret[image['name']] = {
            'id': image['id'],
            'name': image['name'],
            'globalIdentifier': image['globalIdentifier'],
        }
        if 'note' in image:
            ret[image['name']]['note'] = image['note']
    return ret
Return a dict of all custom VM images on the cloud provider.
def extract_error_message(cls, e):
    """Extract an error message for queries.

    Prefers the second element of ``e.args`` when it is a tuple of length
    greater than one; otherwise falls back to ``str(e)``. Any failure while
    inspecting ``args`` also falls back to ``str(e)``.
    """
    try:
        args = e.args
        if isinstance(args, tuple) and len(args) > 1:
            return args[1]
    except Exception:
        pass
    return str(e)
Extract error message for queries
def release(self):
    """Release this resource back to the pool it came from.

    Errored resources are deleted from the pool instead of being reused.
    """
    handler = self.pool.delete_resource if self.errored else self.pool.release
    handler(self)
Releases this resource back to the pool it came from.
def _diff_bounds(bounds, coord): try: return bounds[:, 1] - bounds[:, 0] except IndexError: diff = np.diff(bounds, axis=0) return xr.DataArray(diff, dims=coord.dims, coords=coord.coords)
Get grid spacing by subtracting upper and lower bounds.
def position(self): position = self.topic.posts.filter(Q(created__lt=self.created) | Q(id=self.id)).count() return position
Returns an integer corresponding to the position of the post in the topic.
def _populate(self, soup): tables = soup.select('table[rules=all]') if not tables: return trs = tables[0].select('tr')[1:] if len(trs[0]) == 5: self._populate_small_table(trs) else: self._populate_large_table(trs)
Populate the list, assuming ``soup`` is a ``BeautifulSoup`` object.
def open_organisation_logo_path(self): file_name, __ = QFileDialog.getOpenFileName( self, self.tr('Set organisation logo file'), self.organisation_logo_path_line_edit.text(), self.tr( 'Portable Network Graphics files (*.png *.PNG);;' 'JPEG Images (*.jpg *.jpeg);;' 'GIF Images (*.gif *.GIF);;' 'SVG Images (*.svg *.SVG);;')) if file_name: self.organisation_logo_path_line_edit.setText(file_name)
Open File dialog to choose the organisation logo path.
def _get_solar_flux(self, band): solar_flux = self.cal['solar_flux'].isel(bands=band).values d_index = self.cal['detector_index'].fillna(0).astype(int) return da.map_blocks(self._get_items, d_index.data, solar_flux=solar_flux, dtype=solar_flux.dtype)
Get the solar flux for the band.
def register_warning_code(code, exception_type, domain='core'): Logger._warning_code_to_exception[code] = (exception_type, domain) Logger._domain_codes[domain].add(code)
Register a new warning code
def could_collide_ver(self, vpos, adsb_pkt): if adsb_pkt.emitter_type < 100 or adsb_pkt.emitter_type > 104: return True margin = self.asterix_settings.filter_dist_z vtype = adsb_pkt.emitter_type - 100 valt = vpos.alt aalt1 = adsb_pkt.altitude * 0.001 if vtype == 2: return True if vtype == 4: return True aalt2 = aalt1 + adsb_pkt.ver_velocity * 0.01 * self.asterix_settings.filter_time altsep1 = abs(valt - aalt1) altsep2 = abs(valt - aalt2) if altsep1 > 150 + margin and altsep2 > 150 + margin: return False return True
return true if vehicle could come within filter_dist_z meters of adsb vehicle in timeout seconds
def _save_json(self, filename): with open(filename, 'w') as file_handle: json.dump(self._sensors, file_handle, cls=MySensorsJSONEncoder, indent=4) file_handle.flush() os.fsync(file_handle.fileno())
Save sensors to json file.
def close(self):
    """If a connection is open, close its transport and drop the reference."""
    conn = self._local.conn
    if conn:
        conn.transport.close()
        self._local.conn = None
If a connection is open, close its transport.
def _send_periodic_internal(self, msg, period, duration=None): if self._scheduler is None: self._scheduler = HANDLE() _canlib.canSchedulerOpen(self._device_handle, self.channel, self._scheduler) caps = structures.CANCAPABILITIES() _canlib.canSchedulerGetCaps(self._scheduler, caps) self._scheduler_resolution = float(caps.dwClockFreq) / caps.dwCmsDivisor _canlib.canSchedulerActivate(self._scheduler, constants.TRUE) return CyclicSendTask(self._scheduler, msg, period, duration, self._scheduler_resolution)
Send a message using built-in cyclic transmit list functionality.
def _verify_page(self): title_date = self._get_date_in_title().lower() split_date = self.date.lower().split() split_date[0] = split_date[0][:3] return all(term in title_date for term in split_date)
Verify the ratings page matches the correct date
def parse_float_literal(ast, _variables=None):
    """Parse a float (or int) value node in the AST into a Python float.

    Any other node type yields the INVALID sentinel.
    """
    if not isinstance(ast, (FloatValueNode, IntValueNode)):
        return INVALID
    return float(ast.value)
Parse a float value node in the AST.
def check_existing_filename(filename, onlyfiles=True):
    """Ensure that the given filename is a valid, existing file.

    Raises PatoolError when the path is missing, unreadable, or (when
    *onlyfiles* is true) not a regular file. Returns None on success.
    """
    if not os.path.exists(filename):
        raise PatoolError("file `%s' was not found" % filename)
    readable = os.access(filename, os.R_OK)
    if not readable:
        raise PatoolError("file `%s' is not readable" % filename)
    is_regular = os.path.isfile(filename)
    if onlyfiles and not is_regular:
        raise PatoolError("`%s' is not a file" % filename)
Ensure that given filename is a valid, existing file.
def evaluate_tour_M(self, tour): from .chic import score_evaluate_M return score_evaluate_M(tour, self.active_sizes, self.M)
Use Cythonized version to evaluate the score of a current tour
def enforce_filetype_file(form, field): if form._fields.get('filetype').data != RESOURCE_FILETYPE_FILE: return domain = urlparse(field.data).netloc allowed_domains = current_app.config['RESOURCES_FILE_ALLOWED_DOMAINS'] allowed_domains += [current_app.config.get('SERVER_NAME')] if current_app.config.get('CDN_DOMAIN'): allowed_domains.append(current_app.config['CDN_DOMAIN']) if '*' in allowed_domains: return if domain and domain not in allowed_domains: message = _('Domain "{domain}" not allowed for filetype "{filetype}"') raise validators.ValidationError(message.format( domain=domain, filetype=RESOURCE_FILETYPE_FILE ))
Allow only whitelisted domains in resource.url when the filetype is "file".
def close(self):
    """Close any open HTTP connections to the API server.

    Failures are swallowed unless ``fail_silently`` is disabled.
    """
    try:
        self.connection.close()
    except Exception:
        # Propagate only when the client is configured to be loud.
        if not self.fail_silently:
            raise
    else:
        self.connection = None
Close any open HTTP connections to the API server.
def _construct_functions(self, coefs, **kwargs): return [self.basis_functions.functions_factory(coef, **kwargs) for coef in coefs]
Return a list of functions given a list of coefficients.
def cdk_module_matches_env(env_name, env_config, env_vars): if env_config.get(env_name): current_env_config = env_config[env_name] if isinstance(current_env_config, type(True)) and current_env_config: return True if isinstance(current_env_config, six.string_types): (account_id, region) = current_env_config.split('/') if region == env_vars['AWS_DEFAULT_REGION']: boto_args = extract_boto_args_from_env(env_vars) sts_client = boto3.client( 'sts', region_name=env_vars['AWS_DEFAULT_REGION'], **boto_args ) if sts_client.get_caller_identity()['Account'] == account_id: return True if isinstance(current_env_config, dict): return True return False
Return bool on whether cdk command should continue in current env.
def restrict_to_parent(self, target, parent):
    """Restrict *target* to the parent structure's boundaries.

    Any value outside the open interval (parent['start'], parent['end'])
    is clamped to parent['end'].
    """
    inside = parent['start'] < target < parent['end']
    return target if inside else parent['end']
Restrict target to parent structure boundaries.
def single_read(self, register):
    """Read data from the desired AD7730 register only once.

    Returns the register payload bytes (command echo byte stripped).

    Raises:
        ValueError: if *register* is not a known AD7730 register. The
            previous if/elif chain silently fell through for unknown values
            and crashed later with ``NameError`` on ``bytes_num``.
    """
    # Communications register: read operation (0b00010) of the selected register.
    comm_reg = (0b00010 << 3) + register
    # Payload width in bytes depends on the target register.
    sizes = {
        self.AD7730_STATUS_REG: 1,
        self.AD7730_DATA_REG: 3,
        self.AD7730_MODE_REG: 2,
        self.AD7730_FILTER_REG: 3,
        self.AD7730_DAC_REG: 1,
        self.AD7730_OFFSET_REG: 3,
        self.AD7730_GAIN_REG: 3,
        self.AD7730_TEST_REG: 3,
    }
    try:
        bytes_num = sizes[register]
    except KeyError:
        raise ValueError('Unknown AD7730 register: %r' % (register,))
    command = [comm_reg] + ([0x00] * bytes_num)
    spi.SPI_write(self.CS, command)
    data = spi.SPI_read(bytes_num + 1)
    # First byte echoes the command phase; the payload follows.
    return data[1:]
Reads data from desired register only once.
def state_names():
    """Get the set of all US state names.

    Reads the first column of the bundled ``resources/States.csv``.
    """
    fname = pkg_resources.resource_filename(__name__, 'resources/States.csv')
    # 'r' with newline='' replaces the 'rU' mode, which was deprecated and
    # removed in Python 3.11; universal-newline handling is delegated to csv.
    with open(fname, 'r', newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        # Guard against blank trailing rows, which would raise IndexError.
        return {row[0] for row in reader if row}
Get the set of all US state names
def SelfReferenceProperty(label=None, collection_name=None, **attrs): if 'reference_class' in attrs: raise ConfigurationError( 'Do not provide reference_class to self-reference.') return ReferenceProperty(_SELF_REFERENCE, label, collection_name, **attrs)
Create a self reference.
def getPropagationBit(self, t, p):
    """Return the propagation bit for a textual propagation value.

    Raises CommandExecutionError when *p* is not a valid propagation for
    type *t*.
    """
    try:
        return self.validPropagations[t][p]['BITS']
    except KeyError:
        valid_names = ', '.join(self.validPropagations[t])
        raise CommandExecutionError(
            'No propagation type of "{0}". It should be one of the '
            'following: {1}'.format(p, valid_names))
returns the propagation bit of a text value
def encompassed_by(self, span):
    """Return whether the given span encompasses this span.

    When *span* is a list, instead return the sub-list of spans that
    encompass this one.
    """
    if not isinstance(span, list):
        return span.encompasses(self)
    return [candidate for candidate in span if candidate.encompasses(self)]
Returns true if the given span encompasses this span.
def interface_ip(iface): iface_info, error = _get_iface_info(iface) if error is False: inet = iface_info.get(iface, {}).get('inet', None) return inet[0].get('address', '') if inet else '' else: return error
Return `iface` IPv4 addr or an error if `iface` does not exist
def commitAndCloseEditor(self): editor = self.sender() try: self.commitData.emit(editor) except AttributeError: pass self.closeEditor.emit(editor, QAbstractItemDelegate.NoHint)
Commit and close editor
def from_socket(controller, host=None, port=None, track_path=None, log_level=logging.ERROR): rocket = Rocket(controller, track_path=track_path, log_level=log_level) rocket.connector = SocketConnector(controller=controller, tracks=rocket.tracks, host=host, port=port) return rocket
Create rocket instance using socket connector
def iscm_md_update_dict(self, keypath, data):
    """Update a metadata dictionary entry.

    *keypath* is a dot-separated path into ``self.metadata``; intermediate
    dicts are created as needed, then *data* is merged into the leaf dict.
    """
    current = self.metadata
    # str.split and the `in` operator replace `string.split` and
    # `dict.has_key`, both removed in Python 3; behavior is unchanged
    # on Python 2.
    for key in keypath.split("."):
        if key not in current:
            current[key] = {}
        current = current[key]
    current.update(data)
Update a metadata dictionary entry
def _delete_doc_from_index(index_writer, docid): query = whoosh.query.Term("docid", docid) index_writer.delete_by_query(query)
Remove a document from the index
def _unstructure_mapping(self, mapping): dispatch = self._unstructure_func.dispatch return mapping.__class__( (dispatch(k.__class__)(k), dispatch(v.__class__)(v)) for k, v in mapping.items() )
Convert a mapping of attr classes to primitive equivalents.
def superclass(self, klass): return bool(lib.EnvSuperclassP(self._env, self._cls, klass._cls))
True if the Class is a superclass of the given one.
def setdummies(self, e):
    """Create and define all needed dummy vertices for edge *e*.

    Long edges (spanning more than one rank) are broken into unit-rank
    segments by registering a control vertex at every intermediate rank.
    (Python 2 module: uses xrange.)
    """
    v0, v1 = e.v
    r0, r1 = self.grx[v0].rank, self.grx[v1].rank
    if r0 > r1:
        # edge points upward: must be one of the reversed ("alt") edges;
        # normalize it so r0 <= r1 below
        assert e in self.alt_e
        v0, v1 = v1, v0
        r0, r1 = r1, r0
    if (r1 - r0) > 1:
        # ctrl maps rank -> vertex for this edge: real endpoints plus
        # one dummy per intermediate rank
        ctrl = self.ctrls[e] = {}
        ctrl[r0] = v0
        ctrl[r1] = v1
        for r in xrange(r0 + 1, r1):
            self.dummyctrl(r, ctrl)
creates and defines all needed dummy vertices for edge e.
def split_docstring(self, block): try: first_line, rest_of_lines = block.split("\n", 1) except ValueError: pass else: raw_first_line = split_leading_trailing_indent(rem_comment(first_line))[1] if match_in(self.just_a_string, raw_first_line): return first_line, rest_of_lines return None, block
Split a code block into a docstring and a body.
def search_fields(self): search_fields = self.request.get("search_fields", None) if not search_fields: return [] search_fields = json.loads(_u(search_fields)) return search_fields
Returns the object field names to search against
def model_saved(sender, instance, created, raw, using, **kwargs): opts = get_opts(instance) model = '.'.join([opts.app_label, opts.object_name]) action = 'created' if created else 'updated' distill_model_event(instance, model, action)
Automatically triggers "created" and "updated" actions.
def takeoff(self): self.send(at.REF(at.REF.input.start))
Sends the takeoff command.
def freeze(self):
    """Make the SchemaElement's connections immutable.

    Replaces both connection collections with frozensets so no further
    links can be added or removed.
    """
    for attr in ('in_connections', 'out_connections'):
        setattr(self, attr, frozenset(getattr(self, attr)))
Make the SchemaElement's connections immutable.
def wait(self): while True: if not self.greenlet_watch: break if self.stopping: gevent.sleep(0.1) else: gevent.sleep(1)
Waits for the pool to be fully stopped
def __load_section(self, section_key): if self._sections[section_key] is not None: return articles = [] for page in count(1): if page > 50: raise Exception('Last page detection is probably broken') url = '{domain}{section}&iMenuID=1&iSubMenuID={page}'.format( domain = DOMAIN, section = SECTIONS[section_key], page = page ) body = self._session.get(url).content if "์•Œ์ˆ˜ ์—†๋Š” ์ฃผ์†Œ" in body: break root = html.fromstring(body) title_lines = root.find_class('ListNewsLineTitle') for title_line in title_lines: title_link = title_line.find('a') href = title_link.get('href') match = re.match("javascript:article_open\('(.+)'\)", href) if not match: raise Exception("The site's link format has changed and is not compatible") path = match.group(1).decode('string_escape') articles.append(Article( self._session, title_link.text_content().strip(), DOMAIN + '/en/' + path )) self._sections[section_key] = articles
Reads the set of article links for a section if they are not cached.
def _fill(self): types_to_exclude = ['module', 'function', 'builtin_function_or_method', 'instance', '_Feature', 'type', 'ufunc'] values = self.namespace.who_ls() def eval(expr): return self.namespace.shell.ev(expr) var = [(v, type(eval(v)).__name__, str(_getsizeof(eval(v))), str(_getshapeof(eval(v))) if _getshapeof(eval(v)) else '', str(eval(v))[:200]) for v in values if (v not in ['_html', '_nms', 'NamespaceMagics', '_Jupyter']) & (type(eval(v)).__name__ not in types_to_exclude)] self._table.value = '<div class="rendered_html jp-RenderedHTMLCommon"><table><thead><tr><th>Name</th><th>Type</th><th>Size</th><th>Shape</th><th>Value</th></tr></thead><tr><td>' + \ '</td></tr><tr><td>'.join(['{0}</td><td>{1}</td><td>{2}</td><td>{3}</td><td>{4}'.format(v1, v2, v3, v4, v5) for v1, v2, v3, v4, v5 in var]) + \ '</td></tr></table></div>'
Fill self with variable information.
def plot_lc(calc_id, aid=None): dstore = util.read(calc_id) dset = dstore['agg_curves-rlzs'] if aid is None: plt = make_figure(dset.attrs['return_periods'], dset.value) else: sys.exit('Not implemented yet') plt.show()
Plot loss curves given a calculation id and an asset ordinal.
def install_signal_handlers(self): self.graceful_stop = False def request_shutdown_now(): self.shutdown_now() def request_shutdown_graceful(): if self.graceful_stop: self.shutdown_now() else: self.graceful_stop = True self.shutdown_graceful() gevent.signal(signal.SIGINT, request_shutdown_graceful) gevent.signal(signal.SIGTERM, request_shutdown_now)
Handle events like Ctrl-C from the command line.
def put_file(client, source_file, destination_file): try: sftp_client = client.open_sftp() sftp_client.put(source_file, destination_file) except Exception as error: raise IpaUtilsException( 'Error copying file to instance: {0}.'.format(error) ) finally: with ignored(Exception): sftp_client.close()
Copy file to instance using Paramiko client connection.
def _retrieve_grains_cache(proxy=None): global GRAINS_CACHE if not GRAINS_CACHE: if proxy and salt.utils.napalm.is_proxy(__opts__): GRAINS_CACHE = proxy['napalm.get_grains']() elif not proxy and salt.utils.napalm.is_minion(__opts__): GRAINS_CACHE = salt.utils.napalm.call( DEVICE_CACHE, 'get_facts', **{} ) return GRAINS_CACHE
Retrieves the grains from the network device if not cached already.
def getLevel(self): lvl = 0 p = self while True: p = p.parent if not isinstance(p, LPort): break lvl += 1 return lvl
Get nest-level of this port
def user_pass(self, func=None, location=None, **rkwargs): def wrapper(view): view = to_coroutine(view) @functools.wraps(view) async def handler(request, *args, **kwargs): await self.check_user(request, func, location, **rkwargs) return await view(request, *args, **kwargs) return handler return wrapper
Decorator ensures that user pass the given func.
def domain_unblock(self, domain=None): params = self.__generate_params(locals()) self.__api_request('DELETE', '/api/v1/domain_blocks', params)
Remove a domain block for the logged-in user.
async def _get_cdn_client(self, cdn_redirect):
    """Similar to ._borrow_exported_client, but for CDN DCs.

    NOTE(review): the leading ``raise NotImplementedError`` deliberately
    disables this method; everything after it is currently dead code,
    apparently kept for reference — confirm whether it should be removed.
    """
    raise NotImplementedError
    session = self._exported_sessions.get(cdn_redirect.dc_id)
    if not session:
        # No cached session for this CDN DC yet: clone and repoint one.
        dc = await self._get_dc(cdn_redirect.dc_id, cdn=True)
        session = self.session.clone()
        await session.set_dc(dc.id, dc.ip_address, dc.port)
        self._exported_sessions[cdn_redirect.dc_id] = session
    self._log[__name__].info('Creating new CDN client')
    client = TelegramBareClient(
        session, self.api_id, self.api_hash,
        proxy=self._sender.connection.conn.proxy,
        timeout=self._sender.connection.get_timeout()
    )
    # CDNs only serve files; skip update synchronization.
    client.connect(_sync_updates=False)
    return client
Similar to ._borrow_exported_client, but for CDNs
def single_node_env(num_gpus=1): import tensorflow as tf if 'HADOOP_PREFIX' in os.environ and 'TFOS_CLASSPATH_UPDATED' not in os.environ: classpath = os.environ['CLASSPATH'] hadoop_path = os.path.join(os.environ['HADOOP_PREFIX'], 'bin', 'hadoop') hadoop_classpath = subprocess.check_output([hadoop_path, 'classpath', '--glob']).decode() os.environ['CLASSPATH'] = classpath + os.pathsep + hadoop_classpath os.environ['TFOS_CLASSPATH_UPDATED'] = '1' if tf.test.is_built_with_cuda(): gpus_to_use = gpu_info.get_gpus(num_gpus) logging.info("Using gpu(s): {0}".format(gpus_to_use)) os.environ['CUDA_VISIBLE_DEVICES'] = gpus_to_use else: logging.info("Using CPU") os.environ['CUDA_VISIBLE_DEVICES'] = ''
Setup environment variables for Hadoop compatibility and GPU allocation
def queue_purge(self, queue, **kwargs): qsize = mqueue.qsize() mqueue.queue.clear() return qsize
Discard all messages in the queue.
def create_kernel_instance(self, kernel_options, params, verbose): instance_string = util.get_instance_string(params) grid_div = (kernel_options.grid_div_x, kernel_options.grid_div_y, kernel_options.grid_div_z) if not kernel_options.block_size_names: kernel_options.block_size_names = util.default_block_size_names threads, grid = util.setup_block_and_grid(kernel_options.problem_size, grid_div, params, kernel_options.block_size_names) if numpy.prod(threads) > self.dev.max_threads: if verbose: print("skipping config", instance_string, "reason: too many threads per block") return None temp_files = dict() kernel_source = kernel_options.kernel_string if not isinstance(kernel_source, list): kernel_source = [kernel_source] name, kernel_string, temp_files = util.prepare_list_of_files(kernel_options.kernel_name, kernel_source, params, grid, threads, kernel_options.block_size_names) return KernelInstance(name, kernel_string, temp_files, threads, grid, params, kernel_options.arguments)
create kernel instance from kernel source, parameters, problem size, grid divisors, and so on
def clip_image(image, clip_min, clip_max):
    """Clip an image, or an image batch, with upper and lower thresholds."""
    lower_bounded = np.maximum(clip_min, image)
    return np.minimum(lower_bounded, clip_max)
Clip an image, or an image batch, with upper and lower threshold.
def prepare_date(data, schema): if isinstance(data, datetime.date): return data.toordinal() - DAYS_SHIFT else: return data
Converts datetime.date to int timestamp
def segment_volumes(neurites, neurite_type=NeuriteType.all): def _func(sec): return [morphmath.segment_volume(seg) for seg in zip(sec.points[:-1], sec.points[1:])] return map_segments(_func, neurites, neurite_type)
Volumes of the segments in a collection of neurites
def load_reg():
    """Load the register from its msgpack file.

    Raises:
        Exception: re-raised after logging when the register file cannot
            be read.
    """
    reg_dir = _reg_dir()
    regfile = os.path.join(reg_dir, 'register')
    try:
        with salt.utils.files.fopen(regfile, 'r') as fh_:
            return salt.utils.msgpack.load(fh_)
    except Exception:
        # Fixed log message: this is a *read* failure of the register file;
        # the old message said "write" and pointed at __opts__['outdir'].
        log.error('Could not read from msgpack file %s', regfile)
        raise
Load the register from msgpack files
def time_between_updates(self):
    """Time between the current `last_updated` and the previous `last_updated`.

    Returns 0 when no previous value was recorded in ``self._original``.
    """
    if 'last_updated' not in self._original:
        return 0
    return self.last_updated - self._original['last_updated']
Time between current `last_updated` and previous `last_updated`
def create_weights(nodes, dist): poly = chaospy.quad.generate_stieltjes(dist, len(nodes)-1, retall=True)[0] poly = chaospy.poly.flatten(chaospy.poly.Poly(poly)) weights_inverse = poly(nodes) weights = numpy.linalg.inv(weights_inverse) return weights[:, 0]
Create weights for the Laja method.
def call(function): message = 'call:%s.%s' % (function.__module__,function.__name__) @functools.wraps(function) def wrapper(*args, **kwargs): _collect(message) return function(*args, **kwargs) return wrapper
decorator that collect function call count.
def delete_all_renditions(self): if self.renditions: for r in self.renditions.values(): default_storage.delete(r) self.renditions = {}
delete all renditions and rendition dict
def __set_values(self, values): array = tuple(tuple(self._clean_value(col) for col in row) for row in values) self._get_target().setDataArray(array)
Sets values in this cell range from an iterable of iterables.
def as_list(self, decode=False):
    """Return a list containing all the items in the list.

    When *decode* is true each raw item is passed through ``_decode``.
    """
    raw_items = self.database.lrange(self.key, 0, -1)
    if not decode:
        return raw_items
    return [_decode(raw) for raw in raw_items]
Return a list containing all the items in the list.
def display_grid_scores(grid_scores, top=None): grid_scores = sorted(grid_scores, key=lambda x: x[1], reverse=True) if top is not None: grid_scores = grid_scores[:top] _, best_mean, best_scores = grid_scores[0] threshold = best_mean - 2 * sem(best_scores) for params, mean_score, scores in grid_scores: append_star = mean_score + 2 * sem(scores) > threshold print(display_scores(params, scores, append_star=append_star))
Helper function to format a report on a grid of scores
def json_decode(data):
    """Decode the given JSON document into Python primitives.

    Accepts ``bytes`` (decoded as UTF-8 first) or ``str``.
    """
    # isinstance(data, bytes) replaces six.binary_type: identical semantics
    # on both Python 2 (where bytes is str) and Python 3, without needing six.
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    return json.loads(data)
Decodes the given JSON as primitives
def pattern_logic_aeidon(): if Config.options.pattern_files: return prep_patterns(Config.options.pattern_files) elif Config.options.regex: return Config.REGEX else: return Config.TERMS
Return patterns to be used for searching subtitles via aeidon.
def path(args): from .query import Database db = Database() output = sys.stdout if args.selftest: from bob.db.utils import null output = null() r = db.paths(args.id, prefix=args.directory, suffix=args.extension) for path in r: output.write('%s\n' % path) if not r: return 1 return 0
Returns a list of fully formed paths or stems given some file id
def _copy_update(sourcepath, destname): if sys.platform.startswith('linux'): return os.system("/bin/cp -ua '%s' '%s'" % (sourcepath, destname)) else: return os.system("rsync -ua '%s' '%s'" % (sourcepath, destname))
Copy source to dest only if source is newer.
def getReferenceSetByName(self, name):
    """Return the reference set with the specified name.

    Raises ReferenceSetNameNotFoundException for unknown names.
    """
    try:
        return self._referenceSetNameMap[name]
    except KeyError:
        raise exceptions.ReferenceSetNameNotFoundException(name)
Returns the reference set with the specified name.
def dePeriod(arr): diff= arr-nu.roll(arr,1,axis=1) w= diff < -6. addto= nu.cumsum(w.astype(int),axis=1) return arr+_TWOPI*addto
make an array of periodic angles increase linearly