code
stringlengths
51
2.34k
docstring
stringlengths
11
171
def write(self, text):
    """Write *text* to the terminal without breaking the spinner line."""
    sys.stdout.write("\r")  # jump back to the start of the line
    self._clear_line()
    out = to_unicode(text)
    if PY2:
        # python 2 stdout expects encoded bytes, not unicode
        out = out.encode(ENCODING)
    assert isinstance(out, builtin_str)
    sys.stdout.write("{0}\n".format(out))
Write text in the terminal without breaking the spinner.
def normalize(pw):
    """Lower-case *pw* and map leet symbols to their closest letters."""
    return ''.join(helper.L33T.get(ch, ch) for ch in pw.lower())
Lower case, and change the symbols to closest characters
def missing_count(self):
    """Numeric count of missing rows in the cube response.

    Prefers the means' missing count when means are present; otherwise
    falls back to the raw cube payload.
    """
    means = self.means
    if means:
        return means.missing_count
    return self._cube_dict["result"].get("missing", 0)
numeric representing count of missing rows in cube response.
def scramble_native_password(password, message):
    """Scramble *password* for mysql_native_password authentication."""
    if not password:
        return b''
    # token = SHA1(password) XOR SHA1(salt + SHA1(SHA1(password)))
    stage1 = sha1_new(password).digest()
    stage2 = sha1_new(stage1).digest()
    mixer = sha1_new()
    mixer.update(message[:SCRAMBLE_LENGTH])
    mixer.update(stage2)
    return _my_crypt(mixer.digest(), stage1)
Scramble used for mysql_native_password
def update_cache(self, data=None):
    """Refresh the cache timestamp and persist; optionally store new data first."""
    self.cache_updated = timezone.now()
    if data:
        self.cache_data = data
    self.save()
call with new data or set data to self.cache_data and call this
def _clean_X_y(X, y):
    """Coerce X to a 2-D float array and y to a float array."""
    X_clean = make_2d(X, verbose=False).astype('float')
    y_clean = y.astype('float')
    return X_clean, y_clean
ensure that X and y data are float and correct shapes
def diff(self):
    """The difference between a PDA and a DFA.

    Complements and minimizes ``self.mmb`` over the alphabet, then
    intersects it with the other machine, storing the result in
    ``self.mmc``.

    Returns:
        The intersection automaton (``self.mmc``).
    """
    self.mmb.complement(self.alphabet)
    self.mmb.minimize()
    # print statements were python-2-only syntax; print() works on 2 and 3
    print('start intersection')
    self.mmc = self._intesect()  # NOTE(review): '_intesect' typo is the project API name
    print('end intersection')
    return self.mmc
The Difference between a PDA and a DFA
def element(self, inp=None):
    """Return a fixed-width field built from *inp*, or blanks from scratch.

    *inp* is stringified, truncated to ``self.length`` and right-padded
    with spaces; ``None`` yields an all-space field.
    """
    width = self.length
    if inp is None:
        return ' ' * width
    return str(inp)[:width].ljust(width)
Return an element from ``inp`` or from scratch.
def _GenerateCSRFKey(config): secret_key = config.Get("AdminUI.csrf_secret_key", None) if not secret_key: secret_key = config.Get("AdminUI.django_secret_key", None) if secret_key: config.Set("AdminUI.csrf_secret_key", secret_key) if not secret_key: key = utils.GeneratePassphrase(length=100) ...
Update a config with a random csrf key.
def ruleName(self):
    """Human-readable rate-rule label; should be overridden by child classes."""
    label = self.RateRuleChoices.values.get(self.applyRateRule, self.applyRateRule)
    return '%s %s' % (self.rentalRate, label)
This should be overridden for child classes
def find_file(config_file=None, default_directories=None, default_bases=None): if config_file: if path.exists(path.expanduser(config_file)): return config_file else: raise FileNotFoundError('Config file not found: {}'.format(config_file)) dirs = default_directories or CON...
Search for a config file in a list of files.
def xywh_from_points(points): xys = [[int(p) for p in pair.split(',')] for pair in points.split(' ')] minx = sys.maxsize miny = sys.maxsize maxx = 0 maxy = 0 for xy in xys: if xy[0] < minx: minx = xy[0] if xy[0] > maxx: maxx = xy[0] if xy[1] < miny...
Constructs a dict representing a rectangle with keys x, y, w, h
def execute(self): try: for key, value in self._watched_keys.items(): if self.mock_redis.redis.get(self.mock_redis._encode(key)) != value: raise WatchError("Watched variable changed.") return [command() for command in self.commands] finally: ...
Execute all of the saved commands and return results.
def count(self, eventRegistry):
    """Return the number of events that match the criteria of this query."""
    self.setRequestedResult(RequestEventsInfo())
    res = eventRegistry.execQuery(self)
    if "error" in res:
        print(res["error"])
    return res.get("events", {}).get("totalResults", 0)
return the number of events that match the criteria
def _find_usage_spot_instances(self): logger.debug('Getting spot instance request usage') try: res = self.conn.describe_spot_instance_requests() except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'UnsupportedOperation': return ...
calculate spot instance request usage and update Limits
def predict_y(self, Xnew):
    """Compute the mean and variance of held-out data at the points Xnew."""
    f_mean, f_var = self._build_predict(Xnew)
    return self.likelihood.predict_mean_and_var(f_mean, f_var)
Compute the mean and variance of held-out data at the points Xnew
def rm_rf(path): if os.path.isfile(path): os.unlink(path) elif os.path.isdir(path): for root, dirs, files in os.walk(path, topdown=False): for filename in files: filepath = os.path.join(root, filename) logger.info("Deleting file %s" % filepath) ...
Act as 'rm -rf' in the shell
def register(self):
    """Register this SSH public key with the provider.

    Logs the installation, then pushes ``self.key`` under ``self.name``
    via the consul client.
    """
    # lazy %-args: the logging framework formats only if the record is emitted
    log.info('Installing ssh key, %s', self.name)
    self.consul.create_ssh_pub_key(self.name, self.key)
Registers SSH key with provider.
def _new_replica(self, instance_id: int, is_master: bool, bls_bft: BlsBft) -> Replica:
    """Instantiate a replica for the given protocol instance."""
    return self._replica_class(
        self._node,
        instance_id,
        self._config,
        is_master,
        bls_bft,
        self._metrics,
    )
Create a new replica with the specified parameters.
def load_jupyter_server_extension(nb_server_app): app = nb_server_app.web_app host_pattern = '.*$' app.add_handlers(host_pattern, [ (utils.url_path_join(app.settings['base_url'], '/http_over_websocket'), handlers.HttpOverWebSocketHandler), (utils.url_path_join(app.settings['base_url'], ...
Called by Jupyter when this module is loaded as a server extension.
def generate(self, request, **kwargs): self.method_check(request, allowed=['get']) basic_bundle = self.build_bundle(request=request) tileset = self.cached_obj_get( bundle=basic_bundle, **self.remove_api_resource_names(kwargs)) return self.create_response(request, ...
proxy for the tileset.generate method
def data_filler_customer(self, number_of_rows, db): try: customer = db data_list = list() for i in range(0, number_of_rows): post_cus_reg = { "id": rnd_id_generator(self), "name": self.faker.first_name(), ...
creates and fills the table with customer data
def _rd_fld_vals(name, val, set_list_ft=True, qty_min=0, qty_max=None): if not val and qty_min == 0: return [] if set_list_ft else set() vals = val.split('|') num_vals = len(vals) assert num_vals >= qty_min, \ "FLD({F}): MIN QUANTITY({Q}) WASN'T MET: {V}".format( ...
Further split a GPAD value within a single field.
def petl(self, *args, **kwargs): import petl t = self.resolved_url.get_resource().get_target() if t.target_format == 'txt': return petl.fromtext(str(t.fspath), *args, **kwargs) elif t.target_format == 'csv': return petl.fromcsv(str(t.fspath), *args, **kwargs) ...
Return a PETL source object
def extract_imports(script):
    """Extract all imports from the python script at path *script*.

    Raises ValueError when *script* is not an existing file.
    """
    if not os.path.isfile(script):
        raise ValueError('Not a file: %s' % script)
    result = find_imports(parse_python(script))
    result.path = script
    return result
Extract all imports from a python script
def _map_unity_proxy_to_object(value): vtype = type(value) if vtype in _proxy_map: return _proxy_map[vtype](value) elif vtype == list: return [_map_unity_proxy_to_object(v) for v in value] elif vtype == dict: return {k:_map_unity_proxy_to_object(v) for k,v in value.items()} e...
Map a returned Unity proxy value to a python object, converting SFrame/SArray proxies and recursing into lists and dicts
def _build_url(self, shorten=True):
    """Build and store the url for a cable ratings page."""
    params = self._get_url_params(shorten=shorten)
    self.url = URL_FORMAT.format(*params)
Build the url for a cable ratings page
def check_py(self, version, name, original, loc, tokens): internal_assert(len(tokens) == 1, "invalid " + name + " tokens", tokens) if self.target_info < get_target_info(version): raise self.make_err(CoconutTargetError, "found Python " + ".".join(version) + " " + name, original, loc, target=v...
Check for Python-version-specific syntax.
def create_cnn_model(base_arch:Callable, nc:int, cut:Union[int,Callable]=None, pretrained:bool=True, lin_ftrs:Optional[Collection[int]]=None, ps:Floats=0.5, custom_head:Optional[nn.Module]=None, split_on:Optional[SplitFuncOrIdxList]=None, bn_final:bool=False, concat_pool:bool=True): "Create custom c...
Create custom convnet architecture
def zero_crossing_after(self, n): n_in_samples = int(n * self.samplerate) search_end = n_in_samples + self.samplerate if search_end > self.duration: search_end = self.duration frame = zero_crossing_first( self.range_as_mono(n_in_samples, search_end)) + n_in_sample...
Find nearest zero crossing in waveform after frame ``n``
def getResiduals(self): X = np.zeros((self.N*self.P,self.n_fixed_effs)) ip = 0 for i in range(self.n_terms): Ki = self.A[i].shape[0]*self.F[i].shape[1] X[:,ip:ip+Ki] = np.kron(self.A[i].T,self.F[i]) ip += Ki y = np.reshape(self.Y,(self.Y.size,1),order=...
regress out fixed effects and return residuals
def enabled_checker(func): @wraps(func) def wrap(self, *args, **kwargs): if self.allowed_methods and isinstance(self.allowed_methods, list) and func.__name__ not in self.allowed_methods: raise Exception("Method {} is disabled".format(func.__name__)) return func(se...
Access decorator which checks if a RPC method is enabled by our configuration
def token(self, token_address: Address) -> Token: if not is_binary_address(token_address): raise ValueError('token_address must be a valid address') with self._token_creation_lock: if token_address not in self.address_to_token: self.address_to_token[token_address]...
Return a proxy to interact with a token.
def getManagers(self): manager_ids = [] manager_list = [] for department in self.getDepartments(): manager = department.getManager() if manager is None: continue manager_id = manager.getId() if manager_id not in manager_ids: ...
Return all managers of responsible departments
def _parse_preseq_logs(f): lines = f['f'].splitlines() header = lines.pop(0) data_is_bases = False if header.startswith('TOTAL_READS EXPECTED_DISTINCT'): pass elif header.startswith('TOTAL_BASES EXPECTED_DISTINCT'): data_is_bases = True elif header.startswith('total_reads distinc...
Go through log file looking for preseq output
def as_sparse_array(self, kind=None, fill_value=None, copy=False): if fill_value is None: fill_value = self.fill_value if kind is None: kind = self.kind return SparseArray(self.values, sparse_index=self.sp_index, fill_value=fill_value, kind=kind...
return my self as a sparse array, do not copy by default
def make_coord_dict(coord):
    """Build a {z, x, y} dict from a tile coordinate, for logging."""
    return {
        'z': int_if_exact(coord.zoom),
        'x': int_if_exact(coord.column),
        'y': int_if_exact(coord.row),
    }
helper function to make a dict from a coordinate for logging
def _init_taxid2asscs(self): taxid2asscs = cx.defaultdict(list) for ntanno in self.associations: taxid2asscs[ntanno.tax_id].append(ntanno) assert len(taxid2asscs) != 0, "**FATAL: NO TAXIDS: {F}".format(F=self.filename) prt = sys.stdout num_taxids = len(taxid2asscs) ...
Create dict with taxid keys and annotation namedtuple list.
def _remove_overlaps(in_file, out_dir, data): out_file = os.path.join(out_dir, "%s-nooverlaps%s" % utils.splitext_plus(os.path.basename(in_file))) if not utils.file_uptodate(out_file, in_file): with file_transaction(data, out_file) as tx_out_file: with open(in_file) as in_handle: ...
Remove regions that overlap with next region, these result in issues with PureCN.
def rsi(series, window=14): deltas = np.diff(series) seed = deltas[:window + 1] ups = seed[seed > 0].sum() / window downs = -seed[seed < 0].sum() / window rsival = np.zeros_like(series) rsival[:window] = 100. - 100. / (1. + ups / downs) for i in range(window, len(series)): delta = de...
compute the n period relative strength indicator
def _list2Rlist(xs):
    """Convert a python list (or a bare string) to an R ``c(...)`` literal."""
    items = [xs] if isinstance(xs, six.string_types) else xs
    body = ",".join([_quotestring(item) for item in items])
    return "c(" + body + ")"
convert a python list to an R list
def unindex_model_on_delete(sender, document, **kwargs):
    """Unindex a Mongo document on post_delete when auto-indexing is enabled."""
    if current_app.config.get('AUTO_INDEX'):
        unindex.delay(document)
Unindex Mongo document on post_delete
def ingress_filter(self, response): data = self.data_getter(response) if isinstance(data, dict): data = m_data.DictResponse(data) elif isinstance(data, list): data = m_data.ListResponse(data) else: return data data.meta = self.meta_getter(respo...
Flatten a response with meta and data keys into an object.
def flush():
    """Best-effort flush of all stdio buffers, both python-level and C-level."""
    try:
        sys.stdout.flush()
        sys.stderr.flush()
    except (AttributeError, ValueError, IOError):
        pass  # streams may be closed or replaced; flushing is best-effort
    try:
        libc.fflush(None)  # NULL flushes every open C stream
    except (AttributeError, ValueError, IOError):
        pass
Try to flush all stdio buffers, both from python and from C.
def parse_changes(): with open('CHANGES') as changes: for match in re.finditer(RE_CHANGES, changes.read(1024), re.M): if len(match.group(1)) != len(match.group(3)): error('incorrect underline in CHANGES') date = datetime.datetime.strptime(match.group(4), ...
grab version from CHANGES and validate entry
def doIteration(self, delay=None, fromqt=False): 'This method is called by a Qt timer or by network activity on a file descriptor' if not self.running and self._blockApp: self._blockApp.quit() self._timer.stop() delay = max(delay, 1) if not fromqt: self.qA...
This method is called by a Qt timer or by network activity on a file descriptor
def merge_objects(self, mujoco_objects): self.mujoco_objects = mujoco_objects self.objects = {} self.max_horizontal_radius = 0 for obj_name, obj_mjcf in mujoco_objects.items(): self.merge_asset(obj_mjcf) obj = obj_mjcf.get_collision(name=obj_name, site=True) ...
Adds physical objects to the MJCF model.
def __get_region(conn, vm_):
    """Return the GCE libcloud region object matching the VM's location.

    The region name is the first two dash-separated parts of the location
    name (e.g. 'us-central1-a' -> 'us-central1').
    """
    location = __get_location(conn, vm_)
    region_name = '-'.join(location.name.split('-')[:2])
    return conn.ex_get_region(region_name)
Return a GCE libcloud region object with matching name.
def load_contents(self): with open(METADATA_FILE) as f: lines = f.readlines() lines = map(lambda x: x.strip(), lines) exclude_strings = ['<begin_table>', '<end_table>'] list_of_databases_and_columns = filter( lambda x: not x[0] in exclude_strings, [ ...
Loads contents of the tables into database.
def _filter_bad_reads(in_bam, ref_file, data): bam.index(in_bam, data["config"]) out_file = "%s-gatkfilter.bam" % os.path.splitext(in_bam)[0] if not utils.file_exists(out_file): with tx_tmpdir(data) as tmp_dir: with file_transaction(data, out_file) as tx_out_file: params ...
Use GATK filter to remove problem reads which choke GATK and Picard.
def segment(self, webvtt, output='', seconds=SECONDS, mpegts=MPEGTS): if isinstance(webvtt, str): captions = WebVTT().read(webvtt).captions elif not self._validate_webvtt(webvtt): raise InvalidCaptionsError('The captions provided are invalid') else: captions =...
Segments the captions based on a number of seconds.
def matches(self, filter_props): if filter_props is None: return False found_one = False for key, value in filter_props.items(): if key in self.properties and value != self.properties[key]: return False elif key in self.properties and value == ...
Check if the filter matches the supplied properties.
def list_images(self, repository_name, registry_id=None): repository = None found = False if repository_name in self.repositories: repository = self.repositories[repository_name] if registry_id: if repository.registry_id == registry_id: ...
maxResults and filtering not implemented
def weighted_hamming(b1, b2):
    """Hamming distance that emphasizes differences earlier in strings.

    A mismatch at position i (i > 0) contributes 1 + 1/i; a mismatch at
    position 0 contributes nothing (preserved original behavior).
    """
    assert len(b1) == len(b2)
    total = 0
    for i, (c1, c2) in enumerate(zip(b1, b2)):
        if c1 != c2 and i > 0:
            total += 1 + 1.0 / i
    return total
Hamming distance that emphasizes differences earlier in strings.
def init(self): url = 'https://finance.yahoo.com/quote/%s/history' % (self.ticker) r = requests.get(url) txt = r.content cookie = r.cookies['B'] pattern = re.compile('.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}') for line in txt.splitlines(): m = pattern.mat...
Returns a tuple pair of cookie and crumb used in the request
def item_link(self, item):
    """Generate the URL for a specific feed item (a forum topic)."""
    url_kwargs = {
        'forum_slug': item.forum.slug,
        'forum_pk': item.forum.pk,
        'slug': item.slug,
        'pk': item.id,
    }
    return reverse_lazy('forum_conversation:topic', kwargs=url_kwargs)
Generates a link for a specific item of the feed.
def _check_independent_residues(topology): for res in topology.residues(): atoms_in_residue = set([atom for atom in res.atoms()]) bond_partners_in_residue = [item for sublist in [atom.bond_partners for atom in res.atoms()] for item in sublist] if not bond_partners_in_residue: con...
Check to see if residues will constitute independent graphs.
def apply( self ): font = self.value('font') try: font.setPointSize(self.value('fontSize')) except TypeError: pass palette = self.value('colorSet').palette() if ( unwrapVariant(QApplication.instance().property('useScheme')) ): QApplication....
Applies the scheme to the current application.
def format(self, password: str = '') -> str:
    """Format the command with its arguments, ready to be sent on the wire."""
    body = self.name + self.action + self.args + password
    return MARKER_START + body + MARKER_END
Format command along with any arguments, ready to be sent.
def _get_current_tags(name, runas=None):
    """Return the current tag list for RabbitMQ user *name* ([] on error)."""
    try:
        users = __salt__['rabbitmq.list_users'](runas=runas)
        return list(users[name])
    except CommandExecutionError as err:
        log.error('Error: %s', err)
        return []
Whether Rabbitmq user's tags need to be changed
def _start_vibration_win(self, left_motor, right_motor): xinput_set_state = self.manager.xinput.XInputSetState xinput_set_state.argtypes = [ ctypes.c_uint, ctypes.POINTER(XinputVibration)] xinput_set_state.restype = ctypes.c_uint vibration = XinputVibration( int(l...
Start the vibration, which will run until stopped.
def fval(self, instance):
    """Return the raw value this property holds in *instance*, or None if unset."""
    # dict.get mirrors the original try/except-KeyError -> None behavior
    return instance.__dict__.get(self.instance_field_name)
return the raw value that this property is holding internally for instance
def next_code_is_indented(lines):
    """Is the next non-blank, non-comment line indented?

    Returns the regex match for the first code line (truthy when
    indented), or False when only blanks/comments remain.
    """
    for line in lines:
        if not _BLANK_LINE.match(line) and not _PY_COMMENT.match(line):
            return _PY_INDENTED.match(line)
    return False
Is the next unescaped line indented?
def _domain_differs(self, href):
    """Check that *href* is not on the same domain as the source URL.

    Links whose domain cannot be extracted are treated as same-domain.
    """
    target = utils.get_domain(href)
    if not target:
        return False
    return target != utils.get_domain(self.url)
Check that a link is not on the same domain as the source URL
def generate(env): try: bld = env['BUILDERS']['Zip'] except KeyError: bld = ZipBuilder env['BUILDERS']['Zip'] = bld env['ZIP'] = 'zip' env['ZIPFLAGS'] = SCons.Util.CLVar('') env['ZIPCOM'] = zipAction env['ZIPCOMPRESSION'] = zipcompression env['ZIPSUFFIX'...
Add Builders and construction variables for zip to an Environment.
def corr_coeff(x1, x2, t, tau1, tau2): dt = t[1] - t[0] tau = np.arange(tau1, tau2+dt, dt) rho = np.zeros(len(tau)) for n in range(len(tau)): i = np.abs(int(tau[n]/dt)) if tau[n] >= 0: seg2 = x2[0:-1-i] seg1 = x1[i:-1] elif tau[n] < 0: ...
Compute lagged correlation coefficient for two time series.
def delayed_close(self):
    """Delayed close: mark CLOSING now, actually close on the next ioloop tick."""
    self.state = CLOSING
    self.server.io_loop.add_callback(self.close)
Delayed close - won't close immediately, but on next ioloop tick.
def add_url(self, post_data): img_desc = post_data['desc'] img_path = post_data['file1'] cur_uid = tools.get_uudd(4) while MEntity.get_by_uid(cur_uid): cur_uid = tools.get_uudd(4) MEntity.create_entity(cur_uid, img_path, img_desc, kind=post_data['kind'] if 'kind' in p...
Adding the URL as entity.
def cfarray_to_list(cfarray):
    """Convert a CFArray to a python list."""
    count = cf.CFArrayGetCount(cfarray)
    out = []
    for i in range(count):
        raw = cf.CFArrayGetValueAtIndex(cfarray, i)
        out.append(cftype_to_value(c_void_p(raw)))
    return out
Convert CFArray to python list.
def deployAll(self):
    """Deploy every item from the vault. Useful after a format."""
    # resolve all targets first, then deploy (preserves original phase order)
    targets = [Target.getTarget(iid) for iid, _name, _path in self.db.listTargets()]
    for target in targets:
        target.deploy()
    verbose('Deploy all complete')
Deploys all the items from the vault. Useful after a format
def _load_words(self):
    """Load the list of profane words from self._words_file into _censor_list."""
    with open(self._words_file, 'r') as handle:
        self._censor_list = [word.strip() for word in handle.readlines()]
Loads the list of profane words from file.
def update_field(uid, post_id=None, tag_id=None, par_id=None): if post_id: entry = TabPost2Tag.update( post_id=post_id ).where(TabPost2Tag.uid == uid) entry.execute() if tag_id: entry2 = TabPost2Tag.update( par_id=tag_id[:2]...
Update the field of post2tag.
def _get_sensor_names(self): if not self.attrs.get('sensor'): return set([sensor for reader_instance in self.readers.values() for sensor in reader_instance.sensor_names]) elif not isinstance(self.attrs['sensor'], (set, tuple, list)): return set([self.attrs...
Join the sensors from all loaded readers.
def _retrieve_revisions(self): response = self._swimlane.request( 'get', 'history', params={ 'type': 'Records', 'id': self._record.id } ) raw_revisions = response.json() return [Revision(self._record, raw) fo...
Retrieve and populate Revision instances from history API endpoint
def print_status(raw_status, strip_units=False):
    """Print the status to stdout in the same format as the original apcaccess."""
    status_lines = split(raw_status)
    if strip_units:
        status_lines = strip_units_from_lines(status_lines)
    for status_line in status_lines:
        print(status_line)
Print the status to stdout in the same format as the original apcaccess.
def get(self, request, slug): matching_datasets = self.generate_matching_datasets(slug) if matching_datasets is None: raise Http404("Datasets meeting these criteria do not exist.") base_context = { 'datasets': matching_datasets, 'num_datasets': matching_datase...
Basic functionality for GET request to view.
def _prepare_sample(data, run_folder): want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"]) out = {} for k, v in data.items(): if k in want: out[k] = _relative_paths(v, run_folder) if "algorithm" not in out: analysis, algorithm = _se...
Extract passed keywords from input LIMS information.
def send(self):
    """Handle message transmission; return True when the message was sent."""
    if not self.prepare():
        return False
    print('sending message')
    lg.record_process('comms.py', 'Sending message ' + self.title)
    return True
this handles the message transmission
def read(self, filename):
    """Read and tokenize *filename* for parsing.

    Returns True on success; on IOError clears the filename and returns False.
    """
    try:
        with open(filename, 'r') as handle:
            self._filename = filename
            self.readstream(handle)
            return True
    except IOError:
        self._filename = None
        return False
Reads the file specified and tokenizes the data for parsing.
def native(s, encoding='utf-8', fallback='iso-8859-1'):
    """Convert a given string into a native string.

    :param s: the value to convert
    :param encoding: primary codec handed to the conversion helpers
    :param fallback: codec handed to the helpers as a second choice
        (exact fallback semantics live in ``unicodestr``/``bytestring``
        — verify there)
    """
    # Already the native str type for this interpreter: return as-is.
    if isinstance(s, str):
        return s
    # ``unicode`` only exists on python 2, so this module is py2-oriented;
    # there ``str is unicode`` is False and we fall through to bytestring.
    # NOTE(review): on python 3 this line raises NameError — confirm the
    # module guards against being imported under py3.
    if str is unicode:
        return unicodestr(s, encoding, fallback)
    return bytestring(s, encoding, fallback)
Convert a given string into a native string.
def load_items():
    """Load item details from the bundled JSON file into ITEMS_CACHE."""
    path = os.path.join(os.path.dirname(__file__), "data", "items.json")
    with open(path) as handle:
        payload = json.load(handle)
    for item in payload["result"]["items"]:
        ITEMS_CACHE[item["id"]] = item
Load item details from JSON file into memory
def generate(self, url, browsers=None, orientation=None, mac_res=None, win_res=None, quality=None, local=None, wait_time=None, callback_url=None): if isinstance(browsers, dict): browsers = [browsers] if browsers is None: browsers = [self.default_brows...
Generates screenshots for a URL.
def _create_channel(self, channel):
    """Create channel, adding ban/invite exception lists if the server supports them."""
    super()._create_channel(channel)
    for isupport_token, list_key in (('EXCEPTS', 'exceptlist'),
                                     ('INVEX', 'inviteexceptlist')):
        if isupport_token in self._isupport:
            self.channels[channel][list_key] = None
Create channel with optional ban and invite exception lists.
def comment_set(self):
    """Get the non-removed comments submitted for this object, newest first."""
    ct = ContentType.objects.get_for_model(self.__class__)
    return (
        Comment.objects
        .filter(content_type=ct, object_pk=self.pk)
        .exclude(is_removed=True)
        .order_by('-submit_date')
    )
Get the comments that have been submitted for the chat
def scale_image(self):
    """Scale the figure canvas to the current zoom factor."""
    zoom = self._scalestep ** self._scalefactor
    width = int(self.figcanvas.fwidth * zoom)
    height = int(self.figcanvas.fheight * zoom)
    self.figcanvas.setFixedSize(width, height)
Scale the image size.
def move_id_ahead(element_id, reference_id, idstr_list):
    """Move str(element_id) directly before str(reference_id) in idstr_list.

    The list is modified in place and also returned; identical ids are a no-op.
    """
    if element_id == reference_id:
        return idstr_list
    element, reference = str(element_id), str(reference_id)
    idstr_list.remove(element)
    idstr_list.insert(idstr_list.index(reference), element)
    return idstr_list
Moves element_id ahead of reference_id in the list
def FetchDiscoveryDoc(discovery_url, retries=5): discovery_urls = _NormalizeDiscoveryUrls(discovery_url) discovery_doc = None last_exception = None for url in discovery_urls: for _ in range(retries): try: content = _GetURLContent(url) if isinstance(con...
Fetch the discovery document at the given url.
def append_checksum(self, f, checksum):
    """Append the ESPLoader checksum byte to the just-written image."""
    align_file_position(f, 16)  # checksum lives on a 16-byte boundary
    f.write(struct.pack(b'B', checksum))
Append ESPLoader checksum to the just-written image
def init_command(default_output_format, default_myproxy_username): if not default_output_format: safeprint( textwrap.fill( 'This must be one of "json" or "text". Other values will be ' "ignored. ENTER to skip." ) ) default_output_format...
Executor for `globus config init`
def setup(self, app): super().setup(app) self.enabled = len(self.cfg.backends) self.default = self.cfg.default if not self.default and self.enabled: self.default = self.cfg.backends[0][0] self.backends_hash = {name: parse.urlparse(loc) for (name, loc) in self.cfg.back...
Parse and prepare the plugin's configuration.
def namedb_get_all_revealed_namespace_ids( self, current_block ): query = "SELECT namespace_id FROM namespaces WHERE op = ? AND reveal_block < ?;" args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE ) namespace_rows = namedb_query_execute( cur, query, args ) ret = [] for namespace_row ...
Get all non-expired revealed namespaces.
def _max_args(self, f):
    """Return the maximum number of arguments accepted by function *f*.

    Uses ``__code__``/``__defaults__``, which exist on python 2.6+ AND
    python 3 (the original ``func_code``/``func_defaults`` spellings are
    python-2-only and raise AttributeError on python 3).

    NOTE(review): ``co_argcount`` already includes defaulted parameters,
    so adding ``len(__defaults__)`` looks like a double count — behavior
    preserved here; confirm callers rely on it before changing.
    """
    if f.__defaults__ is None:
        return f.__code__.co_argcount
    return f.__code__.co_argcount + len(f.__defaults__)
Returns maximum number of arguments accepted by given function.
def expose(self, binder, interface, annotation=None): private_module = self class Provider(object): def get(self): return private_module.private_injector.get_instance( interface, annotation) self.original_binder.bind(interface, annotated_with=a...
Expose the child injector to the parent inject for a binding.
def tracked(self):
    """Return the track payload with its 'jobs' wrapped as Job objects."""
    payload = json.loads(self.client('track'))
    payload['jobs'] = [Job(self, **job) for job in payload['jobs']]
    return payload
Return an array of job objects that are being tracked
def iterate(self, word): for p in reversed(self.positions(word)): if p.data: change, index, cut = p.data if word.isupper(): change = change.upper() c1, c2 = change.split('=') yield word[:p+index] + c1, c2 + word[p+in...
Iterate over all hyphenation possibilities, the longest first.
def calcPosition(self,parent_circle): if r not in self: raise AttributeError("radius must be calculated before position.") if theta not in self: raise AttributeError("theta must be set before position can be calculated.") x_offset = math.cos(t_radians) * (parent_circle.r ...
Position the circle tangent to the parent circle with the line connecting the centers of the two circles meeting the x axis at angle theta.
def add_column(connection, column):
    """Add *column* to the current table, then refresh reflected metadata."""
    ddl = alembic.ddl.base.AddColumn(_State.table.name, column)
    connection.execute(ddl)
    _State.reflect_metadata()
Add a column to the current table.
def _run_arvados(args): assert not args.no_container, "Arvados runs require containers" assert "ARVADOS_API_TOKEN" in os.environ and "ARVADOS_API_HOST" in os.environ, \ "Need to set ARVADOS_API_TOKEN and ARVADOS_API_HOST in environment to run" main_file, json_file, project_name = _get_main_and_json(...
Run CWL on Arvados.
def run_kernel(self, func, gpu_args, instance): logging.debug('run_kernel %s', instance.name) logging.debug('thread block dims (%d, %d, %d)', *instance.threads) logging.debug('grid dims (%d, %d, %d)', *instance.grid) try: self.dev.run_kernel(func, gpu_args, instance.threads, ...
Run a compiled kernel instance on a device
def _did_timeout(self):
    """Called when a request has timed out.

    Flags the timeout and, for async requests, fires the callback;
    otherwise returns self to the synchronous caller.
    """
    bambou_logger.debug('Bambou %s on %s has timeout (timeout=%ss)..' % (self._request.method, self._request.url, self.timeout))
    self._has_timeouted = True
    # 'async' became a reserved keyword in python 3.7, so `self.async` is a
    # SyntaxError there; read the attribute dynamically to keep the name.
    if getattr(self, 'async'):
        self._callback(self)
    else:
        return self
Called when a request has timed out