code: string, lengths 51–2.34k
docstring: string, lengths 11–171
def just_log(*texts, sep=""):
    if config.silent:
        return
    text = _color_sep + "default" + _color_sep2 + sep.join(texts)
    array = text.split(_color_sep)
    for part in array:
        parts = part.split(_color_sep2, 1)
        if len(parts) != 2 or not parts[1]:
            continue
        if not config.color:
            print(parts[1], end='')
        else:
            colors.foreground(parts[0])
            print(parts[1], end='', flush=colors.is_win32)
    if config.color:
        colors.foreground("default")
    print()
Log a text without adding the current time.
def remove_nesting(dom, tag_name):
    for node in dom.getElementsByTagName(tag_name):
        for ancestor in ancestors(node):
            if ancestor is node:
                continue
            if ancestor is dom.documentElement:
                break
            if ancestor.tagName == tag_name:
                unwrap(node)
                break
Unwrap items in the node list that have ancestors with the same tag.
def height(poly):
    num = len(poly) - 1
    if abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]):
        return dist(poly[num], poly[0])
    elif abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]):
        return dist(poly[1], poly[0])
    else:
        return min(dist(poly[num], poly[0]), dist(poly[1], poly[0]))
Height of a polygon poly
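A hedged sanity check of height(); it assumes the module-level dist is plain Euclidean distance (math.dist here), which is an assumption, not confirmed by the source:

import math

dist = math.dist  # assumption: stand-in for the module-level dist used by height()
poly = [(0, 0, 0), (0, 0, 3), (4, 0, 0)]  # (x, y, z) vertices
# |z[-1] - z[0]| = 0 < |z[1] - z[0]| = 3, so the second branch returns dist(poly[1], poly[0]).
assert height(poly) == 3.0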
def clean_email(self):
    email = self.cleaned_data.get("email")
    qs = User.objects.exclude(id=self.instance.id).filter(email=email)
    if len(qs) == 0:
        return email
    raise forms.ValidationError(
        ugettext("This email is already registered"))
Ensure the email address is not already registered.
def __init_chunked_upload(self):
    headers = {
        'x-ton-content-type': self.content_type,
        'x-ton-content-length': str(self._file_size),
        'x-ton-expires': http_time(
            self.options.get('x-ton-expires', self._DEFAULT_EXPIRE)),
        'content-length': str(0),
        'content-type': self.content_type
    }
    resource = "{0}{1}?resumable=true".format(
        self._DEFAULT_RESOURCE, self._DEFAULT_BUCKET)
    return Request(self._client, 'post', resource,
                   domain=self._DEFAULT_DOMAIN, headers=headers).perform()
Initialization for a multi-chunk upload.
def guess_path_encoding(file_path, default=DEFAULT_ENCODING):
    with io.open(file_path, 'rb') as fh:
        return guess_file_encoding(fh, default=default)
Wrapper to open that damn file for you, lazy bastard.
def exit(self, signal=None, frame=None):
    self.input_channel.close()
    self.client_queue.close()
    self.connection.close()
    log.info("Worker exiting")
    sys.exit(0)
Properly close the AMQP connections
def execute(self, **kwargs):
    headers = self.header.copy()
    # Reconstructed: the soapaction value was truncated in the source; the
    # conventional form for this call is '%s#%s' % (service type, action name).
    headers['soapaction'] = '%s#%s' % (self.service_type, self.name)
    data = self.envelope.strip() % self._body_builder(kwargs)
    url = 'http://%s:%s%s' % (self.address, self.port, self.control_url)
    auth = None
    if self.password:
        auth = HTTPDigestAuth(self.user, self.password)
    response = requests.post(url, data=data, headers=headers, auth=auth)
    result = self.parse_response(response.content)
    return result
Calls the FritzBox action and returns a dictionary with the arguments.
def sendgmail(self, subject, recipients, plaintext, htmltext=None, cc=None,
              debug=False, useMIMEMultipart=True,
              gmail_account='kortemmelab@gmail.com', pw_filepath=None):
    smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
    smtpserver.ehlo()
    smtpserver.starttls()
    smtpserver.ehlo()  # was a bare attribute access; the call was missing its parentheses
    gmail_account = 'kortemmelab@gmail.com'
    if pw_filepath:
        smtpserver.login(gmail_account, read_file(pw_filepath))
    else:
        smtpserver.login(gmail_account, read_file('pw'))
    for recipient in recipients:
        if htmltext:
            msg = MIMEText(htmltext, 'html')
            msg['From'] = gmail_account
            msg['To'] = recipient
            msg['Subject'] = subject
            smtpserver.sendmail(gmail_account, recipient, msg.as_string())
        else:
            header = ('To:' + recipient + '\n' + 'From: ' + gmail_account +
                      '\n' + 'Subject:' + subject + '\n')
            msg = header + '\n ' + plaintext + '\n\n'
            smtpserver.sendmail(gmail_account, recipient, msg)
    smtpserver.close()
For this function to work, the password for the gmail user must be colocated with this file or passed in.
def value_loss(self, model, observations, discounted_rewards):
    value_outputs = model.value(observations)
    value_loss = 0.5 * F.mse_loss(value_outputs, discounted_rewards)
    return value_loss
Loss of value estimator
def substring_search(query, list_of_strings, limit_results=DEFAULT_LIMIT):
    matching = []
    query_words = query.split(' ')
    query_words.sort(key=len, reverse=True)
    counter = 0
    for s in list_of_strings:
        target_words = s.split(' ')
        if anyword_substring_search(target_words, query_words):
            matching.append(s)
            counter += 1
            if counter == limit_results:
                break
    return matching
main function to call for searching
def palindromic_substrings_iter(s):
    if not s:
        yield []
        return
    for i in range(len(s), 0, -1):
        sub = s[:i]
        if sub == sub[::-1]:
            for rest in palindromic_substrings_iter(s[i:]):
                yield [sub] + rest
A slightly more Pythonic approach with a recursive generator
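A hedged usage sketch (illustrative, not part of the original source); the generator yields every decomposition of the input into palindromic substrings, trying longer prefixes first:

result = list(palindromic_substrings_iter('aab'))
# 'aa' + 'b' is found before 'a' + 'a' + 'b' because prefixes are tried longest-first.
assert result == [['aa', 'b'], ['a', 'a', 'b']]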
def copy(self):
    missing = object()
    result = object.__new__(self.__class__)
    for name in self.__slots__:
        val = getattr(self, name, missing)
        if val is not missing:
            setattr(result, name, val)
    return result
Create a flat copy of the dict.
def recentEvents(self):
    return Event.objects.filter(
        Q(pk__in=self.individualEvents.values_list('pk', flat=True)) |
        Q(session__in=self.eventSessions.all()) |
        Q(publicevent__category__in=self.eventCategories.all()) |
        Q(series__category__in=self.seriesCategories.all())
    ).filter(
        Q(startTime__lte=timezone.now() + timedelta(days=60)) &
        Q(endTime__gte=timezone.now() - timedelta(days=60))
    )
Get the set of recent and upcoming events to which this list applies.
def gethash(compiled):
    lines = compiled.splitlines()
    if len(lines) < 3 or not lines[2].startswith(hash_prefix):
        return None
    else:
        return lines[2][len(hash_prefix):]
Retrieve a hash from a header.
def output_image(gandi, image, datacenters, output_keys, justify=14,
                 warn_deprecated=True):
    for key in output_keys:
        if key in image:
            if (key == 'label' and image['visibility'] == 'deprecated'
                    and warn_deprecated):
                image[key] = '%s /!\ DEPRECATED' % image[key]
            output_line(gandi, key, image[key], justify)
    dc_name = 'Nowhere'
    if 'dc' in output_keys:
        for dc in datacenters:
            if dc['id'] == image['datacenter_id']:
                dc_name = dc.get('dc_code', dc.get('iso', ''))
                break
        output_line(gandi, 'datacenter', dc_name, justify)
Helper to output a disk image.
def _ref_bus_angle_constraint(self, buses, Va, xmin, xmax):
    refs = [bus._i for bus in buses if bus.type == REFERENCE]
    Varefs = array([b.v_angle for b in buses if b.type == REFERENCE])
    xmin[Va.i1 - 1 + refs] = Varefs
    xmax[Va.iN - 1 + refs] = Varefs
    return xmin, xmax
Adds a constraint on the reference bus angles.
def _recv_timeout_loop(self):
    while self._detect_time:
        last_wait = time.time()
        self._lock = hub.Event()
        self._lock.wait(timeout=self._detect_time)
        if self._lock.is_set():
            if getattr(self, "_auth_seq_known", 0):
                if last_wait > time.time() + 2 * self._detect_time:
                    self._auth_seq_known = 0
        else:
            LOG.info("[BFD][%s][RECV] BFD Session timed out.",
                     hex(self._local_discr))
            if self._session_state not in [bfd.BFD_STATE_DOWN,
                                           bfd.BFD_STATE_ADMIN_DOWN]:
                self._set_state(bfd.BFD_STATE_DOWN,
                                bfd.BFD_DIAG_CTRL_DETECT_TIME_EXPIRED)
            if getattr(self, "_auth_seq_known", 0):
                self._auth_seq_known = 0
A loop to check timeout of receiving remote BFD packet.
def merge_bytes(binder_strings):
    output = None
    for byte_string in binder_strings:
        binder = Binder().from_bytes(byte_string)
        if output is None:
            output = binder
        else:
            output.merge(binder)
    return output.to_bytes()
Concatenate multiple serialized binders into one byte string.
def received_winch(self):
    def process_winch():
        if self._callbacks:
            self._callbacks.terminal_size_changed()

    self.call_from_executor(process_winch)
Notify the event loop that SIGWINCH has been received
def accounts(self) -> AccountsAggregate:
    if not self.__accounts_aggregate:
        self.__accounts_aggregate = AccountsAggregate(self.book)
    return self.__accounts_aggregate
Returns the Accounts aggregate
def edate(ctx, date, months):
    return conversions.to_date_or_datetime(date, ctx) + relativedelta(
        months=conversions.to_integer(months, ctx))
Moves a date by the given number of months
def from_(cls, gsim):
    ltbranch = N('logicTreeBranch', {'branchID': 'b1'},
                 nodes=[N('uncertaintyModel', text=str(gsim)),
                        N('uncertaintyWeight', text='1.0')])
    lt = N('logicTree', {'logicTreeID': 'lt1'},
           nodes=[N('logicTreeBranchingLevel', {'branchingLevelID': 'bl1'},
                    nodes=[N('logicTreeBranchSet',
                             {'applyToTectonicRegionType': '*',
                              'branchSetID': 'bs1',
                              'uncertaintyType': 'gmpeModel'},
                             nodes=[ltbranch])])])
    return cls(repr(gsim), ['*'], ltnode=lt)
Generate a trivial GsimLogicTree from a single GSIM instance.
def read(self, path, encoding=None):
    b = common.read(path)
    if encoding is None:
        encoding = self.file_encoding
    return self.unicode(b, encoding)
Read the template at the given path, and return it as a unicode string.
def getMaskIndices(mask):
    return [
        list(mask).index(True),
        len(mask) - 1 - list(mask)[::-1].index(True)
    ]
get lower and upper index of mask
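A hedged usage example (not from the original source): the two entries are the first and last positions where the mask is True.

mask = [False, True, True, True, False, False]
assert getMaskIndices(mask) == [1, 3]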
def update_context(app, pagename, templatename, context, doctree):
    if doctree is None:
        return
    visitor = _FindTabsDirectiveVisitor(doctree)
    doctree.walk(visitor)
    if not visitor.found_tabs_directive:
        paths = [posixpath.join('_static', 'sphinx_tabs/' + f) for f in FILES]
        if 'css_files' in context:
            context['css_files'] = context['css_files'][:]
            for path in paths:
                if path.endswith('.css'):
                    context['css_files'].remove(path)
        if 'script_files' in context:
            context['script_files'] = context['script_files'][:]
            for path in paths:
                if path.endswith('.js'):
                    context['script_files'].remove(path)
Remove sphinx-tabs CSS and JS asset files if not used in a page
def ftp_get(fin_src, fout):
    assert fin_src[:6] == 'ftp://', fin_src
    dir_full, fin_ftp = os.path.split(fin_src[6:])
    pt0 = dir_full.find('/')
    assert pt0 != -1, pt0
    ftphost = dir_full[:pt0]
    chg_dir = dir_full[pt0 + 1:]
    print('FTP RETR {HOST} {DIR} {SRC} -> {DST}'.format(
        HOST=ftphost, DIR=chg_dir, SRC=fin_ftp, DST=fout))
    ftp = FTP(ftphost)
    ftp.login()
    ftp.cwd(chg_dir)
    cmd = 'RETR {F}'.format(F=fin_ftp)
    ftp.retrbinary(cmd, open(fout, 'wb').write)
    ftp.quit()
Download a file from an ftp server
def bound_elems(elems):
    group_x0 = min(map(lambda l: l.x0, elems))
    group_y0 = min(map(lambda l: l.y0, elems))
    group_x1 = max(map(lambda l: l.x1, elems))
    group_y1 = max(map(lambda l: l.y1, elems))
    return (group_x0, group_y0, group_x1, group_y1)
Finds the minimal bbox that contains all given elems
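A small check of bound_elems, with a hypothetical Box namedtuple standing in for layout elements that expose x0/y0/x1/y1:

from collections import namedtuple

Box = namedtuple('Box', 'x0 y0 x1 y1')  # hypothetical stand-in element type
elems = [Box(1, 2, 5, 6), Box(0, 3, 4, 9)]
assert bound_elems(elems) == (0, 2, 5, 9)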
def _parse_uri_ssh(unt):
    if '@' in unt.netloc:
        user, host_port = unt.netloc.split('@', 1)
    else:
        user, host_port = None, unt.netloc
    if ':' in host_port:
        host, port = host_port.split(':', 1)
    else:
        host, port = host_port, None
    if not user:
        user = None
    if not port:
        port = smart_open_ssh.DEFAULT_PORT
    else:
        port = int(port)
    return Uri(scheme=unt.scheme, uri_path=unt.path, user=user, host=host,
               port=port)
Parse a Uri from a urllib namedtuple.
def initialize_logging(args):
    log_handler = logging.StreamHandler()
    log_formatter = logging.Formatter(
        "%(levelname)s %(asctime)s %(name)s:%(lineno)04d - %(message)s")
    log_handler.setFormatter(log_formatter)
    root_logger = logging.getLogger()
    root_logger.addHandler(log_handler)
    root_logger.setLevel(getattr(logging, args.loglevel))
Configure the root logger with some sensible defaults.
def Handle(self, args, token=None):
    if data_store.RelationalDBEnabled():
        flow_iterator = iteritems(registry.FlowRegistry.FLOW_REGISTRY)
    else:
        flow_iterator = iteritems(registry.AFF4FlowRegistry.FLOW_REGISTRY)
    result = []
    for name, cls in sorted(flow_iterator):
        if not getattr(cls, "category", None):
            continue
        try:
            if self.access_check_fn:
                self.access_check_fn(token.username, name)
        except access_control.UnauthorizedAccess:
            continue
        result.append(ApiFlowDescriptor().InitFromFlowClass(cls, token=token))
    return ApiListFlowDescriptorsResult(items=result)
Renders list of descriptors for all the flows.
def validate_metadata(self, handler):
    if self.meta == 'category':
        new_metadata = self.metadata
        cur_metadata = handler.read_metadata(self.cname)
        if (new_metadata is not None and cur_metadata is not None
                and not array_equivalent(new_metadata, cur_metadata)):
            raise ValueError("cannot append a categorical with "
                             "different categories to the existing")
validate that kind=category does not change the categories
def extract_emails(text):
    text = text.replace(u'\u2024', '.')
    emails = []
    for m in EMAIL_RE.findall(text):
        emails.append(m[0])
    return emails
Return a list of email addresses extracted from the string.
def _print_code(self, code):
    for key in self.variables.keys():
        for arg in self.variables[key]:
            code = code.replace(arg.name, 'self.' + arg.name)
    return code
Prepare code for string writing.
def mark_for_update(self):
    self.pub_statuses.exclude(status=UNPUBLISHED).update(status=NEEDS_UPDATE)
    push_key.delay(self)
Note that a change has been made, so all Statuses need updating.
def find_template(self, name):
    deftemplate = lib.EnvFindDeftemplate(self._env, name.encode())
    if deftemplate == ffi.NULL:
        raise LookupError("Template '%s' not found" % name)
    return Template(self._env, deftemplate)
Find the Template by its name.
def read(self, address, size):
    value = 0x0
    for i in range(0, size):
        value |= self._read_byte(address + i) << (i * 8)
    return value
Read arbitrary size content from memory.
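The loop assembles bytes least-significant first (little-endian); a standalone sketch of the same arithmetic, with _read_byte simulated by a plain dict:

memory = {0x1000: 0x78, 0x1001: 0x56, 0x1002: 0x34, 0x1003: 0x12}
value = 0
for i in range(4):
    value |= memory[0x1000 + i] << (i * 8)
assert value == 0x12345678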
def format_who_when(fields):
    offset = fields[3]
    if offset < 0:
        offset_sign = b'-'
        offset = abs(offset)
    else:
        offset_sign = b'+'
    offset_hours = offset // 3600
    offset_minutes = offset // 60 - offset_hours * 60
    offset_str = offset_sign + ('%02d%02d' % (
        offset_hours, offset_minutes)).encode('ascii')
    name = fields[0]
    if name == b'':
        sep = b''
    else:
        sep = b' '
    name = utf8_bytes_string(name)
    email = fields[1]
    email = utf8_bytes_string(email)
    return b''.join((name, sep, b'<', email, b'> ',
                     ("%d" % fields[2]).encode('ascii'), b' ', offset_str))
Format a tuple of name,email,secs-since-epoch,utc-offset-secs as a string.
def __set_config_value(self, key, value):
    self.check_owner()
    params = {"room": self.room_id, "config": to_json({key: value})}
    resp = self.conn.make_api_call("setRoomConfig", params)
    if "error" in resp:
        raise RuntimeError(f"{resp['error'].get('message') or resp['error']}")
    return resp
Sets a value for a room config
def makeAla(segID, N, CA, C, O, geo):
    CA_CB_length = geo.CA_CB_length
    C_CA_CB_angle = geo.C_CA_CB_angle
    N_C_CA_CB_diangle = geo.N_C_CA_CB_diangle
    carbon_b = calculateCoordinates(N, C, CA, CA_CB_length, C_CA_CB_angle,
                                    N_C_CA_CB_diangle)
    CB = Atom("CB", carbon_b, 0.0, 1.0, " ", " CB", 0, "C")
    res = Residue((' ', segID, ' '), "ALA", ' ')
    res.add(N)
    res.add(CA)
    res.add(C)
    res.add(O)
    res.add(CB)
    return res
Creates an Alanine residue
def merge(self, other):
    new_node = self.copy()
    new_node.size += other.size
    new_node.instruction_addrs += other.instruction_addrs
    if new_node.byte_string is None or other.byte_string is None:
        new_node.byte_string = None
    else:
        new_node.byte_string += other.byte_string
    return new_node
Merges this node with the other, returning a new node that spans the both.
def read(self, size=-1):
    if size == 0:
        return b''
    elif size < 0:
        from_buf = self._read_from_buffer()
        self._current_pos = self._content_length
        return from_buf + self._raw_reader.read()
    if len(self._buffer) >= size:
        return self._read_from_buffer(size)
    if self._eof:
        return self._read_from_buffer()
    self._fill_buffer(size)
    return self._read_from_buffer(size)
Read up to size bytes from the object and return them.
def _parse_xfs_info(data):
    ret = {}
    spr = re.compile(r'\s+')
    entry = None
    for line in [spr.sub(" ", l).strip().replace(", ", " ")
                 for l in data.split("\n")]:
        if not line:
            continue
        nfo = _xfs_info_get_kv(line)
        if not line.startswith("="):
            entry = nfo.pop(0)
            ret[entry[0]] = {'section': entry[(entry[1] != '***' and 1 or 0)]}
        ret[entry[0]].update(dict(nfo))
    return ret
Parse output from "xfs_info" or "xfs_growfs -n".
def _match_type(self, i):
    self.col_match = self.RE_TYPE.match(self._source[i])
    if self.col_match is not None:
        self.section = "types"
        self.el_type = CustomType
        self.el_name = self.col_match.group("name")
        return True
    else:
        return False
Looks at line 'i' to see if the line matches a module user type def.
def build_github_url(repo, branch=None, path='requirements.txt', token=None):
    repo = re.sub(r"^http(s)?://github.com/", "", repo).strip('/')
    if not path:
        path = 'requirements.txt'
    if not branch:
        branch = get_default_branch(repo)
    url = 'https://raw.githubusercontent.com/{}/{}/{}'.format(
        repo, branch, path)
    if token:
        url = '{}?token={}'.format(url, token)
    return url
Builds a URL to a file inside a Github repository.
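A hedged usage example; the repo and branch names are illustrative only, and an explicit branch is passed so get_default_branch is never called:

url = build_github_url('https://github.com/psf/requests', branch='main')
assert url == 'https://raw.githubusercontent.com/psf/requests/main/requirements.txt'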
def _tseitin(ex, auxvarname, auxvars=None):
    if isinstance(ex, Literal):
        return ex, list()
    else:
        if auxvars is None:
            auxvars = list()
        lits = list()
        constraints = list()
        for x in ex.xs:
            lit, subcons = _tseitin(x, auxvarname, auxvars)
            lits.append(lit)
            constraints.extend(subcons)
        auxvarindex = len(auxvars)
        auxvar = exprvar(auxvarname, auxvarindex)
        auxvars.append(auxvar)
        f = ASTOPS[ex.ASTOP](*lits)
        constraints.append((auxvar, f))
        return auxvar, constraints
Convert a factored expression to a literal, and a list of constraints.
def update_pop(self):
    candidates = []
    for ind in self.population:
        candidates.append(self.crossover(ind))
    self._params['model_count'] += len(candidates)
    self.assign_fitnesses(candidates)
    for i in range(len(self.population)):
        if candidates[i].fitness > self.population[i].fitness:
            self.population[i] = candidates[i]
Updates the population according to crossover and fitness criteria.
def _init_org(self):
    self.logger.info(
        "Verifying and refreshing credentials for the specified org: {}.".format(
            self.org_config.name))
    orig_config = self.org_config.config.copy()
    self.org_config.refresh_oauth_token(self.project_config.keychain)
    if self.org_config.config != orig_config:
        self.logger.info("Org info has changed, updating org in keychain")
        self.project_config.keychain.set_org(self.org_config)
Test and refresh credentials to the org specified.
def load_adjusted_array(self, domain, columns, dates, sids, mask):
    out = {}
    for col in columns:
        try:
            loader = self._loaders.get(col)
            if loader is None:
                loader = self._loaders[col.unspecialize()]
        except KeyError:
            raise ValueError("Couldn't find loader for %s" % col)
        out.update(
            loader.load_adjusted_array(domain, [col], dates, sids, mask))
    return out
Load by delegating to sub-loaders.
def simplices(self):
    return [Simplex([self.points[i] for i in v]) for v in self.vertices]
Returns the simplices of the triangulation.
def _get_package_path(self):
    if not self.package:
        return []
    if not hasattr(self, 'package_path'):
        m = __import__(self.package)
        parts = self.package.split('.')[1:]
        self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
    return [self.package_path]
Gets the path of a Python package
def even_mults(start: float, stop: float, n: int) -> np.ndarray:
    "Build log-stepped array from `start` to `stop` in `n` steps."
    mult = stop / start
    step = mult ** (1 / (n - 1))
    return np.array([start * (step ** i) for i in range(n)])
Build log-stepped array from `start` to `stop` in `n` steps.
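A worked example (not from the original source): three log-spaced steps from 1 to 100 give mult = 100 and step = 100 ** (1/2) = 10, so the array is [1, 10, 100].

import numpy as np

assert np.allclose(even_mults(1.0, 100.0, 3), [1.0, 10.0, 100.0])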
def staticdir():
    root = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(root, "static")
Return the location of the static data directory.
def visit_annassign(self, node):
    target = node.target.accept(self)
    annotation = node.annotation.accept(self)
    if node.value is None:
        return "%s: %s" % (target, annotation)
    return "%s: %s = %s" % (target, annotation, node.value.accept(self))
Return an astroid.AnnAssign node as string
def handle_presentation(msg):
    if msg.child_id == SYSTEM_CHILD_ID:
        sensorid = msg.gateway.add_sensor(msg.node_id)
        if sensorid is None:
            return None
        msg.gateway.sensors[msg.node_id].type = msg.sub_type
        msg.gateway.sensors[msg.node_id].protocol_version = msg.payload
        msg.gateway.sensors[msg.node_id].reboot = False
        msg.gateway.alert(msg)
        return msg
    if not msg.gateway.is_sensor(msg.node_id):
        _LOGGER.error('Node %s is unknown, will not add child %s',
                      msg.node_id, msg.child_id)
        return None
    child_id = msg.gateway.sensors[msg.node_id].add_child_sensor(
        msg.child_id, msg.sub_type, msg.payload)
    if child_id is None:
        return None
    msg.gateway.alert(msg)
    return msg
Process a presentation message.
def raw_path_qs(self):
    if not self.raw_query_string:
        return self.raw_path
    return "{}?{}".format(self.raw_path, self.raw_query_string)
Encoded path of URL with query.
def confusion_matrix(self):
    confusion_matrix = self.pixel_classification_sum.astype(np.float)
    confusion_matrix = np.divide(confusion_matrix.T,
                                 self.pixel_truth_sum.T).T
    return confusion_matrix * 100.0
Returns the normalised confusion matrix
def read_mm_header(fh, byteorder, dtype, count, offsetsize):
    mmh = fh.read_record(TIFF.MM_HEADER, byteorder=byteorder)
    mmh = recarray2dict(mmh)
    mmh['Dimensions'] = [
        (bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip())
        for d in mmh['Dimensions']]
    d = mmh['GrayChannel']
    mmh['GrayChannel'] = (
        bytes2str(d[0]).strip(), d[1], d[2], d[3], bytes2str(d[4]).strip())
    return mmh
Read FluoView mm_header tag from file and return as dict.
def delete_all_secrets(cls, user, client_id):
    can_delete = yield cls(client_id=client_id).can_delete(user)
    if not can_delete:
        raise exceptions.Unauthorized('User may not delete {} secrets'
                                      .format(client_id))
    results = yield cls.view.get(key=client_id, include_docs=True)
    if results['rows']:
        db = cls.db_client()
        docs = [{
            '_rev': doc['doc']['_rev'],
            '_id': doc['doc']['_id'],
            '_deleted': True
        } for doc in results['rows']]
        yield db.save_docs(docs)
Delete all of the client's credentials
def extract_stats(neurons, config):
    stats = defaultdict(dict)
    for ns, modes in config['neurite'].items():
        for n in config['neurite_type']:
            n = _NEURITE_MAP[n]
            for mode in modes:
                stat_name = _stat_name(ns, mode)
                stat = eval_stats(nm.get(ns, neurons, neurite_type=n), mode)
                if stat is None or not stat.shape:
                    stats[n.name][stat_name] = stat
                else:
                    assert stat.shape in ((3, ), ), \
                        'Statistic must create a 1x3 result'
                    for i, suffix in enumerate('XYZ'):
                        compound_stat_name = stat_name + '_' + suffix
                        stats[n.name][compound_stat_name] = stat[i]
    for ns, modes in config['neuron'].items():
        for mode in modes:
            stat_name = _stat_name(ns, mode)
            stats[stat_name] = eval_stats(nm.get(ns, neurons), mode)
    return stats
Extract stats from neurons
def _match(self, regex):
    cregex = re.compile(regex)
    for line in self.content.splitlines():
        match = cregex.match(line)
        if match:
            return match
    raise Exception('No "{0}" line in {1}.cpp'.format(
        regex_to_error_msg(regex), self.name))
Find the first line matching regex and return the match object
def run(self):
    self.count += 1
    print('FailTwicePlug: Run number %s' % (self.count))
    if self.count < 3:
        raise RuntimeError('Fails a couple times')
    return True
Increments counter and raises an exception for first two runs.
def write_flash(self, addr, page_buffer, target_page, page_count):
    # Flush out any pending packets before starting.
    pk = None
    pk = self.link.receive_packet(0)
    while pk is not None:
        pk = self.link.receive_packet(0)
    retry_counter = 5
    while ((not pk or pk.header != 0xFF or
            struct.unpack('<BB', pk.data[0:2]) != (addr, 0x18)) and
           retry_counter >= 0):
        pk = CRTPPacket()
        pk.set_header(0xFF, 0xFF)
        pk.data = struct.pack('<BBHHH', addr, 0x18, page_buffer,
                              target_page, page_count)
        self.link.send_packet(pk)
        pk = self.link.receive_packet(1)
        retry_counter -= 1
    if retry_counter < 0:
        self.error_code = -1
        return False
    self.error_code = pk.data[3]
    return pk.data[2] == 1
Initiate flashing of data in the buffer to flash.
def create_hooks(use_tfdbg=False,
                 use_dbgprofile=False,
                 dbgprofile_kwargs=None,
                 use_validation_monitor=False,
                 validation_monitor_kwargs=None,
                 use_early_stopping=False,
                 early_stopping_kwargs=None):
    train_hooks = []
    eval_hooks = []
    if use_tfdbg:
        hook = debug.LocalCLIDebugHook()
        train_hooks.append(hook)
        eval_hooks.append(hook)
    if use_dbgprofile:
        tf.logging.info("Using ProfilerHook")
        defaults = dict(save_steps=10, show_dataflow=True, show_memory=True)
        defaults.update(dbgprofile_kwargs)
        train_hooks.append(tf.train.ProfilerHook(**defaults))
    if use_validation_monitor:
        tf.logging.info("Using ValidationMonitor")
        train_hooks.append(
            tf.contrib.learn.monitors.ValidationMonitor(
                hooks=eval_hooks, **validation_monitor_kwargs))
    if use_early_stopping:
        tf.logging.info("Using EarlyStoppingHook")
        hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs)
        train_hooks.append(hook)
        eval_hooks.append(hook)
    return train_hooks, eval_hooks
Create train and eval hooks for Experiment.
def FilePattern(pattern, settings, **kwargs):
    url = _urlparse(pattern)
    if url.scheme == 'gs':
        return GoogleStorageFilePattern(pattern, settings, **kwargs)
    else:
        assert url.scheme == 'file'
        return LocalFilePattern(pattern, settings, **kwargs)
Factory method returns LocalFilePattern or GoogleStorageFilePattern
def create_index(self):
    self.reset()
    counter = 0
    pre_time = time.time()
    while True:
        if counter % 1000 == 0:
            cur_time = time.time()
            print('time:', cur_time - pre_time, ' count:', counter)
        pos = self.tell()
        cont = self.read()
        if cont is None:
            break
        key = self.key_type(counter)
        self.fidx.write('%s\t%d\n' % (str(key), pos))
        counter = counter + 1
Creates the index file from open record file
def _production(self):
    return (self._nuclear + self._diesel + self._gas + self._wind +
            self._combined + self._vapor + self._solar + self._hydraulic +
            self._carbon + self._waste + self._other)
Calculate total energy production. Not rounded
def _set(self, value):
    super(AttachmentsField, self)._set(value)
    self._cursor = None
Override setter, allow clearing cursor
def _load_config_file(self):
    LOGGER.info('Loading configuration from %s', self._file_path)
    if self._file_path.endswith('json'):
        config = self._load_json_config()
    else:
        config = self._load_yaml_config()
    for key, value in [(k, v) for k, v in config.items()]:
        if key.title() != key:
            config[key.title()] = value
            del config[key]
    return flatdict.FlatDict(config)
Load the configuration file into memory, returning the content.
def _requested_name(self, name, action=None, func=None):
    if name is not None:
        if name in self._used_names:
            n = 2
            while True:
                pn = name + '_' + str(n)
                if pn not in self._used_names:
                    self._used_names.add(pn)
                    return pn
                n += 1
        else:
            self._used_names.add(name)
            return name
    if func is not None:
        if hasattr(func, '__name__'):
            name = func.__name__
            if name == '<lambda>':
                name = action + '_lambda'
        elif hasattr(func, '__class__'):
            name = func.__class__.__name__
    if name is None:
        if action is not None:
            name = action
        else:
            name = self.name
    return self._requested_name(name)
Create a unique name for an operator or a stream.
def _get_comments_to_export(self, last_export_id=None):
    qs = comments.get_model().objects.order_by('pk').filter(
        is_public=True, is_removed=False)
    if last_export_id is not None:
        print("Resuming after comment %s" % str(last_export_id))
        qs = qs.filter(id__gt=last_export_id)
    return qs
Return comments which should be exported.
def _git_dir(repo, path):
    name = "%s" % (path,)
    if name in ['HEAD', 'ORIG_HEAD', 'FETCH_HEAD', 'index', 'logs']:
        return repo.git_dir
    return repo.common_dir
Find the git dir that's appropriate for the path
def _center_window(self, result, window):
    if self.axis > result.ndim - 1:
        raise ValueError("Requested axis is larger than no. of argument "
                         "dimensions")
    offset = _offset(window, True)
    if offset > 0:
        if isinstance(result, (ABCSeries, ABCDataFrame)):
            result = result.slice_shift(-offset, axis=self.axis)
        else:
            lead_indexer = [slice(None)] * result.ndim
            lead_indexer[self.axis] = slice(offset, None)
            result = np.copy(result[tuple(lead_indexer)])
    return result
Center the result in the window.
def _put(self, uri, data):
    headers = self._get_headers()
    logging.debug("URI=" + str(uri))
    logging.debug("BODY=" + json.dumps(data))
    response = self.session.put(uri, headers=headers, data=json.dumps(data))
    if response.status_code in [201, 204]:
        return data
    else:
        logging.error(response.content)
        response.raise_for_status()
Simple PUT operation for a given path.
def _toggle_term_protect(name, value):
    instance_id = _get_node(name)['instanceId']
    params = {'Action': 'ModifyInstanceAttribute',
              'InstanceId': instance_id,
              'DisableApiTermination.Value': value}
    result = aws.query(params,
                       location=get_location(),
                       provider=get_provider(),
                       return_root=True,
                       opts=__opts__,
                       sigver='4')
    return show_term_protect(name=name, instance_id=instance_id,
                             call='action')
Enable or Disable termination protection on a node
def classify(self, table, weighted_choice=False, transform=None):
    assert table.shape[1] == self.numgrp
    if weighted_choice:
        if transform is not None:
            probs = transform_fn(table.copy(), transform)
        else:
            probs = table.copy()
        cmprobs = probs.cumsum(1)
        logger.info('Probabilities\n{}'.format(probs))
        r = np.random.random(cmprobs.shape[0])
        search = np.apply_along_axis(np.searchsorted, 1, cmprobs, r)
        assignment = np.diag(search)
    else:
        probs = table
        assignment = np.where(probs == probs.max(1)[:, np.newaxis])[1]
    logger.info('Assignment\n{}'.format(assignment))
    assignment = self._fill_empty_groups(probs, assignment)
    new_partition = Partition(tuple(assignment))
    self.set_partition(new_partition)
The Classification step of the CEM algorithm
def _validate_operator_name(operator, supported_operators):
    if not isinstance(operator, six.text_type):
        raise TypeError(u'Expected operator as unicode string, got: {} {}'.format(
            type(operator).__name__, operator))
    if operator not in supported_operators:
        raise GraphQLCompilationError(u'Unrecognized operator: {}'.format(operator))
Ensure the named operator is valid and supported.
def sameAddr(self, ha, ha2) -> bool:
    if ha == ha2:
        return True
    if ha[1] != ha2[1]:
        return False
    return ha[0] in self.localips and ha2[0] in self.localips
Check whether the two arguments correspond to the same address
def union(self, other):
    return Interval(min(self.low, other.low), max(self.high, other.high))
Union the current range with other.
def tags(self):
    tags = self.workbench.get_all_tags()
    if not tags:
        return
    tag_df = pd.DataFrame(tags)
    tag_df = self.vectorize(tag_df, 'tags')
    print '\n%sSamples in Database%s' % (color.LightPurple, color.Normal)
    self.top_corr(tag_df)
Display tag information for all samples in database
def extract_metrics(self, metrics_files):
    extension_maps = dict(
        align_metrics=(self._parse_align_metrics, "AL"),
        dup_metrics=(self._parse_dup_metrics, "DUP"),
        hs_metrics=(self._parse_hybrid_metrics, "HS"),
        insert_metrics=(self._parse_insert_metrics, "INS"),
        rnaseq_metrics=(self._parse_rnaseq_metrics, "RNA"))
    all_metrics = dict()
    for fname in metrics_files:
        ext = os.path.splitext(fname)[-1][1:]
        try:
            parse_fn, prefix = extension_maps[ext]
        except KeyError:
            parse_fn = None
        if parse_fn:
            with open(fname) as in_handle:
                for key, val in parse_fn(in_handle).items():
                    if not key.startswith(prefix):
                        key = "%s_%s" % (prefix, key)
                    all_metrics[key] = val
    return all_metrics
Return summary information for a lane of metrics files.
def total_write_throughput(self):
    total = self.write_throughput
    for index in itervalues(self.global_indexes):
        total += index.write_throughput
    return total
Combined write throughput of table and global indexes
def dedent(s):
    head, _, tail = s.partition('\n')
    dedented_tail = textwrap.dedent(tail)
    result = "{head}\n{tail}".format(head=head, tail=dedented_tail)
    return result
Removes the hanging indent from all lines after the first line of a string.
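A hedged usage example: only the lines after the first are dedented, so an unindented head line is preserved.

import textwrap  # dedent() above relies on the defining module importing textwrap

s = "head\n    indented\n    also indented"
assert dedent(s) == "head\nindented\nalso indented"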
def mtf_unitransformer_base():
    hparams = mtf_transformer2_base()
    hparams.add_hparam("autoregressive", True)
    hparams.add_hparam("layers", ["self_att", "drd"] * 6)
    hparams.add_hparam("num_heads", 8)
    hparams.add_hparam("num_memory_heads", 0)
    hparams.add_hparam("shared_kv", False)
    hparams.add_hparam("local_attention_radius", 128)
    return hparams
Hyperparameters for single-stack Transformer.
def nonwhitelisted_allowed_principals(self, whitelist=None):
    if not whitelist:
        return []
    nonwhitelisted = []
    for statement in self.statements:
        if (statement.non_whitelisted_principals(whitelist)
                and statement.effect == "Allow"):
            nonwhitelisted.append(statement)
    return nonwhitelisted
Find non-whitelisted allowed principals.
def process(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
    if self.logger:
        self.logger.info("Reading %s", self.path)
    self.readInstances(makeGlyphs=makeGlyphs, makeKerning=makeKerning,
                       makeInfo=makeInfo)
    self.reportProgress("done", 'stop')
Process the input file and generate the instances.
def declare_namespace(packageName):
    _imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            return
        path = sys.path
        parent, _, _ = packageName.rpartition('.')
        if parent:
            declare_namespace(parent)
            if parent not in _namespace_packages:
                __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        _namespace_packages.setdefault(parent or None, []).append(packageName)
        _namespace_packages.setdefault(packageName, [])
        for path_item in path:
            _handle_ns(packageName, path_item)
    finally:
        _imp.release_lock()
Declare that package 'packageName' is a namespace package
def add_menu(self, name, link=None):
    if self.menu_began:
        if self.menu_separator_tag:
            self.write(self.menu_separator_tag)
    else:
        self.write('<ul class="horizontal">')
        self.menu_began = True
    self.write('<li>')
    if link:
        self.write('<a href="{}">', self._rel(link))
    self.write(name)
    if link:
        self.write('</a>')
    self.write('</li>')
Adds a menu entry, will create it if it doesn't exist yet
def _convertTZ(self):
    tz = timezone.get_current_timezone()
    dtstart = self['DTSTART']
    dtend = self['DTEND']
    if dtstart.zone() == "UTC":
        dtstart.dt = dtstart.dt.astimezone(tz)
    if dtend.zone() == "UTC":
        dtend.dt = dtend.dt.astimezone(tz)
Will convert UTC datetimes to the current local timezone
def include_raw_constructor(self, loader, node):
    path = convert_path(node.value)
    with open(path, 'r') as f:
        config = f.read()
    config = self.inject_include_info(path, config,
                                      include_type='include-raw')
    self.add_file(path, config)
    return config
Called when PyYaml encounters '!include-raw'
def strip_ip_port(ip_address):
    if '.' in ip_address:
        cleaned_ip = ip_address.split(':')[0]
    elif ']:' in ip_address:
        cleaned_ip = ip_address.rpartition(':')[0][1:-1]
    else:
        cleaned_ip = ip_address
    return cleaned_ip
Strips the port from an IPv4 or IPv6 address, returns a unicode object.
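Hedged usage examples (illustrative addresses): dotted addresses take the first branch, bracketed IPv6 the second, and bare IPv6 passes through unchanged.

assert strip_ip_port('192.168.0.1:8080') == '192.168.0.1'    # IPv4 with port
assert strip_ip_port('[2001:db8::1]:443') == '2001:db8::1'   # bracketed IPv6 with port
assert strip_ip_port('2001:db8::1') == '2001:db8::1'         # bare IPv6, unchanged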
def _get_cu_and_fu_status(self):
    headers = HEADERS.copy()
    headers['Accept'] = '*/*'
    headers['X-Requested-With'] = 'XMLHttpRequest'
    headers['X-CSRFToken'] = self._parent.csrftoken
    args = ('?controller_serial=' + self.serial +
            '&faucet_serial=' + self.faucet.serial)
    req = self._parent.client.get(STATUS_ENDPOINT + args, headers=headers)
    if req.status_code == 403:
        self._parent.login()
        self.update()
    elif req.status_code == 200:
        self.attributes = req.json()
    else:
        req.raise_for_status()
Submit GET request to update information.
def retract(self, e, a, v):
    ta = datetime.datetime.now()
    ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
    rs = self.tx(ret)
    tb = datetime.datetime.now() - ta
    print cl('<<< retracted %s,%s,%s in %sms'
             % (e, a, v, tb.microseconds / 1000.0), 'cyan')
    return rs
Retract the value of an attribute.
def populateFromRow(self, callSetRecord):
    self._biosampleId = callSetRecord.biosampleid
    self.setAttributesJson(callSetRecord.attributes)
Populates this CallSet from the specified DB row.
def parse_output(self, line):
    try:
        key, value = line.split(":")
        self.update_value(key.strip(), value.strip())
    except ValueError:
        pass
Convert output to key value pairs
def combine_recs(rec_list, key):
    final_recs = {}
    for rec in rec_list:
        rec_key = rec[key]
        if rec_key in final_recs:
            for k, v in rec.iteritems():
                if k in final_recs[rec_key] and final_recs[rec_key][k] != v:
                    raise Exception("Mis-match for key '%s'" % k)
                final_recs[rec_key][k] = v
        else:
            final_recs[rec_key] = rec
    return final_recs.values()
Use a common key to combine a list of recs
def worker(namespace, name, branch='master'):
    with repository(namespace, name, branch) as (path, latest, cache):
        if (cache.get(latest, None) and
                json.loads(cache[latest])['status'] != 'starting'):
            return 'Build already started'
        data = {'status': 'in_progress', 'result': ''}
        cache[latest] = json.dumps(data)
        test_output = tox(_iter=True)
        try:
            for line in test_output:
                data['result'] += line
                cache[latest] = json.dumps(data)
            data['build_success'] = True
        except Exception:
            data['build_success'] = False
        data['status'] = 'complete'
        cache[latest] = json.dumps(data)
        return 'Build successfully completed'
The simple_ci background worker process
def possible_completions(self, e):
    u"""List the possible completions of the text before point."""
    completions = self._get_completions()
    self._display_completions(completions)
    self.finalize()
List the possible completions of the text before point.
def strictly_increasing(values):
    return all(x < y for x, y in zip(values, values[1:]))
True if values are strictly increasing.
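Hedged usage examples (not from the original source); note the empty-sequence case is vacuously true because zip() yields no pairs.

assert strictly_increasing([1, 2, 3]) is True
assert strictly_increasing([1, 2, 2]) is False
assert strictly_increasing([]) is True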
def cli(env):
    table = formatting.Table([
        'Id', 'Name', 'Created', 'Expiration', 'Status',
        'Package Name', 'Package Id'
    ])
    table.align['Name'] = 'l'
    table.align['Package Name'] = 'r'
    table.align['Package Id'] = 'l'
    manager = ordering.OrderingManager(env.client)
    items = manager.get_quotes()
    for item in items:
        package = item['order']['items'][0]['package']
        table.add_row([
            item.get('id'),
            item.get('name'),
            clean_time(item.get('createDate')),
            clean_time(item.get('modifyDate')),
            item.get('status'),
            package.get('keyName'),
            package.get('id')
        ])
    env.fout(table)
List all active quotes on an account