text
stringlengths
78
104k
score
float64
0
0.18
def primary_avatar(user, size=AVATAR_DEFAULT_SIZE): """ This tag tries to get the default avatar for a user without doing any db requests. It achieve this by linking to a special view that will do all the work for us. If that special view is then cached by a CDN for instance, we will avoid many db calls. """ alt = unicode(user) url = reverse('avatar_render_primary', kwargs={'user' : user, 'size' : size}) return """<img src="%s" alt="%s" />""" % (url, alt, )
0.011858
def apply_trans_rot(ampal, translation, angle, axis, point, radians=False): """Applies a translation and rotation to an AMPAL object.""" if not numpy.isclose(angle, 0.0): ampal.rotate(angle=angle, axis=axis, point=point, radians=radians) ampal.translate(vector=translation) return
0.003289
def ximshow_unrectified(self, slitlet2d): """Display unrectified image with spectrails and frontiers. Parameters ---------- slitlet2d : numpy array Array containing the unrectified slitlet image. """ title = "Slitlet#" + str(self.islitlet) ax = ximshow(slitlet2d, title=title, first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig), show=False) xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1) ylower = self.list_spectrails[0](xdum) ax.plot(xdum, ylower, 'b-') ymiddle = self.list_spectrails[1](xdum) ax.plot(xdum, ymiddle, 'b--') yupper = self.list_spectrails[2](xdum) ax.plot(xdum, yupper, 'b-') ylower_frontier = self.list_frontiers[0](xdum) ax.plot(xdum, ylower_frontier, 'b:') yupper_frontier = self.list_frontiers[1](xdum) ax.plot(xdum, yupper_frontier, 'b:') pause_debugplot(debugplot=self.debugplot, pltshow=True)
0.001947
def gen_toy(f, nsample, bound, accuracy=10000, quiet=True, **kwd): """ generate ntoy :param f: :param nsample: :param ntoy: :param bound: :param accuracy: :param quiet: :param kwd: the rest of keyword argument will be passed to f :return: numpy.ndarray """ # based on inverting cdf this is fast but you will need to give it a reasonable range # unlike roofit which is based on accept reject vnames = describe(f) if not quiet: print(vnames) my_arg = [kwd[v] for v in vnames[1:]] # random number # if accuracy is None: accuracy=10*numtoys r = npr.random_sample(nsample) x = np.linspace(bound[0], bound[1], accuracy) pdf = _vector_apply(f, x, tuple(my_arg)) cdf = compute_cdf(pdf, x) if cdf[-1] < 0.01: warn(SmallIntegralWarning('Integral for given funcition is' ' really low. Did you give it a reasonable range?')) cdfnorm = cdf[-1] cdf /= cdfnorm # now convert that to toy ret = invert_cdf(r, cdf, x) if not quiet: # move this to plotting from matplotlib import pyplot as plt plt.figure() plt.title('comparison') numbin = 100 h, e = np.histogram(ret, bins=numbin) mp = (e[1:] + e[:-1]) / 2. err = np.sqrt(h) plt.errorbar(mp, h, err, fmt='.b') bw = e[1] - e[0] y = pdf * len(ret) / cdfnorm * bw ylow = y + np.sqrt(y) yhigh = y - np.sqrt(y) plt.plot(x, y, label='pdf', color='r') plt.fill_between(x, yhigh, ylow, color='g', alpha=0.2) plt.grid(True) plt.xlim(bound) plt.ylim(ymin=0) return ret
0.001762
def returner(load): ''' Return data to couchbase bucket ''' cb_ = _get_connection() hn_key = '{0}/{1}'.format(load['jid'], load['id']) try: ret_doc = {'return': load['return'], 'full_ret': salt.utils.json.dumps(load)} cb_.add(hn_key, ret_doc, ttl=_get_ttl(), ) except couchbase.exceptions.KeyExistsError: log.error( 'An extra return was detected from minion %s, please verify ' 'the minion, this could be a replay attack', load['id'] ) return False
0.006601
def get_language_tabs(self): """ Determine the language tabs to show. """ current_language = self.get_current_language() if self.object: available_languages = list(self.object.get_available_languages()) else: available_languages = [] return get_language_tabs(self.request, current_language, available_languages)
0.007653
def login(self, user, password, exe_path, comm_password=None, **kwargs): """ :param user: 用户名 :param password: 密码 :param exe_path: 客户端路径, 类似 :param comm_password: :param kwargs: :return: """ if comm_password is None: raise ValueError("华泰必须设置通讯密码") try: self._app = pywinauto.Application().connect( path=self._run_exe_path(exe_path), timeout=1 ) # pylint: disable=broad-except except Exception: self._app = pywinauto.Application().start(exe_path) # wait login window ready while True: try: self._app.top_window().Edit1.wait("ready") break except RuntimeError: pass self._app.top_window().Edit1.type_keys(user) self._app.top_window().Edit2.type_keys(password) self._app.top_window().Edit3.type_keys(comm_password) self._app.top_window().button0.click() # detect login is success or not self._app.top_window().wait_not("exists", 10) self._app = pywinauto.Application().connect( path=self._run_exe_path(exe_path), timeout=10 ) self._close_prompt_windows() self._main = self._app.window(title="网上股票交易系统5.0")
0.001375
def _get_variants(data): """Retrieve variants from CWL and standard inputs for organizing variants. """ active_vs = [] if "variants" in data: variants = data["variants"] # CWL based list of variants if isinstance(variants, dict) and "samples" in variants: variants = variants["samples"] for v in variants: # CWL -- a single variant file if isinstance(v, six.string_types) and os.path.exists(v): active_vs.append(_add_filename_details(v)) elif (isinstance(v, (list, tuple)) and len(v) > 0 and isinstance(v[0], six.string_types) and os.path.exists(v[0])): for subv in v: active_vs.append(_add_filename_details(subv)) elif isinstance(v, dict): if v.get("vrn_file"): active_vs.append(v) elif v.get("population"): vrnfile = v.get("population").get("vcf") active_vs.append(_add_filename_details(vrnfile)) elif v.get("vcf"): active_vs.append(_add_filename_details(v.get("vcf"))) return active_vs
0.000835
def rinse_rpnexp(self, rpnexp, rpndict): """ replace valid keyword of rpnexp from rpndict e.g. rpnexp = 'b a /', rpndict = {'b': 10} then after rinsing, rpnexp = '10 a /' return rinsed rpnexp """ for wd in rpnexp.split(): if wd in rpndict: try: val = float(rpndict[wd]) rpnexp = rpnexp.replace(wd, str(val)) except: pass return rpnexp
0.005917
def add(self, name='', type='', agent='', scanner='', location='', language='en', *args, **kwargs): """ Simplified add for the most common options. Parameters: name (str): Name of the library agent (str): Example com.plexapp.agents.imdb type (str): movie, show, # check me location (str): /path/to/files language (str): Two letter language fx en kwargs (dict): Advanced options should be passed as a dict. where the id is the key. **Photo Preferences** * **agent** (str): com.plexapp.agents.none * **enableAutoPhotoTags** (bool): Tag photos. Default value false. * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true. * **includeInGlobal** (bool): Include in dashboard. Default value true. * **scanner** (str): Plex Photo Scanner **Movie Preferences** * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true. * **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true. * **includeInGlobal** (bool): Include in dashboard. Default value true. * **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner **IMDB Movie Options** (com.plexapp.agents.imdb) * **title** (bool): Localized titles. Default value false. * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true. * **only_trailers** (bool): Skip extras which aren't trailers. Default value false. * **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false. * **native_subs** (bool): Include extras with subtitles in Library language. Default value false. * **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database. * **ratings** (int): Ratings Source, Default value 0 Possible options: 0:Rotten Tomatoes, 1:IMDb, 2:The Movie Database. 
* **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database. * **country** (int): Default value 46 Possible options 0:Argentina, 1:Australia, 2:Austria, 3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica, 11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela. * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **usage** (bool): Send anonymous usage data to Plex. Default value true. **TheMovieDB Movie Options** (com.plexapp.agents.themoviedb) * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **country** (int): Country (used for release date and content rating). 
Default value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal, 36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela. **Show Preferences** * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.thetvdb, com.plexapp.agents.themoviedb * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true. * **episodeSort** (int): Episode order. Default -1 Possible options: 0:Oldest first, 1:Newest first. * **flattenSeasons** (int): Seasons. Default value 0 Possible options: 0:Show,1:Hide. * **includeInGlobal** (bool): Include in dashboard. Default value true. * **scanner** (str): Plex Series Scanner **TheTVDB Show Options** (com.plexapp.agents.thetvdb) * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true. * **native_subs** (bool): Include extras with subtitles in Library language. Default value false. **TheMovieDB Show Options** (com.plexapp.agents.themoviedb) * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **country** (int): Country (used for release date and content rating). 
Default value 47 options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal, 36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela. **Other Video Preferences** * **agent** (str): com.plexapp.agents.none, com.plexapp.agents.imdb, com.plexapp.agents.themoviedb * **enableBIFGeneration** (bool): Enable video preview thumbnails. Default value true. * **enableCinemaTrailers** (bool): Enable Cinema Trailers. Default value true. * **includeInGlobal** (bool): Include in dashboard. Default value true. * **scanner** (str): Plex Movie Scanner, Plex Video Files Scanner **IMDB Other Video Options** (com.plexapp.agents.imdb) * **title** (bool): Localized titles. Default value false. * **extras** (bool): Find trailers and extras automatically (Plex Pass required). Default value true. * **only_trailers** (bool): Skip extras which aren't trailers. Default value false. * **redband** (bool): Use red band (restricted audiences) trailers when available. Default value false. * **native_subs** (bool): Include extras with subtitles in Library language. Default value false. * **cast_list** (int): Cast List Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database. * **ratings** (int): Ratings Source Default value 0 Possible options: 0:Rotten Tomatoes,1:IMDb,2:The Movie Database. * **summary** (int): Plot Summary Source: Default value 1 Possible options: 0:IMDb,1:The Movie Database. 
* **country** (int): Country: Default value 46 Possible options: 0:Argentina, 1:Australia, 2:Austria, 3:Belgium, 4:Belize, 5:Bolivia, 6:Brazil, 7:Canada, 8:Chile, 9:Colombia, 10:Costa Rica, 11:Czech Republic, 12:Denmark, 13:Dominican Republic, 14:Ecuador, 15:El Salvador, 16:France, 17:Germany, 18:Guatemala, 19:Honduras, 20:Hong Kong SAR, 21:Ireland, 22:Italy, 23:Jamaica, 24:Korea, 25:Liechtenstein, 26:Luxembourg, 27:Mexico, 28:Netherlands, 29:New Zealand, 30:Nicaragua, 31:Panama, 32:Paraguay, 33:Peru, 34:Portugal, 35:Peoples Republic of China, 36:Puerto Rico, 37:Russia, 38:Singapore, 39:South Africa, 40:Spain, 41:Sweden, 42:Switzerland, 43:Taiwan, 44:Trinidad, 45:United Kingdom, 46:United States, 47:Uruguay, 48:Venezuela. * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **usage** (bool): Send anonymous usage data to Plex. Default value true. **TheMovieDB Other Video Options** (com.plexapp.agents.themoviedb) * **collections** (bool): Use collection info from The Movie Database. Default value false. * **localart** (bool): Prefer artwork based on library language. Default value true. * **adult** (bool): Include adult content. Default value false. * **country** (int): Country (used for release date and content rating). 
Default value 47 Possible options 0:, 1:Argentina, 2:Australia, 3:Austria, 4:Belgium, 5:Belize, 6:Bolivia, 7:Brazil, 8:Canada, 9:Chile, 10:Colombia, 11:Costa Rica, 12:Czech Republic, 13:Denmark, 14:Dominican Republic, 15:Ecuador, 16:El Salvador, 17:France, 18:Germany, 19:Guatemala, 20:Honduras, 21:Hong Kong SAR, 22:Ireland, 23:Italy, 24:Jamaica, 25:Korea, 26:Liechtenstein, 27:Luxembourg, 28:Mexico, 29:Netherlands, 30:New Zealand, 31:Nicaragua, 32:Panama, 33:Paraguay, 34:Peru, 35:Portugal, 36:Peoples Republic of China, 37:Puerto Rico, 38:Russia, 39:Singapore, 40:South Africa, 41:Spain, 42:Sweden, 43:Switzerland, 44:Taiwan, 45:Trinidad, 46:United Kingdom, 47:United States, 48:Uruguay, 49:Venezuela. """ part = '/library/sections?name=%s&type=%s&agent=%s&scanner=%s&language=%s&location=%s' % ( quote_plus(name), type, agent, quote_plus(scanner), language, quote_plus(location)) # noqa E126 if kwargs: part += urlencode(kwargs) return self._server.query(part, method=self._server._session.post)
0.007863
def get_vasp_kpoint_file_sym(structure): """ get a kpoint file ready to be ran in VASP along the symmetry lines of the Brillouin Zone """ output = run_aconvasp_command(["aconvasp", "--kpath"], structure) if "ERROR" in output[1]: raise AconvaspError(output[1]) started = False kpoints_string = "" for line in output[0].split("\n"): #print line if started or line.find("END") != -1: kpoints_string = kpoints_string + line + "\n" if line.find("KPOINTS TO RUN") != -1: started = True if line.find("END") != -1: started = False return kpoints_string
0.00303
def bambus(args): """ %prog bambus bambus.bed bambus.mates total.fasta Insert unplaced scaffolds based on mates. """ from jcvi.utils.iter import pairwise from jcvi.formats.bed import BedLine from jcvi.formats.posmap import MatesFile p = OptionParser(bambus.__doc__) p.add_option("--prefix", default="scaffold", help="Prefix of the unplaced scaffolds [default: %default]") p.add_option("--minlinks", default=3, type="int", help="Minimum number of links to place [default: %default]") opts, args = p.parse_args(args) if len(args) != 3: sys.exit(not p.print_help()) bedfile, matesfile, fastafile = args pf = matesfile.rsplit(".", 1)[0] logfile = pf + ".log" log = open(logfile, "w") mf = MatesFile(matesfile) maxdist = max(x.max for x in mf.libraries.values()) logging.debug("Max separation: {0}".format(maxdist)) prefix = opts.prefix minlinks = opts.minlinks is_unplaced = lambda x: x.startswith(prefix) bed = Bed(bedfile, sorted=False) beds = [] unplaced = defaultdict(list) for a, b in pairwise(bed): aname, bname = a.accn, b.accn aseqid, bseqid = a.seqid, b.seqid if aname not in mf: continue pa, la = mf[aname] if pa != bname: continue ia = is_unplaced(aseqid) ib = is_unplaced(bseqid) if ia == ib: continue if ia: a, b = b, a unplaced[b.seqid].append((a, b)) beds.extend([a, b]) sizes = Sizes(fastafile) candidatebed = Bed() cbeds = [] # For each unplaced scaffold, find most likely placement and orientation for scf, beds in sorted(unplaced.items()): print(file=log) ranges = [] for a, b in beds: aname, astrand = a.accn, a.strand bname, bstrand = b.accn, b.strand aseqid, bseqid = a.seqid, b.seqid pa, lib = mf[aname] print(a, file=log) print(b, file=log) flip_b = (astrand == bstrand) fbstrand = '-' if flip_b else '+' if flip_b: b.reverse_complement(sizes) lmin, lmax = lib.min, lib.max L = sizes.get_size(scf) assert astrand in ('+', '-') if astrand == '+': offset = a.start - b.end sstart, sstop = offset + lmin, offset + lmax else: offset = a.end - b.start + L sstart, sstop = offset - 
lmax, offset - lmin # Prevent out of range error size = sizes.get_size(aseqid) sstart = max(0, sstart) sstop = max(0, sstop) sstart = min(size - 1, sstart) sstop = min(size - 1, sstop) start_range = (aseqid, sstart, sstop, scf, 1, fbstrand) print("*" + "\t".join(str(x) for x in start_range), file=log) ranges.append(start_range) mranges = [x[:3] for x in ranges] # Determine placement by finding the interval with the most support rd = ranges_depth(mranges, sizes.mapping, verbose=False) alldepths = [] for depth in rd: alldepths.extend(depth) print(alldepths, file=log) maxdepth = max(alldepths, key=lambda x: x[-1])[-1] if maxdepth < minlinks: print("Insufficient links ({0} < {1})".format(maxdepth, minlinks), file=log) continue candidates = [x for x in alldepths if x[-1] == maxdepth] nseqids = len(set(x[0] for x in candidates)) if nseqids != 1: msg = "Multiple conflicting candidates found" print(msg, file=log) continue seqid, mmin, mmax, depth = candidates[0] mmin, mmax = range_minmax([x[1:3] for x in candidates]) if mmin >= mmax: msg = "Invalid (min, max) range" print("Invalid (min, max) range", file=log) continue if (mmax - mmin) > maxdist: msg = "(min, max) distance greater than library maxdist" print(msg, file=log) continue # Determine orientation by voting nplus, nminus = 0, 0 arange = (seqid, mmin, mmax) for sid, start, end, sf, sc, fbstrand in ranges: brange = (sid, start, end) if range_overlap(arange, brange): if fbstrand == '+': nplus += 1 else: nminus += 1 fbstrand = '+' if nplus >= nminus else '-' candidate = (seqid, mmin, mmax, scf, depth, fbstrand) bedline = BedLine("\t".join((str(x) for x in candidate))) cbeds.append(bedline) print("Plus: {0}, Minus: {1}".format(nplus, nminus), file=log) print(candidate, file=log) candidatebed.extend(cbeds) logging.debug("A total of {0} scaffolds can be placed.".\ format(len(candidatebed))) log.close() candidatebedfile = pf + ".candidate.bed" candidatebed.print_to_file(candidatebedfile, sorted=True)
0.000968
def include_file(filename): """Load another yaml file (no recursion).""" if os.path.isfile(filename): with open(filename) as handle: return safe_load(handle) raise RuntimeError("Include file %s doesn't exist!" % filename)
0.007326
def pyramid( input_raster, output_dir, pyramid_type=None, output_format=None, resampling_method=None, scale_method=None, zoom=None, bounds=None, overwrite=False, debug=False ): """Create tile pyramid out of input raster.""" bounds = bounds if bounds else None options = dict( pyramid_type=pyramid_type, scale_method=scale_method, output_format=output_format, resampling=resampling_method, zoom=zoom, bounds=bounds, overwrite=overwrite ) raster2pyramid(input_raster, output_dir, options)
0.001661
def process_full_data(fname, rhomin, mass1, mass2, lo_mchirp, hi_mchirp): """Read the zero-lag and time-lag triggers identified by templates in a specified range of chirp mass. Parameters ---------- hdfile: File that stores all the triggers rhomin: float Minimum value of SNR threhold (will need including ifar) mass1: array First mass of the waveform in the template bank mass2: array Second mass of the waveform in the template bank lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for the template Returns ------- dictionary containing foreground triggers and background information """ with h5py.File(fname, 'r') as bulk: id_bkg = bulk['background_exc/template_id'][:] id_fg = bulk['foreground/template_id'][:] mchirp_bkg = mchirp_from_mass1_mass2(mass1[id_bkg], mass2[id_bkg]) bound = np.sign((mchirp_bkg - lo_mchirp) * (hi_mchirp - mchirp_bkg)) idx_bkg = np.where(bound == 1) mchirp_fg = mchirp_from_mass1_mass2(mass1[id_fg], mass2[id_fg]) bound = np.sign((mchirp_fg - lo_mchirp) * (hi_mchirp - mchirp_fg)) idx_fg = np.where(bound == 1) zerolagstat = bulk['foreground/stat'][:][idx_fg] cstat_back_exc = bulk['background_exc/stat'][:][idx_bkg] dec_factors = bulk['background_exc/decimation_factor'][:][idx_bkg] return {'zerolagstat': zerolagstat[zerolagstat > rhomin], 'dec_factors': dec_factors[cstat_back_exc > rhomin], 'cstat_back_exc': cstat_back_exc[cstat_back_exc > rhomin]}
0.001725
def unpack(cls, msg, client, server, request_id): """Parse message and return an `OpMsg`. Takes the client message as bytes, the client and server socket objects, and the client request id. """ payload_document = OrderedDict() flags, = _UNPACK_UINT(msg[:4]) pos = 4 if flags != 0 and flags != 2: raise ValueError('OP_MSG flag must be 0 or 2 not %r' % (flags,)) while pos < len(msg): payload_type, = _UNPACK_BYTE(msg[pos:pos + 1]) pos += 1 payload_size, = _UNPACK_INT(msg[pos:pos + 4]) if payload_type == 0: doc = bson.decode_all(msg[pos:pos + payload_size], CODEC_OPTIONS)[0] payload_document.update(doc) pos += payload_size elif payload_type == 1: section_size, = _UNPACK_INT(msg[pos:pos + 4]) pos += 4 identifier, pos = _get_c_string(msg, pos) # Section starts w/ 4-byte size prefix, identifier ends w/ nil. documents_len = section_size - len(identifier) - 1 - 4 documents = bson.decode_all(msg[pos:pos + documents_len], CODEC_OPTIONS) payload_document[identifier] = documents pos += documents_len database = payload_document['$db'] return OpMsg(payload_document, namespace=database, flags=flags, _client=client, request_id=request_id, _server=server)
0.001856
def save(self, indexes, parent_id): """ Save the selected section. This will save the selected section as well as its direct child pages obtained through the ?child_of query parameter. The ?descendant_of query parameter is probably better suited because it all pages under that part of the tree will be obtained. The problem , however, is that that will require being able to traverse the tree and recreate parent-child relationships after they are imported """ if self.content(): parent = Page.objects.get(id=parent_id) # Save the selected section page response = requests.get( self._base_url + API_PAGES_ENDPOINT + str(indexes[0]) + "/" ) section_page = response.json() self.process_child_section(section_page["id"], parent)
0.002222
def _get_application_settings(self, application_id, settings_key, error_message): """Legacy behaviour""" if not application_id: value = SETTINGS.get(settings_key, empty) if value is empty: raise ImproperlyConfigured(error_message) return value else: msg = ( "LegacySettings does not support application_id. To enable " "multiple application support, use push_notifications.conf.AppSettings." ) raise ImproperlyConfigured(msg)
0.032468
def diff_mtime_map(map1, map2): ''' Is there a change to the mtime map? return a boolean ''' # check if the mtimes are the same if sorted(map1) != sorted(map2): return True # map1 and map2 are guaranteed to have same keys, # so compare mtimes for filename, mtime in six.iteritems(map1): if map2[filename] != mtime: return True # we made it, that means we have no changes return False
0.002208
def Kdiag(self, X, target): """Compute the diagonal of the covariance matrix for X.""" self._K_diag_computations(X) target+= self.variance*self._K_diag_dvar
0.016667
def POST(self, **kwargs): r''' Easily generate keys for a minion and auto-accept the new key Accepts all the same parameters as the :py:func:`key.gen_accept <salt.wheel.key.gen_accept>`. .. note:: A note about ``curl`` Avoid using the ``-i`` flag or HTTP headers will be written and produce an invalid tar file. Example partial kickstart script to bootstrap a new minion: .. code-block:: text %post mkdir -p /etc/salt/pki/minion curl -sSk https://localhost:8000/keys \ -d mid=jerry \ -d username=kickstart \ -d password=kickstart \ -d eauth=pam \ | tar -C /etc/salt/pki/minion -xf - mkdir -p /etc/salt/minion.d printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf %end .. http:post:: /keys Generate a public and private key and return both as a tarball Authentication credentials must be passed in the request. :status 200: |200| :status 401: |401| :status 406: |406| **Example request:** .. code-block:: bash curl -sSk https://localhost:8000/keys \ -d mid=jerry \ -d username=kickstart \ -d password=kickstart \ -d eauth=pam \ -o jerry-salt-keys.tar .. code-block:: text POST /keys HTTP/1.1 Host: localhost:8000 **Example response:** .. 
code-block:: text HTTP/1.1 200 OK Content-Length: 10240 Content-Disposition: attachment; filename="saltkeys-jerry.tar" Content-Type: application/x-tar jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000 ''' lowstate = cherrypy.request.lowstate lowstate[0].update({ 'client': 'wheel', 'fun': 'key.gen_accept', }) if 'mid' in lowstate[0]: lowstate[0]['id_'] = lowstate[0].pop('mid') result = self.exec_lowstate() ret = next(result, {}).get('data', {}).get('return', {}) pub_key = ret.get('pub', '') pub_key_file = tarfile.TarInfo('minion.pub') pub_key_file.size = len(pub_key) priv_key = ret.get('priv', '') priv_key_file = tarfile.TarInfo('minion.pem') priv_key_file.size = len(priv_key) fileobj = BytesIO() tarball = tarfile.open(fileobj=fileobj, mode='w') if six.PY3: pub_key = pub_key.encode(__salt_system_encoding__) priv_key = priv_key.encode(__salt_system_encoding__) tarball.addfile(pub_key_file, BytesIO(pub_key)) tarball.addfile(priv_key_file, BytesIO(priv_key)) tarball.close() headers = cherrypy.response.headers headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_']) headers['Content-Type'] = 'application/x-tar' headers['Content-Length'] = len(fileobj.getvalue()) headers['Cache-Control'] = 'no-cache' fileobj.seek(0) return fileobj
0.001206
def to_sql(self, instring, schema, use_bag_semantics=False): """ Translate a relational algebra string into a SQL string. :param instring: a relational algebra string to translate :param schema: a mapping of relation names to their attributes :param use_bag_semantics: flag for using relational algebra bag semantics :return: a SQL translation string """ root_list = self.to_syntax_tree(instring, schema) return sql_translator.translate(root_list, use_bag_semantics)
0.005566
def get_row_generator(self, ref, cache=None): """Return a row generator for a reference""" from inspect import isgenerator from rowgenerators import get_generator g = get_generator(ref) if not g: raise GenerateError("Cant figure out how to generate rows from {} ref: {}".format(type(ref), ref)) else: return g
0.007813
def extension_supported(request, extension_name): """This method will determine if Cinder supports a given extension name.""" for extension in list_extensions(request): if extension.name == extension_name: return True return False
0.003817
def find_output_with_tag(self, tag): """ Find all files who have tag in self.tags """ # Enforce upper case tag = tag.upper() return FileList([i for i in self if tag in i.tags])
0.008929
def decode_to_shape(inputs, shape, scope): """Encode the given tensor to given image shape.""" with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): x = inputs x = tfl.flatten(x) x = tfl.dense(x, shape[2], activation=None, name="dec_dense") x = tf.expand_dims(x, axis=1) return x
0.009967
def listBlockParents(self, **kwargs): """ API to list block parents. :param block_name: name of block who's parents needs to be found (Required) :type block_name: str :returns: List of dictionaries containing following keys (block_name) :rtype: list of dicts """ validParameters = ['block_name'] requiredParameters = {'forced': validParameters} checkInputParameter(method="listBlockParents", parameters=kwargs.keys(), validParameters=validParameters, requiredParameters=requiredParameters) if isinstance(kwargs["block_name"], list): return self.__callServer("blockparents", data=kwargs, callmethod='POST') else: return self.__callServer("blockparents", params=kwargs)
0.007273
def wait(hotkey=None, suppress=False, trigger_on_release=False): """ Blocks the program execution until the given hotkey is pressed or, if given no parameters, blocks forever. """ if hotkey: lock = _Event() remove = add_hotkey(hotkey, lambda: lock.set(), suppress=suppress, trigger_on_release=trigger_on_release) lock.wait() remove_hotkey(remove) else: while True: _time.sleep(1e6)
0.004376
def run(cmd, stdout=None, stderr=None, **kwargs): """ A blocking wrapper around subprocess.Popen(), but with a simpler interface for the stdout/stderr arguments: stdout=False / stderr=False stdout/stderr will be redirected to /dev/null (or discarded in some other suitable manner) stdout=True / stderr=True stdout/stderr will be captured and returned as a list of lines. stdout=None stdout will be redirected to the python process's stdout, which may be a tty (same as using stdout=subprocess.None) stderr=None: stderr will be redirected to the python process's stderr, which may be a tty (same as using stderr=subprocess.None) stderr="STDOUT" Same as using stderr=subprocess.STDOUT The return value will be a tuple of (exitcode, stdout, stderr) If stdout and/or stderr were not captured, they will be None instead. """ devnull = None try: stdoutfilter = None stderrfilter = None wantstdout = False wantstderr = False if stdout is False: devnull = open('/dev/null', 'w') stdout = devnull elif stdout is True: stdout = subprocess.PIPE wantstdout = True elif callable(stdout): stdoutfilter = partial(stdout) stdout = subprocess.PIPE else: assert stdout is None, "Invalid stdout %r" % stdout if stderr is False: if devnull is None: devnull = open('/dev/null', 'w') stderr = devnull elif stderr is True: stderr = subprocess.PIPE wantstderr = True elif stderr == "STDOUT": stderr = subprocess.STDOUT elif callable(stderr): stderrfilter = partial(stderr) stderr = subprocess.PIPE else: assert stderr is None, "Invalid stderr %r" % stderr if (stdoutfilter or stderrfilter) and asyncio: # run background process asynchronously and filter output as # it is running exitcode, out, err, = _runasync(stdoutfilter, stderrfilter, cmd, stdout=stdout, stderr=stderr, **kwargs) if not wantstdout: out = None if not wantstderr: err = None return exitcode, out, err proc = subprocess.Popen(cmd, stdout=stdout, stderr=stderr, **kwargs) out, err = proc.communicate() if not wantstdout: if stdoutfilter: stdoutfilter(out, 
True) out = None if not wantstderr: if stderrfilter: stderrfilter(err, True) err = None return proc.returncode, out, err finally: if devnull is not None: devnull.close()
0.000326
def remove_objects(code, count=1):
    """ This function replaces objects with OBJECTS_LVALS, returns new code, replacement dict and count.
        count arg is the number that should be added to the LVAL of the first replaced object

        Works recursively: ``{...}`` groups that look like object literals
        (decided by ``is_object``) are pulled out into the replacement dict
        keyed by a generated OBJECT_LVAL placeholder; ``[...]`` groups are
        either kept verbatim (array literals) or recursed into (property
        access brackets).
    """
    replacements = {}  #replacement dict
    br = bracket_split(code, ['{}', '[]'])
    res = ''
    last = ''
    for e in br:
        #test whether e is an object
        if e[0] == '{':
            # Recurse first so nested objects inside the braces are handled.
            n, temp_rep, cand_count = remove_objects(e[1:-1], count)
            # if e was not an object then n should not contain any :
            if is_object(n, last):
                #e was an object
                res += ' ' + OBJECT_LVAL % count
                replacements[OBJECT_LVAL % count] = e
                count += 1
            else:
                # e was just a code block but could contain objects inside
                res += '{%s}' % n
                count = cand_count
                replacements.update(temp_rep)
        elif e[0] == '[':
            if is_array(last):
                res += e  # will be translated later
            else:  # prop get
                n, rep, count = remove_objects(e[1:-1], count)
                res += '[%s]' % n
                replacements.update(rep)
        else:  # e does not contain any objects
            res += e
        last = e  #needed to test for this stipid empty object
    return res, replacements, count
0.00493
def asset(self):
    """Return the asset as an instance of :class:`.asset.Asset`.

    The instance is created lazily on first access and cached under the
    ``"asset"`` key afterwards.
    """
    if not self["asset"]:
        asset_obj = self.asset_class(
            self["symbol"], blockchain_instance=self.blockchain
        )
        self["asset"] = asset_obj
    return self["asset"]
0.007092
def get_public_key(self):
    """
    Parse the scriptSig and extract the public key.

    :return: The single public key controlling this subdomain.
    :raises ValueError: If the key info lookup reported an error, or if
        this is a multisig-controlled subdomain (no single key exists).
    """
    res = self.get_public_key_info()
    if 'error' in res:
        raise ValueError(res['error'])

    if res['type'] != 'singlesig':
        # Bug fix: on this path ``res`` has no 'error' key, so the old
        # ``raise ValueError(res['error'])`` died with a KeyError instead
        # of the documented ValueError.
        raise ValueError('Not a single-signature scriptSig (type: {})'.format(res['type']))

    return res['public_keys'][0]
0.004914
def is_pattern_valid(pattern):
    """Returns True if pattern is valid.

    :param pattern: Normalized pattern.
    is_pattern_valid() assumes pattern to be normalized.
    see: globbing.normalize_pattern
    """
    translator = Globster.pattern_info[Globster.identify(pattern)]["translator"]
    tpattern = '(%s)' % translator(pattern)
    try:
        re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE)
        re_obj.search("")  # force compile of the lazily-built regex
    except Exception:
        return False
    return True
0.006734
def _mysqld_process_checkpoint():
    '''this helper method checks if mysql server is available in the sys
    if not fires up one
    '''
    try:
        # pgrep exits non-zero (raising CalledProcessError) when no mysqld
        # process is running.
        subprocess.check_output("pgrep mysqld", shell=True)
    except Exception:
        logger.warning(
            'Your mysql server is offline, fake2db will try to launch it now!',
            extra=extra_information)
        # close_fds = True argument is the flag that is responsible
        # for Popen to launch the process completely independent
        subprocess.Popen("mysqld", close_fds=True, shell=True)
        # Give the freshly launched server a moment to come up.
        time.sleep(3)
0.001689
def get_aligned_abi_inputs(abi, args):
    """
    Takes a function ABI (``abi``) and a sequence or mapping of args (``args``).
    Returns a list of type strings for the function's inputs and a list of
    arguments which have been aligned to the layout of those types.  The args
    contained in ``args`` may contain nested mappings or sequences corresponding
    to tuple-encoded values in ``abi``.
    """
    input_abis = abi.get('inputs', [])

    if isinstance(args, abc.Mapping):
        # `args` is mapping.  Align values according to abi order.
        args = tuple(args[abi['name']] for abi in input_abis)

    return (
        # Canonical type string for each input (see collapse_if_tuple).
        tuple(collapse_if_tuple(abi) for abi in input_abis),
        # Preserve the container type of ``args`` (tuple stays tuple, list
        # stays list) while aligning each argument to its input ABI.
        type(args)(
            _align_abi_input(abi, arg)
            for abi, arg in zip(input_abis, args)
        ),
    )
0.003672
def parse_file(self, filename):
    """Parse a C source file, and add its blocks to the processor's list.

    Lines are routed by state: outside any block, each line goes through
    ``process_normal_line``; once a block format is active, lines are
    collected until the format's end pattern (or an unexpected line)
    terminates the block.
    """
    self.reset()
    self.filename = filename
    fileinput.close()
    self.format = None
    self.lineno = 0
    self.lines = []

    for line in fileinput.input(filename):
        # strip trailing newlines, important on Windows machines!
        if line[-1] == '\012':
            line = line[0:-1]

        # Idiom fix: compare to None with ``is``, not ``==``.
        if self.format is None:
            self.process_normal_line(line)
        else:
            if self.format.end.match(line):
                # that's a normal block end, add it to 'lines' and
                # create a new block
                self.lines.append(line)
                self.add_block_lines()
            elif self.format.column.match(line):
                # that's a normal column line, add it to 'lines'
                self.lines.append(line)
            else:
                # humm.. this is an unexpected block end,
                # create a new block, but don't process the line
                self.add_block_lines()

                # we need to process the line again
                self.process_normal_line(line)

    # record the last lines
    self.add_block_lines()
0.015487
def wrap_rankboost(job, rsem_files, merged_mhc_calls, transgene_out, univ_options, rankboost_options):
    """
    A wrapper for boost_ranks.

    :param dict rsem_files: Dict of results from rsem
    :param dict merged_mhc_calls: Dict of results from merging mhc peptide binding predictions
    :param dict transgene_out: Dict of results from running Transgene
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict rankboost_options: Options specific to rankboost
    :return: Dict of concise and detailed results for mhci and mhcii
             output_files:
                 |- 'mhcii_rankboost_concise_results.tsv': fsID
                 |- 'mhcii_rankboost_detailed_results.txt': fsID
                 |- 'mhci_rankboost_concise_results.tsv': fsID
                 +- 'mhci_rankboost_detailed_results.txt': fsID
    :rtype: dict
    """
    # Run boost_ranks as a child job so its promised return value can be
    # handed back to the caller via .rv().
    rankboost = job.addChildJobFn(boost_ranks, rsem_files['rsem.isoforms.results'], merged_mhc_calls,
                                  transgene_out, univ_options, rankboost_options)

    return rankboost.rv()
0.005405
def get_motor_offsets(SERVO_OUTPUT_RAW, ofs, motor_ofs):
    '''calculate magnetic field strength from raw magnetometer'''
    import mavutil
    mav = mavutil.mavfile_global

    raw = SERVO_OUTPUT_RAW
    # Average throttle PWM over the four motor outputs.
    motor_pwm = (raw.servo1_raw + raw.servo2_raw + raw.servo3_raw + raw.servo4_raw) * 0.25
    rc3_min = mav.param('RC3_MIN', 1100)
    rc3_max = mav.param('RC3_MAX', 1900)
    # Normalised throttle, clamped to [0, 1].
    motor = (motor_pwm - rc3_min) / (rc3_max - rc3_min)
    motor = min(1.0, max(0.0, motor))

    # The motor-induced magnetic offset scales linearly with throttle.
    return tuple(base + per_motor * motor
                 for base, per_motor in zip(ofs, motor_ofs))
0.002681
def stop_instance(self):
    """Stop the instance for this Streaming Analytics service.

    Returns:
        dict: JSON response for the instance stop operation.
    """
    stop_url = self._get_url('stop_path')
    # Empty JSON body; the URL alone identifies the instance to stop.
    res = self.rest_client.session.put(stop_url, json={})
    # Raise a meaningful error for non-2xx responses before decoding.
    _handle_http_errors(res)
    return res.json()
0.005666
def BL(self, params):
    """
    BL label

    Branch to the label, storing the next instruction in the Link Register
    """
    label = self.get_one_parameter(self.ONE_PARAMETER, params)

    self.check_arguments(label_exists=(label,))

    # TODO check if label is within +- 16 MB

    # BL label
    # Return a closure that performs the branch when the instruction is
    # actually executed.
    def BL_func():
        self.register['LR'] = self.register['PC']  # No need for the + 1, PC already points to the next instruction
        self.register['PC'] = self.labels[label]

    return BL_func
0.005445
def generate_network(nl_model, handler, seed=1234, always_include_props=False, include_connections=True, include_inputs=True, base_dir=None): """ Generate the network model as described in NeuroMLlite in a specific handler, e.g. NeuroMLHandler, PyNNHandler, etc. """ pop_locations = {} cell_objects = {} synapse_objects = {} print_v("Starting net generation for %s%s..." % (nl_model.id, ' (base dir: %s)' % base_dir if base_dir else '')) rng = random.Random(seed) if nl_model.network_reader: exec('from neuromllite.%s import %s' % (nl_model.network_reader.type, nl_model.network_reader.type)) exec('network_reader = %s()' % (nl_model.network_reader.type)) network_reader.parameters = nl_model.network_reader.parameters network_reader.parse(handler) pop_locations = network_reader.get_locations() else: notes = "Generated network: %s" % nl_model.id notes += "\n Generation seed: %i" % (seed) if nl_model.parameters: notes += "\n NeuroMLlite parameters: " for p in nl_model.parameters: notes += "\n %s = %s" % (p, nl_model.parameters[p]) handler.handle_document_start(nl_model.id, notes) temperature = '%sdegC' % nl_model.temperature if nl_model.temperature else None handler.handle_network(nl_model.id, nl_model.notes, temperature=temperature) nml2_doc_temp = _extract_pynn_components_to_neuroml(nl_model) for c in nl_model.cells: if c.neuroml2_source_file: from pyneuroml import pynml nml2_doc = pynml.read_neuroml2_file(_locate_file(c.neuroml2_source_file, base_dir), include_includes=True) cell_objects[c.id] = nml2_doc.get_by_id(c.id) if c.pynn_cell: cell_objects[c.id] = nml2_doc_temp.get_by_id(c.id) for s in nl_model.synapses: if s.neuroml2_source_file: from pyneuroml import pynml nml2_doc = pynml.read_neuroml2_file(_locate_file(s.neuroml2_source_file, base_dir), include_includes=True) synapse_objects[s.id] = nml2_doc.get_by_id(s.id) if s.pynn_synapse: synapse_objects[s.id] = nml2_doc_temp.get_by_id(s.id) for p in nl_model.populations: size = evaluate(p.size, nl_model.parameters) 
properties = p.properties if p.properties else {} if p.random_layout: properties['region'] = p.random_layout.region if not p.random_layout and not p.single_location and not always_include_props: # If there are no positions (abstract network), and <property> # is added to <population>, jLems doesn't like it... (it has difficulty # interpreting pop0[0]/v, etc.) # So better not to give properties... properties = {} if p.notes: handler.handle_population(p.id, p.component, size, cell_objects[p.component] if p.component in cell_objects else None, properties=properties, notes=p.notes) else: handler.handle_population(p.id, p.component, size, cell_objects[p.component] if p.component in cell_objects else None, properties=properties) pop_locations[p.id] = np.zeros((size, 3)) for i in range(size): if p.random_layout: region = nl_model.get_child(p.random_layout.region, 'regions') x = region.x + rng.random() * region.width y = region.y + rng.random() * region.height z = region.z + rng.random() * region.depth pop_locations[p.id][i] = (x, y, z) handler.handle_location(i, p.id, p.component, x, y, z) if p.single_location: loc = p.single_location.location x = loc.x y = loc.y z = loc.z pop_locations[p.id][i] = (x, y, z) handler.handle_location(i, p.id, p.component, x, y, z) if hasattr(handler, 'finalise_population'): handler.finalise_population(p.id) if include_connections: for p in nl_model.projections: type = p.type if p.type else 'projection' handler.handle_projection(p.id, p.presynaptic, p.postsynaptic, p.synapse, synapse_obj=synapse_objects[p.synapse] if p.synapse in synapse_objects else None, pre_synapse_obj=synapse_objects[p.pre_synapse] if p.pre_synapse in synapse_objects else None, type=type) delay = p.delay if p.delay else 0 weight = p.weight if p.weight else 1 conn_count = 0 if p.random_connectivity: for pre_i in range(len(pop_locations[p.presynaptic])): for post_i in range(len(pop_locations[p.postsynaptic])): flip = rng.random() #print("Is cell %i conn to %i, prob %s - 
%s"%(pre_i, post_i, flip, p.random_connectivity.probability)) if flip < p.random_connectivity.probability: weight = evaluate(weight, nl_model.parameters) delay = evaluate(delay, nl_model.parameters) #print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay)) handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, \ pre_i, \ post_i, \ preSegId=0, \ preFract=0.5, \ postSegId=0, \ postFract=0.5, \ delay=delay, \ weight=weight) conn_count += 1 if p.convergent_connectivity: for post_i in range(len(pop_locations[p.postsynaptic])): for count in range(int(p.convergent_connectivity.num_per_post)): found = False while not found: pre_i = int(rng.random()*len(pop_locations[p.presynaptic])) if p.presynaptic==p.postsynaptic and pre_i==post_i: found=False else: found=True weight = evaluate(weight, nl_model.parameters) delay = evaluate(delay, nl_model.parameters) print_v("Adding connection %i (%i->%i; %i to %s of post) with weight: %s, delay: %s"%(conn_count, pre_i, post_i, count, p.convergent_connectivity.num_per_post, weight, delay)) handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, \ pre_i, \ post_i, \ preSegId=0, \ preFract=0.5, \ postSegId=0, \ postFract=0.5, \ delay=delay, \ weight=weight) conn_count += 1 elif p.one_to_one_connector: for i in range(min(len(pop_locations[p.presynaptic]), len(pop_locations[p.postsynaptic]))): weight = evaluate(weight, nl_model.parameters) delay = evaluate(delay, nl_model.parameters) #print_v("Adding connection %i with weight: %s, delay: %s"%(conn_count, weight, delay)) handler.handle_connection(p.id, conn_count, p.presynaptic, p.postsynaptic, p.synapse, \ i, \ i, \ preSegId=0, \ preFract=0.5, \ postSegId=0, \ postFract=0.5, \ delay=delay, \ weight=weight) conn_count += 1 handler.finalise_projection(p.id, p.presynaptic, p.postsynaptic, p.synapse) if include_inputs: for input in nl_model.inputs: handler.handle_input_list(input.id, input.population, 
input.input_source, size=0, input_comp_obj=None) input_count = 0 for i in range(len(pop_locations[input.population])): flip = rng.random() weight = input.weight if input.weight else 1 if flip * 100. < input.percentage: number_per_cell = evaluate(input.number_per_cell, nl_model.parameters) if input.number_per_cell else 1 for j in range(number_per_cell): handler.handle_single_input(input.id, input_count, i, weight=evaluate(weight, nl_model.parameters)) input_count += 1 handler.finalise_input_source(input.id) if hasattr(handler, 'finalise_document'): handler.finalise_document()
0.011326
def take_profit(self, accountID, **kwargs):
    """
    Shortcut to create a Take Profit Order in an Account

    Args:
        accountID : The ID of the Account
        kwargs : The arguments to create a TakeProfitOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    # Build the order request from the keyword arguments and submit it
    # through the generic order-creation endpoint.
    return self.create(
        accountID,
        order=TakeProfitOrderRequest(**kwargs)
    )
0.004115
def value(self):
    """
    Returns :data:`True` if the host returned a single ping, and
    :data:`False` otherwise.
    """
    # XXX This is doing a DNS lookup every time it's queried; should we
    # call gethostbyname in the constructor and ping that instead (good
    # for consistency, but what if the user *expects* the host to change
    # address?)
    with io.open(os.devnull, 'wb') as devnull:
        # A zero exit code means the single ping got a reply.
        exit_code = subprocess.call(
            ['ping', '-c1', self.host], stdout=devnull, stderr=devnull)
    return exit_code == 0
0.002766
def tile_bbox(self, tile_indices):
    """Return the WGS84 bounding box of the tile at ``(z, x, y)``.

    The box is the concatenation of the unprojected north-west and
    south-east pixel corners of the tile.
    """
    z, x, y = tile_indices
    size = self.tilesize
    nw_pixels = (x * size, (y + 1) * size)
    se_pixels = ((x + 1) * size, y * size)
    nw = self.unproject_pixels(nw_pixels, z)
    se = self.unproject_pixels(se_pixels, z)
    return nw + se
0.005063
def definitiondir(self, filetype, **kwargs):
    """Returns definition subdirectory in :envvar:`PLATELIST_DIR` of the form: ``NNNNXX``.

    Parameters
    ----------
    filetype : str
        File type parameter.
    designid : int or str
        Design ID number.  Will be converted to int internally.

    Returns
    -------
    definitiondir : str
        Definition directory in the format ``NNNNXX``.
    """
    # Group designs by hundreds: design 12345 lives under '0123XX'.
    hundreds = int(kwargs['designid']) // 100
    return '{:0>4d}XX'.format(hundreds)
0.004823
def _get_cairo_bmp(self, mdc, key, rect, is_selected, view_frozen):
    """Returns a wx.Bitmap of cell key in size rect"""

    # Render into an off-screen bitmap via the memory DC.
    bmp = wx.EmptyBitmap(rect.width, rect.height)
    mdc.SelectObject(bmp)
    mdc.SetBackgroundMode(wx.SOLID)
    mdc.SetBackground(wx.WHITE_BRUSH)
    mdc.Clear()
    mdc.SetDeviceOrigin(0, 0)

    context = wx.lib.wxcairo.ContextFromDC(mdc)
    context.save()

    # Zoom context
    zoom = self.zoom
    context.scale(zoom, zoom)

    # Set off cell renderer by 1/2 a pixel to avoid blurry lines
    rect_tuple = \
        -0.5, -0.5, rect.width / zoom + 0.5, rect.height / zoom + 0.5

    spell_check = config["check_spelling"]

    cell_renderer = GridCellCairoRenderer(context, self.data_array, key,
                                          rect_tuple, view_frozen,
                                          spell_check=spell_check)
    # Draw cell
    cell_renderer.draw()

    # Draw selection if present
    if is_selected:
        # Semi-transparent overlay in the configured selection colour.
        context.set_source_rgba(*self.selection_color_tuple)
        context.rectangle(*rect_tuple)
        context.fill()

    context.restore()

    return bmp
0.001623
def _read_para_transaction_id(self, code, cbit, clen, *, desc, length, version): """Read HIP TRANSACTION_ID parameter. Structure of HIP TRANSACTION_ID parameter [RFC 6078]: 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Identifier / +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ / | Padding | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 transaction_id.type Parameter Type 1 15 transaction_id.critical Critical Bit 2 16 transaction_id.length Length of Contents 4 32 transaction_id.id Identifier """ _tsid = self._read_unpack(clen) transaction_id = dict( type=desc, critical=cbit, length=clen, id=_tsid, ) _plen = length - clen if _plen: self._read_fileng(_plen) return transaction_id
0.002514
def runCLI():
    """
    The starting point for the execution of the Scrapple command line tool.

    runCLI uses the docstring as the usage description for the scrapple command. \
    The class for the required command is selected by a dynamic dispatch, and the \
    command is executed through the execute_command() method of the command class.
    """
    args = docopt(__doc__, version='0.3.0')
    try:
        check_arguments(args)
        command_list = ['genconfig', 'run', 'generate']
        select = itemgetter('genconfig', 'run', 'generate')
        # Exactly one command flag is True; map it back to its name.
        selectedCommand = command_list[select(args).index(True)]
        cmdClass = get_command_class(selectedCommand)
        obj = cmdClass(args)
        obj.execute_command()
    except POSSIBLE_EXCEPTIONS as e:
        print('\n', e, '\n')
0.005006
def _zc_decode(self, msg):
    """ZC: Zone Change.

    Decodes the zone number (chars 4-6, converted to a zero-based index)
    and the zone status nibble (char 7, hex) from the message.
    """
    zone_number = int(msg[4:7]) - 1
    zone_status = _status_decode(int(msg[7:8], 16))
    return {'zone_number': zone_number, 'zone_status': zone_status}
0.011173
def fromfits(infile, hdu=0, verbose=True):
    """
    Factory function that reads a FITS file and returns a f2nimage object.
    Use hdu to specify which HDU you want (primary = 0)
    """
    pixelarray, hdr = ft.getdata(infile, hdu, header=True)
    # FITS stores data row-major; transpose to (x, y) pixel indexing.
    pixelarray = np.asarray(pixelarray).transpose()

    pixelarrayshape = pixelarray.shape
    if verbose:
        # print() calls replace the original Python-2-only print
        # statements, keeping this module importable under Python 3.
        print("Input shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1]))
        print("Input file BITPIX : %s" % (hdr["BITPIX"]))

    pixelarrayshape = np.asarray(pixelarrayshape)
    if verbose:
        print("Internal array type : %s" % (pixelarray.dtype.name))

    return f2nimage(pixelarray, verbose=verbose)
0.01958
def relpath(self):
    """
    Determine the relative path to this repository

    Returns:
        str: path of ``self.fpath`` relative to the current working directory
    """
    cwd = os.path.abspath(os.path.curdir)
    return os.path.relpath(self.fpath, cwd)
0.006969
def _check_lr(name, optimizer, lr): """Return one learning rate for each param group.""" n = len(optimizer.param_groups) if not isinstance(lr, (list, tuple)): return lr * np.ones(n) if len(lr) != n: raise ValueError("{} lr values were passed for {} but there are " "{} param groups.".format(n, name, len(lr))) return np.array(lr)
0.002558
def _update_servers(self):
    """Sync our Servers from TopologyDescription.server_descriptions.

    Hold the lock while calling this.
    """
    for address, sd in self._description.server_descriptions().items():
        if address not in self._servers:
            # New address: create a monitor (with its own pool) and a
            # Server wrapper, then start the server.
            monitor = self._settings.monitor_class(
                server_description=sd,
                topology=self,
                pool=self._create_pool_for_monitor(address),
                topology_settings=self._settings)

            weak = None
            if self._publish_server:
                # Weak reference so the events sink does not keep the
                # topology alive.
                weak = weakref.ref(self._events)
            server = Server(
                server_description=sd,
                pool=self._create_pool_for_server(address),
                monitor=monitor,
                topology_id=self._topology_id,
                listeners=self._listeners,
                events=weak)

            self._servers[address] = server
            server.open()
        else:
            # Known address: just refresh its description.
            self._servers[address].description = sd

    # Close and drop servers no longer present in the topology description.
    for address, server in list(self._servers.items()):
        if not self._description.has_server(address):
            server.close()
            self._servers.pop(address)
0.001531
def get_altimeter(wxdata: [str], units: Units, version: str = 'NA') -> ([str], Number):  # type: ignore  # noqa
    """
    Returns the report list and the removed altimeter item

    Version is 'NA' (North American / default) or 'IN' (International).
    'A'-prefixed values are inches of mercury, 'Q'-prefixed values are
    hectopascals; ``units.altimeter`` is updated when the non-default
    prefix is used.
    """
    if not wxdata:
        return wxdata, None
    altimeter = ''
    target = wxdata[-1]
    if version == 'NA':
        # Version target
        if target[0] == 'A':
            altimeter = wxdata.pop()[1:]
        # Other version but prefer normal if available
        # NOTE(review): wxdata[-2] assumes at least two items remain here
        # -- confirm upstream guarantees this.
        elif target[0] == 'Q':
            if wxdata[-2][0] == 'A':
                wxdata.pop()
                altimeter = wxdata.pop()[1:]
            else:
                units.altimeter = 'hPa'
                altimeter = wxdata.pop()[1:].lstrip('.')
        # Else grab the digits
        elif len(target) == 4 and target.isdigit():
            altimeter = wxdata.pop()
    elif version == 'IN':
        # Version target
        if target[0] == 'Q':
            altimeter = wxdata.pop()[1:].lstrip('.')
            if '/' in altimeter:
                altimeter = altimeter[:altimeter.find('/')]
        # Other version but prefer normal if available
        elif target[0] == 'A':
            if wxdata[-2][0] == 'Q':
                wxdata.pop()
                altimeter = wxdata.pop()[1:]
            else:
                units.altimeter = 'inHg'
                altimeter = wxdata.pop()[1:]
    # Some stations report both, but we only need one
    if wxdata and (wxdata[-1][0] == 'A' or wxdata[-1][0] == 'Q'):
        wxdata.pop()
    # convert to Number
    if not altimeter:
        return wxdata, None
    if units.altimeter == 'inHg':
        # Insert the implied decimal point: '2992' -> '29.92'.
        value = altimeter[:2] + '.' + altimeter[2:]
    else:
        value = altimeter
    return wxdata, make_number(value, altimeter)
0.000552
def do_execute(self, options, args): """Implementation of 'coverage run'.""" # Set the first path element properly. old_path0 = sys.path[0] # Run the script. self.coverage.start() code_ran = True try: try: if options.module: sys.path[0] = '' self.run_python_module(args[0], args) else: filename = args[0] sys.path[0] = os.path.abspath(os.path.dirname(filename)) self.run_python_file(filename, args) except NoSource: code_ran = False raise finally: self.coverage.stop() if code_ran: self.coverage.save() # Restore the old path sys.path[0] = old_path0
0.002304
def stop(self):
    '''Set everything back to normal and collect our data'''
    # Restore every redis config value we changed when profiling started.
    for key, value in self._configs.items():
        self._client.config_set(key, value)
    # Pull the accumulated slowlog entries to mine for timings.
    logs = self._client.execute_command('slowlog', 'get', 100000)

    # 'current' accumulates per-command durations attributed to the most
    # recent qless (eval) subcommand seen.
    current = {
        'name': None,
        'accumulated': defaultdict(list)
    }
    for _, _, duration, request in logs:
        command = request[0]
        if command == 'slowlog':
            continue

        if 'eval' in command.lower():
            # A qless script invocation: request[3] is the subcommand name.
            subcommand = request[3]
            self._timings['qless-%s' % subcommand].append(duration)
            # Flush the previous subcommand's accumulated timings.
            if current['name']:
                if current['name'] not in self._commands:
                    self._commands[current['name']] = defaultdict(list)
                for key, values in current['accumulated'].items():
                    self._commands[current['name']][key].extend(values)
            current = {
                'name': subcommand,
                'accumulated': defaultdict(list)
            }
        else:
            self._timings[command].append(duration)
            if current['name']:
                current['accumulated'][command].append(duration)
    # Include the last
    if current['name']:
        if current['name'] not in self._commands:
            self._commands[current['name']] = defaultdict(list)
        for key, values in current['accumulated'].items():
            self._commands[current['name']][key].extend(values)
0.001285
def checkout(request, user_id=None):
    ''' Runs the checkout process for the current cart.

    If the query string contains ``fix_errors=true``, Registrasion will attempt
    to fix errors preventing the system from checking out, including by
    cancelling expired discounts and vouchers, and removing any unavailable
    products.

    Arguments:
        user_id (castable to int):
            If the requesting user is staff, then the user ID can be used to
            run checkout for another user.

    Returns:
        render or redirect:
            If the invoice is generated successfully, or there's already
            a valid invoice for the current cart, redirect to ``invoice``.
            If there are errors when generating the invoice, render
            ``registrasion/checkout_errors.html`` with the following data::

                {
                    "error_list", [str, ...]  # The errors to display.
                }

    '''

    if user_id is not None:
        # Only staff may run checkout on behalf of another user.
        if request.user.is_staff:
            user = User.objects.get(id=int(user_id))
        else:
            raise Http404()
    else:
        user = request.user

    current_cart = CartController.for_user(user)

    if "fix_errors" in request.GET and request.GET["fix_errors"] == "true":
        current_cart.fix_simple_errors()

    try:
        current_invoice = InvoiceController.for_cart(current_cart.cart)
    except ValidationError as ve:
        # Invoice generation failed; show the collected errors.
        return _checkout_errors(request, ve)

    return redirect("invoice", current_invoice.invoice.id)
0.000649
def create_namespace(self, body, **kwargs):  # noqa: E501
    """create_namespace  # noqa: E501

    create a Namespace  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespace(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1Namespace body: (required)
    :param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Namespace
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async (thread) and sync paths delegate to the
    # *_with_http_info variant; with _return_http_data_only set it
    # already returns just the deserialized body (or the thread).
    return self.create_namespace_with_http_info(body, **kwargs)  # noqa: E501
0.001445
def delete(self):
    """Remove this resource (recursive)."""
    # Deleting requires write access to the underlying provider.
    self._check_write_access()
    filepath = self._getFilePath()
    # Remove the file from the repository, forcing removal even if the
    # working copy has modifications (presumably Mercurial's
    # commands.remove given the ui/repo arguments -- verify).
    commands.remove(self.provider.ui, self.provider.repo, filepath, force=True)
0.013453
def _get_pseudo_key(self, row): """ Returns the pseudo key in a row. :param dict row: The row. :rtype: tuple """ ret = list() for key in self._pseudo_key: ret.append(row[key]) return tuple(ret)
0.007353
def concat_chunks(data, ipyclient): """ Concatenate chunks. If multiple chunk files match to the same sample name but with different barcodes (i.e., they are technical replicates) then this will assign all the files to the same sample name file. """ ## collate files progress bar start = time.time() printstr = ' writing/compressing | {} | s1 |' lbview = ipyclient.load_balanced_view() elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(10, 0, printstr.format(elapsed), spacer=data._spacer) ## get all the files ftmps = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.fastq")) ## a dict to assign tmp files to names/reads r1dict = {} r2dict = {} for sname in data.barcodes: if "-technical-replicate-" in sname: sname = sname.rsplit("-technical-replicate", 1)[0] r1dict[sname] = [] r2dict[sname] = [] ## assign to name keys for ftmp in ftmps: base, orient, _ = ftmp.rsplit("_", 2) sname = base.rsplit("/", 1)[-1].split("tmp_", 1)[1] if orient == "R1": r1dict[sname].append(ftmp) else: r2dict[sname].append(ftmp) ## concatenate files snames = [] for sname in data.barcodes: if "-technical-replicate-" in sname: sname = sname.rsplit("-technical-replicate", 1)[0] snames.append(sname) writers = [] for sname in set(snames): tmp1s = sorted(r1dict[sname]) tmp2s = sorted(r2dict[sname]) writers.append(lbview.apply(collate_files, *[data, sname, tmp1s, tmp2s])) total = len(writers) while 1: ready = [i.ready() for i in writers] elapsed = datetime.timedelta(seconds=int(time.time()-start)) progressbar(total, sum(ready), printstr.format(elapsed), spacer=data._spacer) time.sleep(0.1) if all(ready): print("") break
0.005638
def information_coefficient(total1, total2, intersect):
    '''A simple Jaccard-style information coefficient comparing two lists
    of overlaps/diffs: 2 * |intersection| / (total1 + total2).
    '''
    combined = total1 + total2
    return 2.0 * len(intersect) / combined
0.018519
def get_members(pkg_name, module_filter = None, member_filter = None):
    """ Return all module members in a package that satisfy the filters.

    Parameters:
        pkg_name        package name
        module_filter   module-name filter: def module_filter(module_name)
        member_filter   member filter: def member_filter(module_member_object)

    Returns a dict mapping 'module.member_name' to the member object.
    """
    modules = get_modules(pkg_name, module_filter)
    ret = {}
    for m in modules:
        # Qualify each member name with its defining module so members of
        # different modules cannot collide.
        members = dict(("{0}.{1}".format(v.__module__, k), v) for k, v in getmembers(m, member_filter))
        ret.update(members)
    return ret
0.011236
def get_document_unit(self):
    """Get the unit of the SVG surface.

    If the surface passed as an argument is not a SVG surface, the function
    sets the error status to ``STATUS_SURFACE_TYPE_MISMATCH`` and returns
    :ref:`SVG_UNIT_USER`.

    :return: The SVG unit of the SVG surface.

    *New in cairo 1.16.*

    *New in cairocffi 0.9.*

    """
    unit = cairo.cairo_svg_surface_get_document_unit(self._pointer)
    # Propagate any error recorded on the underlying cairo surface.
    self._check_status()
    return unit
0.003929
def set_decode_area(codec, image, start_x=0, start_y=0, end_x=0, end_y=0):
    """Wraps openjp2 library function opj_set_decode area.

    Sets the given area to be decoded. This function should be called right
    after read_header and before any tile header reading.

    Parameters
    ----------
    codec : CODEC_TYPE
        Codec initialized by create_decompress function.
    image : ImageType pointer
        The decoded image previously set by read_header.
    start_x, start_y : optional, int
        The left and upper position of the rectangle to decode.
    end_x, end_y : optional, int
        The right and lower position of the rectangle to decode.

    Raises
    ------
    RuntimeError
        If the OpenJPEG library routine opj_set_decode_area fails.
    """
    # Declare the C function's argument/return types before calling it;
    # the restype hook turns a C error code into a Python exception.
    OPENJP2.opj_set_decode_area.argtypes = [CODEC_TYPE,
                                            ctypes.POINTER(ImageType),
                                            ctypes.c_int32,
                                            ctypes.c_int32,
                                            ctypes.c_int32,
                                            ctypes.c_int32]
    OPENJP2.opj_set_decode_area.restype = check_error

    OPENJP2.opj_set_decode_area(codec, image,
                                ctypes.c_int32(start_x),
                                ctypes.c_int32(start_y),
                                ctypes.c_int32(end_x),
                                ctypes.c_int32(end_y))
0.000679
def thread_stopped(self):
    """ :meth:`.WThreadTask._polling_iteration` implementation

    Stops the currently active task in the chain, if any, and clears
    the current-task index.
    """
    current = self.__current_task
    if current is None:
        return
    self.__task_chain[current].stop()
    self.__current_task = None
0.035714
def _insert_dummy_zmat(self, exception, inplace=False): """Works INPLACE""" def insert_row(df, pos, key): if pos < len(df): middle = df.iloc[pos:(pos + 1)] middle.index = [key] start, end = df.iloc[:pos], df.iloc[pos:] return pd.concat([start, middle, end]) elif pos == len(df): start = df.copy() start.loc[key] = start.iloc[-1] return start def raise_warning(i, dummy_d): give_message = ('For the dihedral reference of atom {i} the ' 'dummy atom {dummy_d} was inserted').format warnings.warn(give_message(i=i, dummy_d=dummy_d), UserWarning) def insert_dummy(zmat, i, dummy_cart, dummy_d): """Works INPLACE on self._frame""" cols = ['b', 'a', 'd'] actual_d = zmat.loc[i, 'd'] zframe = insert_row(zmat, zmat.index.get_loc(i), dummy_d) zframe.loc[i, 'd'] = dummy_d zframe.loc[dummy_d, 'atom'] = 'X' zframe.loc[dummy_d, cols] = zmat.loc[actual_d, cols] zmat_values = dummy_cart._calculate_zmat_values( [dummy_d] + list(zmat.loc[actual_d, cols]))[0] zframe.loc[dummy_d, ['bond', 'angle', 'dihedral']] = zmat_values zmat._frame = zframe zmat._metadata['has_dummies'][i] = {'dummy_d': dummy_d, 'actual_d': actual_d} raise_warning(i, dummy_d) zmat = self if inplace else self.copy() if exception.index in zmat._metadata['has_dummies']: zmat._remove_dummies(to_remove=[exception.index], inplace=True) else: insert_dummy(zmat, exception.index, *zmat._insert_dummy_cart(exception)) try: zmat._metadata['last_valid_cartesian'] = zmat.get_cartesian() except InvalidReference as e: zmat._insert_dummy_zmat(e, inplace=True) if not inplace: return zmat
0.00095
def graph(ctx, path, metrics, output, x_axis, changes): """ Graph a specific metric for a given file, if a path is given, all files within path will be graphed. Some common examples: Graph all .py files within src/ for the raw.loc metric $ wily graph src/ raw.loc Graph test.py against raw.loc and cyclomatic.complexity metrics $ wily graph src/test.py raw.loc cyclomatic.complexity Graph test.py against raw.loc and raw.sloc on the x-axis $ wily graph src/test.py raw.loc --x-axis raw.sloc """ config = ctx.obj["CONFIG"] if not exists(config): handle_no_cache(ctx) from wily.commands.graph import graph logger.debug(f"Running report on {path} for metrics {metrics}") graph( config=config, path=path, metrics=metrics, output=output, x_axis=x_axis, changes=changes, )
0.002203
def _get_names(self, collector): """Get names of timeseries the collector produces.""" desc_func = None # If there's a describe function, use it. try: desc_func = collector.describe except AttributeError: pass # Otherwise, if auto describe is enabled use the collect function. if not desc_func and self._auto_describe: desc_func = collector.collect if not desc_func: return [] result = [] type_suffixes = { 'counter': ['_total', '_created'], 'summary': ['', '_sum', '_count', '_created'], 'histogram': ['_bucket', '_sum', '_count', '_created'], 'gaugehistogram': ['_bucket', '_gsum', '_gcount'], 'info': ['_info'], } for metric in desc_func(): for suffix in type_suffixes.get(metric.type, ['']): result.append(metric.name + suffix) return result
0.002026
def sign(self, pkt, key): """ Sign an IPsec (ESP or AH) packet with this algo. @param pkt: a packet that contains a valid encrypted ESP or AH layer @param key: the authentication key, a byte string @return: the signed packet """ if not self.mac: return pkt mac = self.new_mac(key) if pkt.haslayer(ESP): mac.update(raw(pkt[ESP])) pkt[ESP].data += mac.finalize()[:self.icv_size] elif pkt.haslayer(AH): clone = zero_mutable_fields(pkt.copy(), sending=True) mac.update(raw(clone)) pkt[AH].icv = mac.finalize()[:self.icv_size] return pkt
0.002841
def send(self, wifs, txouts, change_address=None, lock_time=0, fee=10000): """TODO add doc string""" # FIXME test!! rawtx = self.create_tx(txouts=txouts, lock_time=lock_time) rawtx = self.add_inputs(rawtx, wifs, change_address=change_address, fee=fee) return self.publish(rawtx)
0.005714
def convert_time(obj): """Returns a TIME column as a time object: >>> time_or_None('15:06:17') datetime.time(15, 6, 17) Illegal values are returned as None: >>> time_or_None('-25:06:17') is None True >>> time_or_None('random crap') is None True Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but can accept values as (+|-)DD HH:MM:SS. The latter format will not be parsed correctly by this function. Also note that MySQL's TIME column corresponds more closely to Python's timedelta and not time. However if you want TIME columns to be treated as time-of-day and not a time offset, then you can use set this function as the converter for FIELD_TYPE.TIME. """ if not PY2 and isinstance(obj, (bytes, bytearray)): obj = obj.decode('ascii') m = TIME_RE.match(obj) if not m: return obj try: groups = list(m.groups()) groups[-1] = _convert_second_fraction(groups[-1]) hours, minutes, seconds, microseconds = groups return datetime.time(hour=int(hours), minute=int(minutes), second=int(seconds), microsecond=int(microseconds)) except ValueError: return obj
0.001599
def endpoint_access(self, method): """ Determine access level needed for endpoint :param method: The request verb :return: String representing access type. """ if method == 'OPTIONS': # The CORS pre-flight checks should not require authentication return self.UNAUTHENTICATED_ACCESS elif method not in self.METHOD_ACCESS: logging.error('Cannot determine access needed for %s method', method) raise HTTPError(500, 'Internal Server Error') return self.METHOD_ACCESS[method]
0.005146
def write_template(fn, lang="python"): """ Write language-specific script template to file. Arguments: - fn(``string``) path to save the template to - lang('python', 'bash') which programming language """ with open(fn, "wb") as fh: if lang == "python": fh.write(PY_TEMPLATE) elif lang == "bash": fh.write(SH_TEMPLATE)
0.005025
def update_user_lock(repository_path, session_token): """ Write or clear the user lock file """ # NOTE ALWAYS use within lock access callback # While the user lock file should ALWAYS be written only within a lock_access # callback, it is sometimes read asynchronously. Because of this updates to # the file must be atomic. Write plus move is used to achieve this. real_path = cpjoin(repository_path, 'user_file') tmp_path = cpjoin(repository_path, 'new_user_file') with open(tmp_path, 'w') as fd2: if session_token is None: fd2.write('') else: fd2.write(json.dumps({'session_token' : session_token, 'expires' : int(time.time()) + 30})) fd2.flush() os.rename(tmp_path, real_path)
0.013569
def clean_str(string): """Tokenization/string cleaning for all datasets except for SST. Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py """ string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string) string = re.sub(r"\'s", " \'s", string) string = re.sub(r"\'ve", " \'ve", string) string = re.sub(r"n\'t", " n\'t", string) string = re.sub(r"\'re", " \'re", string) string = re.sub(r"\'d", " \'d", string) string = re.sub(r"\'ll", " \'ll", string) string = re.sub(r",", " , ", string) string = re.sub(r"!", " ! ", string) string = re.sub(r"\(", r" \( ", string) string = re.sub(r"\)", r" \) ", string) string = re.sub(r"\?", r" \? ", string) string = re.sub(r"\s{2,}", " ", string) return string.strip().lower()
0.002457
def status(self): """ The current status of the event (started, finished or pending). """ myNow = timezone.localtime(timezone=self.tz) fromDt = getAwareDatetime(self.except_date, self.time_from, self.tz) daysDelta = dt.timedelta(days=self.num_days - 1) toDt = getAwareDatetime(self.except_date + daysDelta, self.time_to, self.tz) if toDt < myNow: return "finished" elif fromDt < myNow: return "started"
0.006024
def new_line(self, tokens, line_end, line_start): """a new line has been encountered, process it if necessary""" if _last_token_on_line_is(tokens, line_end, ";"): self.add_message("unnecessary-semicolon", line=tokens.start_line(line_end)) line_num = tokens.start_line(line_start) line = tokens.line(line_start) if tokens.type(line_start) not in _JUNK_TOKENS: self._lines[line_num] = line.split("\n")[0] self.check_lines(line, line_num)
0.005906
def get_client(self, email=None, password=None, **__): """Get the google data client.""" if self.client is not None: return self.client return Auth(email, password)
0.01
def zrevrank(self, name, value): """ Returns the ranking in reverse order for the member :param name: str the name of the redis key :param member: str """ with self.pipe as pipe: return pipe.zrevrank(self.redis_key(name), self.valueparse.encode(value))
0.005714
def prior_rev(C, alpha=-1.0): r"""Prior counts for sampling of reversible transition matrices. Prior is defined as b_ij= alpha if i<=j b_ij=0 else The reversible prior adds -1 to the upper triagular part of the given count matrix. This prior respects the fact that for a reversible transition matrix the degrees of freedom correspond essentially to the upper, respectively the lower triangular part of the matrix. Parameters ---------- C : (M, M) ndarray or scipy.sparse matrix Count matrix alpha : float (optional) Value of prior counts Returns ------- B : (M, M) ndarray Matrix of prior counts """ ind = np.triu_indices(C.shape[0]) B = np.zeros(C.shape) B[ind] = alpha return B
0.001242
def render_children(self, block, view_name=None, context=None): """Render a block's children, returning a list of results. Each child of `block` will be rendered, just as :func:`render_child` does. Returns a list of values, each as provided by :func:`render`. """ results = [] for child_id in block.children: child = self.get_block(child_id) result = self.render_child(child, view_name, context) results.append(result) return results
0.005682
def get_default_values(self): """ Overridding to make updating the defaults after instantiation of the option parser possible, update_defaults() does the dirty work. """ if not self.process_default_values: # Old, pre-Optik 1.5 behaviour. return optparse.Values(self.defaults) defaults = self.update_defaults(self.defaults.copy()) # ours for option in self._get_all_options(): default = defaults.get(option.dest) if isinstance(default, basestring): opt_str = option.get_opt_string() defaults[option.dest] = option.check_value(opt_str, default) return optparse.Values(defaults)
0.002774
def no_witness(self): ''' Tx -> bytes ''' tx = bytes() tx += self.version tx += VarInt(len(self.tx_ins)).to_bytes() for tx_in in self.tx_ins: tx += tx_in.to_bytes() tx += VarInt(len(self.tx_outs)).to_bytes() for tx_out in self.tx_outs: tx += tx_out.to_bytes() tx += self.lock_time return bytes(tx)
0.00489
def _right_align(p_str): """ Returns p_str with content after <TAB> character aligned right. Right alignment is done using proper number of spaces calculated from 'line_width' attribute. """ to_fill = _columns() - len(escape_ansi(p_str)) if to_fill > 0: p_str = re.sub('\t', ' '*to_fill, p_str) else: p_str = re.sub('\t', ' ', p_str) return p_str
0.002494
def create_WCSname(wcsname): """ Verify that a valid WCSNAME has been provided, and if not, create a default WCSNAME based on current date. """ if util.is_blank(wcsname): ptime = fileutil.getDate() wcsname = "User_"+ptime return wcsname
0.00361
def next(self): ''' Return the next iteration by popping `chunk_size` from the left and appending `chunk_size` to the right if there's info on the file left to be read. ''' if self.__buffered is None: # Use floor division to force multiplier to an integer multiplier = self.__max_in_mem // self.__chunk_size self.__buffered = "" else: multiplier = 1 self.__buffered = self.__buffered[self.__chunk_size:] data = self.__file.read(self.__chunk_size * multiplier) # Data is a byte object in Python 3 # Decode it in order to append to self.__buffered str later # Use the salt util in case it's already a string (Windows) data = salt.utils.stringutils.to_str(data) if not data: self.__file.close() raise StopIteration self.__buffered += data return self.__buffered
0.002073
async def get_storage_list(self) -> List[Storage]: """Return information about connected storage devices.""" return [ Storage.make(**x) for x in await self.services["system"]["getStorageList"]({}) ]
0.00813
def from_fptr(cls, label, type_, fptr): """Return ``FSEntry`` object.""" return FSEntry( label=label, type=type_, path=fptr.path, use=fptr.use, file_uuid=fptr.file_uuid, derived_from=fptr.derived_from, checksum=fptr.checksum, checksumtype=fptr.checksumtype, )
0.005277
def convert(self, value, param, ctx): # pylint: disable=inconsistent-return-statements """Validate memory argument. Returns the memory value in megabytes.""" matches = MEMORY_RE.match(value.lower()) if matches is None: self.fail('%s is not a valid value for memory amount' % value, param, ctx) amount_str, unit = matches.groups() amount = int(amount_str) if unit in [None, 'm', 'mb']: # Assume the user intends gigabytes if they specify a number < 1024 if amount < 1024: return amount * 1024 else: if amount % 1024 != 0: self.fail('%s is not an integer that is divisable by 1024' % value, param, ctx) return amount elif unit in ['g', 'gb']: return amount * 1024
0.00591
def _get_digest(self, info): """ Get a digest from a dictionary by looking at keys of the form 'algo_digest'. Returns a 2-tuple (algo, digest) if found, else None. Currently looks only for SHA256, then MD5. """ result = None for algo in ('sha256', 'md5'): key = '%s_digest' % algo if key in info: result = (algo, info[key]) break return result
0.004237
def ncbi_blast(self, db="nr", megablast=True, sequence=None): """ perform an NCBI blast against the sequence of this feature """ import requests requests.defaults.max_retries = 4 assert sequence in (None, "cds", "mrna") seq = self.sequence() if sequence is None else ("".join(self.cds_sequence if sequence == "cds" else self.mrna_sequence)) r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi', timeout=20, data=dict( PROGRAM="blastn", #EXPECT=2, DESCRIPTIONS=100, ALIGNMENTS=0, FILTER="L", # low complexity CMD="Put", MEGABLAST=True, DATABASE=db, QUERY=">%s\n%s" % (self.name, seq) ) ) if not ("RID =" in r.text and "RTOE" in r.text): print("no results", file=sys.stderr) raise StopIteration rid = r.text.split("RID = ")[1].split("\n")[0] import time time.sleep(4) print("checking...", file=sys.stderr) r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi', data=dict(RID=rid, format="Text", DESCRIPTIONS=100, DATABASE=db, CMD="Get", )) while "Status=WAITING" in r.text: print("checking...", file=sys.stderr) time.sleep(10) r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi', data=dict(RID=rid, format="Text", CMD="Get", )) for rec in _ncbi_parse(r.text): yield rec
0.007042
def create_devices(self, thing_names, config_file, region=None, cert_dir=None, append=False, account_id=None, policy_name='ggd-discovery-policy', profile_name=None): """ Using the `thing_names` values, creates Things in AWS IoT, attaches and downloads new keys & certs to the certificate directory, then records the created information in the local config file for inclusion in the Greengrass Group as Greengrass Devices. :param thing_names: the thing name or list of thing names to create and use as Greengrass Devices :param config_file: config file used to track the Greengrass Devices in the group :param region: the region in which to create the new devices. [default: us-west-2] :param cert_dir: the directory in which to store the thing's keys and certs. If `None` then use the current directory. :param append: append the created devices to the list of devices in the config file. [default: False] :param account_id: the account ID in which to create devices. If 'None' the config_file will be checked for an `account_id` value in the `misc` section. :param policy_name: the name of the policy to associate with the device. [default: 'ggd-discovery-policy'] :param profile_name: the name of the `awscli` profile to use. [default: None] """ logging.info("create_devices thing_names:{0}".format(thing_names)) config = GroupConfigFile(config_file=config_file) if append is False and config.is_device_fresh() is False: raise ValueError( "Config file tracking previously created devices. 
Append " "devices instead" ) if region is None: region = self._region if account_id is None: account_id = self._account_id devices = dict() if append: devices = config['devices'] if type(thing_names) is str: thing_names = [thing_names] iot_client = _get_iot_session(region=region, profile_name=profile_name) for thing_name in thing_names: keys_cert, thing = self.create_thing(thing_name, region, cert_dir) cert_arn = keys_cert['certificateArn'] devices[thing_name] = { 'thing_arn': thing['thingArn'], 'cert_arn': cert_arn, 'cert_id': keys_cert['certificateId'], 'thing_name': thing_name } logging.info("Thing:'{0}' associated with cert:'{1}'".format( thing_name, cert_arn)) device_policy = self.get_device_policy( device_name=thing_name, account_id=account_id, region=region ) self._create_attach_thing_policy(cert_arn, device_policy, iot_client, policy_name) config['devices'] = devices logging.info("create_devices cfg:{0}".format(config))
0.001593
def get_root_path(self, language): """ Get root path to pass to the LSP servers. This can be the current project path or the output of getcwd_or_home (except for Python, see below). """ path = None # Get path of the current project if self.main and self.main.projects: path = self.main.projects.get_active_project_path() # If there's no project, use the output of getcwd_or_home. if not path: # We can't use getcwd_or_home for Python because if it # returns home and you have a lot of Python files on it # then computing Rope completions takes a long time # and blocks the PyLS server. # Instead we use an empty directory inside our config one, # just like we did for Rope in Spyder 3. if language == 'python': path = get_conf_path('lsp_root_path') if not osp.exists(path): os.mkdir(path) else: path = getcwd_or_home() return path
0.001821
def get_cache_key(datatable_class, view=None, user=None, **kwargs): """ Returns a cache key unique to the current table, and (if available) the request user. The ``view`` argument should be the class reference itself, since it is easily obtainable in contexts where the instance is not available. """ datatable_name = datatable_class.__name__ if datatable_name.endswith('_Synthesized'): datatable_name = datatable_name[:-12] datatable_id = '%s.%s' % (datatable_class.__module__, datatable_name) if CACHE_KEY_HASH: datatable_id = _hash_key_component(datatable_id) cache_key = 'datatable_%s' % (datatable_id,) if view: if not inspect.isclass(view): # Reduce view to its class view = view.__class__ view_id = '%s.%s' % (view.__module__, view.__name__) if CACHE_KEY_HASH: view_id = _hash_key_component(view_id) cache_key += '__view_%s' % (view_id,) if user and user.is_authenticated(): cache_key += '__user_%s' % (user.pk,) # All other kwargs are used directly to create a hashed suffix # Order the kwargs by key name, then convert them to their repr() values. items = sorted(kwargs.items(), key=lambda item: item[0]) values = [] for k, v in items: values.append('%r:%r' % (k, v)) if values: kwargs_id = '__'.join(values) kwargs_id = _hash_key_component(kwargs_id) cache_key += '__kwargs_%s' % (kwargs_id,) log.debug("Cache key derived for %r: %r (from kwargs %r)", datatable_class, cache_key, values) return cache_key
0.002457
def create_geotiff(name, Array, driver, ndv, xsize, ysize, geot, projection, datatype, band=1): ''' Creates new geotiff from array ''' if isinstance(datatype, np.int) == False: if datatype.startswith('gdal.GDT_') == False: datatype = eval('gdal.GDT_'+datatype) newfilename = name+'.tif' # Set nans to the original No Data Value Array[np.isnan(Array)] = ndv # Set up the dataset DataSet = driver.Create(newfilename, xsize, ysize, 1, datatype) # the '1' is for band 1. DataSet.SetGeoTransform(geot) DataSet.SetProjection(projection.ExportToWkt()) # Write the array DataSet.GetRasterBand(band).WriteArray(Array) DataSet.GetRasterBand(band).SetNoDataValue(ndv) return newfilename
0.00527
def on_close(self): """ Called by the server when the App have to be terminated """ self._stop_update_flag = True for ws in self.websockets: ws.close()
0.010256
def plot(parameterized, fignum=None, ax=None, colors=None, figsize=(12, 6)): """ Plot latent space X in 1D: - if fig is given, create input_dim subplots in fig and plot in these - if ax is given plot input_dim 1D latent space plots of X into each `axis` - if neither fig nor ax is given create a figure with fignum and plot in there colors: colors of different latent space dimensions input_dim """ if ax is None: fig = pb.figure(num=fignum, figsize=figsize) if colors is None: from ..Tango import mediumList from itertools import cycle colors = cycle(mediumList) pb.clf() else: colors = iter(colors) lines = [] fills = [] bg_lines = [] means, variances = parameterized.mean.values, parameterized.variance.values x = np.arange(means.shape[0]) for i in range(means.shape[1]): if ax is None: a = fig.add_subplot(means.shape[1], 1, i + 1) elif isinstance(ax, (tuple, list)): a = ax[i] else: raise ValueError("Need one ax per latent dimension input_dim") bg_lines.append(a.plot(means, c='k', alpha=.3)) lines.extend(a.plot(x, means.T[i], c=next(colors), label=r"$\mathbf{{X_{{{}}}}}$".format(i))) fills.append(a.fill_between(x, means.T[i] - 2 * np.sqrt(variances.T[i]), means.T[i] + 2 * np.sqrt(variances.T[i]), facecolor=lines[-1].get_color(), alpha=.3)) a.legend(borderaxespad=0.) a.set_xlim(x.min(), x.max()) if i < means.shape[1] - 1: a.set_xticklabels('') pb.draw() a.figure.tight_layout(h_pad=.01) # , rect=(0, 0, 1, .95)) return dict(lines=lines, fills=fills, bg_lines=bg_lines)
0.003254
def skip_while(self, predicate): '''Omit elements from the start for which a predicate is True. Note: This method uses deferred execution. Args: predicate: A single argument predicate function. Returns: A Queryable over the sequence of elements beginning with the first element for which the predicate returns False. Raises: ValueError: If the Queryable is closed(). TypeError: If predicate is not callable. ''' if self.closed(): raise ValueError("Attempt to call take_while() on a " "closed Queryable.") if not is_callable(predicate): raise TypeError("skip_while() parameter predicate={0} is " "not callable".format(repr(predicate))) return self._create(itertools.dropwhile(predicate, self))
0.003297
def remove_binaries(package_dir=False): """Remove all binaries for the current platform Parameters ---------- package_dir: bool If True, remove all binaries from the `resources` directory of the qpsphere package. If False, remove all binaries from the user's cache directory. """ paths = [] if package_dir: pdir = RESCR_PATH else: pdir = CACHE_PATH for pp in pdir.iterdir(): if pp.name != "shipped_resources_go_here": paths.append(pp) for pp in paths: pp.unlink()
0.001739