Dataset schema (column: type, observed sizes):
- query: string, lengths 9 to 9.05k
- document: string, lengths 10 to 222k
- metadata: dict
- negatives: list, length 30
- negative_scores: list, length 30
- document_score: string, lengths 4 to 10
- document_rank: string, 2 classes

Each row below gives these fields in order: query, document, metadata, negatives, negative_scores, document_score, document_rank.
Reopens the Bloomberg connection. Function is called when the 'Restart Bloomberg Connection' button from the pricer frame is clicked.
def reOpenConnection(self): self.blptsAnalytics.closeSession() self.blptsAnalytics = None self.bbgstreamBIDEM.closeSubscription() self.bbgstreamBIDEM = None self.streamWatcherBID = None self.streamWatcherAnalytics = None self.blptsPriceOnly.closeSession() ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()", "def REBpowerup(self):\n #specific to REB1\n self.cabac_reset()\n\n self.load_sequencer()\n #sets the default sequencer clock states to 0\n self.fpga.send_function(0, fpga0.Funct...
[ "0.6018849", "0.5963336", "0.5805716", "0.5769722", "0.5753302", "0.5612181", "0.555544", "0.549498", "0.5492464", "0.54829496", "0.54354995", "0.5419979", "0.53779286", "0.5329917", "0.5328147", "0.5313881", "0.5282361", "0.52603227", "0.5255711", "0.5238575", "0.5236348", ...
0.7195131
0
Refreshes the swap rates. Function is called when the 'Refresh Rates' button from the pricer menu is clicked.
def refreshSwapRates(self): self.firstPass()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_refresh_button_cicked_(self):\n for slider in self.sliders:\n slider.refresh()", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n self._refresh_method()", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n...
[ "0.6369983", "0.6226615", "0.6226615", "0.6216588", "0.61472285", "0.61472285", "0.61472285", "0.61254907", "0.6106044", "0.608328", "0.6068694", "0.59617007", "0.59551466", "0.58658063", "0.58497584", "0.58342487", "0.58342487", "0.5820157", "0.5804136", "0.5759256", "0.5748...
0.77100295
0
Fill historical prices and ratings. Function is called when the pricer menu first launches.
def fillHistoricalPricesAndRating(self): time_start = time.time() self.buildPriceHistory() savepath = TEMPPATH + 'bondhistoryrating.csv' #If bondhistoryratingUAT.csv doesn't exist, download data and write file. cols = ['SNP', 'MDY', 'FTC', 'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_current_prices(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n eth_price = strategy.contract_status.get(\n \"priceprovider_get_latest_answer\", None\n )\n btc_price = strategy.contract_status.get(\n \"btcpriceprovider_get_latest_answer\...
[ "0.60714465", "0.5873897", "0.5567629", "0.5556391", "0.5556155", "0.54767716", "0.54149306", "0.5359882", "0.5354679", "0.5351185", "0.5349947", "0.5340057", "0.5323976", "0.52928245", "0.5278397", "0.52684623", "0.52457297", "0.5233734", "0.522753", "0.5223621", "0.5218184"...
0.6407883
0
Check if a test value is within permissive relative difference from refval. Returns a boolean.
def _isInAllowedRange(self, testval, refval, reltol=1.e-2):
    denom = refval
    if refval == 0:
        if testval == 0:
            return True
        else:
            denom = testval
    rdiff = (testval - refval) / denom
    del denom, testval, refval
    return (abs(rdiff) <= re...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def within_value(v1, v2):\n percentage = 0.1\n error_allowed = percentage * v1\n high = v1 + error_allowed\n low = v1 - error_allowed\n\n return low <= v2 <= high", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 ...
[ "0.6399123", "0.62219507", "0.60484093", "0.60059714", "0.59347", "0.59186506", "0.590363", "0.5893858", "0.58391315", "0.58225876", "0.58073807", "0.5764574", "0.5754584", "0.5754552", "0.5754552", "0.5742861", "0.5693243", "0.56759703", "0.5661897", "0.5655538", "0.5655186"...
0.76101834
0
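For reference, the relative-difference rule implemented by the row above (a restatement of the code; the truncated final line compares against reltol):

$$\mathrm{rdiff} = \frac{t - r}{d}, \qquad d = \begin{cases} r & \text{if } r \neq 0 \\ t & \text{if } r = 0,\ t \neq 0 \end{cases}, \qquad \text{pass} \iff |\mathrm{rdiff}| \le \mathrm{reltol},$$

with t = testval, r = refval, and the special case that t = r = 0 passes unconditionally.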
Convert input to a list. If input is None, this method simply returns None.
def _to_list(self, input):
    import numpy
    listtypes = (list, tuple, numpy.ndarray)
    if input is None:  # 'is None' rather than '== None': ndarray comparison is elementwise
        return None
    elif type(input) in listtypes:
        return list(input)
    else:
        return [input]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _convert_to_list(self, input_argument):\n if type(input_argument) is not list:\n input_argument = [input_argument]\n return input_argument", "def _MakeList(input):\n if len(input) == 0:\n raise ValueError(\n 'input cannot be empty.')\n elif len(input) == 1:\n ...
[ "0.7752635", "0.73784405", "0.7284458", "0.7166048", "0.7078194", "0.7020485", "0.6989503", "0.689384", "0.6810329", "0.676979", "0.67141014", "0.6616923", "0.6545607", "0.6530351", "0.6525318", "0.6469693", "0.64340585", "0.6415135", "0.6366174", "0.632168", "0.62872195", ...
0.89297014
0
Check if FFT used in sinusoidal baselining properly handles flag info. Checking is done by comparing the baseline fitting results from two input data, defined as 'infile_spk' and 'infile_int'. 'infile_spk' has six spiky features in its spectra at channels 2, 22, 42, 62, 82, and 97, and channels around these spikes (namely, 0-4, 20-24,...
def testFlagFFT(self): mode = "list" infile_spk = self.infile_02spk outfile_spk = self.outroot+"_flagFFT_spk.asap" result = sdbaseline(infile=infile_spk,maskmode=mode,outfile=outfile_spk,blfunc='sinusoid',fftthresh='top3') infile_int = self.infile_02int outfile_int = self...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processFile(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):\n length = length*divide\n #fs = sample rate, sound = multichannel sound signal\n try:\n fs1, sound = wavfile.read(filename)\n except ValueError:\n print(str(filename) + ' failed to process')\n return 'faile...
[ "0.6024711", "0.587461", "0.577335", "0.57703745", "0.5720756", "0.5682307", "0.5646616", "0.5549602", "0.5549602", "0.5549602", "0.55184007", "0.55042326", "0.54917777", "0.5479666", "0.54760754", "0.547309", "0.54530597", "0.54324853", "0.5427759", "0.542531", "0.5405512", ...
0.77132446
0
Tries to delete ``filename`` and ignores any error that is raised.
def safe_delete(self, filename):
    try:
        os.remove(filename)
    except OSError:
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def safe_delete(filename):\r\n try:\r\n os.unlink(filename)\r\n except OSError as e:\r\n if e.errno != errno.ENOENT:\r\n raise", "def _delete(filename):\n return os.remove(filename)", "def delete_file(filename):\n if os.path.isfile(filename):\n return os.remove(filename)", "def dele...
[ "0.8534043", "0.8300577", "0.82324356", "0.81359273", "0.8114149", "0.8065215", "0.7914509", "0.78162354", "0.78063285", "0.7795391", "0.77448577", "0.769363", "0.7657995", "0.7628127", "0.76126134", "0.75135005", "0.74589443", "0.7451056", "0.7368061", "0.73465884", "0.72446...
0.84430987
1
Generates the API documentation for all of the packages/modules/classes/functions. Sphinx doesn't automatically generate the documentation for the api. This calls sphinxapidoc which will create the API .rst files and dump them in the source directory. It is expected that one of the TOC directives calls out to the creat...
def generate_api_docs(self): if self.API_OUTPUT_DIR: args = [ # Put documentation for each module on its own page '-e', # don't create the "modules.rst" file (the table of contents # file) as this is already provided by the package's ma...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def write_api_docs(self, outdir):\r\n if not os.path.exists(outdir):\r\n os.mkdir(outdir)\r\n # compose list of modules\r\n ...
[ "0.8099668", "0.79043376", "0.75185376", "0.73260605", "0.7320983", "0.71930486", "0.7081947", "0.70498407", "0.69733393", "0.697248", "0.68476886", "0.6820365", "0.6783848", "0.6733729", "0.6585713", "0.657623", "0.6568642", "0.656522", "0.6563635", "0.6527915", "0.65139586"...
0.8630611
0
Attempts to clean all of the files found in ``self.FILES_TO_CLEAN``. Ignores all errors.
def try_clean(self):
    for f in self.FILES_TO_CLEAN:
        if not os.path.exists(f):
            continue
        if os.path.isdir(f):
            # don't care on error
            shutil.rmtree(f, onerror=lambda *x, **y: None)
        else:
            self.safe_delete(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean():\n clean_files()", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def clean(self):\n if self.verbosity:\n self.header(\"Cleaning data files\")\n\n tsv_list = os.listdir(self.tsv_dir)\n\n if self.resume_mode:\n #...
[ "0.757727", "0.74859875", "0.7146801", "0.7119696", "0.7087362", "0.7058787", "0.7051271", "0.7023708", "0.69623536", "0.6948941", "0.69370013", "0.6927504", "0.6906198", "0.679786", "0.6780564", "0.67707026", "0.67506415", "0.6735639", "0.6723798", "0.66588336", "0.6654853",...
0.85913986
0
Gathers all command line arguments and then builds the docs. This performs command line parsing and stores the known flags (those added with ``self.add_argument()``) into ``self.args`` and all leftover unknown args into ``self.argv`` (see
def build(self, argv=None): if argv is None: argv = sys.argv self.setup_default_arguments() self.args, self.argv = self.parser.parse_known_args(argv) if self.args.clean: self.try_clean() self.pre_build_hook() self.generate_documentation() ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_args(self):\n parser = argparse.ArgumentParser(description='Build PDF documentation')\n parser.add_argument('config', help='YAML config file')\n parser.add_argument('-f', '--fast', help='Do not update toc',\n action='store_true', default=False)\n par...
[ "0.69166946", "0.6655604", "0.65268964", "0.63026917", "0.6240775", "0.6204276", "0.616624", "0.6138024", "0.60846776", "0.60733485", "0.60671383", "0.604279", "0.6033722", "0.60330325", "0.60208917", "0.5987356", "0.598559", "0.59679663", "0.59497654", "0.5935443", "0.593300...
0.67095804
1
Read csv to list
def read_csv_to_list(csv_path):
    with open(csv_path, newline="") as f:
        reader = csv.reader(f)
        data = list(reader)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def read_file_to_l...
[ "0.7983896", "0.7966335", "0.782238", "0.7816782", "0.7676543", "0.7659375", "0.759841", "0.759841", "0.7496229", "0.74867773", "0.74808073", "0.74660474", "0.7411088", "0.7405299", "0.7381975", "0.73708904", "0.7361922", "0.73237014", "0.73150474", "0.7302132", "0.72732013",...
0.8154642
0
Saves labels to csv
def save_labels_to_disk(labels: list, label_path: str):
    with open(label_path, "w") as result_file:
        wr = csv.writer(result_file, dialect="excel")
        wr.writerows(labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def labels2csv(labels, csv_path):\n with open(csv_path, \"w\") as file:\n file.write(\"id,label\\n\")\n for i, label in enumerate(labels):\n file.write(\"{},{}\\n\".format(i, label))", "def write_csv_label(labels, csv_file):\n with open(csv_file, 'w') as f:\n writer = csv.wr...
[ "0.8146048", "0.79251695", "0.7332245", "0.70488673", "0.69873244", "0.6764667", "0.6763731", "0.67252517", "0.67002916", "0.6674826", "0.66690266", "0.66482043", "0.66482043", "0.66221464", "0.661912", "0.6589544", "0.6589139", "0.65413326", "0.6503416", "0.6473352", "0.6437...
0.80636126
1
Splits the train set into test images
def split_test_train(train_folder_path, train_labels, test_folder, n_test_images): os.makedirs(test_folder, exist_ok=True) data = read_csv_to_list(train_labels) # Prepare test labels and move images to new folder labels = [] for img in data[1:n_test_images]: # Input and new image paths ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_datasets(img_lst):\n num = len(img_lst)\n\n idx = np.random.permutation(num)\n train_lst = np.array(img_lst)[idx[:int(num * .8)]] # 80/20 split\n validation_lst = np.array(img_lst)[idx[int(num * .8):int(num * .9)]]\n test_lst = np.array(img_lst)[idx[int(num * .9):]]\n return train_lst...
[ "0.74955", "0.7494549", "0.7489278", "0.74131435", "0.73808104", "0.7314455", "0.7261903", "0.7215054", "0.7191714", "0.7166882", "0.71620023", "0.7145183", "0.71408004", "0.7076274", "0.7059303", "0.70584756", "0.70197546", "0.70191765", "0.69856054", "0.69731814", "0.696396...
0.7677867
0
Generate human readable tool test reports. Creates reports in various formats (HTML, text, markdown) from the structured test output (tool_test_output.json).
def cli(ctx, path, **kwds):
    if not os.path.exists(path):
        io.error("Failed to tool test json file at %s" % path)
        return 1
    test_data = StructuredData(path)
    handle_reports(ctx, test_data.structured_data, kwds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(execution_type = \"Native\", qmetry_data = {}):\r\n try:\r\n prog = dellunit.TestProgram(LOGTIME, execution_type, qmetry_data)\r\n jsonfile = ('report_%s.json'%logtime if (prog.timestamp_report == 1) else 'report.json')\r\n htmlfile = ('report_%s.html'%logtime if (prog.timestamp_re...
[ "0.6395984", "0.63704044", "0.63287634", "0.6298978", "0.62303543", "0.62125254", "0.6170875", "0.6157971", "0.61302793", "0.6079714", "0.6063422", "0.60530716", "0.60408163", "0.60160196", "0.59748095", "0.5972561", "0.59694135", "0.59077525", "0.58958656", "0.58577126", "0....
0.6617242
0
Load a dataset into the Dataset object from the self.input_file
def _load_data(self): # This allows a simulated dataset to use the same constructor. if self.input_file is None: return logging.info(f"Loading data from file {self.input_file}") # Load the dataset. if os.path.isdir(self.input_file): self.data = get_matr...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation datasets\...
[ "0.70277363", "0.69790083", "0.6945343", "0.6936825", "0.6823342", "0.6752945", "0.6737796", "0.67372847", "0.6703015", "0.6688588", "0.66557074", "0.65531415", "0.6552921", "0.64937913", "0.64900523", "0.64452887", "0.64417726", "0.6441511", "0.6436873", "0.64123297", "0.640...
0.7132409
0
Trim the dataset for inference, choosing barcodes and genes to use. Sets the values of self.analyzed_barcode_inds and self.empty_barcode_inds, which are used throughout training.
def _trim_dataset_for_analysis(self, low_UMI_count_cutoff: int = 30, num_transition_barcodes: Union[int, None] = 7000, gene_blacklist: List[int] = []): logging.info("Trimming dataset for inference.") ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trim(self, trim_samples):\n n = len(self.timestamps)\n self.timestamps = self.timestamps[:n - trim_samples]\n self.labels = self.labels[:n - trim_samples]\n self.emg = [x[:n - trim_samples] for x in self.emg]\n ...
[ "0.5931867", "0.5751522", "0.56039447", "0.55723774", "0.5401302", "0.5370132", "0.5366945", "0.53542274", "0.5294477", "0.52587545", "0.5251565", "0.52389866", "0.52253807", "0.5208275", "0.51975644", "0.5188658", "0.514943", "0.51444215", "0.51317275", "0.51254904", "0.5110...
0.7511484
0
Estimate relevant priors, populating fields in the self.priors dict.
def _estimate_priors(self): # Estimate the log UMI count turning point between cells and 'empties'. self.priors['log_counts_crossover'] = \ np.mean(np.log1p([self.priors['cell_counts'], self.priors['empty_counts']])).item() # Estimate prior for the sca...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_priors(self, prior_U, method='inverse'):\n if self.Pchance is None:\n raise IOError(\"Set Pchance before calling this method\")\n\n # TODO -- Move this into Bayesian\n if prior_U < 0.:\n self.prior_U = np.product(self.candidates['P_c'])\n else:\n ...
[ "0.6760685", "0.6671473", "0.6519258", "0.6318768", "0.62852085", "0.62144035", "0.6048549", "0.60283566", "0.5965118", "0.59049845", "0.5749101", "0.56592935", "0.560779", "0.560301", "0.55439323", "0.5540224", "0.5506283", "0.54675066", "0.54665715", "0.5448176", "0.5443485...
0.75927603
0
Get the count matrix, trimmed if trimming has occurred.
def get_count_matrix(self) -> sp.csr.csr_matrix: if self.is_trimmed: # Return the count matrix for selected barcodes and genes. trimmed_bc_matrix = self.data['matrix'][self.analyzed_barcode_inds, :].tocsc() trimmed_matrix ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count_matrix_empties(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'][self.empty_barcode_inds,\n :].tocsc()\n ...
[ "0.7883857", "0.695453", "0.5581138", "0.5525962", "0.5487828", "0.54187196", "0.53689903", "0.5367731", "0.5279583", "0.52316165", "0.5052875", "0.5052875", "0.5050538", "0.5036658", "0.50318664", "0.498184", "0.49812087", "0.49420154", "0.49123126", "0.4906234", "0.4897239"...
0.7683302
1
Get the count matrix for empty drops, trimmed if trimming has occurred.
def get_count_matrix_empties(self) -> sp.csr.csr_matrix: if self.is_trimmed: # Return the count matrix for selected barcodes and genes. trimmed_bc_matrix = self.data['matrix'][self.empty_barcode_inds, :].tocsc() trimmed_ma...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count_matrix(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'][self.analyzed_barcode_inds,\n :].tocsc()\n tri...
[ "0.6858056", "0.657635", "0.6126679", "0.56833136", "0.55564344", "0.5550837", "0.553804", "0.54982156", "0.54872674", "0.5481251", "0.5453243", "0.5403643", "0.5371395", "0.53526986", "0.5337087", "0.53238875", "0.5268101", "0.5231699", "0.5193867", "0.51901627", "0.51793355...
0.7904562
0
Get the count matrix, trimming only genes, not barcodes.
def get_count_matrix_all_barcodes(self) -> sp.csr.csr_matrix: if self.is_trimmed: # Return the count matrix for selected barcodes and genes. trimmed_bc_matrix = self.data['matrix'].tocsc() trimmed_matrix = trimmed_bc_matrix[:, self.analyzed_gene_inds].tocsr() #...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_count_matrix(self) -> sp.csr.csr_matrix:\n\n if self.is_trimmed:\n\n # Return the count matrix for selected barcodes and genes.\n trimmed_bc_matrix = self.data['matrix'][self.analyzed_barcode_inds,\n :].tocsc()\n tri...
[ "0.7951889", "0.76719147", "0.65599716", "0.6110267", "0.6110267", "0.60370463", "0.6031134", "0.57888055", "0.5784263", "0.57638943", "0.56966496", "0.569527", "0.5555884", "0.555468", "0.55300796", "0.5523527", "0.5501671", "0.54715586", "0.5467564", "0.54530025", "0.544283...
0.80284107
0
Load a count matrix from an mtx directory from CellRanger's output.
def get_matrix_from_mtx(filedir: str) -> Dict[str, Union[sp.csr.csr_matrix, List[np.ndarray], np.ndarray]]: assert os.path.isdir(filedir), "The directory {filedir} i...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cellranger_counts(fname, genome=\"matrix\"):\n with tables.open_file(fname, \"r\") as f:\n try:\n group = f.get_node(f.root, genome)\n except tables.NoSuchNodeError:\n print(\"That genome does not exist in this file.\")\n return None\n gene_ids = getattr...
[ "0.5822121", "0.5723406", "0.55659884", "0.55165875", "0.5495147", "0.5473045", "0.5368484", "0.52734065", "0.5208827", "0.51845884", "0.5170246", "0.51477873", "0.50873554", "0.50840425", "0.50825506", "0.50770015", "0.5061036", "0.50164026", "0.5006129", "0.500322", "0.5000...
0.6061012
0
Load a count matrix from an h5 file from CellRanger's output. The file needs to be a _raw_gene_bc_matrices_h5.h5 file. This function returns a dictionary that includes the count matrix, the gene names (which correspond to columns of the count matrix), and the barcodes (which correspond to rows of the count matrix). Thi...
def get_matrix_from_h5(filename: str) -> Dict[str, Union[sp.csr.csr_matrix, List[np.ndarray], np.ndarray]]: # try: with tables.open_file(filename, 'r') as f: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_matrix_to_h5(output_file: str,\n gene_names: np.ndarray,\n barcodes: np.ndarray,\n inferred_count_matrix: sp.csc.csc_matrix,\n cell_barcode_inds: Union[np.ndarray, None] = None,\n ambient_express...
[ "0.6468086", "0.62510335", "0.56875813", "0.5545488", "0.5397619", "0.5346175", "0.5329193", "0.53139323", "0.5265356", "0.5219594", "0.5218226", "0.51863855", "0.5039143", "0.50300133", "0.5007558", "0.49996474", "0.49768156", "0.49536636", "0.49511558", "0.49158347", "0.491...
0.6791512
0
Write count matrix data to output HDF5 file using CellRanger format.
def write_matrix_to_h5(output_file: str, gene_names: np.ndarray, barcodes: np.ndarray, inferred_count_matrix: sp.csc.csc_matrix, cell_barcode_inds: Union[np.ndarray, None] = None, ambient_expression: Union...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_count_matrix(pb_count, outfile, first=1):\n # write the header (PB names)\n print(\" \" + \"\".join([\"%6s\" % name for name in NAMES]), file=outfile)\n # write the data table\n for residue_idx, residue_pb in enumerate(pb_count):\n print(\"%-5d\" % (residue_idx + first) +\n ...
[ "0.62956023", "0.5863224", "0.5714787", "0.56640995", "0.5645576", "0.5634674", "0.5626462", "0.55892056", "0.5532122", "0.5529419", "0.5496241", "0.5484333", "0.545105", "0.54503447", "0.54497975", "0.5441969", "0.5421193", "0.5383405", "0.53695124", "0.5364304", "0.5358697"...
0.67506576
0
Compute an estimate of reasonable priors on cell size and ambient size. Given a dataset (scipy.sparse.csr matrix of counts where rows are barcodes and columns are genes), and an expected cell count, compute an estimate of reasonable priors on cell size and ambient count size. This is done by a series of heuristics.
def get_d_priors_from_dataset(dataset: Dataset) -> Tuple[float, float]: # Count the total unique UMIs per barcode (summing after transforming). transformed_counts = \ np.array(dataset.transformation.transform(dataset.data['matrix'] [:, dataset.analyzed_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimate_cell_count_from_dataset(dataset: Dataset) -> int:\n\n # If it's a model that does not model empty droplets, the dataset is cells.\n # NOTE: this is overridden if --expected_cells is specified.\n if dataset.model_name == 'simple':\n return dataset.data['matrix'].shape[0]\n\n # Count ...
[ "0.620238", "0.54556507", "0.5310594", "0.5288271", "0.5102519", "0.509151", "0.509151", "0.5053118", "0.5047448", "0.504083", "0.50119156", "0.50027806", "0.5002605", "0.4951536", "0.4927784", "0.49217126", "0.49094176", "0.48926193", "0.4889214", "0.48789653", "0.48736045",...
0.57310665
1
Compute an estimate of the number of real cells in a dataset. Given a Dataset, compute an estimate of the number of real cells. This is done CellRanger-style, by taking the barcode with total UMI count at the 99th percentile of the dataset, and then finding the number of barcodes that have greater than 0.9 times that number of UMIs.
def estimate_cell_count_from_dataset(dataset: Dataset) -> int: # If it's a model that does not model empty droplets, the dataset is cells. # NOTE: this is overridden if --expected_cells is specified. if dataset.model_name == 'simple': return dataset.data['matrix'].shape[0] # Count number of UM...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_d_priors_from_dataset(dataset: Dataset) -> Tuple[float, float]:\n\n # Count the total unique UMIs per barcode (summing after transforming).\n transformed_counts = \\\n np.array(dataset.transformation.transform(dataset.data['matrix']\n [:, datase...
[ "0.665363", "0.6281301", "0.59919727", "0.59555984", "0.5892062", "0.5744019", "0.5660073", "0.56421137", "0.5569726", "0.5563326", "0.54325527", "0.5362565", "0.5356441", "0.53364575", "0.53349966", "0.5327423", "0.5314066", "0.5296762", "0.5288975", "0.5220886", "0.519564",...
0.83259344
0
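The heuristic the query above describes is compact enough to sketch standalone; a minimal numpy version, where the function name and the umi_counts argument (per-barcode UMI totals) are assumptions of this sketch, not the dataset's code:

import numpy as np

def estimate_cell_count(umi_counts: np.ndarray) -> int:
    # Reference point: the UMI total at the 99th percentile of all barcodes.
    reference = np.percentile(umi_counts, 99)
    # Cells are barcodes with more than 0.9 times that reference total.
    return int((umi_counts > 0.9 * reference).sum())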
Return this user's nickname. The nickname will be a unique, human readable identifier for this user with respect to this application. It will be an email address for some users, but not all.
def nickname(self):
    if (self.__email and self.__auth_domain and
            self.__email.endswith('@' + self.__auth_domain)):
        suffix_len = len(self.__auth_domain) + 1
        return self.__email[:-suffix_len]
    else:
        return self.__email
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_nickname(self):\n return self._nick", "def nickname(self):\r\n if \"nickname\" in self.data:\r\n return self.data[\"nickname\"]\r\n return None", "def get_nickname_for_user(cls, user):\n return cls.get_account_for_user(user).nickname", "def mail_nickname(self):\n ...
[ "0.8245342", "0.82021475", "0.80784094", "0.7570145", "0.7570145", "0.7427743", "0.7356333", "0.7088285", "0.70873076", "0.6995919", "0.6968879", "0.6951663", "0.6922138", "0.6818108", "0.6752576", "0.6742666", "0.6720184", "0.66955596", "0.6691967", "0.6687338", "0.6678083",...
0.8227201
1
Return this user's auth domain.
def auth_domain(self): return self.__auth_domain
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def domain(self):\n # type: (...) -> AuthDomain\n return self._domain", "def auth_domain(request):\n return request.registry.settings.get('h.auth_domain', request.domain)", "def get_domain(self):\n return self.domain", "def get_domain(self):\n return self._domain", "def get_d...
[ "0.8473398", "0.7908221", "0.7783108", "0.7752245", "0.77312803", "0.7607269", "0.7606187", "0.7574134", "0.7574134", "0.75533456", "0.7479255", "0.7479255", "0.7479255", "0.74182606", "0.7284481", "0.7259606", "0.7259606", "0.7225471", "0.7218564", "0.7094098", "0.7087787", ...
0.8618813
0
Computes the logout URL for this request and specified destination URL.
def create_logout_url(dest_url): req = user_service_pb.StringProto() resp = user_service_pb.StringProto() req.set_value(dest_url) try: apiproxy_stub_map.MakeSyncCall('user', 'CreateLogoutURL', req, resp) except apiproxy_errors.ApplicationError, e: if (e.application_error == user_service_pb.Use...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_logout_url(self, redirect_url=None):\n url = urllib_parse.urljoin(self.server_url, 'logout')\n if redirect_url:\n params = {self.logout_redirect_param_name: redirect_url}\n query = urllib_parse.urlencode(params)\n return ''.join([url, '?', query])\n ret...
[ "0.73377967", "0.6966632", "0.62754434", "0.62183374", "0.6205199", "0.61692834", "0.608848", "0.5966493", "0.5965179", "0.591183", "0.58448863", "0.58086485", "0.5677516", "0.56206053", "0.5591618", "0.55612814", "0.5513131", "0.5512236", "0.5501371", "0.54162616", "0.539065...
0.69746566
1
Get an instance of the SQL connection object.
def get_sql_connection(self): return self.sql
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_conn(self):\n return self.get_connection(self.mssql_conn_id)", "def get_connection(cls):\n return cls.database.connection", "def _get_connection(self) -> Connection:\n # TODO(101) is there a problem with having just one db connection?\n # Will this cause bugs with failed com...
[ "0.73680174", "0.7256587", "0.69811875", "0.69683695", "0.69191766", "0.6882339", "0.6825282", "0.6809682", "0.6749888", "0.669564", "0.669564", "0.66404516", "0.66221774", "0.66088647", "0.660717", "0.65949684", "0.6583066", "0.6569411", "0.65676725", "0.656309", "0.65545565...
0.73205405
1
Initialize rpki-rtr database tables. Three tables are created: one that keeps track of the rpki-rtr session, one that keeps track of the prefixes associated with each active rpki-rtr session, and finally one for storing router key information.
def init_rpki_rtr_tables(self): cur = self.sql.cursor() cur.execute("PRAGMA foreign_keys = on") cur.execute(''' CREATE TABLE cache ( cache_id INTEGER PRIMARY KEY NOT NULL, host TEXT NOT NULL, port ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_rib_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE rtr_cache (\n rtr_id INTEGER PRIMARY KEY NOT NULL,\n device TEXT NOT NULL,\n ...
[ "0.7012303", "0.6316942", "0.62167615", "0.6146707", "0.6076793", "0.60717803", "0.60717803", "0.606234", "0.60622776", "0.6019015", "0.59543115", "0.592308", "0.5921048", "0.5912806", "0.588549", "0.5882846", "0.5846708", "0.58415794", "0.5828501", "0.58272105", "0.57964", ...
0.81615967
0
Initialize RIB database tables. Two tables are created. One stores the rtr ID associated with a given device that is to be queried, while the second stores different route attributes gleaned from the 'sh ip bgp' command.
def init_rib_tables(self): cur = self.sql.cursor() cur.execute("PRAGMA foreign_keys = on") cur.execute(''' CREATE TABLE rtr_cache ( rtr_id INTEGER PRIMARY KEY NOT NULL, device TEXT NOT NULL, rtrupdt ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_rpki_rtr_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE cache (\n cache_id INTEGER PRIMARY KEY NOT NULL,\n host TEXT NOT NULL,\n ...
[ "0.67964107", "0.5831904", "0.578942", "0.5739625", "0.57393616", "0.5728153", "0.56974334", "0.5678885", "0.5677351", "0.56728756", "0.5611789", "0.5607463", "0.56032944", "0.5599525", "0.5599525", "0.5587869", "0.5570944", "0.5511227", "0.55088484", "0.55059016", "0.5493284...
0.8008855
0
Reset an existing rpki-rtr session. Reset any existing rpki-rtr session for the given host and port.
def reset_rpki_rtr_session(self, host, port):
    cur = self.sql.cursor()
    cur.execute("PRAGMA foreign_keys = on")
    cur.execute("DELETE FROM cache WHERE host = ? and port = ?", (host, port))
    self.sql.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_session(self):\n if not self.is_open():\n return\n if self._active_result is not None:\n self._active_result.fetch_all()\n try:\n self.keep_open = self.protocol.send_reset(self.keep_open)\n except (InterfaceError, OperationalError) as err:\n ...
[ "0.6069027", "0.5886393", "0.5725652", "0.5698956", "0.5672209", "0.5669425", "0.56320226", "0.5576333", "0.5535342", "0.5509324", "0.5509324", "0.5509324", "0.5509324", "0.5490396", "0.5471192", "0.54675925", "0.54552346", "0.5447819", "0.5424177", "0.5382374", "0.5368454", ...
0.72867167
0
Reset a RIB query session. Delete any state corresponding to a RIB query that was issued for a particular router device.
def reset_rtr_rib_session(self, device):
    cur = self.sql.cursor()
    cur.execute("PRAGMA foreign_keys = on")
    cur.execute("DELETE FROM rtr_cache WHERE device = ?", (device, ))
    self.sql.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\r\n _debug('simq03b_api.reset')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\r\n _debug('api.reset()')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", ...
[ "0.6432845", "0.6182595", "0.6182595", "0.6182595", "0.6182595", "0.6125877", "0.6085551", "0.60491025", "0.5917269", "0.5793116", "0.5774083", "0.57254076", "0.5672482", "0.5669636", "0.5629088", "0.5625022", "0.5625022", "0.56182265", "0.5617173", "0.56071633", "0.56038904"...
0.671066
0
Fetch RIB information in conjunction with RPKI information. Construct a database 'join' of the contents of currently available rpki-rtr information with currently available RIB information. The join is constructed over matching prefix ranges; that is, for cases where the rpki-rtr ROA covers the route prefix in the RIB.
def get_rpki_rib(self): cur = self.sql.cursor() cur.execute("SELECT DISTINCT host, port, device, idx, asn, prefix, prefixlen, " " max_prefixlen, status, pfx, pfxlen, pfxstr_min, pfxstr_max, " " nexthop, metric, locpref, weight, pathbutone, orig_asn, route_orig...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_rib_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE rtr_cache (\n rtr_id INTEGER PRIMARY KEY NOT NULL,\n device TEXT NOT NULL,\n ...
[ "0.54669535", "0.50892144", "0.50018066", "0.48029062", "0.47392702", "0.47358343", "0.46948", "0.46539682", "0.46516523", "0.46367228", "0.46123815", "0.4599465", "0.45961982", "0.45904016", "0.45855385", "0.45748854", "0.4573801", "0.4570454", "0.45699817", "0.4546437", "0....
0.73909044
0
Retry until f succeeds or an exception that isn't caused by EINTR occurs.
def until_not_interrupted(f, *args, **kw):
    while True:
        try:
            return f(*args, **kw)
        except (IOError, OSError) as e:
            if e.args[0] == errno.EINTR:
                continue
            raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _eintr_retry(func, *args):\n while True:\n try:\n return func(*args)\n except (OSError, select.error) as e:\n if e.args[0] != errno.EINTR:\n raise", "def _eintr_retry(func, *args):\n\twhile True:\n\t\ttry:\n\t\t\treturn func(*args)\n\t\texcept (OSError, s...
[ "0.7447151", "0.7388192", "0.679798", "0.6735481", "0.6682818", "0.64191544", "0.6375819", "0.62591887", "0.6195957", "0.60207826", "0.6017203", "0.5943427", "0.59385973", "0.5765465", "0.57129365", "0.5606272", "0.55996346", "0.5597502", "0.55897963", "0.55773646", "0.557329...
0.76732606
0
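A short usage sketch for the retry helper above, assuming it is in scope (the pipe is just an illustrative file descriptor):

import os

r, w = os.pipe()
os.write(w, b"hello")
# os.read can fail with EINTR if a signal arrives mid-call; the wrapper retries it.
print(until_not_interrupted(os.read, r, 5))  # b'hello'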
Evaluates the Stumpff function C(z) according to Equation 3.53
def stump_C(z):
    if z > 0:
        return (1 - cos(sqrt(z))) / z
    elif z < 0:
        return (cosh(sqrt(-z)) - 1) / (-z)
    else:
        return 0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stump_S(z) :\n\n if z > 0:\n sz = sqrt(z) \n return (sz - sin(sz))/pow(sz,3)\n elif z < 0 :\n s_z = sqrt(-z) \n # According to the equation the denominatori is pow(sqrt(z),3)\n return (sinh(s_z) - s_z)/pow(s_z,3)\n else :\n return 0.1666666666666666", "def ...
[ "0.6817996", "0.6719775", "0.64250463", "0.63308775", "0.6235414", "0.61715096", "0.6168971", "0.61512005", "0.6137466", "0.61325085", "0.61201406", "0.60167384", "0.60125196", "0.59461224", "0.593396", "0.5912131", "0.59098804", "0.5876366", "0.5876366", "0.5868159", "0.5868...
0.70650136
0
Evaluates the Stumpff function S(z) according to Equation 3.52
def stump_S(z):
    if z > 0:
        sz = sqrt(z)
        return (sz - sin(sz)) / pow(sz, 3)
    elif z < 0:
        s_z = sqrt(-z)
        # According to the equation the denominator is pow(sqrt(z), 3)
        return (sinh(s_z) - s_z) / pow(s_z, 3)
    else:
        return 0.1666666666666666
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SFR(self, z):\n sfr = (0.017 + 0.13 * z)/(1 + np.power(z/3.3, 5.3))\n return sfr", "def st(self, sigma, z):\n \n aa = self.params[0]\n a = self.params[1]\n p = self.params[2]\n delta_c = self.params[3]\n \n return aa * sqrt(2.e0*a/pi) * (delta_c/sigma) * exp( (-a*delta_c*de...
[ "0.6983814", "0.62845415", "0.6270047", "0.6215237", "0.60620826", "0.60620826", "0.5966971", "0.59611017", "0.58421904", "0.58372325", "0.5802116", "0.5789399", "0.57810855", "0.57647526", "0.5738204", "0.56974256", "0.5696346", "0.56717175", "0.56093794", "0.55684173", "0.5...
0.7471584
0
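For reference, the two Stumpff functions evaluated by this row and the previous one, in the standard piecewise form the code implements (the queries cite them as Equations 3.52 and 3.53):

$$C(z) = \begin{cases} \dfrac{1-\cos\sqrt{z}}{z} & z > 0 \\[1ex] \dfrac{\cosh\sqrt{-z}-1}{-z} & z < 0 \\[1ex] \dfrac{1}{2} & z = 0 \end{cases} \qquad S(z) = \begin{cases} \dfrac{\sqrt{z}-\sin\sqrt{z}}{(\sqrt{z})^{3}} & z > 0 \\[1ex] \dfrac{\sinh\sqrt{-z}-\sqrt{-z}}{(\sqrt{-z})^{3}} & z < 0 \\[1ex] \dfrac{1}{6} & z = 0 \end{cases}$$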
Send a notification to systemd. state is a string; see
def sd_notify(state, logger, unset_environment=False): addr = os.environ.get('NOTIFY_SOCKET') if addr is None: # not run in a service, just a noop return try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM | socket.SOCK_CLOEXEC) if addr[0] == '@': addr =...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(cls, state):\r\n return PlatformMessage(method=\"__reply__\", kwargs={\"state\": state})", "def notify(self, path, state):\n pass", "def set_state(self, state: bool) -> None:\n payload = self._cfg.state_power_on if state else self._cfg.state_power_off\n command = f\"{COMM...
[ "0.71543527", "0.6743333", "0.62118864", "0.58737814", "0.58208144", "0.57788986", "0.57196826", "0.5703359", "0.5696581", "0.56706965", "0.5624191", "0.5618526", "0.5584987", "0.5573422", "0.5566332", "0.55626523", "0.5525932", "0.5525822", "0.55131394", "0.54965544", "0.543...
0.74761426
0
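The sd_notify document above is truncated; for context, a minimal self-contained sketch of the same notify protocol (error handling and the logger/unset_environment parameters are omitted; this simplification is mine, not the dataset's code):

import os
import socket

def sd_notify(state: str) -> None:
    addr = os.environ.get("NOTIFY_SOCKET")
    if addr is None:
        return  # not launched by systemd; no-op
    if addr.startswith("@"):
        addr = "\0" + addr[1:]  # abstract-namespace socket address
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        sock.sendto(state.encode(), addr)  # e.g. state = "READY=1"
    finally:
        sock.close()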
Return the layout for a popup window. It consists of a title bar showing the `title` text, and a body layout. The window is surrounded by borders.
def create_popup_window(title, body): assert isinstance(title, six.text_type) assert isinstance(body, Container) return HSplit([ VSplit([ Window(width=D.exact(1), height=D.exact(1), content=FillControl(BORDER.TOP_LEFT, token=Token.Window.Border)), TokenLis...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def window(*args, width: int = 200, height: int = 200, autosize: bool = False,\n no_resize: bool = False, no_title_bar: bool = False, no_move: bool = False, no_scrollbar: bool = False,\n no_collapse: bool = False, horizontal_scrollbar: bool = False, no_focus_on_appearing: bool = False,\n ...
[ "0.5706642", "0.5676141", "0.556403", "0.549403", "0.5490062", "0.5477046", "0.5474232", "0.5458338", "0.5445988", "0.54405284", "0.5437478", "0.53826696", "0.5343365", "0.53356165", "0.5334864", "0.5316925", "0.52934974", "0.52924705", "0.52872044", "0.5278744", "0.5267551",...
0.750733
0
Create an `Application` for the history screen. This has to be run as a sub application of `python_input`. When this application runs and returns, it returns the selected lines.
def create_history_application(python_input, original_document): history_mapping = HistoryMapping(python_input.history, original_document) def default_buffer_pos_changed(): """ When the cursor changes in the default buffer. Synchronize with history buffer. """ # Only when this buffer ha...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history(command):\n namespace = app.main(command)\n assert namespace.command == 'h' or namespace.command == \"history\"", "def history_main(args=None, stdin=None):\n hist = builtins.__xonsh_history__\n ns = _hist_parse_args(args)\n if ns:\n _HIST_MAIN_ACTIONS[ns.action](ns, hist)", "d...
[ "0.6645085", "0.6400172", "0.63021773", "0.6147445", "0.6069044", "0.600494", "0.5942434", "0.57836646", "0.5758709", "0.5725173", "0.56594366", "0.5636762", "0.5608617", "0.5605935", "0.5590616", "0.557176", "0.55626994", "0.5554046", "0.55293834", "0.5527644", "0.54987985",...
0.7123608
0
When the cursor changes in the default buffer. Synchronize with history buffer.
def default_buffer_pos_changed(): # Only when this buffer has the focus. if buffer_mapping.focus_stack[-1] == DEFAULT_BUFFER: try: line_no = default_buffer.document.cursor_position_row - \ history_mapping.result_line_offset if line_no < 0:...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def history_buffer_pos_changed():\n # Only when this buffer has the focus.\n if buffer_mapping.focus_stack[-1] == HISTORY_BUFFER:\n line_no = history_buffer.document.cursor_position_row\n\n if line_no in history_mapping.selected_lines:\n default_lineno = sorted(hi...
[ "0.7898416", "0.68018246", "0.62982154", "0.6297645", "0.6279367", "0.6243511", "0.6192357", "0.6175505", "0.61451435", "0.6135683", "0.6130657", "0.6118235", "0.61064404", "0.6103579", "0.606284", "0.60550404", "0.59968597", "0.59947914", "0.5979873", "0.5979297", "0.5941482...
0.7823026
1
When the cursor changes in the history buffer. Synchronize.
def history_buffer_pos_changed(): # Only when this buffer has the focus. if buffer_mapping.focus_stack[-1] == HISTORY_BUFFER: line_no = history_buffer.document.cursor_position_row if line_no in history_mapping.selected_lines: default_lineno = sorted(history_mappi...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot_history_changed(self, history, _dummy):\r\n pass", "def slot_history_changed(self, _sender, _data):\r\n last_candle = self.history.last_candle()\r\n if last_candle:\r\n self.client.history_last_candle = last_candle.tim", "def slot_history_changed(self, _sender, _data):\...
[ "0.71576846", "0.6885786", "0.68556553", "0.66171944", "0.6570386", "0.6502582", "0.6494127", "0.6404436", "0.63687605", "0.63419145", "0.63144755", "0.62715435", "0.62354267", "0.6198887", "0.6184626", "0.61757636", "0.607954", "0.6064025", "0.6047204", "0.598927", "0.598683...
0.73001057
0
Update the fuel level.
def update_fuel_level(self, new_level):
    if new_level <= self.fuel_capacity:
        self.fuel_level = new_level
    else:
        print("The tank can't hold that much!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_fuel_level(self, new_level):\r\n if new_level <= self.fuel_capacity:\r\n self.fuel_level = new_level\r\n else:\r\n print(\"The tank can't hold that much!\")", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity", "def upgrage_level(self):\n ...
[ "0.8415923", "0.6688058", "0.66838443", "0.6623987", "0.6615976", "0.6509965", "0.6509965", "0.6485182", "0.64336383", "0.63165337", "0.6311251", "0.6284206", "0.62757164", "0.62296623", "0.6193649", "0.6104646", "0.6078069", "0.60302275", "0.60189354", "0.59883904", "0.59678...
0.84342533
0
Add fuel to the tank.
def add_fuel(self, amount):
    if (self.fuel_level + amount <= self.fuel_capacity):
        self.fuel_level += amount
        print("Added fuel.")
    else:
        print("The tank won't hold that much.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_fuel(self, amount):\n if (self.fuel_level + amount <= self.fuel_capacity):\n self.fuel_level += amount\n print(\"Added fuel to \"+ self.make + \".\")\n else:\n print(\"The tank won't hold that much.\")", "def fill_tank(self):\r\n self.fuel_level = sel...
[ "0.78126687", "0.70024645", "0.6612156", "0.6612156", "0.65950745", "0.6408894", "0.62886226", "0.6250818", "0.6184884", "0.6095201", "0.6018097", "0.60175323", "0.59844637", "0.5947288", "0.5943111", "0.5851397", "0.5788474", "0.5713844", "0.56987673", "0.56959385", "0.56133...
0.7887067
0
Fully charge the vehicle.
def charge(self):
    self.battery.charge_level = 100
    print("The vehicle is fully charged.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recharge(self):\n self.battery=self.full\n return self", "def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")", "def charge(self):\r\n\r\n self.charge_level = 100\r\n print(\"The battery is fully charged.\")", "def updat...
[ "0.71078575", "0.6817597", "0.6817597", "0.62536246", "0.6069697", "0.6024664", "0.5913083", "0.59018713", "0.5884432", "0.5862029", "0.58491033", "0.58241874", "0.5808281", "0.57654655", "0.57301015", "0.56985885", "0.5672971", "0.56674844", "0.56599915", "0.5631653", "0.563...
0.7852408
0
Find the nearest word from target that satisfies the given condition.
def word_nearest(word_list, target, condition = None, consider_phase = True): if not condition: condition = lambda t: True min_distance = 100 min_word = None def word_distance(word1, word2): position1 = word1.position position2 = word2.position distance = ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_word_to(word, some_words):\n closest = ''\n distance = len(word)\n for target in some_words:\n this_distance = len(set(target) - set(word))\n if this_distance < distance:\n distance = this_distance\n closest = target\n return closest", "def get_closest(...
[ "0.73988307", "0.7295452", "0.727856", "0.6958168", "0.69574374", "0.66173923", "0.6592807", "0.65599316", "0.650897", "0.6483398", "0.64312065", "0.6338591", "0.6320641", "0.62936354", "0.62920094", "0.623869", "0.6105924", "0.610165", "0.6100946", "0.60979575", "0.60804534"...
0.8085685
0
Update keyword_mini in keyword_list
def update_keyword_pack(keyword_list, keyword_mini): #print (keyword_mini) keyword_pack = Get(keyword_mini, -1) def update_weight(prop_tuple, weight, pack=keyword_pack): if prop_tuple in pack: pack[prop_tuple] += weight else: prop, sub_prop, prop_type = prop_tupl...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Adjust_Keyword_List( self ):\r\n listing = list( self.system.Get_Term_List( ) ) #get the term list of the current profile\r\n\r\n d=ExpressionAdjust.ExpressionAdjuster( self.root, listing, 'Keywords' )\r\n if(d.return_state==0):\r\n return #Cancel hit\r\n self.system.Se...
[ "0.7004033", "0.6163652", "0.604905", "0.5958035", "0.58509445", "0.58496016", "0.5842247", "0.57948285", "0.5778456", "0.5731634", "0.5702554", "0.5648011", "0.56097215", "0.56031096", "0.55414486", "0.5538705", "0.5530985", "0.55123645", "0.54711574", "0.54400563", "0.54375...
0.7432671
0
List of properties in a specific unit input.
def property_list_of_specific_unit(data_list, unit, counter= None, show = False): if not isinstance(counter, Counter): counter = Counter() total_list = check_from_specific_unit(data_list, unit, show) #print ('t', total_list) #prop_list = [data['Property'] for data in total_list] pro...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_property_list(self,filtr):\n\n\n return self.dp.get_property_list(filtr)", "def test_list_properties(self):\n pass", "def get_properties():", "def getPropertiesAll():", "def getListOfUnits(self, *args):\n return _libsbml.UnitDefinition_getListOfUnits(self, *args)", "def get_u...
[ "0.63807905", "0.62724996", "0.6171355", "0.6048868", "0.60106623", "0.5979617", "0.58818513", "0.58047694", "0.5754058", "0.5747146", "0.57451415", "0.5701677", "0.56990063", "0.5683549", "0.56530946", "0.5640377", "0.5630344", "0.5627606", "0.560844", "0.559831", "0.5588633...
0.6809606
0
Returns a function from input to a sensitivity-analysis heatmap; takes in additional keys for "input" and "idx".
def customizable_sensitivity_analysis_fn(input_name, logit_name, network, handlers, inputs, outputs=None, ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sensitivity_analysis_fn(input_name,\n logit_name,\n network,\n handlers,\n inputs=None,\n **kwargs):\n handlers = [\n SensitivityAnalysisOutput(idx_input_key=\"idx\",...
[ "0.56896526", "0.5655239", "0.5604212", "0.5551312", "0.5425711", "0.53594214", "0.5334569", "0.5329085", "0.52712095", "0.52428246", "0.5231885", "0.5228857", "0.52231294", "0.52001", "0.51708233", "0.51637757", "0.5150478", "0.51333094", "0.51168174", "0.5115549", "0.510408...
0.5841512
0
Create a line graph of the rate over time for flow 1 and 2.
def plot(self): clf() # Plot rate for flow 1 x = [] y = [] i = 0 maxY = None while i < self.max_time: bytes = 0 # loop through array of data and find relevant data for (t,sequence,size) in self.data1: if (t >= i - 1) and (t <= i): bytes += size # compute interval left = i - 1 i...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_rates(self, title, ymax=None, linewidth=1):\n line_generator = self.line_gen()\n for flowID, rate_points in self.flow_rates.items():\n times = [point[0] for point in rate_points]\n rates = [point[1] for point in rate_points]\n if flowID is not None:\n ...
[ "0.6258836", "0.62256557", "0.6003119", "0.5807549", "0.5559554", "0.5534315", "0.55321395", "0.5495035", "0.5476266", "0.5467404", "0.5456977", "0.5430374", "0.5430327", "0.5418864", "0.53900796", "0.53784734", "0.53725505", "0.53439164", "0.53158927", "0.531341", "0.5303742...
0.7296539
0
Cleanup Xcode cache and derived data
def cache_clean(): run(cmd="rm -rf ~/Library/Developer/Xcode/DerivedData/*")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()", "def _clean_up(self):", "def cleanup():", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def c...
[ "0.7105218", "0.69078654", "0.68643934", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.6836506", "0.68172604", "0.68070483", "0.6770995", "0.6770995", "0.6767389", "0.6767389", "0.6767389",...
0.8461176
0
Removes columns whose percentage of non-missing values falls at or below a given threshold.
def remove_columns_missing_values(df, min_threshold):
    for col in df.columns:
        # Percentage of non-null values in the column.
        rate = sum(df[col].notnull()) / float(len(df)) * 100
        if rate <= min_threshold:
            df = df.drop(columns=col)  # positional axis arg was removed in pandas 2.0
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop_high_nan(df, threshold=0.5):\n n_nans = df.isnull().sum()\n freq_nans = n_nans/float(len(df)) #in percentage\n to_drop = (freq_nans > threshold).values\n columns_drop = df.columns.values[to_drop].tolist()\n return df.drop(columns_drop, axis=1)", "def filterMissings(self, threshold, data)...
[ "0.8112062", "0.76812786", "0.76032734", "0.757241", "0.75673884", "0.73716843", "0.7303441", "0.7258961", "0.7185174", "0.6898564", "0.67759484", "0.67757314", "0.6744774", "0.67218643", "0.6713158", "0.66977847", "0.6672517", "0.66387624", "0.65781564", "0.6426446", "0.6406...
0.8181951
0
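A quick usage sketch of the function above (the threshold is a percentage, matching the rate computation):

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [None, None, None, 4.0]})
# Column "b" is only 25% populated, so a threshold of 50 drops it.
print(remove_columns_missing_values(df, min_threshold=50).columns.tolist())  # ['a']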
Wrap function pointers in C/C++ to Python functions.
def cython_c2py_conv_function_pointer(t_, ts): t = t_[1] argnames = [] argdecls = [] argbodys = [] argrtns = [] for n, argt in t[1][2]: argnames.append(n) decl, body, rtn = ts.cython_py2c(n, argt, proxy_name="c_" + n) argdecls += decl.s...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_c_function_stubs(self):\n fn =\\\n\"\"\"{rettype} {fnname}({args}){{\n {rettype} ret;\n\n ret = {cast_and_deref}___madz_LANG_python_OUTPUT.{nodename}({argnames});\n\n return ret;\n}}\n\n\"\"\"\n fn_no_return =\\\n\"\"\"{rettype} {fnname}({args}){{\n ___madz_LANG_python_OUTPUT.{no...
[ "0.6707142", "0.62710714", "0.61361706", "0.6121755", "0.6089381", "0.5960903", "0.5935476", "0.592127", "0.5906617", "0.5896414", "0.5892851", "0.5890732", "0.5884935", "0.58657503", "0.5816589", "0.58025694", "0.57851225", "0.57759845", "0.57509685", "0.57376343", "0.566366...
0.7016661
0
Given a batch of logits, return a one-hot sample using an epsilon-greedy strategy (based on the given epsilon).
def onehot_from_logits(logits, eps=0.0): # get best (according to current policy) actions in one-hot form argmax_acs = (logits == logits.max(1, keepdim=True)[0]).float() if eps == 0.0: return argmax_acs # get random actions in one-hot form rand_acs = torch.tensor(torch.eye(logits.shape[1])[[...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onehot(rating):\n vec = [0 for i in range(5)]\n vec[rating - 1] = 1\n return np.array(vec)", "def gumbel_softmax_sample(logits, temperature):\n y = logits + sample_gumbel(tf.shape(logits))\n return tf.nn.softmax( y / temperature)", "def gumbel_softmax_sample(logits, temperature):\n y = logits +...
[ "0.6337137", "0.6137332", "0.6137332", "0.60971755", "0.6095149", "0.60834587", "0.6070503", "0.6048451", "0.6005317", "0.59944475", "0.59887624", "0.5985697", "0.5955396", "0.595437", "0.59288865", "0.5925413", "0.59172916", "0.591372", "0.59115016", "0.591121", "0.59053683"...
0.6582483
0
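The document above is cut off; for context, a self-contained sketch of epsilon-greedy one-hot sampling in PyTorch (the function name and tie-breaking via argmax are choices of this sketch, not the dataset's code):

import torch

def onehot_epsilon_greedy(logits: torch.Tensor, eps: float = 0.0) -> torch.Tensor:
    n_batch, n_actions = logits.shape
    # Greedy one-hot: 1.0 at each row's argmax.
    greedy = torch.zeros_like(logits)
    greedy.scatter_(1, logits.argmax(dim=1, keepdim=True), 1.0)
    if eps == 0.0:
        return greedy
    # Uniform-random one-hot actions for exploration.
    random_onehot = torch.zeros_like(logits)
    random_onehot.scatter_(1, torch.randint(n_actions, (n_batch, 1), device=logits.device), 1.0)
    # With probability eps per row, take the random action instead of the greedy one.
    explore = (torch.rand(n_batch, 1, device=logits.device) < eps).float()
    return explore * random_onehot + (1.0 - explore) * greedy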
Perform DDPG soft update (move target params toward source based on weight factor tau)
def soft_update(target, source, tau):
    for target_param, param in zip(target.parameters(), source.parameters()):
        target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def soft_update(source_net, target_net, tau):\n for target_param, param in zip(target_net.parameters(), source_net.parameters()):\n target_param.data.copy_(\n target_param.data * (1.0 - tau) + param.data * tau\n )", "def get_soft_target_model_updates(target, source, tau):\n target_...
[ "0.7275282", "0.69240314", "0.6855993", "0.67452204", "0.67254907", "0.67084914", "0.66630906", "0.6627795", "0.6586101", "0.6532605", "0.65074867", "0.64917654", "0.63443524", "0.63400596", "0.6314455", "0.6290518", "0.62819207", "0.6264888", "0.6256692", "0.6256692", "0.622...
0.7679478
0
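The loop in the document above is the standard Polyak averaging rule:

$$\theta_{\text{target}} \leftarrow (1-\tau)\,\theta_{\text{target}} + \tau\,\theta_{\text{source}}$$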
Check if n_samples samples can be sampled from the buffer.
def can_sample(self, n_samples): return len(self) >= n_samples
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_sample(self, n_samples):\n return self.replay_buffer.can_sample(n_samples)", "def can_sample(self, batch_size):\n return batch_size + 1 <= self.num_in_buffer", "def has_nsamples(results, n):\n n_rates = results.growth_rates.sample_id.nunique()\n n_exchanges = results.exchanges.sampl...
[ "0.8454722", "0.75795233", "0.640667", "0.6323213", "0.6323213", "0.6323213", "0.6122864", "0.6073661", "0.6004948", "0.5970521", "0.5948469", "0.5939403", "0.58542854", "0.5847299", "0.5821416", "0.57972753", "0.578739", "0.5759048", "0.5714147", "0.5691538", "0.5649599", ...
0.8536972
0
Check whether the replay buffer is full or not.
def is_full(self): return len(self) == self.buffer_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bufferIsFull(self):\n return len(self.buffer) == self.bufferSize", "def isFull(self):\n return self.__size == len(self.__buffer)", "def full(self):\n return len(self.future_buffer) == self.CAPACITY", "def isFull(self):\r\n if (len(self.queue) == self.maxlen):\r\n re...
[ "0.77881503", "0.74171346", "0.7156626", "0.7121728", "0.7075783", "0.7043759", "0.69657725", "0.69448876", "0.69159514", "0.69134", "0.6893223", "0.680426", "0.6794952", "0.6730122", "0.6697695", "0.66528744", "0.6636179", "0.66332513", "0.6545874", "0.6479786", "0.6479786",...
0.7575566
1
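The two predicates in this row and the previous one sit naturally on a small ring buffer; a hypothetical minimal version for context (names and structure are mine, not the dataset's):

import random
from collections import deque

class ReplayBuffer:
    def __init__(self, buffer_size):
        self.buffer_size = buffer_size
        self._storage = deque(maxlen=buffer_size)  # oldest entry evicted when full

    def __len__(self):
        return len(self._storage)

    def add(self, transition):
        self._storage.append(transition)

    def can_sample(self, n_samples):
        return len(self) >= n_samples

    def is_full(self):
        return len(self) == self.buffer_size

    def sample(self, n_samples):
        return random.sample(list(self._storage), n_samples)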
Method to get the main event in a list of event positions in a sentence. When there is a tie, the first one is chosen. Because an event accounts for only one token, we can use leaf_treeposition to get the distance from the root to that leaf.
def get_highest_event (self, list_of_event_pos): highest = None highest_distance = 100 part_of_speech_list = self.tree.pos() for i in xrange(len(list_of_event_pos)): event_pos = list_of_event_pos[i] try: distance = len(self.tree.leaf_treep...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEventLocation():\n global currentToken\n global currentChunk\n global currentSentence\n if currentSentence is not None:\n if currentToken is not None and currentToken.isAdjToken(): #if not currentChunk\n position = currentToken.position\n logger.debug(\"Event positio...
[ "0.64501244", "0.6028287", "0.58297163", "0.5478848", "0.54352885", "0.5413929", "0.540867", "0.5355675", "0.5355121", "0.5337441", "0.53225654", "0.5274684", "0.52629817", "0.5251305", "0.5098605", "0.5098509", "0.5095546", "0.5054653", "0.5035906", "0.50138444", "0.49922046...
0.6938581
0
Get the path in the syntactic tree between two extents. The particular purpose of the method in the task is to find the minimum tree that connects two events, removing the POS and LEMMA of a single token entity, removing the internal structure of a multiple token entity (consider the multiple token entity as one node i...
def get_pruned_tree_path (self, index_1_beg, index_1_end, index_2_beg, index_2_end, in_between_children = False ): tempo_2_beg = index_2_beg tempo_2_end = index_2_end if index_1_beg >= index_2_end: index_2_beg = index_1_beg index_2_end = index_1...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shortest_path(start, end):\n\troot1=node(start)\n\troot2=node(end)\n\tarr1=[[] for wqw in range(8)]\n\tarr2=[[] for qwq in range(8)]\n\tser1=[[] for lp in range(100000)]\n\tser2=[[] for lp in range(100000)]\n\tarr1[0].append(root1)\n\tarr2[0].append(root2)\n\tser1[(hash(start))%100000].append(start)\n\tser2[(h...
[ "0.5908375", "0.58959204", "0.5705175", "0.5621129", "0.5587853", "0.5524062", "0.55239725", "0.54976195", "0.5487586", "0.54690427", "0.54500455", "0.54114205", "0.5408605", "0.53920585", "0.5367605", "0.53577787", "0.5334908", "0.53292596", "0.5320974", "0.5301725", "0.5288...
0.6095896
0
Prune the tree that includes the begin_index and the end_index so that it doesn't include leaves outside of the range limited by begin_index and end_index
def prune_tree( cls, tree, begin_index, end_index ): begin_path = tree.leaf_treeposition(begin_index) end_path = tree.leaf_treeposition(end_index) current_node = tree[begin_path[:-1]] end_node = tree[end_path[:-1]] new_tree = ParentedTree('(' + tree.node + ')')...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtask_prune(tree):\n\n for st in tree.subtrees():\n if \"start\" in st.label():\n new_label = st.label().split(\"start\")[0] + \"start\"\n st.set_label(new_label)\n if \"end\" in st.label():\n new_label = st.label().split(\"end\")[0] + \"end\"\n st...
[ "0.6238034", "0.6027081", "0.5997571", "0.5995651", "0.59796435", "0.5975343", "0.5934116", "0.56413025", "0.5607906", "0.5580411", "0.55378866", "0.5519736", "0.5518996", "0.5501341", "0.5469353", "0.54643464", "0.54435647", "0.5434015", "0.5430153", "0.5422087", "0.5415858"...
0.76061183
0
We generate the state composed of distGhost = distance to the nearest ghost // 0 -> near (less than 3 units), 1 -> medium (between 3 and 7), 2 -> far (more than 7 units) isParedEast = (0 -> no wall in the east direction, 1 -> wall) isParedWest isParedNorth isParedShouth directionGhost = direction in which ...
def generateState(self, gameState): state = [None, None, None, None, None, None] #Calculamos la distancia al fantasma mas cercano distGhosts = gameState.data.ghostDistances nearest = 100000 for i in distGhosts: if i < nearest and i is not None: neares...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formation_dir(self, id):\n player = self.players[id]\n min_dist = 2\n\n if abs(player.pos.x - FORM[self.formation][self.dir][id]['coord'].x) <= min_dist and abs(player.pos.y - FORM[self.formation][self.dir][id]['coord'].y) <= min_dist:\n player.walk_count = 0\n return...
[ "0.6291606", "0.6226369", "0.6037247", "0.58772886", "0.5799525", "0.57812417", "0.5745552", "0.5744289", "0.5727976", "0.5722428", "0.56805277", "0.56567246", "0.5634245", "0.55896896", "0.5572104", "0.55714655", "0.55670816", "0.55526555", "0.5545422", "0.5538702", "0.55377...
0.625722
1
Syncs all incident workflows daily.
def daily_sync_workflow(db_session: SessionLocal, project: Project): workflow_plugin = plugin_service.get_active_instance( db_session=db_session, project_id=project.id, plugin_type="workflow" ) if not workflow_plugin: log.warning(f"No workflow plugin is enabled. ProjectId: {project.id}") ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_entries():\n import time\n\n while True:\n try:\n update_pending_scripts(settings['api_handler'])\n except:\n logging.exception(\"Error occured during synchronisation\")\n time.sleep(60)", "def sync(self):\n self._start_slow_sync()\n self._a...
[ "0.57603675", "0.57562184", "0.57505476", "0.55784416", "0.5569469", "0.55263174", "0.54931366", "0.5471725", "0.5429856", "0.54000676", "0.5378067", "0.53648674", "0.5315507", "0.52774256", "0.5255891", "0.52555865", "0.5242909", "0.5198132", "0.5193238", "0.51775247", "0.51...
0.7605369
0
Method to launch the camera to capture a new user's image
def launch_webcam(self): global face_encoding # Call the image_import.add_user method which launches the camera and # returns the face encodings if a new picture is taken face_encoding = image_import.add_user() # Check if a new image was returned from the add_user method ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_picture(self):\n self.drone.take_picture()", "def take_picture(self):\n self.drone.take_picture()", "def capture(self):\n current_time=time.strftime('%Y%m%d-%H%M%S')\n self.filepath=f\"files/{current_time}.png\"\n self.ids.camera.export_to_png(self.filepath)\n ...
[ "0.70523137", "0.70523137", "0.70149106", "0.69485945", "0.6907842", "0.6902539", "0.6862673", "0.68337494", "0.6800053", "0.6798291", "0.6715196", "0.6605783", "0.6555237", "0.6545424", "0.65297544", "0.65204567", "0.6518754", "0.6515041", "0.6500687", "0.6491813", "0.648151...
0.73050624
0
Expand config file path and switch default path if OS is windows.
def expand_config_path(path): if path == DEFAULT_LINUX_PATH and os.name == "nt": path = DEFAULT_WINDOWS_PATH return os.path.expanduser(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setdefault_path(envvar, default):\n if envvar in os.environ:\n return _strip_slash(os.environ[envvar])\n value = os.environ[envvar] = _strip_slash(default() if callable(default) else default)\n return value", "def _get_config_path():\n return os.path.join(os.path.expanduser...
[ "0.62735844", "0.62659293", "0.6136765", "0.61052674", "0.6024256", "0.60104406", "0.5977486", "0.59363705", "0.59358454", "0.5918923", "0.5909648", "0.58821416", "0.5881801", "0.58483577", "0.5808059", "0.58060867", "0.57793844", "0.5775549", "0.5765239", "0.571692", "0.5692...
0.82842994
0
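expand_config_path above combines an OS check (os.name is "nt" on Windows) with user-home expansion. A runnable sketch; the two default paths are hypothetical stand-ins for the module-level constants:

    import os

    DEFAULT_LINUX_PATH = "~/.config/pxswitch/config.yml"            # assumed value
    DEFAULT_WINDOWS_PATH = "~/AppData/Roaming/pxswitch/config.yml"  # assumed value

    def expand_config_path(path):
        # Swap the default before expanding "~" when running on Windows
        if path == DEFAULT_LINUX_PATH and os.name == "nt":
            path = DEFAULT_WINDOWS_PATH
        return os.path.expanduser(path)

    print(expand_config_path(DEFAULT_LINUX_PATH))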
Create an instance of the Proxmox API.
def connection_proxmox(config): return ProxmoxAPI( config["host"], user=config["user"], password=config["password"], verify_ssl=config["verify_ssl"], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **kwargs):\n\n builder_kwargs = {}\n\n if \"token\" in kwargs and str(kwargs[\"token\"]) != \"None\":\n\n # If there is a token use it along with the specified proxy details if specified\n config = ApiConfiguration(\n api_url=kwargs.get(\"api_ur...
[ "0.62811315", "0.6246885", "0.6237589", "0.6237589", "0.6151815", "0.61117625", "0.60022", "0.595346", "0.5949549", "0.5943282", "0.590346", "0.5862528", "0.58615875", "0.58429396", "0.5837407", "0.5835226", "0.5828535", "0.5819076", "0.5808679", "0.5795841", "0.5793958", "...
0.694525
0
List resources by pools
def list_resources(px, pools): result = [] for pool in pools: for i in px.pools.get(pool)["members"]: result.append( { "pool": pool, "vmid": i["vmid"], "name": i["name"], "status": i["status"], ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_list_resources(config=DEFAULT_LINUX_PATH):\n config = load_config_file(expand_config_path(config))\n px = connection_proxmox(config[\"proxmox\"])\n try:\n if config[\"pools\"]:\n l, h = list_resources(px, config[\"pools\"])\n return tabulate(l, h)\n else:\n ...
[ "0.7661837", "0.7562814", "0.73195827", "0.71622837", "0.71473014", "0.6853682", "0.6808259", "0.6700441", "0.6636041", "0.6562624", "0.6485822", "0.63801914", "0.6367146", "0.6345225", "0.6284152", "0.62583864", "0.6168935", "0.6146001", "0.6121617", "0.6098948", "0.60779124...
0.8344572
0
Switch a virtual machine to use a PCI resource such as a GPU
def cmd_switch_vm(name, config=DEFAULT_LINUX_PATH): config = load_config_file(expand_config_path(config)) px = connection_proxmox(config["proxmox"]) resources, _ = list_resources(px, config["pools"]) name_int = -1 try: name_int = int(name) except Exception as e: print(e) it...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gpu_device_if_present():\n d = dpctl.SyclDevice(\"gpu,cpu\")\n print(\"Selected \" + (\"GPU\" if d.is_gpu else \"CPU\") + \" device\")", "def create_gpu_device():\n d1 = dpctl.SyclDevice(\"gpu\")\n d2 = dpctl.select_gpu_device()\n assert d1 == d2\n print_device(d1)\n return d1", ...
[ "0.61294276", "0.60951805", "0.60499793", "0.60417134", "0.60086673", "0.59688807", "0.59526575", "0.59526575", "0.5943511", "0.5913145", "0.5844729", "0.58256614", "0.5817914", "0.5816057", "0.5782098", "0.57035667", "0.5691402", "0.5680439", "0.5678658", "0.56265086", "0.56...
0.6416047
0
return the sale price if not None, otherwise return the price
def get_price(self): return self.sale_price if self.sale_price else self.price
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_prices(variation_object,Map):\n\n sale_price = variation_object[Map['itemtype']['fields']['sale_price']]\n price = sale_price\n\n if variation_object[Map['itemtype']['fields']['price']]:\n price = variation_object[Map['itemtype']['fields']['price']]\n\n return price,s...
[ "0.668608", "0.66682285", "0.66242206", "0.6555448", "0.6544855", "0.6537114", "0.65338093", "0.6442291", "0.6424068", "0.6401285", "0.6377285", "0.6377285", "0.6377285", "0.63466", "0.6335578", "0.63162786", "0.6285069", "0.62215525", "0.6178284", "0.6166635", "0.61564565", ...
0.72438234
0
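get_price above is the common sale-price fallback. A minimal sketch on a dataclass; the Product class is hypothetical, only the two field names come from the record:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Product:
        price: float
        sale_price: Optional[float] = None

        def get_price(self):
            # Prefer the sale price when one is set, otherwise the list price
            return self.sale_price if self.sale_price else self.price

    print(Product(price=20.0).get_price())                   # 20.0
    print(Product(price=20.0, sale_price=15.0).get_price())  # 15.0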
LibThread(q) Get representation of cmus' cache On success, puts a dict representing cmus' cache in the Queue object q. This function is intended to be called as a separate thread! Thus, it does little error handling and adjusts the process nice value.
def LibThread(q): os.nice(1) time.sleep(0) library = cmus.library() cache = cmus.Cache() liblist = {} # BUG: newly added tracks don't appear in the listing as they aren't recorded # neither in the cache nor in the library.pl # TODO: report progress values, maybe even return partial results? for t...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_cache(*args, **kws):\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n ...
[ "0.56700563", "0.56635237", "0.553123", "0.5365313", "0.5352841", "0.5348453", "0.52659357", "0.52659", "0.5217064", "0.52100486", "0.5172683", "0.5155207", "0.51392865", "0.5134736", "0.51345325", "0.51182157", "0.5099683", "0.5090276", "0.505746", "0.50211537", "0.5011173",...
0.60629076
0
load_font(fontname, fontsize) -> the appropriate pygame.Font() Searches for the font given by fontname and fontsize at the following
def load_font(fontname, fontsize): # system fonts if pygame.font.get_fonts().count(fontname) == 1: return pygame.font.SysFont(fontname, fontsize) # standard MS fonts if os.path.exists('/usr/share/fonts/truetype/msttcorefonts/'+fontname+'.ttf'): return pygame.font.Font('/usr/share/fonts/truetype/msttcore...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadCustomFont(path,name,size):\n\n fullname = os.path.join(path,name)\n f = pygame.font.Font(fullname,size)\n return f", "def load_font(fontSize):\n f1='/usr/share/fonts/corefonts/arialbd.ttf' \n f2='/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'\n if os.path.isfile(f1): font=ImageFont.tru...
[ "0.803262", "0.7981457", "0.7703131", "0.75344706", "0.74571955", "0.72757286", "0.7114398", "0.701324", "0.6925238", "0.6885135", "0.68326175", "0.6789328", "0.675228", "0.66453326", "0.6518165", "0.6435163", "0.63940454", "0.63801473", "0.63370556", "0.63328856", "0.6306267...
0.8439754
0
checkpoint(name, [first]) print elapsed time since last checkpoint Prints the elapsed time since the last call to checkpoint(), but only if the global variable DEBUG is nonzero.
def checkpoint(name, first = False): global DEBUG if DEBUG: if name != 'first': print 'checkpoint %15s: %f' % ((time.time() - SCRIPT_START) if not first else name, (time.time() - checkpoint.start)) checkpoint.start = time.time()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timeCheckpoint(start_time, name):\n\n time = clock() - start_time\n print(str.capitalize(name) + ': \\t%.3f' % time)\n return clock()", "def checkpoint():", "def checkpoint(self, msg=None, start=False):\n if start:\n start_time = time.perf_counter()\n self.start_time =...
[ "0.7120378", "0.6994583", "0.6789564", "0.6231906", "0.57427275", "0.57129097", "0.5706359", "0.56981295", "0.5689351", "0.5677075", "0.5628234", "0.5567891", "0.5557613", "0.550662", "0.54966205", "0.54919827", "0.54845047", "0.54744905", "0.54601556", "0.5449334", "0.544629...
0.90047246
0
load_svg(filename, size) -> pygame.Surface() Loads the SVG graphic pointed at by filename, rendered at the given size, into a pygame surface
def load_svg(filename, size): try: import rsvg, cairo, array, cStringIO os.stat(filename) except (ImportError, OSError): return pygame.Surface((0,0)) width, height = size csurface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height) context = cairo.Context(csurface) svg = rsvg.Handle(file=fi...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def openSVG(path):\n from xml.dom import minidom\n doc = minidom.parse(open(path))\n svg = doc.getElementsByTagName(\"svg\")[0]\n sizeMatch = re.match(r\"(\\d+) (\\d+) (\\d+) (\\d+)\", svg.getAttribute(\"viewBox\"))\n w, h = int(sizeMatch.group(3)), int(sizeMatch.group(4))\n return svg, w, h", ...
[ "0.6418509", "0.618921", "0.61643314", "0.58858806", "0.5492147", "0.5443901", "0.54266787", "0.53245497", "0.5302827", "0.5243275", "0.5228394", "0.5216369", "0.520375", "0.5180269", "0.5172298", "0.5142751", "0.513323", "0.51182353", "0.5066457", "0.5053668", "0.504966", ...
0.8696741
0
Screen.load_fonts() load all required fonts Loads all fonts defined in Screen.fonts by name and size as pygame.Font objects.
def load_fonts(self): for key, font in enumerate(self.fonts): self.fonts[key]['font'] = load_font(font['name'], font['size']) checkpoint('fonts')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fonts():\r\n return pygame.font.get_fonts()", "def loadCustomFont(path,name,size):\n\n fullname = os.path.join(path,name)\n f = pygame.font.Font(fullname,size)\n return f", "def set_fonts(cls, fonts={}):\n for font in fonts:\n if font not in cls._fonts:\n cl...
[ "0.79402465", "0.69677985", "0.6945662", "0.69180137", "0.6783275", "0.66593045", "0.6651047", "0.65976846", "0.65976846", "0.6560783", "0.6481212", "0.6473694", "0.6451046", "0.641963", "0.6212922", "0.6205454", "0.61772597", "0.61706495", "0.6160708", "0.6146837", "0.612347...
0.82232374
0
Screen.deactivate_screensaver() deactivate a running screensaver
def deactivate_screensaver(self): # TODO: support xscreensaver and maybe others (kscreensaver?) try: try: self.session_bus = dbus.SessionBus() self.scrsvr = self.session_bus.get_object( 'org.gnome.ScreenSaver', '/org/gnome/ScreenSaver' ) self.scrsvr_cook...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activate_screensaver(self):\n # TODO: support xscreensaver and maybe others (kscreensaver?)\n try:\n try:\n self.scrsvr.UnInhibit(self.scrsvr_cookie)\n except dbus.exceptions.DBusException:\n pass\n except (NameError, AttributeError):\n pass", "def quit(self):\n pygam...
[ "0.7479362", "0.63613266", "0.63610584", "0.633496", "0.6135051", "0.5999619", "0.59912586", "0.5932093", "0.58503425", "0.5755786", "0.5723827", "0.5719525", "0.57059157", "0.5682629", "0.5667251", "0.5652389", "0.5652389", "0.5630067", "0.5630067", "0.5618864", "0.5607908",...
0.83224225
0
Screen.activate_screensaver() activate screensaver Reactivates a previously deactivated screensaver.
def activate_screensaver(self): # TODO: support xscreensaver and maybe others (kscreensaver?) try: try: self.scrsvr.UnInhibit(self.scrsvr_cookie) except dbus.exceptions.DBusException: pass except (NameError, AttributeError): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deactivate_screensaver(self):\n # TODO: support xscreensaver and maybe others (kscreensaver?)\n try:\n try:\n self.session_bus = dbus.SessionBus()\n self.scrsvr = self.session_bus.get_object(\n 'org.gnome.ScreenSaver',\n '/org/gnome/ScreenSaver'\n )\n se...
[ "0.71691304", "0.6425529", "0.57666975", "0.5484504", "0.5466138", "0.53428775", "0.52931684", "0.5235076", "0.51869184", "0.5186303", "0.5072131", "0.50614804", "0.5037811", "0.5016691", "0.5016691", "0.5003856", "0.49859083", "0.49517408", "0.4933769", "0.48208684", "0.4800...
0.82880116
0
compute weights for a nearest-neighbor sampling between low and high resolution grids llat,llon specify the lat and longitude grids on the low resolution grid hlat,hlon specify the lat and longitude grids on the high resolution grid returns a low-res grid whose values are the number of high-res points per grid cell
def gen_weights(hlat,hlon,llat,llon,mask=None,verbose=False): if len(llat.shape)==1: llon,llat=np.meshgrid(llon,llat) if len(hlat.shape)==1: hlon,hlat=np.meshgrid(hlon,hlat) output=np.zeros(llon.shape) if mask==None: mask=np.ones(hlat.shape,dtype=bool) search=2 if v...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lat_weights_regular_grid(lat): \n dlat = np.abs(np.diff(lat))\n np.testing.assert_almost_equal(dlat, dlat[0])\n w = np.abs(np.sin(np.radians(lat + dlat[0] / 2.)) - np.sin(np.radians(lat - dlat[0] / 2.)))\n\n if np.abs(lat[0]) > 89.9999: \n w[0] = np.abs(1. - np.sin(np.radians(np.pi / 2 - d...
[ "0.6848878", "0.6793101", "0.62360746", "0.62186825", "0.6205327", "0.60586023", "0.60279727", "0.58803076", "0.57930404", "0.57810956", "0.573738", "0.5684344", "0.5633059", "0.56270546", "0.5623209", "0.5623209", "0.56100583", "0.55861974", "0.55694896", "0.55694896", "0.55...
0.7154857
0
Compare WRF, SNODAS, and Lidar data on the lidar domain
def main(): snowdensity=0.35 #from May 1 2010 SNOTEL (2011,2013 were similar, 2014 was 0.4), at the saddle in May 1 2010 it was 0.4 snodasyears=[2010,2004,2005] wdata=[wrf.load("wrf/SWE_daily.nc",extractday=212+5+int(np.round(365.25*year))) for year in [3,4]] wdata.extend([wrf.load("wrf/SWE_daily.nc",ex...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs(self):\n ref_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs\"\n \"/lake_analysis_one_21_Jun_2021\")\n data_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"...
[ "0.5914553", "0.58950686", "0.58319104", "0.5644557", "0.56231236", "0.56231236", "0.55532014", "0.55138475", "0.55106986", "0.551042", "0.54737157", "0.5461819", "0.54479355", "0.53951114", "0.5363747", "0.5324874", "0.53132623", "0.5280498", "0.5247656", "0.52454746", "0.52...
0.5927307
0
Store main graph and all topic graphs into collection.
def store(self, graphs, start_date, end_date): documents = [{'topic_id': key, 'graph': graph, 'start_date': start_date, 'end_date': end_date} for key, graph in graphs.items()] self.collection.insert_many(documents)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def populate_graph(self):", "def _store(self, cuds_object):\n assert cuds_object.session == self\n self._registry.put(cuds_object)\n for t in cuds_object._graph:\n ...
[ "0.6284722", "0.60069364", "0.59175754", "0.574157", "0.5713566", "0.56346995", "0.56080204", "0.55680335", "0.556138", "0.550367", "0.543818", "0.5379034", "0.5379034", "0.53369987", "0.5320596", "0.5315405", "0.5294312", "0.5285918", "0.52664405", "0.52534753", "0.52492744"...
0.63743895
0
Check if the local toon is allowed to enter.
def allowedToEnter(self): if base.cr.isPaid(): return True place = base.cr.playGame.getPlace() myHoodId = ZoneUtil.getCanonicalHoodId(place.zoneId) if myHoodId in \ (ToontownGlobals.ToontownCentral, ToontownGlobals.MyEstate, ToontownGlobals...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_telescope_on_allowed(self):\n handler = self.get_command_object(\"TelescopeOn\")\n return handler.check_allowed()", "def __TipIsForbiddenToEnter(self, vial, tipId = None):\n if tipId == None:\n tipId = self.__m_Platform.CurrentTipID()[1]\n if vial.getLabel() in self....
[ "0.6684113", "0.62243545", "0.61877936", "0.61838484", "0.61765593", "0.611133", "0.60216933", "0.60216933", "0.59972715", "0.59768903", "0.58596575", "0.58082616", "0.5807725", "0.58012944", "0.5789078", "0.5779336", "0.57534045", "0.57501376", "0.5747623", "0.5714134", "0.5...
0.7630505
0
Check if the library supports the language.
def language_supported(self, iso_lang="ca-ES"): # -> bool test_lang = "" if len(iso_lang) == 0: return False try: for sep in ["-", "_"]: if sep in iso_lang: test_lang = iso_lang.split(sep)[0] break except (A...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_handle_language(cls, language: Hashable) -> bool:\n\n # if language_list is set to `None` it means: support all languages\n if language is None or cls.language_list is None:\n return True\n\n return language in cls.language_list", "def compare_language(language):\n if l...
[ "0.73111284", "0.72048944", "0.714278", "0.70415515", "0.6972099", "0.690896", "0.6857913", "0.67120904", "0.66275656", "0.66275656", "0.6623935", "0.6518069", "0.64795995", "0.64205503", "0.640512", "0.63772917", "0.63161093", "0.6273078", "0.6245229", "0.61978394", "0.61965...
0.75273556
0
This method will sweep through the range of standard ids given from low to high. This will actively filter for 6 ids at a time and sniff for the given amount of time in seconds. If at least one message is read in then it will go individually through the 6 ids and sniff only for that id for the given amount of time. Thi...
def filterStdSweep(self, freq, low, high, time = 5): msgIDs = [] self.client.serInit() self.client.MCPsetup() for i in range(low, high+1, 6): print "sniffing id: %d, %d, %d, %d, %d, %d" % (i,i+1,i+2,i+3,i+4,i+5) comment= "sweepFilter: " #comment = "swe...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sweepRandom(self, freq, number = 5, time = 5):\n msgIDs = [] #standard IDs that we have observed during run\n ids = [] #standard IDs that have been tried\n self.client.serInit()\n self.client.MCPsetup()\n for i in range(0,number+1,6):\n idsTemp = []\n co...
[ "0.6650663", "0.60136783", "0.5309716", "0.5241036", "0.5212497", "0.5179154", "0.5103345", "0.5085045", "0.50332683", "0.49909282", "0.49524868", "0.49447545", "0.49435183", "0.4942979", "0.49187246", "0.49124974", "0.48578656", "0.48059002", "0.4800731", "0.47861597", "0.47...
0.6956039
0
This method will choose random ids to listen for, out of all the possible standard ids up to the given number. It will sniff for the given amount of time on each set of ids on the given frequency. It sniffs in groups of 6, but when at least one message is read in it will go through all six individually before continuing. Thi...
def sweepRandom(self, freq, number = 5, time = 5): msgIDs = [] #standard IDs that we have observed during run ids = [] #standard IDs that have been tried self.client.serInit() self.client.MCPsetup() for i in range(0,number+1,6): idsTemp = [] comment = "swe...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template\n \n\n #get folder i...
[ "0.600158", "0.5833272", "0.5673142", "0.56084114", "0.5455134", "0.5315466", "0.52986", "0.52894807", "0.5211797", "0.51809776", "0.5144543", "0.5077216", "0.5045132", "0.5042023", "0.5041329", "0.5027075", "0.50139046", "0.5008723", "0.5007171", "0.50008476", "0.49974468", ...
0.73567533
0
This method will sweep through the range of ids given by lowID to highID, send a remote transmission request (RTR) to each id, and then listen for a response. The RTR will be repeated for the given number of attempts, and the method will sniff for the given duration before continuing to the next id. Any messages that are sniffed will b...
def rtrSweep(self,freq,lowID,highID, attempts = 1,duration = 1, verbose = True): #set up file for writing now = datetime.datetime.now() datestr = now.strftime("%Y%m%d") path = self.DATA_LOCATION+datestr+"_rtr.csv" filename = path outfile = open(filename,'a'); data...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template\n \n\n #get folder i...
[ "0.5480776", "0.54575336", "0.53615844", "0.53565055", "0.51353186", "0.5096511", "0.5049164", "0.4956873", "0.49354088", "0.48810115", "0.4833697", "0.48231986", "0.48105225", "0.4800622", "0.47857952", "0.47641757", "0.47444397", "0.47235665", "0.47144476", "0.47072086", "0...
0.77570313
0
This method will perform generation based fuzzing on the bus. The method will inject properly formatted, randomly generated messages at a given period for a I{writesPerFuzz} number of times. The packets that are injected into the bus will all be saved in the following path DATALOCATION/InjectedData/(today's date (YYYYM...
def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes): #print "Fuzzing on standard ID: %d" %standardId self.client.serInit() self.spitSetup(freq) packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template #get folder information (ba...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generalFuzz(self,freq, Fuzzes, period, writesPerFuzz):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty template\n \n #get folder information (based on today's date...
[ "0.8051458", "0.54595256", "0.53430116", "0.5291932", "0.52867705", "0.52761495", "0.52647626", "0.5254595", "0.52455837", "0.5188657", "0.5176086", "0.5169036", "0.51669353", "0.5132858", "0.5126628", "0.5115051", "0.5114212", "0.5111231", "0.510516", "0.50725013", "0.507243...
0.8079699
0
The method will inject properly formatted, randomly generated messages at a given period for a I{writesPerFuzz} number of times. A new random standard id will be chosen with each newly generated packet. IDs will be chosen from the full range of potential ids ranging from 0 to 4095. The packets that are injected into th...
def generalFuzz(self,freq, Fuzzes, period, writesPerFuzz): #print "Fuzzing on standard ID: %d" %standardId self.client.serInit() self.spitSetup(freq) packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty template #get folder information (based on today's date) now...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generationFuzzer(self,freq, standardIDs, dbLimits, period, writesPerFuzz, Fuzzes):\n #print \"Fuzzing on standard ID: %d\" %standardId\n self.client.serInit()\n self.spitSetup(freq)\n packet = [0,0,0x00,0x00,0x08,0,0,0,0,0,0,0,0] #empty packet template\n \n\n #get folder i...
[ "0.80933756", "0.56880844", "0.54426545", "0.54172146", "0.53790885", "0.5184654", "0.5158563", "0.51489717", "0.51385695", "0.5079167", "0.5074281", "0.5022694", "0.5019261", "0.5008377", "0.5006981", "0.498719", "0.49849373", "0.49766207", "0.49589202", "0.49365693", "0.492...
0.79087734
1
This method will allow the user to listen for a specific packet and then respond with a given message. If no listening packet is included then the method will only listen for the id and respond with the specified packet when it receives a message from that id. This process will continue for the given amount of time (in...
def packetRespond(self,freq, time, repeats, period, responseID, respondPacket,listenID, listenPacket = None): self.client.serInit() self.spitSetup(freq) #formulate response packet SIDhigh = (responseID >> 3) & 0xFF; # get SID bits 10:3, rotate them to bits 7:0...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listen_for(self, packet_id, timeout, payload_pattern=None):\n\n time_left = timeout\n response = ICMPPacket()\n while time_left > 0:\n raw_received, address, time_left = self.socket.receive_packet(time_left)\n\n if raw_received != b'':\n response.unpack...
[ "0.60647124", "0.55641264", "0.5346171", "0.52303034", "0.5217701", "0.51774013", "0.51698893", "0.5100064", "0.5037555", "0.4994777", "0.49564165", "0.49480218", "0.49438927", "0.49380833", "0.49273694", "0.49032032", "0.4898641", "0.48416683", "0.48128486", "0.47973782", "0...
0.7383639
0
Derive the file name of a model-specific config file
def modelconfigfile(modelfile): return os.path.splitext(modelfile)[0] + '.vars'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model_filename(config):\n base = os.path.splitext(config['corpus'])[0]\n return '%s--%dT.model' % (base, config['T'])", "def _setupFilename(self):\n try:\n os.mkdir('./.netModel')\n except:\n pass # hope it's already there...\n filenames = os.listdir('./.n...
[ "0.7968517", "0.75344723", "0.73829776", "0.73329455", "0.7190793", "0.709332", "0.70396525", "0.7021086", "0.7006523", "0.69161147", "0.68957627", "0.6892056", "0.6885437", "0.6775761", "0.6737451", "0.67183787", "0.6651527", "0.65063626", "0.6502298", "0.648453", "0.6474647...
0.7901898
1
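modelconfigfile above swaps a file extension via os.path.splitext. A quick check with a hypothetical file name:

    import os

    def modelconfigfile(modelfile):
        # Drop the existing extension and append '.vars'
        return os.path.splitext(modelfile)[0] + '.vars'

    print(modelconfigfile("models/en-de.npz"))  # models/en-de.vars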
Apply postprocessing `methods` to `preds` of shape (items, classes, time).
def postprocess(preds, methods): for method in methods: if method == 'sigmoid': preds = torch.as_tensor(preds).sigmoid().numpy() else: raise ValueError("Unknown postprocessing method %s" % method) return preds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, method):\n process_dicts = []\n for d in self.data_dicts:\n dd = copy.deepcopy(d)\n for ap in self.aps:\n dd[ap] = method(d[ap])\n process_dicts.append(dict2str(dd))\n\n # print(process_dicts)\n # print(type(process_dicts...
[ "0.56834024", "0.565969", "0.53935933", "0.53585386", "0.53097564", "0.5251589", "0.5239719", "0.5229128", "0.5157222", "0.5151197", "0.5110115", "0.5072888", "0.5009041", "0.4979346", "0.49096116", "0.48874912", "0.4867647", "0.48647955", "0.48600864", "0.48440596", "0.48146...
0.6608731
0
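postprocess above dispatches on method names, with only 'sigmoid' implemented. A runnable sketch, assuming NumPy and PyTorch are available:

    import numpy as np
    import torch

    def postprocess(preds, methods):
        for method in methods:
            if method == 'sigmoid':
                # Squash logits into [0, 1]; round-trip through torch, back to numpy
                preds = torch.as_tensor(preds).sigmoid().numpy()
            else:
                raise ValueError("Unknown postprocessing method %s" % method)
        return preds

    logits = np.zeros((2, 3, 4))                      # (items, classes, time)
    print(postprocess(logits, ['sigmoid'])[0, 0, 0])  # 0.5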
Returns whether the object was closed. This includes both thrown exceptions and clean exits.
def closed(self): return self.__closeEvent.is_set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closed(self):\n return self._close_state.is_set()", "def closed(self) -> bool:\n return self._closed", "def closed(self) -> bool:\n return self._closed", "def is_closed(self) -> bool:\n return self._closed", "def is_closed(self) -> bool:\n return self._closed", "def...
[ "0.80961", "0.78408647", "0.78408647", "0.7829112", "0.7829112", "0.77884793", "0.77741545", "0.77116525", "0.7690002", "0.7661896", "0.76284987", "0.7603319", "0.754891", "0.75396085", "0.7534947", "0.75115013", "0.7438865", "0.74171317", "0.7379942", "0.73350513", "0.732012...
0.8093589
1
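The closed() pattern above polls a threading.Event that is set on any exit path. A minimal sketch with a hypothetical Worker class:

    import threading

    class Worker:
        def __init__(self):
            self.__closeEvent = threading.Event()

        def close(self):
            # Set once on any exit, whether clean or via an exception handler
            self.__closeEvent.set()

        def closed(self):
            return self.__closeEvent.is_set()

    w = Worker()
    print(w.closed())  # False
    w.close()
    print(w.closed())  # True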
Imports the symbol defined by 'symbol_path'. 'symbol_path' is a string in the form 'foo.bar.baz' which is turned into an import statement 'from foo.bar import baz' (i.e. the last component of the name is the symbol name, the rest is the package/module path to load it from).
def _import_symbol(symbol_path): components = symbol_path.split(".") module_name = ".".join(components[:-1]) symbol_name = components[-1] module = __import__(module_name, globals(), locals(), [symbol_name]) symbol = getattr(module, symbol_name) return symbol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_from_path(module: str, path: str, name: str):\n\n spec = importlib.util.spec_from_file_location(module, path)\n foo = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(foo)\n return getattr(foo, name)", "def import_from_string(import_path: str) -> Any:\n\n import_classname...
[ "0.60043275", "0.57454276", "0.5705168", "0.5638744", "0.5635315", "0.55979246", "0.5471517", "0.54315394", "0.54311717", "0.5304224", "0.530391", "0.52805257", "0.5262261", "0.5223097", "0.5155832", "0.5155832", "0.51526827", "0.5150379", "0.50937927", "0.5057649", "0.500408...
0.88491184
0
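_import_symbol above turns a dotted path into the equivalent of 'from foo.bar import baz'. A usage sketch against the standard library:

    def _import_symbol(symbol_path):
        components = symbol_path.split(".")
        module_name = ".".join(components[:-1])
        symbol_name = components[-1]
        # fromlist makes __import__ return the submodule rather than the top package
        module = __import__(module_name, globals(), locals(), [symbol_name])
        return getattr(module, symbol_name)

    urljoin = _import_symbol("urllib.parse.urljoin")
    print(urljoin("https://example.com/a/", "b"))  # https://example.com/a/b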
Figuring out if a type is a named tuple is not as trivial as one may expect
def type_is_namedtuple(t) -> bool: try: return issubclass(t, tuple) and hasattr(t, "_fields") except TypeError: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_namedtuple(v) -> bool:\n try:\n return isinstance(v, tuple) and hasattr(v, \"_fields\")\n except TypeError:\n return False", "def is_namedtuple(obj):\n return isinstance(obj, tuple) and hasattr(obj, '_asdict')", "def isnamedtuple(obj):\n return isinstance(obj, tuple) \\\n ...
[ "0.7847265", "0.77559423", "0.76825786", "0.76751727", "0.7643817", "0.6838801", "0.6626756", "0.6541308", "0.63720953", "0.6256021", "0.61520916", "0.61458665", "0.6096234", "0.60237575", "0.58922905", "0.58663714", "0.5831151", "0.5825571", "0.57641757", "0.5760455", "0.575...
0.82478565
0
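type_is_namedtuple above relies on the _fields attribute that both collections.namedtuple and typing.NamedTuple generate. A quick check:

    from collections import namedtuple

    def type_is_namedtuple(t) -> bool:
        try:
            # namedtuple classes subclass tuple and carry a _fields attribute
            return issubclass(t, tuple) and hasattr(t, "_fields")
        except TypeError:
            return False

    Point = namedtuple("Point", ["x", "y"])
    print(type_is_namedtuple(Point))  # True
    print(type_is_namedtuple(tuple))  # False: no _fields
    print(type_is_namedtuple(42))     # False: issubclass raises TypeError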
Determine if value is a subclass of type_. Will work even if value is not a class
def issubclass_safe(value, type_): try: return issubclass(value, type_) except (TypeError, AttributeError): # Cannot perform issubclass on some types return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_type(value):\n if isinstance(value, type):\n return issubclass(value, Type)\n return isinstance(value, Type)", "def isinstance_safe(value, type_):\n try:\n return isinstance(value, type_)\n except TypeError:\n # Cannot perform isinstance on some types\n ...
[ "0.7880763", "0.7379336", "0.7272714", "0.72481436", "0.72121257", "0.7209383", "0.6984322", "0.68737435", "0.68694156", "0.6789055", "0.67275107", "0.6715976", "0.670521", "0.66198355", "0.6616635", "0.6616613", "0.6574762", "0.6546906", "0.64954215", "0.6469347", "0.6450119...
0.7830925
1
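issubclass_safe above guards issubclass against non-class inputs. A quick check:

    def issubclass_safe(value, type_):
        try:
            return issubclass(value, type_)
        except (TypeError, AttributeError):
            # issubclass raises on non-class values such as 42 or "abc"
            return False

    print(issubclass_safe(bool, int))  # True
    print(issubclass_safe(42, int))    # False instead of a TypeError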
Parse a typing hint into its type and arguments.
def parse_hint(hint: Type) -> Tuple[Type, Optional[List]]: if hasattr(hint, "__origin__"): # This is a type hint (eg typing.Union) # Filter out TypeVars such as KT & VT_co (they generally # indicate that no explicit hint was given) hint_args = [a for a in getattr(hint, "__args__", []...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_type_arg(self, parse_input):\n with pytest.warns(SyntaxWarning, match=\"Only keyword options of the form\"):\n parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\\ntype example (42)\")", "def parse(cls, buf: memoryview, params: Params) \\\n -> tuple[AnyParseable,...
[ "0.6011422", "0.5526414", "0.54611135", "0.53997856", "0.5341764", "0.53028625", "0.5287242", "0.52829146", "0.528289", "0.5266029", "0.5262158", "0.52545506", "0.52489644", "0.5247131", "0.52150744", "0.52064526", "0.5188259", "0.51840985", "0.5177085", "0.5135521", "0.51304...
0.7484519
0
Get the default value for a property with name `property_name` on class `type_`
def get_property_default(type_: Type, property_name: str) -> ...: if issubclass_safe(type_, tuple): # namedtuple if hasattr(type_, "_field_defaults"): default = type_._field_defaults.get(property_name, inspect.Parameter.empty) else: default = inspect.Parameter.empty ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_property_default_value(property):\n return _get_default_value(get_type_name(property.type),\n property.type.is_simple,\n property.is_iterative,\n property.is_required)", "def get_property_default(self, name, default...
[ "0.8010006", "0.7963241", "0.71551555", "0.6325218", "0.63015527", "0.6284296", "0.6281772", "0.6276523", "0.62745297", "0.6232446", "0.6232106", "0.62256825", "0.6220216", "0.6199206", "0.6176899", "0.6167932", "0.6161065", "0.61468476", "0.6142605", "0.6139256", "0.6119665"...
0.85256344
0
Parse website URL from proxy page
def get_website_url(self, proxy): body = BeautifulSoup( proxy_get_url(proxy) ) refresh_meta = body.find('meta', attrs={'http-equiv': 'refresh'})['content'] return refresh_meta.split('=')[-1].strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_source(self, response):\n return response.url", "def _parse_source(self, response) -> str:\n return response.url", "def _parseurl(url):\n tracker1=url\n port=int(re.findall(\"[0-9]+\",tracker1)[0])\n host=re.findall(\"[^0-9]+\",tracker1)[0]\n host=host[:-1]\n host=host[6...
[ "0.64632994", "0.6277717", "0.6234803", "0.6130124", "0.6128327", "0.6115169", "0.60802186", "0.6072408", "0.59983116", "0.59591407", "0.59323686", "0.5928596", "0.590725", "0.5872435", "0.5863363", "0.58452016", "0.5842432", "0.58215463", "0.5814262", "0.57957", "0.577748", ...
0.65610504
0
Gets a list with all the categories existing in the database
def get_all() -> list: categorias = [] conn = GenericDao.connect() cursor = conn.execute("SELECT * FROM categorias") for row in cursor: categoria = Categoria(row[1], row[0]) categorias.append(categoria) if debug: print(str(categoria)) conn.close() return cate...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def categories(self):\n pass", "def categories(self):\n game_categories = self.game_categories.all()\n return [ gc.category for gc in game_categories ]", "def categories(self):\n cur = self.con.execute('select category from cc');\n return [d[0] for d in cur]", "def get_used...
[ "0.7249249", "0.7223673", "0.7196713", "0.7126999", "0.7086236", "0.69820863", "0.6896232", "0.6895573", "0.6892413", "0.6884656", "0.68071264", "0.6762062", "0.6733714", "0.6714556", "0.6665258", "0.6662005", "0.6654026", "0.6646227", "0.66089505", "0.6584195", "0.6580966", ...
0.77978975
0
Looks up one category in the database by providing the id
def get_id(idd: int) -> Categoria: conn = GenericDao.connect() cursor = conn.execute('SELECT * FROM categorias where categoria_id = ?', (str(idd),)) row = cursor.fetchone() categoria = Categoria(row[1], row[0]) if debug: print(str(categoria)) conn.close() return categoria
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_id_categorie_in_database(self, db):\n\n try:\n select_query = \"SELECT id_categorie FROM categorie WHERE categorie_name='\"+self.categorie_name+\"';\"\n result = db.query(select_query)\n self.id_categorie = result[0][\"id_categorie\"]\n\n\n except IntegrityEr...
[ "0.7121691", "0.6554141", "0.6497267", "0.6486848", "0.604737", "0.599137", "0.5966648", "0.59002024", "0.58759624", "0.58522", "0.5840264", "0.58314836", "0.58063036", "0.5780173", "0.5775938", "0.56541747", "0.56518245", "0.56508154", "0.5645675", "0.5632842", "0.56258434",...
0.7208243
0
Looks up the id of one category in the database by providing the name
def get_id_nombre(categoria_nombre: str) -> str: conn = GenericDao.connect() cursor = conn.execute('SELECT categoria_id FROM categorias where categoria_nombre=?', (categoria_nombre,)) row = cursor.fetchone() id_categoria = row[0] if debug: print(str(id_categoria)) conn.close() retur...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_id(idd: int) -> Categoria:\n conn = GenericDao.connect()\n cursor = conn.execute('SELECT * FROM categorias where categoria_id = ?', (str(idd),))\n row = cursor.fetchone()\n categoria = Categoria(row[1], row[0])\n if debug:\n print(str(categoria))\n\n conn.close()\n return catego...
[ "0.68540025", "0.6634235", "0.63437605", "0.6210306", "0.6125233", "0.6113372", "0.61019367", "0.6087021", "0.598077", "0.59659487", "0.5924591", "0.5912172", "0.5867398", "0.5840604", "0.57927936", "0.57337314", "0.5698949", "0.5683739", "0.5681899", "0.5614703", "0.56006783...
0.7045538
0
Get the network object from a given address and netmask
def get_network(address: str, netmask: str) -> IPv4Network: net = IPv4Network(f"{address}/{netmask}", strict=False) return net
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dotted_netmask(mask):\n mask = int(mask)\n bits = 0xffffffff ^ (1 << 32 - mask) - 1\n return socket.inet_ntoa(struct.pack('>I', bits))", "def get_net_obj(host, object_type, name, refresh=False):\n objs = get_net_objs(host=host, object_type=object_type, refresh=refresh)\n obj_name = name.lower(...
[ "0.63848794", "0.6259637", "0.61794454", "0.6175556", "0.6134683", "0.6117697", "0.59853756", "0.59502894", "0.59267926", "0.5924149", "0.59040844", "0.58833677", "0.5831049", "0.5792555", "0.5789383", "0.5780396", "0.5660327", "0.5654742", "0.56288487", "0.56134087", "0.5599...
0.7700801
0
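get_network above leans on ipaddress.IPv4Network with strict=False, so an address with host bits set still resolves to its network. A quick check:

    from ipaddress import IPv4Network

    def get_network(address: str, netmask: str) -> IPv4Network:
        # strict=False lets a host address like 192.168.1.42 stand in for its network
        return IPv4Network(f"{address}/{netmask}", strict=False)

    print(get_network("192.168.1.42", "255.255.255.0"))  # 192.168.1.0/24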
Check if a given IP address is within a given network
def check_network_contains_ip(network: IPv4Network, address: str) -> bool: ip = IPv4Address(address) if ip in network: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def address_exists_in_network(ip_address, net_n_bits):\n ip_address = struct.unpack('<L', socket.inet_aton(ip_address))[0]\n net, bits = net_n_bits.split('/')\n net_address = struct.unpack('<L', socket.inet_aton(net))[0]\n net_mask = ((1L << int(bits)) - 1)\n return ip_address & net_mask == net_addr...
[ "0.75938654", "0.7511004", "0.7479366", "0.7437868", "0.73920935", "0.73480964", "0.7324643", "0.7290853", "0.7193748", "0.7108727", "0.7097012", "0.70429254", "0.6999613", "0.694608", "0.6937112", "0.69068915", "0.6902325", "0.6866631", "0.683682", "0.6792288", "0.6748791", ...
0.81156
0
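check_network_contains_ip above is plain ipaddress membership: IPv4Address supports the in operator against IPv4Network. Combined with get_network from the previous record:

    from ipaddress import IPv4Address, IPv4Network

    def check_network_contains_ip(network: IPv4Network, address: str) -> bool:
        # Membership is delegated to IPv4Network.__contains__
        return IPv4Address(address) in network

    net = IPv4Network("192.168.1.0/24")
    print(check_network_contains_ip(net, "192.168.1.7"))  # True
    print(check_network_contains_ip(net, "10.0.0.1"))     # False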