query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Returns the plate scale as an `~astropy.units.Quantity`.
def plate_scale(self): return 206265 * uu.arcsec / (self.diameter.to('mm') * self.f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def getScale(self):\n return _libsbml.Unit_getScale(self)", "def scale(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"scale\")", "def scale(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"scale\")", ...
[ "0.7407171", "0.73354304", "0.7272574", "0.7260109", "0.72580636", "0.71984255", "0.7091628", "0.7008195", "0.69788766", "0.6805889", "0.6759866", "0.67273027", "0.67187494", "0.6713132", "0.6710965", "0.6695409", "0.6635227", "0.6599181", "0.65680814", "0.6560833", "0.656083...
0.7882506
0
Identifies genes that are significantly enriched for insertions (CTGs). This function takes a DataFrame of insertions, coming from multiple samples, and identifies if any genes are more frequently affected by an insertion than would be expected by chance. These genes are called Commonly Targeted Genes (CTGs). CTGs are selected by comparing the number of insertions within the gene to the number of insertions that would be expected from the background insertion rate, which is modeled using a Poisson distribution.
def test_ctgs( insertions, # type: List[Insertion] reference, # type: Reference gene_ids=None, # type: Set[str] chromosomes=None, # type: Set[str] pattern=None, # type: str per_sample=True, # type: bool window=None #type: Tuple[int, int] ): # Default to shared chromosome sequences (typically drops some # of the more esoteric extra scaffold/patch sequences). if chromosomes is None: reference_seq = pyfaidx.Fasta(str(reference.fasta_path)) reference_gtf = GtfIterator(reference.indexed_gtf_path) chromosomes = list( set(reference_seq.keys()) & set(reference_gtf.contigs)) if len(chromosomes) == 0: ValueError('No chromosomes are shared between the reference ' 'sequence and reference gtf files') if len(chromosomes) == 0: raise ValueError('At least one chromosome must be given') # Determine gene windows using GTF. logging.info('Generating gene windows') gene_windows = _build_gene_windows( reference.indexed_gtf_path, window=window, chromosomes=chromosomes) # Subset insertions to gene intervals. insertions = _subset_to_windows(insertions, gene_windows) if gene_ids is None: gene_ids = set(ins.metadata['gene_id'] for ins in insertions) # Collapse insertions per gene/sample (recommended). # Corrects for hopping/multiple detection issues. if per_sample: logging.info('Collapsing insertions') insertions = list(_collapse_per_sample(insertions)) # Calculate total number of pattern occurrences within intervals. logging.info('Counting pattern occurrences') reference_seq = pyfaidx.Fasta(str(reference.fasta_path)) total = count_total( reference_seq, pattern=pattern, intervals=gene_windows.values()) # Calculate p-values for each gene. 
logging.info('Calculating significance for genes') insertion_trees = GenomicIntervalTree.from_objects_position( insertions, chrom_attr='seqname') p_values = { gene_id: test_region( insertions=insertions, reference_seq=reference_seq, region=gene_windows[gene_id], total=total, pattern=pattern, filters=[lambda ins, gid=gene_id: ins.metadata['gene_id'] == gid], insertion_trees=insertion_trees) for gene_id in gene_ids } # Build result frame. result = pd.DataFrame.from_records( iter(p_values.items()), columns=['gene_id', 'p_value']) # Calculate corrected p-value using bonferroni correction. result['q_value'] = (result['p_value'] * len(result)).clip_upper(1.0) # Sort by q-value and p-value. result.sort_values(by=['q_value', 'p_value'], inplace=True) if len(insertions) > 0: # Annotate with gene_name if possible. if 'gene_name' in insertions[0].metadata: name_map = { ins.metadata['gene_id']: ins.metadata['gene_name'] for ins in insertions } result.insert(1, 'gene_name', result['gene_id'].map(name_map)) else: result['gene_name'] = np.nan # Annotate with frequency. frequency = (Insertion.to_frame(insertions) .groupby('gene_id')['sample'].nunique() .reset_index(name='n_samples')) result = pd.merge(result, frequency, on='gene_id', how='left') else: result['gene_name'] = np.nan result['n_samples'] = np.nan return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genes_GT():\n df1=pd.read_csv(config['geneInfo'], sep=\" \")\n df1=df1[df1.chr == '22']\n df2=pd.read_csv(config['counts'], sep=\" \")\n genes=df1.merge(df2.gene_id, on=\"gene_id\")\n return list(set(genes['gene_id']))", "def process_cgc(path, return_dataframe=False, fusions=False):\n # rea...
[ "0.62168896", "0.5950708", "0.5604085", "0.5591386", "0.5474958", "0.54715043", "0.5465627", "0.5456833", "0.535208", "0.5323605", "0.5284358", "0.52755475", "0.5262531", "0.52539337", "0.52331626", "0.520046", "0.5148381", "0.507968", "0.5076866", "0.5065854", "0.5053424", ...
0.6928965
0
Subsets insertions for given gene windows.
def _subset_to_windows( insertions, # type: List[Insertion] gene_windows # type: Dict[str, Tuple[str, int, int]] ): # type: (...) -> List[Insertion] # Create lookup trees. trees = { chrom: IntervalTree.from_tuples((i[1:]) for i in chrom_int) for chrom, chrom_int in itertools.groupby( sorted(gene_windows.values()), operator.itemgetter(0)) } # Determine which insertions overlap tree intervals and # correspond to genes with known gene window. def _in_windows(ins, trees): try: return trees[ins.seqname].overlaps(ins.position) except KeyError: return False return [ ins for ins in insertions if ins.metadata['gene_id'] in gene_windows and _in_windows(ins, trees) ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_windows(sliding_windows_file, genes_file, output_file):\n\n\t# Read sliding windows file and create a list in the form\n\t# genes = [('gene1', 1000, 2000), ('gene2', 4000, 45000)]\n\tgenes = []\t\t# this could be a dictionary but I prefer not\n\tfor line in genes_file:\n\t\tline = line.strip()\n\n\t\ti...
[ "0.5770418", "0.53267133", "0.52984023", "0.51812553", "0.51691467", "0.51118696", "0.5079057", "0.50481063", "0.50467324", "0.5036095", "0.50196743", "0.50149506", "0.49723238", "0.49606135", "0.49236315", "0.49137327", "0.48717156", "0.48612767", "0.48102915", "0.48091227", ...
0.7655855
0
Tests a given genomic region for enrichment in insertions.
def test_region( insertions, # type: List[Insertion] reference_seq, # type: pyfaidx.Fasta region, # type: Tuple[str, int, int] pattern=None, # type: Optional[str] intervals=None, # type: Optional[Iterable[Tuple[str, int, int]]] total=None, # type: Optional[int] filters=None, # type: Optional[List[Callable]] insertion_trees=None # type: GenomicIntervalTree ): # type: (...) -> float if total is None: total = count_total( reference_seq, pattern=pattern, intervals=intervals) # Count pattern in region. region_count = count_region(reference_seq, region=region, pattern=pattern) # Sub-select insertions for region. if insertion_trees is None: insertion_trees = GenomicIntervalTree.from_objects_position( insertions, chrom_attr='seqname') region_ins = set(interval[2] for interval in insertion_trees.search(*region)) # Apply additional filter functions to insertions if given # (such as filtering on gene name/id for example). if filters is not None: for filter_func in filters: region_ins = set(ins for ins in region_ins if filter_func(ins)) # Calculate p-value. x = len(list(region_ins)) mu = len(insertions) * (region_count / total) # Note here we use loc=1, because we are interested in # calculating P(X >= x), not P(X > x) (the default # surivival function). p_val = poisson.sf(x, mu=mu, loc=1) # type: float return p_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_build_genomic_regions(self):\n\n CDS = pybedtools.BedTool(\"\"\"chr1\\t7700\\t7900\\tfoo\\t0\\t+\\n\n chr1\\t7999\\t8500\\tfoo\\t0\\t+\\n\"\"\", from_string = True)\n UTR5 = pybedtools.BedTool(\"\"\"chr1\\t7499\\t7700\\tfoo\\t0\\t+\\n\"\"\", from_string = Tr...
[ "0.6306166", "0.5873809", "0.5715272", "0.5644633", "0.5633991", "0.5508607", "0.55017656", "0.5482008", "0.53994864", "0.53851366", "0.53696203", "0.53695005", "0.53581506", "0.5352486", "0.5298148", "0.5286712", "0.52787757", "0.5276704", "0.52690274", "0.5268633", "0.52497...
0.6274175
1
Counts occurrences of pattern within given genomic region.
def count_region( reference_seq, # type: pyfaidx.Fasta region, # type: Tuple[str, int, int] pattern=None # type: Optional[str] ): # type: (...) -> int chrom, start, end = region seq = reference_seq[chrom][int(start):int(end)] return _count_sequence(seq, regex=_build_regex(pattern))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def pattern_count(DNA, pattern, start=0, end=0, mutation_thresh=0):\n if start < 0 or start >= len(DNA):\n raise ValueError(\"The starting posi...
[ "0.690619", "0.6891801", "0.6734169", "0.661228", "0.64735407", "0.64611524", "0.645101", "0.6441295", "0.643269", "0.63974696", "0.6269934", "0.6190485", "0.61015546", "0.5953266", "0.5953266", "0.58486587", "0.57067573", "0.56916803", "0.56484526", "0.56377214", "0.56303567...
0.7694619
0
Counts occurrences of pattern in sequence.
def _count_sequence(sequence, regex=None): # type: (pyfaidx.Sequence, Pattern[str]) -> int if regex is None: count = len(sequence) else: count = sum((1 for _ in regex.finditer(str(sequence)))) return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pattern_count(sequence, pattern):\n return len(re.findall(r'(?=' + pattern + ')', sequence))", "def count_pattern(sentence, pattern):\n n = len(pattern)\n counter = 0\n for i in range(len(sentence) - n + 1):\n if sentence[i:i+n] == pattern:\n counter += 1\n\n return count...
[ "0.8225791", "0.757032", "0.74906814", "0.7362785", "0.7350869", "0.73212504", "0.7278474", "0.7140979", "0.707895", "0.6971106", "0.6928305", "0.6643842", "0.65604806", "0.64908123", "0.6395848", "0.6317644", "0.6313916", "0.6293871", "0.62750435", "0.62558323", "0.6236206",...
0.76051104
1
Counts total occurrences of pattern in reference.
def count_total( reference_seq, # type: pyfaidx.Sequence pattern=None, # type: str intervals=None # type: Iterable[Tuple[str, int, int]] ): # type: (...) -> int regex = _build_regex(pattern) if intervals is None: # Simply count for the entire sequence. count = sum(_count_sequence(reference_seq[seq], regex=regex) for seq in reference_seq.keys()) # yapf: disable else: # Flatten intervals, and then only count for sequences # within the flattened intervals. merged_intervals = list(merge_genomic_intervals(intervals)) seqs = [ reference_seq[chrom][start:end] for chrom, start, end in merged_intervals ] count = sum(_count_sequence(seq, regex=regex) for seq in seqs) return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CountOccurrences(pattern, bwt, starts, occ_counts_before):\n # Implement this function yourself\n return 0", "def count_occurrences(text, pattern, d=0):\n return len(find_occurrences(text, pattern, d))", "def calculate_reference(gram_list, references):\n gram_sub_str = ' '.join(gram_list)\n gram...
[ "0.71985865", "0.7126463", "0.693929", "0.6936579", "0.6664656", "0.6643048", "0.6616412", "0.65986395", "0.65699697", "0.6548704", "0.644031", "0.6386957", "0.63118064", "0.6282825", "0.6272797", "0.6222616", "0.6174506", "0.61413646", "0.60996157", "0.6062659", "0.606119", ...
0.68921715
4
Merges overlapping genomic intervals.
def merge_genomic_intervals(intervals): # type: (Iterable[Tuple[str, int, int]]) -> Iterable[Tuple[str, int, int]] # Group intervals by chromosome. grouped_intervals = itertools.groupby( sorted(intervals), operator.itemgetter(0)) # Now yield merged intervals per chromosome. for chrom, grp in grouped_intervals: chrom_intervals = [interval[1:] for interval in grp] for low, high in merge_intervals(chrom_intervals, is_sorted=True): yield chrom, low, high
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_ranges():", "def test_merge_intervals():\n\n a = pybedtools.example_bedtool(\"a.bed\") # path to test file a\n # This file looks like this:\n # chr1\t1\t100\tfeature1\t0\t+\n # chr1\t100\t200\tfeature2\t0\t+\n # chr1\t150\t500\tfeature3\t0\t-\n # chr1 900\t950\tfeature4\t0\t+\n\n ...
[ "0.75773865", "0.6600593", "0.6574509", "0.6569135", "0.63968265", "0.63313943", "0.6322615", "0.624788", "0.6216611", "0.6146239", "0.6133331", "0.60140103", "0.60021335", "0.59763306", "0.5970033", "0.58648163", "0.58545077", "0.57922995", "0.57907534", "0.57814354", "0.572...
0.6790975
1
Read file into string.
def read_file(self, file: Path) -> str: with open(file) as f: return f.read()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_file(file):\n with open(file, 'r') as f:\n file_string = f.read()\n return file_string", "def read_file(path): #TODO implementme, handling paths more intelligently\n f = open(path, \"r\")\n string = f.read()\n f.close()\n return string", "def file2str(file):\n with open(file, \"r\"...
[ "0.8227773", "0.81447333", "0.8022711", "0.79404026", "0.7933046", "0.78936315", "0.78294694", "0.7784561", "0.7775774", "0.7751609", "0.77187586", "0.7716427", "0.77023", "0.76969075", "0.76942647", "0.7690147", "0.7679615", "0.7668749", "0.76600176", "0.7641418", "0.7638525...
0.7827757
7
Create a new websocket and connect its input and output to the subprocess with the specified PID.
async def websocket_handler(self, request, ws): if self.repl_mgr is None: return sanic.response.HTTPResponse(status=404) log.info('initiating websocket') await self.repl_mgr.process_websocket(ws) log.info('terminating websocket')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_web_socket(vnc_port, web_socket_port, server):\n\n path = os.path.abspath(os.path.dirname(__file__))\n ws = os.path.join(path, \"../../webConsole/bin/websockify.py\")\n\n web_socket_path = os.path.abspath(ws)\n\n cmd = \"%s %s:%s %s:%s --idle-timeout=120 &\" % (web_socket_path, server, vnc_p...
[ "0.61489266", "0.6004585", "0.58952093", "0.5711386", "0.56981546", "0.5681631", "0.5521888", "0.55072397", "0.5495588", "0.54655325", "0.5456767", "0.5442702", "0.539571", "0.53936225", "0.5373037", "0.5360796", "0.53407484", "0.53166866", "0.52801496", "0.5262081", "0.52579...
0.0
-1
opener for opening sheets for client stock company name (e.g AAPL for apple inc.) name name of the sheet (e.g 'income' / 'balace'), use sheets_names() to see all names returns a csv sheet of the sheet of the company
def open_file(stock, name, setup=False): if not isinstance(stock, str): raise TypeError("Parameter 'stock' should be a string, not a " + type(stock).__name__) if setup is True: # when setup, name is "AAPL_income.csv", not "income" # path = _os.path.join(datapath(setup=False), stock, name) path = datapath(True, stock, name) df = _pd.read_csv(path) _gc.collect() return df # not setup, normal open_file names = ['major_holders', 'top_institutional_holders', 'top_mutual_fund_holders', 'Trading_Information', 'Financial_Highlights', 'Valuation_Measures', 'Executives', 'Description', 'Earnings_Estimate', 'Revenue_Estimate', 'Earnings_History', 'EPS_Trend', 'EPS_Revisions', 'Growth_Estimates', 'stats', 'statements', 'reports', 'Executives', 'Description', 'analysis', 'Summary', 'balance', 'cash_flow', 'income'] if name not in names: try: name = _path(name) # when client mistakenly input factor instead of sheet name except ValueError: raise ValueError( 'Parameter "name" should be the name of the financial sheets, not a factor name...Use path method to ' 'find the location of a factor') path = datapath(True, stock, stock) try: df = _pd.read_csv(path + '_' + name + '.csv') _gc.collect() except FileNotFoundError: _gc.collect() if _os.path.exists(datapath(True, stock)): raise ValueError("There is no sheet - {} - for company {}. Use main_get to retrieve the sheet".format (name, stock)) else: raise ValueError("There is no record of '" + stock + "' in database") return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def excel(df_ccl, df_arg_stocks, df_bonds, df_arg_stocks_ccl):\n if os.path.exists('CCL.xlsx'):\n wb = xw.Book('CCL.xlsx')\n # SHEET CEDEARS\n ws = wb.sheets('CCL CEDEARs')\n ws.range('A1').expand().value = df_ccl\n # SHEET MERVAL\n ws_merval = wb.sheets('Merval')\n ...
[ "0.5797708", "0.5627527", "0.55049616", "0.547899", "0.5430646", "0.53921485", "0.53507286", "0.531778", "0.5266958", "0.52605325", "0.5259202", "0.52366793", "0.52347517", "0.5210146", "0.5195938", "0.51869226", "0.51795375", "0.5152984", "0.51428926", "0.51320624", "0.51209...
0.5508073
2
Read CSV in folder "general" in database. Also used in setup.py
def open_general(file, setup=False): try: if setup is False: p = datapath(True, 'general', file) df = _pd.read_csv(p + '.csv') elif setup is True: p = datapath(True, 'general', file) df = _pd.read_csv(p + '.py') else: df = None # not tested here return df except FileNotFoundError as e: print("There is no record of {} in your database. Go to your chosen setup path to check, if not there go to " "Github and download the missing sheet".format(file)) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def getFake(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/Fake.csv\")", "def read_csv_file(self):\n pass", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file...
[ "0.6300106", "0.61802393", "0.6075419", "0.60727006", "0.606177", "0.6019453", "0.5933561", "0.58593744", "0.5730161", "0.5652817", "0.56373644", "0.5618824", "0.55967456", "0.5585875", "0.55683404", "0.5567126", "0.5550129", "0.5536807", "0.55361545", "0.55278426", "0.550909...
0.65410346
0
Read the stock list in database, a wrap up of open_general. Open stock list files in database using open_general() function.
def open_stock_list(exchange='ALL'): if exchange not in ['NYSE', 'NASDAQ'] and exchange != 'ALL': raise ValueError("Parameter 'exchange' should either NYSE or NASDAQ") if exchange == 'ALL': # all tickets c1 = open_general('NASDAQ') c2 = open_general('NYSE') df = _pd.concat([c1, c2], ignore_index=True).drop('Unnamed: 9', axis=1) # drop duplicated column else: _csv = open_general(exchange) df = _csv.drop('Unnamed: 9', axis=1) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_stock(db, openfile):\n pass", "def read_stock_codes_from_db():\n\n print('connecting to database...')\n Stocks = get_db()['Stocks']\n print('reading...')\n\n stocks = Stocks.find()\n return stocks", "def database_open(self):\n\t\n\t\tfilename = tkFileDialog.askopenfilename(multiple=F...
[ "0.8676556", "0.6498297", "0.6392442", "0.62527007", "0.6116841", "0.6115268", "0.60903823", "0.6079111", "0.605728", "0.60320973", "0.59506726", "0.58965456", "0.58925784", "0.58342135", "0.581336", "0.57088405", "0.5691312", "0.56265354", "0.56252694", "0.5618806", "0.56076...
0.6232286
4
Determines whether the discrepancy has been sufficiently resolved; used as return value for fix_discrepancy.
def discrepancy_resolved(self): # If there's a discrepancy and distance change matches the existing data, we're good. if self.distance_change == self.existing_data: return True # If recommend_updates, i.e., if self.distance_change == self.new_data, we'll update the data and we're good elif self.recommend_updates: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_solved(self):\n if not self._find_empty():\n return True\n else:\n return False", "def is_solved(self):\n\n marker = self._marker\n amount_of_pegs = 0\n for row in marker:\n for i in row:\n if i == \"*\":\n ...
[ "0.6593381", "0.622655", "0.62037015", "0.6201302", "0.6191842", "0.61888754", "0.60700476", "0.6044539", "0.6014487", "0.6009676", "0.5961862", "0.5950892", "0.5950892", "0.59451425", "0.5910703", "0.58750445", "0.5857207", "0.582537", "0.5819653", "0.580809", "0.5805597", ...
0.7586293
0
Run when the palette is closed
def on_palette_close(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(self, args):\r\n try:\r\n self.cmd_object_.on_palette_close()\r\n\r\n except:\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n ui.messageBox('Failed During Palette Close:\\n{}'.format(traceback.form...
[ "0.767233", "0.6960615", "0.67901963", "0.6604122", "0.6592666", "0.64696324", "0.64392585", "0.6405782", "0.6362652", "0.6352815", "0.6348606", "0.6346555", "0.63247025", "0.6313968", "0.6289701", "0.6276552", "0.6267763", "0.62607265", "0.62607265", "0.6249281", "0.623151",...
0.91420245
0
Function is run when the palette is executed. Useful to gather initial data and send to html page
def on_palette_execute(self, palette: adsk.core.Palette): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(self, args):\r\n app = adsk.core.Application.cast(adsk.core.Application.get())\r\n ui = app.userInterface\r\n try:\r\n\r\n # Create and display the palette.\r\n palette = ui.palettes.itemById(self.cmd_object_.palette_id)\r\n\r\n if not palette:\r\n ...
[ "0.71663606", "0.5976665", "0.59089667", "0.5755218", "0.57544327", "0.56373245", "0.5611845", "0.56111276", "0.5609256", "0.55880344", "0.5586924", "0.5578858", "0.5556275", "0.5524903", "0.55076844", "0.5467774", "0.54665", "0.5456171", "0.543333", "0.543", "0.5428835", "...
0.6666663
1
Function is run when the addin stops. Clean up. If overridden ensure to execute with super().on_stop()
def on_stop(self): app = adsk.core.Application.cast(adsk.core.Application.get()) ui = app.userInterface palette = ui.palettes.itemById(self.palette_id) for handler in self.html_handlers: palette.incomingFromHTML.remove(handler) if palette: palette.deleteMe() super().on_stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def on_stop(self):\n pass", "def post_stop(self):", "def on_stop(self):\n ...
[ "0.81118715", "0.81118715", "0.81118715", "0.81118715", "0.81118715", "0.81118715", "0.81118715", "0.7925438", "0.7536368", "0.7458011", "0.7412743", "0.7316049", "0.73140156", "0.7286416", "0.72050714", "0.7180944", "0.71321785", "0.7109044", "0.7109044", "0.71050143", "0.70...
0.6577097
95
Method executed by Fusion. DOn't rename
def notify(self, args): try: command_ = args.command inputs_ = command_.commandInputs on_execute_handler = _PaletteExecuteHandler(self.cmd_object_) command_.execute.add(on_execute_handler) self.cmd_object_.handlers.append(on_execute_handler) self.cmd_object_.on_create(command_, inputs_) except: app = adsk.core.Application.cast(adsk.core.Application.get()) ui = app.userInterface ui.messageBox('Command created failed: {}'.format(traceback.format_exc()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def my_rename(self, src, dst):\n self.renamerCalled = True", "def _fix_up(self, cls, code_name):", "def fix_name(self):\n self._name_fixed = True", "def OnRenameTimer(self):\r\n \r\n self.Edit(self._current)", "def rename(old, new):", "def rename(old, new):", "def _transform...
[ "0.6707409", "0.63080657", "0.6128592", "0.6127869", "0.60907537", "0.60907537", "0.60767657", "0.60607314", "0.5969004", "0.5958113", "0.5949128", "0.59331226", "0.592982", "0.5924983", "0.5913726", "0.5904696", "0.59042233", "0.5902338", "0.5860706", "0.5860248", "0.5841527...
0.0
-1
Method executed by Fusion. Don't rename
def notify(self, args): app = adsk.core.Application.cast(adsk.core.Application.get()) ui = app.userInterface try: # Create and display the palette. palette = ui.palettes.itemById(self.cmd_object_.palette_id) if not palette: palette = ui.palettes.add( self.cmd_object_.palette_id, self.cmd_object_.palette_name, self.cmd_object_.palette_html_file_url, self.cmd_object_.palette_is_visible, self.cmd_object_.palette_show_close_button, self.cmd_object_.palette_is_resizable, self.cmd_object_.palette_width, self.cmd_object_.palette_height, True ) # Add handler to HTMLEvent of the palette. on_html_event_handler = _HTMLEventHandler(self.cmd_object_) palette.incomingFromHTML.add(on_html_event_handler) self.cmd_object_.handlers.append(on_html_event_handler) self.cmd_object_.html_handlers.append(on_html_event_handler) # Add handler to CloseEvent of the palette. on_closed_handler = _PaletteCloseHandler(self.cmd_object_) palette.closed.add(on_closed_handler) self.cmd_object_.handlers.append(on_closed_handler) else: main_url = urlparse(self.cmd_object_.palette_html_file_url) current_url = urlparse(palette.htmlFileURL) if not ( (not self.cmd_object_.palette_force_url_reload) & (main_url.netloc == current_url.netloc) & (main_url.path == current_url.path) ): # ui.messageBox(current_url.netloc + " vs. " + main_url.netloc) # ui.messageBox(current_url.path + " vs. " + main_url.path) # ui.messageBox(str(self.cmd_object_.palette_force_url_reload)) palette.htmlFileURL = self.cmd_object_.palette_html_file_url palette.isVisible = True self.cmd_object_.on_palette_execute(palette) except: ui.messageBox('Palette ({}) Execution Failed: {}'.format( self.cmd_object_.palette_html_file_url, traceback.format_exc()) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_up(self, cls, code_name):", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def script(self):", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def falcon():", "def fix_name(self):\n self._name_fixed = True", "def dumm...
[ "0.6129317", "0.6021787", "0.5983167", "0.59377337", "0.59124655", "0.5910845", "0.5819237", "0.57898045", "0.57898045", "0.5778516", "0.5760022", "0.5747388", "0.5742715", "0.56831336", "0.56831336", "0.56831336", "0.56831336", "0.56460947", "0.5617484", "0.5591955", "0.5591...
0.0
-1
Method executed by Fusion. Don't rename
def notify(self, args): try: html_args = adsk.core.HTMLEventArgs.cast(args) self.cmd_object_.on_html_event(html_args) except: app = adsk.core.Application.cast(adsk.core.Application.get()) ui = app.userInterface ui.messageBox('Failed Handling HTML Event:\n{}'.format(traceback.format_exc()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_up(self, cls, code_name):", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def script(self):", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def falcon():", "def fix_name(self):\n self._name_fixed = True", "def dumm...
[ "0.6129317", "0.6021787", "0.5983167", "0.59377337", "0.59124655", "0.5910845", "0.5819237", "0.57898045", "0.57898045", "0.5778516", "0.5760022", "0.5747388", "0.5742715", "0.56831336", "0.56831336", "0.56831336", "0.56831336", "0.56460947", "0.5617484", "0.5591955", "0.5591...
0.0
-1
Method executed by Fusion. Don't rename
def notify(self, args): try: self.cmd_object_.on_palette_close() except: app = adsk.core.Application.cast(adsk.core.Application.get()) ui = app.userInterface ui.messageBox('Failed During Palette Close:\n{}'.format(traceback.format_exc()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_up(self, cls, code_name):", "def my_rename(self, src, dst):\n self.renamerCalled = True", "def script(self):", "def process_file(file_name):\n pass # delete this line and replace with your code here", "def falcon():", "def fix_name(self):\n self._name_fixed = True", "def dumm...
[ "0.6129317", "0.6021787", "0.5983167", "0.59377337", "0.59124655", "0.5910845", "0.5819237", "0.57898045", "0.57898045", "0.5778516", "0.5760022", "0.5747388", "0.5742715", "0.56831336", "0.56831336", "0.56831336", "0.56831336", "0.56460947", "0.5617484", "0.5591955", "0.5591...
0.0
-1
Builds the selection spec.
def build_selection_spec(client_factory, name): sel_spec = client_factory.create('ns0:SelectionSpec') sel_spec.name = name return sel_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , se...
[ "0.6188935", "0.59599125", "0.5944895", "0.55326456", "0.5366692", "0.5328713", "0.53118664", "0.53038955", "0.5270639", "0.5265871", "0.5262976", "0.5192852", "0.518418", "0.51645607", "0.51583874", "0.5150827", "0.5110556", "0.5098783", "0.5098783", "0.50898474", "0.5065787...
0.72015435
0
Builds the traversal spec object.
def build_traversal_spec(client_factory, name, spec_type, path, skip, select_set): traversal_spec = client_factory.create('ns0:TraversalSpec') traversal_spec.name = name traversal_spec.type = spec_type traversal_spec.path = path traversal_spec.skip = skip traversal_spec.selectSet = select_set return traversal_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_recursive_traversal_spec(client_factory):\r\n visit_folders_select_spec = build_selection_spec(client_factory,\r\n \"visitFolders\")\r\n # For getting to hostFolder from datacenter\r\n dc_to_hf = build_traversal_spec(client_factory, \"dc_to_hf\", \"Datacenter\"...
[ "0.6779343", "0.64210516", "0.57740533", "0.55809647", "0.5465794", "0.5431072", "0.5333568", "0.52883077", "0.5245698", "0.52180934", "0.5119556", "0.5117688", "0.5117688", "0.5085158", "0.504372", "0.5026567", "0.5005321", "0.4998705", "0.4986079", "0.49831903", "0.49652678...
0.70819336
0
Builds the Recursive Traversal Spec to traverse the object managed object hierarchy.
def build_recursive_traversal_spec(client_factory): visit_folders_select_spec = build_selection_spec(client_factory, "visitFolders") # For getting to hostFolder from datacenter dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter", "hostFolder", False, [visit_folders_select_spec]) # For getting to vmFolder from datacenter dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter", "vmFolder", False, [visit_folders_select_spec]) # For getting Host System to virtual machine h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem", "vm", False, [visit_folders_select_spec]) # For getting to Host System from Compute Resource cr_to_h = build_traversal_spec(client_factory, "cr_to_h", "ComputeResource", "host", False, []) # For getting to datastore from Compute Resource cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds", "ComputeResource", "datastore", False, []) rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp") rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm") # For getting to resource pool from Compute Resource cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp", "ComputeResource", "resourcePool", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # For getting to child res pool from the parent res pool rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool", "resourcePool", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # For getting to Virtual Machine from the Resource Pool rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool", "vm", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # Get the assorted traversal spec which takes care of the objects to # be searched for from the root folder traversal_spec = build_traversal_spec(client_factory, "visitFolders", "Folder", "childEntity", False, [visit_folders_select_spec, dc_to_hf, dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp, rp_to_rp, h_to_vm, rp_to_vm]) return 
traversal_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def HierarchyIterator(obj):\n w...
[ "0.63169825", "0.577359", "0.5654486", "0.5466749", "0.5373837", "0.52702874", "0.5185981", "0.517904", "0.50982857", "0.50975597", "0.5090196", "0.5065294", "0.50485235", "0.50385857", "0.503404", "0.49969995", "0.4953721", "0.4944381", "0.49171883", "0.4908716", "0.49081615...
0.6471344
0
Builds the Property Spec.
def build_property_spec(client_factory, type="VirtualMachine", properties_to_collect=["name"], all_properties=False): property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = all_properties property_spec.pathSet = properties_to_collect property_spec.type = type return property_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec", "def build(sel...
[ "0.6053929", "0.595817", "0.5930383", "0.5877144", "0.5749173", "0.56912214", "0.5589146", "0.5584564", "0.5541774", "0.5472907", "0.54651445", "0.53387296", "0.5295169", "0.5217021", "0.5211193", "0.51826376", "0.51688266", "0.5148735", "0.5138071", "0.5137829", "0.51148397"...
0.69546396
0
Builds the object Spec.
def build_object_spec(client_factory, root_folder, traversal_specs): object_spec = client_factory.create('ns0:ObjectSpec') object_spec.obj = root_folder object_spec.skip = False object_spec.selectSet = traversal_specs return object_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build(self, spec, prefix):\n make()", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n ...
[ "0.70841205", "0.66101444", "0.6534153", "0.6428157", "0.630121", "0.6247484", "0.6246544", "0.6246544", "0.62344396", "0.62344396", "0.62138087", "0.62038517", "0.61900073", "0.61408484", "0.6102088", "0.607086", "0.607086", "0.607086", "0.60343456", "0.599119", "0.58868694"...
0.6370524
4
Builds the Property Filter Spec.
def build_property_filter_spec(client_factory, property_specs, object_specs): property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_filter_spec.propSet = property_specs property_filter_spec.objectSet = object_specs return property_filter_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_prop_filter_spec(client_factory, obj_spec, prop_spec):\r\n prop_filter_spec = \\\r\n client_factory.create('ns0:PropertyFilterSpec')\r\n prop_filter_spec.propSet = prop_spec\r\n prop_filter_spec.objectSet = obj_spec\r\n return prop_filter_spec", "def get_prop_filter_spec(client_factory...
[ "0.6691381", "0.6613962", "0.6211153", "0.60873485", "0.59203535", "0.5826266", "0.5766887", "0.546501", "0.54321504", "0.5387858", "0.5352514", "0.5318498", "0.53072774", "0.52971756", "0.52805036", "0.5272654", "0.5242891", "0.5188393", "0.51839644", "0.5144999", "0.5102500...
0.76600534
0
Gets the properties of the Managed object specified.
def get_object_properties(vim, collector, mobj, type, properties): client_factory = vim.client.factory if mobj is None: return None usecoll = collector if usecoll is None: usecoll = vim.get_service_content().propertyCollector property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = (properties is None or len(properties) == 0) property_spec.pathSet = properties property_spec.type = type object_spec = client_factory.create('ns0:ObjectSpec') object_spec.obj = mobj object_spec.skip = False property_filter_spec.propSet = [property_spec] property_filter_spec.objectSet = [object_spec] return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object_properties(vim, collector, mobj, type, properties):\n client_factory = vim.client.factory\n if mobj is None:\n return None\n usecoll = collector\n if usecoll is None:\n usecoll = vim.service_content.propertyCollector\n property_filter_spec = client_factory.create('ns0:Pr...
[ "0.66242605", "0.6595597", "0.65718126", "0.65414923", "0.64137036", "0.63451725", "0.6315363", "0.6177473", "0.61558604", "0.61558604", "0.61421597", "0.61126566", "0.6106942", "0.6099833", "0.60941976", "0.6071322", "0.6071322", "0.6018031", "0.599956", "0.5993409", "0.5958...
0.679616
0
Gets a particular property of the Managed Object.
def get_dynamic_property(vim, mobj, type, property_name): obj_content = \ get_object_properties(vim, None, mobj, type, [property_name]) property_value = None if obj_content: dynamic_property = obj_content[0].propSet if dynamic_property: property_value = dynamic_property[0].val return property_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_property(self, property):\n return self.shell([\"getprop\", property])", "def get_property(self,name):\n return self.dp.get_property(name)", "def get_property(self, key):\n return self.properties.get(key)", "def get_property(self, name):\n if (not name in self.properties):\n ...
[ "0.7311495", "0.7275758", "0.7193801", "0.7128644", "0.70792645", "0.7021553", "0.6953444", "0.69131315", "0.6811504", "0.67831117", "0.67660815", "0.67022717", "0.66058373", "0.65965885", "0.65847474", "0.6570505", "0.65630543", "0.65239185", "0.63693666", "0.6347373", "0.63...
0.62482166
27
Gets the list of objects of the type specified.
def get_objects(vim, type, properties_to_collect=["name"], all=False): client_factory = vim.client.factory object_spec = build_object_spec(client_factory, vim.get_service_content().rootFolder, [build_recursive_traversal_spec(client_factory)]) property_spec = build_property_spec(client_factory, type=type, properties_to_collect=properties_to_collect, all_properties=all) property_filter_spec = build_property_filter_spec(client_factory, [property_spec], [object_spec]) return vim.RetrieveProperties(vim.get_service_content().propertyCollector, specSet=[property_filter_spec])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_objects_by_type(self, *types) -> List[TgnObject]:\n if not types:\n return list(self.objects.values())\n types_l = [o.lower() for o in types]\n return [o for o in self.objects.values() if o.type.lower() in types_l]", "def all(self, *args, **kwargs):\n list_to_return = [...
[ "0.7903194", "0.7377341", "0.73040676", "0.70410174", "0.692821", "0.69026285", "0.68966556", "0.6836504", "0.6808106", "0.6714846", "0.6546497", "0.65462595", "0.6545677", "0.6539864", "0.6508331", "0.6492654", "0.64498925", "0.64006305", "0.63523966", "0.63385695", "0.63068...
0.6089714
40
Builds the Property Spec Object.
def get_prop_spec(client_factory, spec_type, properties): prop_spec = client_factory.create('ns0:PropertySpec') prop_spec.type = spec_type prop_spec.pathSet = properties return prop_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_property_spec(client_factory, type=\"VirtualMachine\",\r\n properties_to_collect=[\"name\"],\r\n all_properties=False):\r\n property_spec = client_factory.create('ns0:PropertySpec')\r\n property_spec.all = all_properties\r\n property_spec.pathSet = p...
[ "0.7005923", "0.6116688", "0.5938139", "0.58759177", "0.58554643", "0.57825583", "0.5649714", "0.5636901", "0.5603624", "0.559764", "0.5581322", "0.5505116", "0.54937416", "0.54541737", "0.5407613", "0.5344718", "0.532845", "0.5320788", "0.52512217", "0.5245343", "0.5237961",...
0.59059453
3
Builds the Object Spec object.
def get_obj_spec(client_factory, obj, select_set=None): obj_spec = client_factory.create('ns0:ObjectSpec') obj_spec.obj = obj obj_spec.skip = False if select_set is not None: obj_spec.selectSet = select_set return obj_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_object_spec(client_factory, root_folder, traversal_specs):\r\n object_spec = client_factory.create('ns0:ObjectSpec')\r\n object_spec.obj = root_folder\r\n object_spec.skip = False\r\n object_spec.selectSet = traversal_specs\r\n return object_spec", "def build(self, spec, prefix):\n ...
[ "0.66367", "0.6583926", "0.64170843", "0.62385815", "0.6159886", "0.6103416", "0.60937", "0.6093361", "0.5987617", "0.5961307", "0.59383756", "0.59383756", "0.58615595", "0.58615595", "0.58420074", "0.5829586", "0.5824573", "0.58184236", "0.58095497", "0.5804861", "0.5796754"...
0.54430425
40
Builds the Property Filter Spec Object.
def get_prop_filter_spec(client_factory, obj_spec, prop_spec): prop_filter_spec = \ client_factory.create('ns0:PropertyFilterSpec') prop_filter_spec.propSet = prop_spec prop_filter_spec.objectSet = obj_spec return prop_filter_spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_property_filter_spec(client_factory, property_specs, object_specs):\r\n property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')\r\n property_filter_spec.propSet = property_specs\r\n property_filter_spec.objectSet = object_specs\r\n return property_filter_spec", "def get_prop_...
[ "0.76403946", "0.65880555", "0.6298685", "0.6076335", "0.597899", "0.5965586", "0.5764983", "0.5655848", "0.56413084", "0.5557564", "0.5519107", "0.5488389", "0.54726166", "0.5452801", "0.544979", "0.5363146", "0.5314718", "0.51662815", "0.5132564", "0.5092276", "0.50902605",...
0.66637796
1
Gets the list of properties for the collection of objects of the type specified.
def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties): client_factory = vim.client.factory if len(obj_list) == 0: return [] prop_spec = get_prop_spec(client_factory, type, properties) lst_obj_specs = [] for obj in obj_list: lst_obj_specs.append(get_obj_spec(client_factory, obj)) prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs, [prop_spec]) return vim.RetrieveProperties(vim.get_service_content().propertyCollector, specSet=[prop_filter_spec])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_properties_for_a_collection_of_objects(vim, type,\n obj_list, properties):\n client_factory = vim.client.factory\n if len(obj_list) == 0:\n return []\n prop_spec = get_prop_spec(client_factory, type, properties)\n lst_obj_specs = []\n for ...
[ "0.7357272", "0.65244734", "0.6457637", "0.6373247", "0.6370086", "0.6297487", "0.62510055", "0.62469465", "0.6221552", "0.6219856", "0.6206821", "0.610221", "0.60433024", "0.60395074", "0.5994236", "0.59763896", "0.59744734", "0.59715617", "0.59705645", "0.5961555", "0.59590...
0.74611396
0
Take the top c cards of each stack and return a copy
def copy_stacks(self, c1, c2): return ( deque([n for n in self._s1][-c1:]), deque([n for n in self._s2][-c2:]) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pop_top_card(self):\n return self.pop_card(top=True)", "def top_draw(self):\n top_card = self.cards.pop(0)\n return top_card", "def top(self):\n return self.get_cards()[-1]", "def get_card_at_top_index(deck):\n \n small_joker_value = get_small_joker_value(deck)\n if d...
[ "0.6781717", "0.6382215", "0.63411653", "0.63055867", "0.6273443", "0.6257995", "0.61584336", "0.61021817", "0.60572743", "0.6045613", "0.6024501", "0.6022065", "0.600286", "0.5925546", "0.5907862", "0.58364666", "0.5817762", "0.5810385", "0.5734856", "0.5729768", "0.5695732"...
0.57403517
18
Return tuple (player number, s1, s2). The first element indicates the winner
def play(self): while len(self._s1) > 0 and len(self._s2) > 0: if self._serialize() in self._seen_games: # Game over player 1 wins return (1, *self.decks) self._seen_games.add(self._serialize()) n1, n2 = self._s1.pop(), self._s2.pop() if len(self._s1) >= n1 and len(self._s2) >= n2: # Play a sub game sub_game = Game(*self.copy_stacks(n1, n2)) res, _, _ = sub_game.play() else: res = 1 if n1 > n2 else 2 if res == 1: self._s1.appendleft(n1) self._s1.appendleft(n2) else: self._s2.appendleft(n2) self._s2.appendleft(n1) return (1 if len(self._s1) else 2, *self.decks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def winner(self):\n # Credit to Dariusz Walczak for inspiration.\n # http://stackoverflow.com/questions/1720421/merge-two-lists-in-python\n moves = [p.possible_moves(p.pieces, self) for p in self.players]\n if False in [mv == [] for mv in moves]:\n return (\"None\")\n ...
[ "0.7398989", "0.7312626", "0.7270267", "0.7123724", "0.7110263", "0.7102775", "0.70441735", "0.70315427", "0.6907012", "0.6888181", "0.6876374", "0.6870772", "0.6866335", "0.6822034", "0.67885065", "0.6718184", "0.67145145", "0.6701902", "0.6701902", "0.6701902", "0.6685644",...
0.60619944
93
Initialize the parameters of the logistic regression
def __init__(self, input, n_in, n_out): # start-snippet-1 # initialize with 0 the weights W as a matrix of shape (n_in, n_out) self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True ) # initialize the baises b as a vector of n_out 0s self.b = theano.shared(value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True ) # symbolic expression for computing the matrix of class-membership probabilities where: # W is a matrix where column-k represent the separation hyper plain for class-k # x is a matrix where row-j represents input training sample-j # b is a vector where element-k represent the free parameter of hyper plane-k self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b) # symbolic description of how to compute prediction as class whose probability is maximal self.y_pred = T.argmax(self.p_y_given_x, axis=1) # end-snippet-1 # parameters of the model self.params = [self.W, self.b] # keep track of model input self.input = input
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_initial_params(model: LogisticRegression):\n n_classes = 15 # threat types\n n_features = 33 # Number of features in dataset\n model.classes_ = np.array([i for i in range(15)])\n\n model.coef_ = np.zeros((n_classes, n_features))\n if model.fit_intercept:\n model.intercept_ = np.zeros(...
[ "0.77980274", "0.7470837", "0.72140586", "0.6893265", "0.68832946", "0.6734596", "0.6729252", "0.66861194", "0.6655325", "0.65969175", "0.6565215", "0.65200406", "0.6474855", "0.64675874", "0.6451465", "0.6448672", "0.644657", "0.6324524", "0.6307155", "0.6290791", "0.6231589...
0.0
-1
Return the mean of the negative loglikelihood of the prediction of this model under a given target distribution.
def negative_log_likelihood(self, y): # start-snippet-2 # y.shape[0] is (symbolically) the number of rows in y, i.e. number of examples (call it n) in the minibatch # T.arange(y.shape[0]) is a symbolic vector which will contain [0,1,2,... n-1] # T.log(self.p_y_given_x) is a matrix of Log-Probabilities (call it LP) with one row per example and one column per class # LP[T.arange(y.shape[0]),y] is a vector v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ..., LP[n-1,y[n-1]]] and # T.mean(LP[T.arange(y.shape[0]),y]) is the mean (across minibatch examples) of the elements in v, i.e., the mean log-likelihood across the minibatch. #print "y.ndim = ",y.ndim return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y]) # end-snippet-2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def negative_log_likelihood(self):\n # y.shape[0] is (symbolically) the number of rows in y, i.e.,\n # number of examples (call it n) in the minibatch\n # T.arange(y.shape[0]) is a symbolic vector which will contain\n # [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of\n # L...
[ "0.7408264", "0.7209359", "0.7209359", "0.7208547", "0.7176592", "0.7176592", "0.70599693", "0.68988174", "0.68891734", "0.65442437", "0.64843166", "0.64635515", "0.64410263", "0.6426811", "0.6400936", "0.639749", "0.639114", "0.6367916", "0.6353634", "0.63428533", "0.6317736...
0.6822186
9
Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one loss over the size of the minibatch
def errors(self, y): # check if y has same dimension of y_pred if y.ndim != self.y_pred.ndim: raise TypeError( 'y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type) ) # check if y is of the correct datatype if y.dtype.startswith('int'): # the T.neq operator returns a vector of 0s and 1s, where 1 # represents a mistake in prediction return T.mean(T.neq(self.y_pred, y)) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)", "def nb_errors_nb(self, input_data, target):\n input_data_resize = input_data.view(2000, 1,...
[ "0.64593107", "0.6453829", "0.6450095", "0.6359758", "0.6284254", "0.6248897", "0.61730903", "0.6143888", "0.6141596", "0.61282915", "0.61234075", "0.6117177", "0.6080115", "0.60368556", "0.60218304", "0.60163116", "0.60140604", "0.5989487", "0.59858793", "0.59834915", "0.597...
0.0
-1
Demonstrate stochastic gradient descent optimization of a loglinear model
def sgd_optimization(data_type, target, model_dir, learning_rate=0.1, n_epochs=10, batch_size=100): test_fold = 1 #xxxxxxxxxxxx TEMP XXXXXXXXXXXXXXXX write_model_file = model_dir + '/model.' + target + '.' + str(test_fold) +'.pkl' fold_path = helpers.get_fold_path(data_type) targets = helpers.build_targets(fold_path, data_type) fnames = targets[target] fold_accuracies = {} did_something = False # pct_ct = [] # roc_auc = [] # run 4 folds vs 1 fold with each possible scenario # for curr_fl in range(5): # print 'Building data for target: ' + target + ', fold: ' + str(curr_fl) # loop through all folds, for now just do 1! datasets, test_set_labels = helpers.th_load_data(data_type, fold_path, target, fnames, 0, test_fold) train_set_x, train_set_y = datasets[0] test_set_x, test_set_y = datasets[1] valid_set_x = train_set_x valid_set_y = train_set_y # compute number of rows for training, validation and testing rows_train = train_set_x.get_value(borrow=True).shape[0] rows_valid = valid_set_x.get_value(borrow=True).shape[0] rows_test = test_set_x.get_value(borrow=True).shape[0] # compute number of minibatches for training, validation and testing n_train_batches = rows_train / batch_size n_valid_batches = rows_valid / batch_size n_test_batches = rows_test / batch_size ####################### BUILD ACTUAL MODEL ####################### # allocate symbolic variables for the data index = T.lscalar() # index to a [mini]batch # generate symbolic variables for input (x and y represent a minibatch) x = T.matrix('x') # data, presented as rasterized images y = T.ivector('y') # labels, presented as 1D vector of [int] labels # construct the logistic regression class # n_in: Each MNIST image has size 32*32 = 1024 # n_out: 10 different digits - multi-task LR classifier = LogisticRegression(input=x, n_in=32 * 32, n_out=2) # the cost we minimize during training is the negative log likelihood of the model in symbolic format cost = classifier.negative_log_likelihood(y) # compiling a Theano 
function that computes the mistakes that are made by the model on a minibatch test_model = theano.function( inputs=[index], outputs=classifier.errors(y), givens={ x: test_set_x[index * batch_size: (index + 1) * batch_size], y: test_set_y[index * batch_size: (index + 1) * batch_size] } ) validate_model = theano.function( inputs=[index], outputs=classifier.errors(y), givens={ x: valid_set_x[index * batch_size: (index + 1) * batch_size], y: valid_set_y[index * batch_size: (index + 1) * batch_size] } ) # compute the gradient of cost with respect to theta = (W,b) g_W = T.grad(cost=cost, wrt=classifier.W) g_b = T.grad(cost=cost, wrt=classifier.b) # start-snippet-3 # specify how to update the parameters of the model as a list of # (variable, update expression) pairs. updates = [(classifier.W, classifier.W - learning_rate * g_W), (classifier.b, classifier.b - learning_rate * g_b)] # compiling a Theano function `train_model` that returns the cost, but in # the same time updates the parameter of the model based on the rules # defined in `updates` train_model = theano.function( inputs=[index], outputs=cost, updates=updates, givens={ x: train_set_x[index * batch_size: (index + 1) * batch_size], y: train_set_y[index * batch_size: (index + 1) * batch_size] } ) # end-snippet-3 ################ TRAIN MODEL ################ # early-stopping parameters patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is found improvement_threshold = 0.995 # a relative improvement of this much is considered significant validation_frequency = min(n_train_batches, patience / 2) # go through this many minibatches before checking the network on the validation set; in this case we check every epoch best_validation_loss = numpy.inf test_score = 0. 
start_time = time.clock() done_looping = False epoch = 0 while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 for minibatch_index in xrange(n_train_batches): minibatch_avg_cost = train_model(minibatch_index) # iteration number iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = [validate_model(i) for i in xrange(n_valid_batches)] this_validation_loss = numpy.mean(validation_losses) # print( 'epoch %i, minibatch %i/%i, validation error %f %%' % # (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.) ) # if we got the best validation score until now if this_validation_loss < best_validation_loss: #improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * \ improvement_threshold: patience = max(patience, iter * patience_increase) best_validation_loss = this_validation_loss # test it on the test set test_losses = [test_model(i) for i in xrange(n_test_batches)] test_score = numpy.mean(test_losses) # print( (' epoch %i, minibatch %i/%i, test error of best model %f %%' ) % # ( epoch, minibatch_index + 1, n_train_batches, test_score * 100. ) ) # save the best model with open(write_model_file, 'w') as f: cPickle.dump(classifier, f) if patience <= iter: done_looping = True break end_time = time.clock() print( ('Optimization complete for %d with best validation score of %f %% with test performance %f %%') % (test_fold, best_validation_loss * 100., test_score * 100.) ) print 'The code ran for %d epochs, with %f epochs/sec' % (epoch, 1. 
* epoch / (end_time - start_time)) # print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.1fs' % ((end_time - start_time))) # end-snippet-4 # Now we do the predictions # load the saved best model for this fold classifier = cPickle.load(open(write_model_file)) # compile a predictor function predict_model = theano.function(inputs=[classifier.input], outputs=[classifier.y_pred,classifier.p_y_given_x]) # compile a confidence predictor function # predict_conf_model = theano.function( inputs=[classifier.input], outputs=classifier.p_y_given_x) # We can test it on some examples from test test """ *************** build AUC curve *************** """ # get the probability of our predictions test_set = test_set_x.get_value() predicted_values, conf_preds = predict_model(test_set[:(rows_test)]) conf_predictions = [] for i in range(len(conf_preds)): # ignore the first column; this gives a lower score that seems wrong. conf_predictions.append(conf_preds[i][1]) # determine ROC / AUC fpr, tpr, thresholds = metrics.roc_curve(test_set_labels, conf_predictions) auc = metrics.auc(fpr, tpr) # e.g. 0.855 """ *********************************************** """ num_correct = 0 num_false = 0 for i in range(len(predicted_values)): if predicted_values[i] == test_set_labels[i]: num_correct += 1 else: num_false += 1 total = len(predicted_values) percent_correct = num_correct / float(total) fold_results = '' fold_results += '#################### Results for ' + data_type + ' ####################' + '\n' fold_results += 'target:' + target + ' fold:' + str(test_fold) + ' predicted: ' + \ str(total) + ' wrong: ' + \ str(num_false) + ' pct correct: ' + str(percent_correct) + ', auc: ' + str(auc) print fold_results write_predictions_file = model_dir + '/predictions.' + target + '.' 
+ str(test_fold) +'.txt' with open(write_predictions_file, 'w') as f: f.write(fold_results + "\n") # def run_predictions(data_type, curr_target): # fold_path = get_fold_path(data_type) # targets = build_targets(fold_path, data_type) # # print "Found " + str(len(targets)) + " targets for " + data_type # fold_accuracies = {} # did_something = False # for target, fnames in targets.iteritems(): # if (target != curr_target): # continue # else: # did_something = True # # retrieve our stratified folds # folds = get_folds(data_type, fold_path, target, fnames) # pct_ct = [] # roc_auc = [] # # run 4 folds vs 1 fold with each possible scenario # for curr_fl in range(5): # print 'Building data for target: ' + target + ', fold: ' + str(curr_fl) # # folds 1-4 # temp_data = [] # for i in range(len(folds)): # if(i == curr_fl): # # don't include the test fold # continue # else: # temp_data += folds[i] # # vs current 5th test fold # test_data = folds[curr_fl] # """ Turning 1024 bits into features is a slow process """ # # build training data # X = [] # Y = [] # for i in range(len(temp_data)): # row = [] # for bit in temp_data[i][0]: # row.append(int(bit)) # X.append(row) # Y.append(int(temp_data[i][1])) # X = np.array(X) # Y = np.array(Y) # # build test data # X_test = [] # Y_test = [] # for i in range(len(test_data)): # row = [] # for bit in test_data[i][0]: # row.append(int(bit)) # X_test.append(row) # Y_test.append(int(test_data[i][1])) # X_test = np.array(X_test) # Y_test = np.array(Y_test) # percent_correct, auc = random_forest(target, X, Y, X_test, Y_test, curr_fl) # pct_ct.append(percent_correct) # roc_auc.append(auc) # # now get the average fold results for this target # accuracy = sum(pct_ct) / float(len(pct_ct)) # all_auc = sum(roc_auc) / float(len(roc_auc)) # print 'Results for '+ target + ': accuracy: ' + str(accuracy) + ', auc: ' + str(all_auc) # # update fold accuracies # fold_accuracies[target] = (accuracy, all_auc) if(did_something == False): print curr_target + ' 
not found in ' + data_type + '!' exit(0) print '#################### Results for ' + data_type + ' ####################' # output results accuracies = 0.00 aucs = 0.00 num_targets = 0.00 for target, obj in fold_accuracies.iteritems(): acc = obj[0] auc = obj[1] print target + ' accuracy: ' + str(acc) + ', auc:' + str(auc) accuracies += acc aucs += auc num_targets += 1 # overall_acc = accuracies / num_targets # overall_auc = aucs / num_targets # print ' overall accuracy: ' + str(overall_acc) + ', overall auc: ' + str(overall_auc) print '############################################################'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logistic_regression_SGD(y, tx, initial_w, max_iters, gamma, batch_size=10, verbose=False):\n return stochastic_gradient_descent(y, tx, initial_w, max_iters, gamma, compute_logistic_loss, \n compute_logistic_gradient, batch_size=10, verbose=verbose)", "def log_prior_gr...
[ "0.7094588", "0.6940799", "0.69093955", "0.6795237", "0.6782252", "0.6769422", "0.67630374", "0.67446244", "0.6688038", "0.66774327", "0.66749567", "0.66108364", "0.66103506", "0.6609761", "0.65868056", "0.6584251", "0.6573774", "0.6572846", "0.6521604", "0.6511357", "0.64980...
0.0
-1
Run `code` with profiler. Used by ``%prun`` and ``%run p``.
def _run_with_profiler(self, code, opts, namespace): # Fill default values for unspecified options: opts.merge(Struct(D=[''], l=[], s=['time'], T=[''])) prof = profile.Profile() try: prof = prof.runctx(code, namespace, namespace) sys_exit = '' except SystemExit: sys_exit = """*** SystemExit exception caught in code being profiled.""" stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s) lims = opts.l if lims: lims = [] # rebuild lims with ints/floats/strings for lim in opts.l: try: lims.append(int(lim)) except ValueError: try: lims.append(float(lim)) except ValueError: lims.append(lim) # Trap output. stdout_trap = StringIO() stats_stream = stats.stream try: stats.stream = stdout_trap stats.print_stats(*lims) finally: stats.stream = stats_stream output = stdout_trap.getvalue() output = output.rstrip() if 'q' not in opts: page.page(output) print(sys_exit, end=' ') dump_file = opts.D[0] text_file = opts.T[0] if dump_file: prof.dump_stats(dump_file) if text_file: with open(text_file, 'w') as pfile: pfile.write(output) if 'r' in opts: return stats else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def profile_code(profiler):\n print('\\n')\n ps = pstats.Stats(profiler).strip_dirs().sort_stats('cumulative')\n ps.print_stats(10)", "def runner(code, out_stream):\n code_obj = compiler.compile_source(code)\n vm = virtual_machine.VirtualMachine(out_stream)\n vm.run_code(code_obj)", "def part...
[ "0.6385464", "0.62743026", "0.6012042", "0.5954541", "0.5947372", "0.5926711", "0.5884469", "0.5836414", "0.5775137", "0.5737329", "0.5666656", "0.5654356", "0.55813533", "0.55803514", "0.555477", "0.55139744", "0.5504729", "0.5470353", "0.5440272", "0.5439059", "0.5430186", ...
0.7272947
0
read feature file, find out mass shift then correct
def feature_file_mass_correction(feature_filename: str): output_feature_filename = feature_filename + '.mass_corrected' ppm_shift = [] with open(feature_filename, 'r') as f: reader = csv.reader(f, delimiter=',') header = next(reader) seq_index = header.index("seq") mz_index = header.index("m/z") z_index = header.index("z") for line in reader: mz = float(line[mz_index]) z = float(line[z_index]) observed_mass = mz * z - z * config.mass_H if not line[seq_index]: continue okay, peptide = parse_raw_sequence(line[seq_index]) if not okay: # unknown mods continue theoretical_mass = compute_neutral_peptide_mass(peptide) ppm = (observed_mass - theoretical_mass) / theoretical_mass * 1e6 ppm_shift.append(ppm) if len(ppm_shift) < 100: raise ValueError("too less identified feature for mass correction") ppm_shift = np.median(ppm_shift) print(f"ppm shift: {ppm_shift}") with open(feature_filename, 'r') as fr: with open(output_feature_filename, 'w') as fw: reader = csv.reader(fr, delimiter=',') writer = csv.writer(fw, delimiter=',') writer.writerow(next(reader)) for line in reader: mz = float(line[mz_index]) mz = mz * (1 - ppm_shift * 1e-6) line[mz_index] = "{}".format(mz) writer.writerow(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_msp(infile_name,feat_lim_file=\"\",\n\t\t\t sum_feats=False,selected_features=[],\n\t\t\t max_dist=275,step_size=0.005,feat_bins=[],\n\t\t\t top_peaks=50,windowed_mode=False):\n\n\tinfile = open(infile_name)\n\n\tif len(feat_lim_file) > 0:\n\t\tselected_features = [float(f.strip()) for f in open(feat_lim_...
[ "0.61413145", "0.6100522", "0.56102365", "0.55467093", "0.55414826", "0.5517877", "0.55111635", "0.5491323", "0.5489486", "0.54673564", "0.5458751", "0.54487556", "0.5440117", "0.5364303", "0.53435946", "0.53398484", "0.5336551", "0.5330061", "0.5308056", "0.5289896", "0.5265...
0.6439688
0
Crop images into the four corners, center, and their mirrored versions.
def _oversample(images, crop_dims): # Dimensions and center. im_shape = np.array(images[0].shape) crop_dims = np.array(crop_dims) im_center = im_shape[:2] / 2.0 # Make crop coordinates h_indices = (0, im_shape[0] - crop_dims[0]) w_indices = (0, im_shape[1] - crop_dims[1]) crops_ix = np.empty((5, 4), dtype=int) curr = 0 for i in h_indices: for j in w_indices: crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1]) curr += 1 crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([ -crop_dims / 2.0, crop_dims / 2.0 ]) crops_ix = np.tile(crops_ix, (2, 1)) # Extract crops crops = np.empty((NUM_OVER_SAMPLES * len(images), crop_dims[0], crop_dims[1], im_shape[-1]), dtype=np.float32) ix = 0 for im in images: for crop in crops_ix: crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :] ix += 1 crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors return crops
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop_center_img(self):\n # TODO Task 1.1\n img = self.data\n img_with_missing_crop = np.copy(img)\n dim =128\n crop = dim // 2\n start = crop - (crop // 2)\n #ground truth overlaps img_with_missing_crop by 7 pixels in all directions\n img_with_missing_cro...
[ "0.68269926", "0.65620327", "0.648815", "0.6427035", "0.6379702", "0.63226426", "0.62055635", "0.6204814", "0.6204427", "0.6082633", "0.604973", "0.60170174", "0.59973186", "0.5994196", "0.595956", "0.5947782", "0.59313524", "0.5928219", "0.59176385", "0.58877224", "0.5885500...
0.5979108
14
Helper to hold params and allow func like the original.
def __init__(self, graph, weights, input_tensor_name=None, output_tensor_name=None): self.sess = tf.Session() new_saver = tf.train.import_meta_graph(graph) new_saver.restore(self.sess, weights) get_tensor = tf.get_default_graph().get_tensor_by_name # Get the initial place holder, else default if input_tensor_name: self.placeholder = get_tensor(input_tensor_name) else: self.placeholder = get_tensor('Placeholder:0') if output_tensor_name: self.softmax = get_tensor(output_tensor_name) else: self.softmax = get_tensor('Softmax:0') # Save trainables into params trainable_params = tf.trainable_variables() layers = {} params = {} def add_to_layer(name): try: layers[name] = get_tensor("{}:0".format(name)) except KeyError: try: layers[name] = get_tensor("{}/Relu:0".format(name)) except KeyError: print("Activation Not Found.") pass for v in trainable_params: if 'weight' in v.name: name = v.name.split('/')[0] params[name] = v add_to_layer(name) # Pooling layers usually don't have a nice way of gathering. for n in tf.get_default_graph().as_graph_def().node: if 'pool' in n.name: v = get_tensor("{}:0".format(n.name)) name = n.name.split('/')[0] params[name] = v add_to_layer(name) # Get trainable params - 1 holds locations the other is a dummy script self.params = {} self._params = params self.layers = layers # Save empty dict into blobs self.blobs = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fn(*args, **kwargs):\n pass", "def params(funcarglist):\n def wrapper(function):\n function.funcarglist = funcarglist\n return function\n return wrapper", "def my_func(a, b):", "def wrapper(*args):", "def dummy_fn(self, *args, **kwargs):", "def test_param_of_func(self):...
[ "0.692556", "0.6727447", "0.67098534", "0.658326", "0.6408254", "0.63891876", "0.6376225", "0.6366123", "0.62743974", "0.62322086", "0.6189551", "0.6189551", "0.6189551", "0.6188234", "0.6184675", "0.6144916", "0.61128443", "0.6085491", "0.6061988", "0.6006739", "0.5996577", ...
0.0
-1
FUNKTION VON EILEEN Loads a data file saved by relacs. Returns a tuple of dictionaries containing the data and the header information
def load(filename): with open(filename, 'r') as fid: L = [l.lstrip().rstrip() for l in fid.readlines()] ret = [] dat = {} X = [] keyon = False currkey = None for l in L: # if empty line and we have data recorded if (not l or l.startswith('#')) and len(X) > 0: keyon = False currkey = None dat['data'] = np.array(X) ret.append(dat) X = [] dat = {} if '---' in l: continue if l.startswith('#'): if ":" in l: tmp = [e.rstrip().lstrip() for e in l[1:].split(':')] if currkey is None: dat[tmp[0]] = tmp[1] else: dat[currkey][tmp[0]] = tmp[1] elif "=" in l: tmp = [e.rstrip().lstrip() for e in l[1:].split('=')] if currkey is None: dat[tmp[0]] = tmp[1] else: dat[currkey][tmp[0]] = tmp[1] elif l[1:].lower().startswith('key'): dat['key'] = [] keyon = True elif keyon: dat['key'].append(tuple([e.lstrip().rstrip() for e in l[1:].split()])) else: currkey = l[1:].rstrip().lstrip() dat[currkey] = {} elif l: # if l != '' keyon = False currkey = None X.append( [float(e) for e in l.split()]) if len(X) > 0: dat['data'] = np.array(X) else: dat['data'] = [] ret.append(dat) return tuple(ret)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self) -> None:", "def load_data(self):", "def load_data():\n\n server_node = load_nodes(SERVER_NODE_INFILE)\n road_node = load_nodes(ROAD_NODE_INFILE)\n road_segment_point = load_nodes(ROAD_SEGMENT_POINT_INFILE)\n\n return server_node, road_node, road_segment_point", "def getHeaderD...
[ "0.6701253", "0.64370406", "0.63576305", "0.6343084", "0.62742215", "0.62503767", "0.62353116", "0.62206954", "0.6175295", "0.61426353", "0.61399317", "0.61399025", "0.6133964", "0.61294085", "0.60999405", "0.60984147", "0.60699123", "0.60694903", "0.6038092", "0.60380447", "...
0.58532524
42
Factory method to create a cache object from github/spilchen/baseball_id_db This is called as part of package initialization and so can be refered to via the Lookup variable. >>> from baseball_id import Lookup >>> Lookup.from_yahoo_ids([10794, 9542, 7578])
def create(cls): ssl._create_default_https_context = ssl._create_unverified_context c = lookup.Cache('https://raw.githubusercontent.com/spilchen/baseball_id_db/main/master.csv') return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_fake(cls):\n source = pkg_resources.open_text('baseball_id', 'sample.master.csv',\n encoding='iso-8859-1')\n c = lookup.Cache(source)\n return c", "def construct(cls, obs_lists, platform_id):\n step = 0\n LookupTable = []\n ...
[ "0.6530035", "0.5592927", "0.5449668", "0.54129845", "0.5390788", "0.5236379", "0.52326137", "0.52017933", "0.5083397", "0.50474405", "0.49929607", "0.4948093", "0.49268007", "0.48906374", "0.48458242", "0.48337904", "0.4833684", "0.48185053", "0.481795", "0.4808004", "0.4792...
0.6934165
0
Factory method to create a fake data source This refers to a static data file that is in the current package. This function exists for testing purposes as it avoids network traffic to get the actual uptodate ID mapping.
def create_fake(cls): source = pkg_resources.open_text('baseball_id', 'sample.master.csv', encoding='iso-8859-1') c = lookup.Cache(source) return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data_source_soaps_id_dynamic_datas_get(self):\n pass", "def init_locally_processed_dataset(directory, source_datasets, uuid_=None):\n md = ptype.DatasetMetadata(\n id_=uuid_,\n # Default creation time is creation of an image.\n creation_dt=datetime.datetime.utcfromtimestam...
[ "0.6308188", "0.62671566", "0.6175177", "0.61343235", "0.5995582", "0.5915337", "0.59063405", "0.58865434", "0.5807432", "0.57975435", "0.5794116", "0.57484156", "0.5740665", "0.56791466", "0.56658155", "0.56552786", "0.5642972", "0.56229484", "0.5622516", "0.56089175", "0.56...
0.70590085
0
The extracter moves files. Arguments input_folder and output_folder are set through GUI. Based on the values in the column called column_name in the spreadsheet, files are copied from input_folder to output_folder. Here, these are the gilbert_numbers in the spreadsheet fed from main(). The are matched to the file names. Each gilber_number gets its own directory in the output_folder. output_folder should be empty, at least not contain the same gilbert_numbers already. Also copies all speaker files from input_folder to output_folder.
def extracter(spreadsheet, column_name): print header, "Running the extracter." root=Tkinter.Tk() root.withdraw() root.update() input_folder=tkFileDialog.askdirectory(title="Inputfolder: Please choose a directory that contains your corpus files") root=Tkinter.Tk() root.withdraw() root.update() output_folder=tkFileDialog.askdirectory(title="Outputfolder: Please choose a directory to copy files into") print header, "Copying files from '{}' to '{}'.".format(input_folder, output_folder) #collecting input files inputfiles=[] print "Locating files." for dirpath, subdirs, files in os.walk(input_folder): for f in files: inputfiles.append(os.path.join(dirpath, f)) if len(inputfiles) in [1000,2000,4000,8000,1600,24000]: print "{} files processed, still working.".format(len(inputfiles)) print "Found {} files.".format(len(inputfiles)) #read from spreadsheet # with open(spreadsheet, "r") as spreadsheet: # spreadsheet=pandas.read_csv(spreadsheet, encoding="utf-8") numbers_to_be_extracted= spreadsheet[column_name].unique() print header, "Gilbert numbers to be extracted:" print ",".join([unicode(i) for i in numbers_to_be_extracted]) #copying speaker files print header, "Copying speaker files." speakerfiles=[f for f in inputfiles if re.match(".*\.txt", os.path.split(f)[1])] os.mkdir(os.path.join(output_folder, "speakers")) for s in speakerfiles: shutil.copy2(s, os.path.join(output_folder, "speakers")) #finding relevant input files result=[] for number in numbers_to_be_extracted: print "Processing {}, creating folder '{}'.".format(number, number) os.mkdir(os.path.join(output_folder, unicode(number))) regex="(\d+)-(\d+)-(\d+)-"+number.astype('U')+"-(\D+)\.wav" findings= [f for f in inputfiles if re.match(regex, os.path.split(f)[1])] result= result+findings for find in findings: shutil.copy2(find, os.path.join(output_folder, unicode(number), os.path.split(find)[1])) print header, "{} files have been copied to {}.".format(len(result), output_folder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_sample_folder(self, input_folder, target_folder, sample_name):\n print(f'processing {sample_name} folder.')\n # first make a subfolder to contain the images - e.g. 'target_folder/sample_name'\n sample_dir = join(target_folder, sample_name)\n if not os.path.exists(sample_dir):\n...
[ "0.6164347", "0.5919336", "0.5901154", "0.58560616", "0.57583755", "0.5725686", "0.571907", "0.56493175", "0.55794436", "0.5573059", "0.55641127", "0.55598956", "0.5552147", "0.55351996", "0.5534053", "0.5514603", "0.55066884", "0.5490536", "0.54603547", "0.5438883", "0.54376...
0.76440537
0
Display info about pet.
def describe_pet(pet_name,animal_type = 'dog'): print("I have a " + animal_type + ".") print("My " + animal_type + "'s name is " + pet_name.title() + ".\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_pet(self):\n pet = self.pet_factory.get_pet()\n print \"We have a lovely {}\".format(pet)\n print \"It says {}\".format(pet.speak())\n print \"We also have {}\".format(self.pet_factory.get_food())", "def show_pet(self):\n pet = self.pet_factory.get_pet()\n\n pri...
[ "0.8161232", "0.81274664", "0.74135685", "0.74135685", "0.74135685", "0.74135685", "0.7373951", "0.7348695", "0.7348695", "0.7345544", "0.7345544", "0.7304899", "0.72921777", "0.72867423", "0.72316194", "0.7195379", "0.71254575", "0.7090836", "0.709054", "0.70647824", "0.6952...
0.7029722
20
Load selected iterations and classes 3D for visualization mode.
def _load(self): self.firstIter = 1 self.lastIter = self.protocol.getLastFinishedIter() if self.viewIter.get() == ITER_LAST: self._iterations = [self.lastIter] else: self._iterations = self._getListFromRangeString(self.iterSelection.get()) from matplotlib.ticker import FuncFormatter self._plotFormatter = FuncFormatter(self._formatFreq)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_i3d(eval_type, h5_dir='param/'):\n state_dict = {}\n load_unit3d(state_dict, eval_type, 'Conv3d_1a_7x7', 'conv_1a', h5_dir)\n\n load_unit3d(state_dict, eval_type, 'Conv3d_2b_1x1', 'conv_2b', h5_dir)\n load_unit3d(state_dict, eval_type, 'Conv3d_2c_3x3', 'conv_2c', h5_dir)\n\n load_block(stat...
[ "0.64956695", "0.6306034", "0.60686326", "0.59560525", "0.58952993", "0.57739794", "0.56377673", "0.5542612", "0.5534125", "0.5517615", "0.5504378", "0.5468626", "0.54292387", "0.5424756", "0.5412685", "0.5403881", "0.5403201", "0.5379721", "0.5374727", "0.5370032", "0.536210...
0.50833565
43
Format function for Matplotlib formatter.
def _formatFreq(self, value, pos): inv = 999 if value: inv = 1/value return "1/%0.2f" % inv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def float_format(self):\n ...", "def asformat(self, format):", "def format(self, *args, **kwargs) -> String:\n pass", "def add_formatter(self, fmt):\n if fmt and not isfunction(fmt):\n raise TypeError(\"custom format function must be a type of function\")\n\n if fmt and fmt...
[ "0.6947464", "0.67461205", "0.65912247", "0.64709014", "0.64154315", "0.635182", "0.63386416", "0.63360775", "0.6247381", "0.62060654", "0.62060654", "0.6194569", "0.61453986", "0.6124619", "0.60353225", "0.60240173", "0.6023951", "0.60086197", "0.5962071", "0.5960708", "0.59...
0.0
-1
Build or update a Ticker metrics using a Quotecast object. Only the metrics which can be converted to float are supported. But that should be enough to handle all the real use cases.
def build_ticker_from_quotecast( quotecast: Quotecast, references: Dict[int, List[str]] = None, ticker: Ticker = None, ) -> Ticker: if references is None: references = dict() if ticker is None: ticker = Ticker() # SETUP PRODUCTS & METRICS message_array = json.loads(quotecast.json_data) for message in message_array: if message["m"] == "un": reference = message["v"][0] value = message["v"][1] product, metric = references[reference] ticker.products[product].metrics[metric] = value elif message["m"] == "us": reference = message["v"][0] value = message["v"][1] product, metric = references[reference] if value[4] == "-": date = datetime.datetime.strptime( value, "%Y-%m-%d", ) value = datetime.datetime.timestamp(date) ticker.products[product].metrics[metric] = value elif value[2] == ":": time = datetime.time.fromisoformat(value) value = time.hour * 3600 + time.minute * 60 + time.second ticker.products[product].metrics[metric] = value else: # NOT CONVERTIBLE TO FLOAT raise RuntimeWarning( "Unsupported string metric : " f"{metric} = {message}" ) elif message["m"] == "a_req": references[message["v"][1]] = message["v"][0].rsplit( sep=".", maxsplit=1, ) elif message["m"] == "a_rel": delete_list = [] for reference in references: if ".".join(references[reference]) == message["v"][0]: delete_list.append(reference) for reference in delete_list: del references[reference] elif message["m"] == "h": pass elif message["m"] == "ue": pass elif message["m"] == "d": raise AttributeError(f"Subscription rejected : {message}") else: raise AttributeError(f"Unknown metric : {message}") # SETUP PRODUCT LIST ticker.product_list.extend(ticker.products) # SETUP METADATA ticker.metadata.MergeFrom(quotecast.metadata) return ticker
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self.data.update()\n stats = self.data.stats\n ticker = self.data.ticker\n\n if self.type == \"exchangerate\":\n self._attr_state = ticker[self._currency].p15min\n self._attr_unit_of_measurement = self._currency\n elif self.type == \"trad...
[ "0.504665", "0.49457982", "0.482276", "0.47894225", "0.47741964", "0.47666577", "0.4716032", "0.46845242", "0.46796387", "0.46367455", "0.46182653", "0.4578131", "0.4567336", "0.4567215", "0.45670658", "0.4566002", "0.4552697", "0.45424986", "0.45123693", "0.44995657", "0.448...
0.6156283
0
Rebuild the request from history (self.__references).
def rebuild_request(self) -> Quotecast.Request: references = self.references request = Quotecast.Request() for vwd_id, metric in references.values(): request.subscriptions[vwd_id].append(metric) return request
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rebuild(self):\n _logger.info( \"Rebuilding the API Caches...\" )\n\n # fill out the data structures\n self._buildApiTypesList()\n #_buildMayaTypesList()\n \n self._buildMayaReservedTypes(force=True)\n\n self._buildApiRelationships()\n\n # merge in the ma...
[ "0.5566847", "0.546248", "0.54090655", "0.5392603", "0.5335858", "0.5311608", "0.52756536", "0.52722096", "0.52640605", "0.5223405", "0.5222167", "0.5147405", "0.5122049", "0.5031895", "0.50197256", "0.5009398", "0.5008371", "0.49860406", "0.49698722", "0.4964786", "0.4964781...
0.67901736
0
check to see whether an id is for a group
def is_group(id): return id.startswith('G')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True", "def alreay_in_group(self,uid,group_id):\n uid = str(uid)\n user_group_list = self.get_group_list_via_uid(uid)\n return True if group_id in user_group_list else False", "def is_group(s...
[ "0.7496174", "0.7397895", "0.7248163", "0.72468346", "0.7207925", "0.7201284", "0.71829623", "0.715947", "0.7065384", "0.70614374", "0.6950488", "0.69323575", "0.68989813", "0.6898132", "0.686232", "0.6849973", "0.682175", "0.68139756", "0.6812948", "0.6809037", "0.6806396", ...
0.81725055
0
check to see whether an id is for a user
def is_user(id): return id.startswith('U')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def userIDExists(self, id : int) -> bool:\n return id in self.users.keys()", "def hasUser(self, id):\n try:\n self.getUser(id)\n return True\n except KeyError:\n return False", "def check_user(user):\n result_user = search_column_with_constraint(choose_d...
[ "0.79370236", "0.75839674", "0.7334541", "0.732782", "0.72933257", "0.7157885", "0.71560794", "0.70645714", "0.70558435", "0.7010881", "0.6988318", "0.69240403", "0.69037765", "0.6896436", "0.6886491", "0.6883643", "0.6861566", "0.6858304", "0.6853838", "0.6834101", "0.683142...
0.8175753
0
a new session has been created add user's sid to cache with their related chat id
def user_joined(cls, sid, token): session = Session.find(token=token) if not session: return False redis.hset('sid-id', sid, session.user_id) redis.hset('id-sid', session.user_id, sid) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_session(self, session_id):\n sessions = self.get_sessions()\n if session_id not in sessions:\n sessions.append(session_id)\n self.ref_cache.set(self.sid, sessions)", "def add_user_to_session(self,session_id,client_id,display_name):\n self.sessions[session_id][\"...
[ "0.6442786", "0.60261506", "0.5993311", "0.59842753", "0.59716135", "0.5773349", "0.5754616", "0.5753378", "0.5728176", "0.5712133", "0.5694755", "0.5632899", "0.56139004", "0.5612166", "0.5564319", "0.5548212", "0.55396765", "0.5534832", "0.5517975", "0.5509574", "0.548927",...
0.5635927
11
a user has been disconnected from the server. delete its sid
def user_left(cls, sid): id = redis.hget('sid-id', sid) redis.hdel('sid-id', sid) redis.hdel('id-sid', id) return id.decode("utf-8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disconnect():\n\n\tglob.tokens.deleteToken(glob.tokens.getTokenFromUserID(999))", "def connection_lost(self, exc):\n if isinstance(self.current, Session):\n self.current.removeUser(self)\n elif self.current == self:\n del super.clients[self]\n else:\n ano...
[ "0.72285557", "0.7040492", "0.68665254", "0.6458032", "0.64499587", "0.64076656", "0.6401039", "0.6399896", "0.6398915", "0.6398157", "0.6392838", "0.63727134", "0.6348618", "0.63431454", "0.6333374", "0.6315425", "0.62992054", "0.62812126", "0.6270461", "0.6263564", "0.62526...
0.6578083
3
search for a user's socket id
def get_user_sid(cls, user_id): sid = redis.hget('id-sid', user_id) if not sid: return None return sid.decode("utf-8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_socket(self, user):\n for client in self.clients:\n if user == client.get_name():\n return client.get_socket()", "def lookup_friend(self,username):\n if self.isBlank(username) or self.isValidLen(username):\n return False\n safe_input = (usern...
[ "0.6524464", "0.6177678", "0.6079417", "0.60470605", "0.5984913", "0.59413224", "0.5857293", "0.58146036", "0.5801265", "0.5664723", "0.5636931", "0.5636145", "0.5635148", "0.56256527", "0.56242967", "0.56242955", "0.56001824", "0.56000507", "0.55688393", "0.55479366", "0.554...
0.527406
55
get a user id using its sid user has to be joined
def get_sid_id(cls, sid): id = redis.hget('sid-id', sid) if not id: return None return id.decode("utf-8")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user(id):\n pass", "def get_user_id(self):\n return self.id_user", "def fetch_current_user_id(s):", "def get_id(self): \n\t\treturn (self.user_id)", "def get_user_id():\n user_id = session.get(\"user_id\")\n return user_id if user_id else None", "def get_one_user():", "def get_u...
[ "0.71183956", "0.67369145", "0.67288226", "0.6676", "0.66554093", "0.6615446", "0.6597501", "0.6586899", "0.6580105", "0.6580105", "0.65061647", "0.6459379", "0.6445892", "0.64429665", "0.6428113", "0.64264065", "0.6411913", "0.6411254", "0.64103955", "0.64056945", "0.6374203...
0.0
-1
when a user sends a new message to the server
def new_msg(cls, sender_id, recipient_id, text): sender = User.find(id=sender_id) sender_sid = cls.get_user_sid(sender.id) if is_group(recipient_id): recipient_group = Group.find(id=recipient_id) if not recipient_group: raise Exception('recipient was not found') if not recipient_group.has_user(sender): raise Exception('user is not a member of this group') cls._broadcast_group(sender, sender_sid, recipient_group, text) elif is_user(recipient_id): recipient = User.find(id=recipient_id) if not sender.is_friends(recipient): raise Exception('user is not friends with recipient') if recipient.blocked(sender): raise Exception('recipient has blocked you') if not recipient: raise Exception('recipient was not found') cls._broadcast_user(sender, sender_sid, recipient, text) else: raise Exception('bad recipient id')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_message(data):\n pass", "def message_handler(msg):\n logging.info(\"Message Text: %s\" % msg['msg'])\n\n message_entry = Message(request.sid, msg['room'], msg['msg'], msg['time'])\n if msg['msg'] != \"User has connected!\":\n logging.info(\"About to add to DB\")\n db.session....
[ "0.752636", "0.74791414", "0.7469889", "0.73891926", "0.7261544", "0.71904874", "0.7182423", "0.7182423", "0.7182423", "0.7179466", "0.71528774", "0.714717", "0.7144339", "0.71373135", "0.7136156", "0.71050507", "0.71050507", "0.70408386", "0.7027871", "0.7018298", "0.6988954...
0.0
-1
broadcast a new user joining the group
def user_joined_group(cls, group, user): text = "{} joined the group chat".format(user.username) cls._broadcast_group(group, None, group, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify_new_user(self, user):\n # join to default group\n g = self.root.get('community-general')\n if g:\n self.join_group(user, g)", "def join_server(self, data, user):\n # User will spawn in one of following rooms\n user.room = choice((\"100\", \"300\", \"800\",...
[ "0.66464144", "0.6612628", "0.65133005", "0.65117073", "0.64265233", "0.6303679", "0.6245498", "0.6235882", "0.6231058", "0.6172051", "0.61579597", "0.61176723", "0.60869974", "0.6015817", "0.6015379", "0.601436", "0.5992443", "0.59782267", "0.59572095", "0.59416044", "0.5917...
0.71382284
0
broadcast a user leaving the group
def user_left_group(cls, group, user): text = "{} left the group chat".format(user.username) cls._broadcast_group(group, None, group, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def leave_room(self, label):\n user = self.user\n room = await self.get_room(label)\n\n await self.channel_layer.group_send(\n room.group_name,\n {\n 'type': 'chat.leave',\n 'label': label,\n 'username': user.username,\n ...
[ "0.68877256", "0.6836113", "0.6618386", "0.6293848", "0.62480164", "0.6209803", "0.61928344", "0.61679953", "0.61453825", "0.6137914", "0.61103255", "0.6071488", "0.59881574", "0.59806395", "0.5946936", "0.592674", "0.5914093", "0.59117216", "0.59084827", "0.5901252", "0.5890...
0.7276704
0
broadcast a new message to a group chat
def _broadcast_group(cls, sender, sender_sid, group, text): # todo make this method async for recipient in group.get_users(): if recipient == sender: continue cls._broadcast_user(sender, sender_sid, recipient, text, group.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_groupchat_message(self, msg):\n self.xmpp.event('groupchat_message', msg)\n self.xmpp.event(\"muc::%s::message\" % msg['from'].bare, msg)", "def sendMessage(self, message):\n\t\tm = domish.Element((None, 'message'))\n\t\tm['from'] = self.jid\n\t\tm['to'] = self.room\n\t\tm['type'] = 'gro...
[ "0.74668014", "0.73062366", "0.70124036", "0.6886197", "0.67108417", "0.6665333", "0.66605836", "0.6642074", "0.66353", "0.66307837", "0.66065943", "0.6557014", "0.65497166", "0.64740735", "0.6465451", "0.6459765", "0.64553064", "0.64509314", "0.6406889", "0.63932914", "0.639...
0.67548275
4
broadcast a new message to a user
def _broadcast_user(cls, sender, sender_sid, recipient, text, chat_id=None): # todo make this method async recipient_sid = cls.get_user_sid(recipient.id) if not recipient_sid: cls._cache_msg(sender.id, recipient.id, text, chat_id) return data = {'sender_id': sender.id, 'recipient_id': recipient.id, 'text': text, 'chat_id': chat_id or 'private', 'time': time()} app.socketio.emit('message', data, room=recipient_sid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def notify(cls, user_id, message):\n # Find the subscription group for user.\n group = None if user_id is None else f\"user_{user_id}\"\n cls.broadcast(group=group, payload=message)", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)", "def broadcast(self, m...
[ "0.7328094", "0.72830236", "0.71688133", "0.7001179", "0.6999174", "0.69816566", "0.6812357", "0.6784205", "0.6777518", "0.6767178", "0.6728205", "0.6724078", "0.6669847", "0.6652716", "0.6629219", "0.6618152", "0.6594717", "0.6588109", "0.6582829", "0.65520984", "0.6547883",...
0.71615946
3
cache a message that failed to be delivered
def _cache_msg(cls, sender_id, recipient_id, text, chat_id=None): # todo make this method async message = Message.new(sender_id, recipient_id, text, chat_id) return message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache_message(self, comm_id, msg):\n if comm_id not in self._cached_messages:\n self._cached_messages[comm_id] = []\n self._cached_messages[comm_id].append(msg)", "def _mark_discarted_messages():\n\n max_retry_value = getattr(settings, \"DJMAIL_MAX_RETRY_NUMBER\", 3)\n queryset...
[ "0.64296436", "0.6014829", "0.5976981", "0.5920454", "0.57477766", "0.56894994", "0.56732804", "0.558145", "0.5558702", "0.5543183", "0.5528251", "0.5498727", "0.54969853", "0.54897916", "0.54788435", "0.5449968", "0.54112744", "0.5403003", "0.5399943", "0.5390648", "0.538709...
0.5689635
5
Start an oef node.
def _start_oef_node(self, network_node):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch_oef():\n script_path = os.path.join(\"scripts\", \"oef\", \"launch.py\")\n configuration_file_path = os.path.join(\"scripts\", \"oef\", \"launch_config.json\")\n print(\"Launching new OEF Node...\")\n subprocess.Popen(\n [\"python3\", script_path, \"-c\", configuration_file_path, \"--...
[ "0.73455715", "0.67564565", "0.6507637", "0.5896183", "0.5798123", "0.56471074", "0.5645573", "0.5598258", "0.5573087", "0.556594", "0.55550206", "0.5545505", "0.5535845", "0.5517319", "0.5511", "0.55073994", "0.5473905", "0.5468678", "0.54667735", "0.54622465", "0.54570323",...
0.82847816
0
Set the test up.
def setup_class(cls): cls.runner = CliRunner() cls.agent_name = "agent_1" cls.cwd = os.getcwd() cls.t = tempfile.mkdtemp() os.chdir(cls.t)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n\n self._set_up()", "def setUp(self):\n MainTests.setUp(self)", "def setUp(self):\n \n pass", "def setUp(self):\n\n # setup init variables...
[ "0.82482773", "0.82482773", "0.81176686", "0.800283", "0.7907327", "0.78918254", "0.7887326", "0.7848355", "0.7842833", "0.7832785", "0.7832785", "0.781454", "0.78136706", "0.7806924", "0.78026885", "0.78026885", "0.77940094", "0.7776961", "0.7776961", "0.7776961", "0.7776961...
0.0
-1
Test that a generated protocol's serialisation + deserialisation work correctly.
def test_generated_protocol_serialisation(self): # create a message reply_message = {1: "number one", 2: "number two", 7: "number seven"} # message 1 message = TwoPartyNegotiationMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY, reply_message=reply_message, ) # serialise the message encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message) # deserialise the message decoded_message = TwoPartyNegotiationSerializer().decode( encoded_message_in_bytes ) # Compare the original message with the serialised+deserialised message assert decoded_message.message_id == message.message_id assert decoded_message.dialogue_reference == message.dialogue_reference assert decoded_message.dialogue_reference[0] == message.dialogue_reference[0] assert decoded_message.dialogue_reference[1] == message.dialogue_reference[1] assert decoded_message.target == message.target assert decoded_message.performative == message.performative assert decoded_message.reply_message == message.reply_message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\...
[ "0.6951617", "0.68632156", "0.67302066", "0.661847", "0.6527949", "0.6503405", "0.6458234", "0.63932693", "0.63932693", "0.6388795", "0.63429564", "0.63004637", "0.6290241", "0.62630475", "0.6233855", "0.618288", "0.61746454", "0.6163066", "0.6099586", "0.6083544", "0.6072765...
0.7254605
0
Test that a generated protocol could be used in exchanging messages between two agents.
def test_generated_protocol_end_to_end(self): # AEA components ledger_apis = LedgerApis({}, FETCHAI) wallet_1 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE}) wallet_2 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE}) identity_1 = Identity( name="my_aea_1", address=wallet_1.addresses.get(FETCHAI), default_address_key=FETCHAI, ) identity_2 = Identity( name="my_aea_2", address=wallet_2.addresses.get(FETCHAI), default_address_key=FETCHAI, ) oef_connection_1 = OEFConnection( address=identity_1.address, oef_addr=HOST, oef_port=PORT ) oef_connection_2 = OEFConnection( address=identity_2.address, oef_addr=HOST, oef_port=PORT ) resources_1 = Resources() resources_2 = Resources() # add generated protocols to resources generated_protocol_configuration = ProtocolConfig.from_json( yaml.safe_load( open( os.path.join( self.cwd, "tests", "data", "generator", "two_party_negotiation", "protocol.yaml", ) ) ) ) generated_protocol = Protocol( TwoPartyNegotiationMessage.protocol_id, TwoPartyNegotiationSerializer(), generated_protocol_configuration, ) resources_1.protocol_registry.register( TwoPartyNegotiationMessage.protocol_id, generated_protocol ) resources_2.protocol_registry.register( TwoPartyNegotiationMessage.protocol_id, generated_protocol ) # create AEAs aea_1 = AEA(identity_1, [oef_connection_1], wallet_1, ledger_apis, resources_1) aea_2 = AEA(identity_2, [oef_connection_2], wallet_2, ledger_apis, resources_2) inform_number = tuple((1370, 1991, 1, 4, 17, 6)) # message 1 message = TwoPartyNegotiationMessage( message_id=1, dialogue_reference=(str(0), ""), target=0, performative=TwoPartyNegotiationMessage.Performative.INFORM, inform_number=inform_number, ) encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message) envelope = Envelope( to=identity_2.address, sender=identity_1.address, protocol_id=TwoPartyNegotiationMessage.protocol_id, message=encoded_message_in_bytes, ) # message 2 reply_message = {1: "number one", 2: "number two", 7: "number seven"} message_2 = 
TwoPartyNegotiationMessage( message_id=2, dialogue_reference=(str(0), ""), target=1, performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY, reply_message=reply_message, ) encoded_message_2_in_bytes = TwoPartyNegotiationSerializer().encode(message_2) # add handlers to AEA resources agent_1_handler = Agent1Handler( skill_context=SkillContext(aea_1.context), name="fake_skill" ) resources_1.handler_registry.register( ( PublicId.from_str("fetchai/fake_skill:0.1.0"), TwoPartyNegotiationMessage.protocol_id, ), agent_1_handler, ) agent_2_handler = Agent2Handler( encoded_messsage=encoded_message_2_in_bytes, skill_context=SkillContext(aea_2.context), name="fake_skill", ) resources_2.handler_registry.register( ( PublicId.from_str("fetchai/fake_skill:0.1.0"), TwoPartyNegotiationMessage.protocol_id, ), agent_2_handler, ) # add error skill to AEAs error_skill_1 = Skill.from_dir( os.path.join(AEA_DIR, "skills", "error"), aea_1.context ) resources_1.add_skill(error_skill_1) error_skill_2 = Skill.from_dir( os.path.join(AEA_DIR, "skills", "error"), aea_2.context ) resources_2.add_skill(error_skill_2) # Start threads t_1 = Thread(target=aea_1.start) t_2 = Thread(target=aea_2.start) try: t_1.start() t_2.start() time.sleep(1.0) aea_1.outbox.put(envelope) time.sleep(5.0) assert ( agent_2_handler.handled_message.message_id == message.message_id ), "Message from Agent 1 to 2: message ids do not match" assert ( agent_2_handler.handled_message.dialogue_reference == message.dialogue_reference ), "Message from Agent 1 to 2: dialogue references do not match" assert ( agent_2_handler.handled_message.dialogue_reference[0] == message.dialogue_reference[0] ), "Message from Agent 1 to 2: dialogue reference[0]s do not match" assert ( agent_2_handler.handled_message.dialogue_reference[1] == message.dialogue_reference[1] ), "Message from Agent 1 to 2: dialogue reference[1]s do not match" assert ( agent_2_handler.handled_message.target == message.target ), "Message from Agent 1 to 2: 
targets do not match" assert ( agent_2_handler.handled_message.performative == message.performative ), "Message from Agent 1 to 2: performatives do not match" assert ( agent_2_handler.handled_message.inform_number == message.inform_number ), "Message from Agent 1 to 2: inform_numbers do not match" assert ( agent_1_handler.handled_message.message_id == message_2.message_id ), "Message from Agent 1 to 2: dialogue references do not match" assert ( agent_1_handler.handled_message.dialogue_reference == message_2.dialogue_reference ), "Message from Agent 2 to 1: dialogue references do not match" assert ( agent_1_handler.handled_message.dialogue_reference[0] == message_2.dialogue_reference[0] ), "Message from Agent 2 to 1: dialogue reference[0]s do not match" assert ( agent_1_handler.handled_message.dialogue_reference[1] == message_2.dialogue_reference[1] ), "Message from Agent 2 to 1: dialogue reference[1]s do not match" assert ( agent_1_handler.handled_message.target == message_2.target ), "Message from Agent 2 to 1: targets do not match" assert ( agent_1_handler.handled_message.performative == message_2.performative ), "Message from Agent 2 to 1: performatives do not match" assert ( agent_1_handler.handled_message.reply_message == message_2.reply_message ), "Message from Agent 1 to 2: reply_messages do not match" time.sleep(2.0) finally: aea_1.stop() aea_2.stop() t_1.join() t_2.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_identify(self):\n\n protocol_a, transport_a, tree_a, _ = self.create_protocol('protocol_a')\n protocol_b, transport_b, tree_b, _ = self.create_protocol('protocol_b')\n\n transport_a.get_extra_info.return_value = ('127.0.0.1', 1000)\n transport_b.get_extra_info.return_value = ('...
[ "0.6693443", "0.65527207", "0.64990115", "0.64490074", "0.64464223", "0.6435972", "0.641256", "0.63902915", "0.6338546", "0.6269686", "0.6266628", "0.6260895", "0.6249174", "0.61818177", "0.61632335", "0.61631536", "0.61530423", "0.61333424", "0.6118203", "0.60873365", "0.605...
0.6860271
0
Tear the test down.
def teardown_class(cls): os.chdir(cls.cwd) try: shutil.rmtree(cls.t) except (OSError, IOError): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')", "def tearDow...
[ "0.81449527", "0.77001065", "0.77001065", "0.7650473", "0.7650473", "0.76173466", "0.7492439", "0.74516493", "0.74354964", "0.7418989", "0.73991543", "0.73991543", "0.73991543", "0.73991543", "0.7376718", "0.7350522", "0.73498565", "0.7335127", "0.73328424", "0.73328424", "0....
0.0
-1
Test _specification_type_to_python_type method unsupported type.
def test__specification_type_to_python_type_unsupported_type(self): with self.assertRaises(TypeError): _specification_type_to_python_type("unsupported_type")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_type_error(self):\n self._error_test(TypeError)", "def test_raises_type_error(self):\n wrong_type = dict()\n self.assertRaises(\n TypeError, util.convert_protobuf_to_proto_plus, wrong_type\n )", "def test_value_error_for_computing_missing_type():\n with pytest...
[ "0.6947089", "0.6929546", "0.6844311", "0.6795259", "0.66925305", "0.6652292", "0.6652253", "0.6572005", "0.65506715", "0.6511283", "0.64829165", "0.6474207", "0.64679945", "0.64503706", "0.64311326", "0.6370294", "0.6370191", "0.6354397", "0.6286515", "0.6261668", "0.6253150...
0.91027087
0
Test _union_sub_type_to_protobuf_variable_name method tuple.
def test__union_sub_type_to_protobuf_variable_name_tuple(self, mock): _union_sub_type_to_protobuf_variable_name("content_name", "Tuple") mock.assert_called_once()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _union_sub_type_to_protobuf_variable_name(\n content_name: str, content_type: str\n) -> str:\n if content_type.startswith(\"FrozenSet\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n expanded_type_str = \"set_of_{}\".format(sub_type)\n elif content_type.startswit...
[ "0.7497985", "0.54891527", "0.5454183", "0.5442474", "0.5129124", "0.5115415", "0.51112336", "0.5045163", "0.5033024", "0.5018397", "0.49975044", "0.49971396", "0.49895564", "0.4978763", "0.4951379", "0.49307013", "0.49243486", "0.48797363", "0.48714188", "0.485954", "0.48464...
0.8391995
0
Test _includes_custom_type method positive result.
def test__includes_custom_type_positive(self, *mocks): content_type = "Union[str]" result = self.protocol_generator._includes_custom_type(content_type) self.assertTrue(result) content_type = "Optional[str]" result = self.protocol_generator._includes_custom_type(content_type) self.assertTrue(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _includes_custom_type(content_type: str) -> bool:\n\n if content_type.startswith(\"Optional\"):\n sub_type = _get_sub_types_of_compositional_types(content_type)[0]\n result = _includes_custom_type(sub_type)\n elif content_type.startswith(\"Union\"):\n sub_types = _get_sub_types_of_co...
[ "0.7456461", "0.6151709", "0.57726073", "0.5564573", "0.5552539", "0.5481914", "0.5474497", "0.54267174", "0.5376972", "0.534768", "0.5330371", "0.5328861", "0.5311295", "0.5270746", "0.52539307", "0.5240947", "0.5239048", "0.5234515", "0.5231774", "0.5193669", "0.5185735", ...
0.7806248
0
Implement the setup for the handler.
def setup(self) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, *args, **kwargs):\n pass", "def _setup(self) -> None:\n\t\treturn", "def setUp(self):\n\n installHandler()", "def setUp(self):\n\n installHandler()", "def setup(self,**kwargs):\n pass", "def setup(self):\n raise NotImplementedError(\"Need to be imple...
[ "0.74647546", "0.7427192", "0.74090225", "0.74090225", "0.73212504", "0.7227166", "0.7225722", "0.7213479", "0.7213479", "0.7213479", "0.7213479", "0.7213479", "0.7210512", "0.7194558", "0.7194558", "0.7176307", "0.7176307", "0.7176307", "0.7176307", "0.7132219", "0.7116914",...
0.7172614
20
Implement the reaction to a message.
def handle(self, message: Message) -> None: self.handled_message = message
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def custom_mqtt_reaction(self, topic, message):\n raise NotImplementedError(\"Must override method custom_mqtt_reaction\")", "async def process(self, chan_id: str, msg_id: str, emoji: str, member: discord.Member, add: bool):\n logger.debug(f\"Processing reaction: [ add: {add}, msg_id: {msg_id}, emo...
[ "0.6869177", "0.6487918", "0.63162875", "0.6300903", "0.6280189", "0.6270176", "0.6200644", "0.61323017", "0.613172", "0.6130621", "0.6107559", "0.6087531", "0.6074806", "0.60717124", "0.6063858", "0.6055714", "0.60538733", "0.6034891", "0.6023585", "0.60027444", "0.5996093",...
0.55897665
76
Implement the handler teardown.
def teardown(self) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.handler.teardown()", "def _teardown(self):\n # No-op base implementation", "def teardown(self, event):\n pass", "def cleanup(self) -> None:\n self.handler.cleanup()\n super().cleanup()", ...
[ "0.8065864", "0.7863261", "0.78516686", "0.7751276", "0.7695875", "0.76800025", "0.76800025", "0.76762605", "0.76762605", "0.76578486", "0.76578486", "0.76578486", "0.7586377", "0.75539047", "0.7453597", "0.7453597", "0.7453597", "0.7220783", "0.7112508", "0.70983917", "0.708...
0.7647432
13
Implement the setup for the handler.
def setup(self) -> None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, *args, **kwargs):\n pass", "def _setup(self) -> None:\n\t\treturn", "def setUp(self):\n\n installHandler()", "def setUp(self):\n\n installHandler()", "def setup(self,**kwargs):\n pass", "def setup(self):\n raise NotImplementedError(\"Need to be imple...
[ "0.74647546", "0.7427192", "0.74090225", "0.74090225", "0.73212504", "0.7227166", "0.7225722", "0.7213479", "0.7213479", "0.7213479", "0.7213479", "0.7213479", "0.7210512", "0.7194558", "0.7194558", "0.7176307", "0.7176307", "0.7176307", "0.7176307", "0.7132219", "0.7116914",...
0.7172614
21
Implement the reaction to a message.
def handle(self, message: Message) -> None: self.handled_message = message envelope = Envelope( to=message.counterparty, sender=self.context.agent_address, protocol_id=TwoPartyNegotiationMessage.protocol_id, message=self.encoded_message_2_in_bytes, ) self.context.outbox.put(envelope)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def custom_mqtt_reaction(self, topic, message):\n raise NotImplementedError(\"Must override method custom_mqtt_reaction\")", "async def process(self, chan_id: str, msg_id: str, emoji: str, member: discord.Member, add: bool):\n logger.debug(f\"Processing reaction: [ add: {add}, msg_id: {msg_id}, emo...
[ "0.6869177", "0.6487918", "0.63162875", "0.6300903", "0.6280189", "0.6270176", "0.6200644", "0.61323017", "0.613172", "0.6130621", "0.6107559", "0.6087531", "0.6074806", "0.60717124", "0.6063858", "0.6055714", "0.60538733", "0.6034891", "0.6023585", "0.60027444", "0.5996093",...
0.0
-1
Implement the handler teardown.
def teardown(self) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.handler.teardown()", "def _teardown(self):\n # No-op base implementation", "def teardown(self, event):\n pass", "def cleanup(self) -> None:\n self.handler.cleanup()\n super().cleanup()", ...
[ "0.8065864", "0.7863261", "0.78516686", "0.7751276", "0.7695875", "0.76800025", "0.76800025", "0.76762605", "0.76762605", "0.76578486", "0.76578486", "0.76578486", "0.7586377", "0.75539047", "0.7453597", "0.7453597", "0.7453597", "0.7220783", "0.7112508", "0.70983917", "0.708...
0.7647432
14
Does the program read in records, placing data into correct fields of record objects?
def test_CovidCase_creation(self): new_Covid = self.create_CovidCase() self.assertTrue(isinstance(new_Covid, CovidCase)) self.assertEqual(new_Covid.country_id, "TE")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parseRecords(self):\n # dict of parse methods for most common records that will be stored in structured arrays\n FLAG2METHOD = {'PS' : self.parseHighPassRecord,\n 'PC' : self.parseLowPassRecord,\n 'VD' : self.parseDigitalSValRecord}\n # dict of ...
[ "0.62811124", "0.62365377", "0.61066103", "0.60930586", "0.6048932", "0.60215473", "0.6007136", "0.5951948", "0.5928344", "0.59142256", "0.5856558", "0.5852108", "0.5832339", "0.5824252", "0.5793622", "0.579146", "0.5781421", "0.57737976", "0.5772325", "0.571253", "0.5677232"...
0.0
-1
Does the program add a new record into the sequential data structure?
def test_CovidCase_add(self): add_covid = self.create_CovidCase() add_covid.save() self.assertIn(add_covid, CovidCase.objects.all())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_record(self):\n if not self.record_exists(self.args.date):\n record = self.create_record()\n self.records.append(record)\n self.write_json_file(self.records_file, self.records)\n return True\n return False", "def test_append_updated_record_to_queu...
[ "0.6565922", "0.64655966", "0.64488524", "0.635182", "0.6166402", "0.6109293", "0.60871565", "0.60605955", "0.60419774", "0.6038555", "0.6027791", "0.60155684", "0.6006945", "0.5984549", "0.5980958", "0.5957817", "0.5919547", "0.58998924", "0.58906573", "0.5881246", "0.587191...
0.0
-1
Does the program update a record in the sequential data structure as expected?
def test_CovidCase_update(self): u_Covid = self.update_CovidCase() c = CovidCase.objects.get(country_id="UP") c.name_en = "New name" c.save() self.assertEqual(c.name_en, "New name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_record(self):\n pass", "def test_append_updated_record_to_queue_same_data(small_app):\n pid = PersistentIdentifier.get(\"literature\", 11883)\n publication_id = str(pid.object_uuid)\n record = Record.get_record(publication_id)\n\n append_updated_record_to_queue(None, record, re...
[ "0.6519844", "0.6387769", "0.6352885", "0.6277602", "0.6213696", "0.5965861", "0.5937817", "0.592395", "0.58763266", "0.5874582", "0.58480644", "0.58157754", "0.57564855", "0.57024807", "0.5700393", "0.5700325", "0.56963235", "0.56787336", "0.5669062", "0.5668286", "0.5662235...
0.0
-1
Does the program remove a record from the sequential data structure as expected?
def test_CovidCase_delete(self): # setting up by creating and saving the the database del_Covid = self.create_CovidCase() del_Covid.save() del_id = del_Covid.id # we are going to delete by calling the delete function del_deleted = CovidCase.objects.get(id=del_id) del_deleted.delete() self.assertNotIn(del_Covid, CovidCase.objects.all())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self, val):\n if not val in self.record:\n return False\n index = self.record[val]\n self.data[index], self.data[-1] = self.data[-1], self.data[index]\n self.record[self.data[index]] = index\n self.data.pop()\n self.record.pop(val)\n return Tru...
[ "0.68329746", "0.67125225", "0.6622166", "0.66102016", "0.65779585", "0.65033436", "0.6474776", "0.63278514", "0.6315034", "0.629419", "0.6229488", "0.61964864", "0.6193291", "0.6143841", "0.6140655", "0.61280227", "0.6104588", "0.6092655", "0.60867935", "0.6085228", "0.60814...
0.0
-1
Does the program catch any exceptions or errors if the file is missing?
def test_file_error(self): my_reader = DataSetReader() covid_list = CovidCase.objects.all() with self.assertRaises(IOError): my_reader.writeFile(covid_list, "Not_A_File.csv")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)", "def FileCheck(fn):\n try:\n open(fn, \"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0", "def test_no_such_fi...
[ "0.7671936", "0.7364344", "0.7215916", "0.7126976", "0.7051832", "0.7051832", "0.7029998", "0.7013632", "0.69907284", "0.6982232", "0.69754636", "0.6973892", "0.6960266", "0.68636847", "0.68334687", "0.6810809", "0.67925715", "0.67872965", "0.67819893", "0.6771102", "0.672490...
0.0
-1
Run the Viterbi algorithm. N number of tokens (length of sentence) L number of labels
def run_viterbi(emission_scores, trans_scores, start_scores, end_scores): L = start_scores.shape[0] assert end_scores.shape[0] == L assert trans_scores.shape[0] == L assert trans_scores.shape[1] == L assert emission_scores.shape[1] == L N = emission_scores.shape[0] trans_scores += start_scores back_ptrs = np.zeros_like(emission_scores,dtype=np.int32) emission_scores += start_scores em_scores = np.zeros_like(emission_scores) em_scores[0] = start_scores+emission_scores[0] for k in range(1,N): transition_plus_score =trans_scores+np.expand_dims(em_scores[k-1],1) back_ptrs[k] =np.argmax(transition_plus_score,0) em_scores[k] =np.max(transition_plus_score,0)+emission_scores[k] v = [np.argmax(end_scores+em_scores[-1])] v_score = np.max(end_scores+em_scores[-1]) for back_ptr in reversed(back_ptrs[1:]): v.append(back_ptr[v[-1]]) v.reverse() return v_score,v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_viterbi(self, tokens: TokenSeq) -> Tuple[NDArray, NDArray, PosSeq]:", "def label_n_elements(\n self,\n n_elements: int,\n model,\n data_process_fn,\n ) -> int:\n n_to_sample = min(len(self.unlabelled_idx_set), n_elements)\n model.ev...
[ "0.6409644", "0.6073446", "0.6022884", "0.5973923", "0.5945685", "0.58445257", "0.57420796", "0.5737962", "0.56900644", "0.56370634", "0.56175566", "0.5607194", "0.5601182", "0.5584242", "0.55672693", "0.5564353", "0.55216706", "0.5517806", "0.551739", "0.5480801", "0.5472328...
0.53388405
30
Run a single epoch
def eval_model(device, model, sampler, loss_compute, logit_modifier_fxn, token_sampler, print_every, max_len, user_items_df, max_name_len=15, ingr_map=None, base_save_dir='', pad_ingr=None, ppx_only=False, **tensor_kwargs): start = datetime.now() results_dicts = [] # Extract into tuples and list tensor_names, base_tensors = zip(*tensor_kwargs.items()) # Iterate through batches in the epoch model.eval() with torch.no_grad(): total_tokens = 0 total_name_tokens = 0 total_loss = 0.0 total_name_loss = 0.0 print_tokens = 0 for i, batch in enumerate(tqdm(sampler.epoch_batches(), total=sampler.n_batches), 1): batch_users, items = [t.to(device) for t in batch] # Fill out batch information batch_map = dict(zip( tensor_names, get_batch_information_general(items, *base_tensors) )) use_ingr_embedding = batch_map['ingr_tensor'].size(-1) != MAX_INGR * MAX_INGR_TOK user_prior_technique_masks = torch.stack([get_user_prior_techniques_mask( user_ix=uix.item(), item_ix=iix.item(), user_items_df=user_items_df, tech_mask_tensor=tensor_kwargs['tech_mask_tensor'], device=device, normalize=True ) for uix, iix in zip(batch_users, items)], dim=0) # Logistics this_batch_size = batch_map['steps_tensor'].size(0) this_batch_num_tokens = (batch_map['steps_tensor'] != PAD_INDEX).data.sum().item() this_batch_num_name_tokens = (batch_map['name_tensor'] != PAD_INDEX).data.sum().item() name_targets = batch_map['name_tensor'][:, :-1] ''' Teacher forcing - evaluate ''' # Comparing out(token[t-1]) to token[t] (log_probs, _), (name_log_probs, _) = model.forward( device=device, inputs=( batch_map['calorie_level_tensor'], batch_map['name_tensor'], batch_map['ingr_tensor'] ), ingr_masks=batch_map['ingr_mask_tensor'], user_prior_technique_masks=user_prior_technique_masks, targets=batch_map['steps_tensor'][:, :-1], max_len=max_len-1, start_token=START_INDEX, teacher_forcing=True, name_targets=name_targets, max_name_len=max_name_len-1, visualize=False ) loss, name_loss = loss_compute( log_probs, 
batch_map['steps_tensor'][:, 1:], name_outputs=name_log_probs, name_targets=name_targets, norm=this_batch_size, model=model, clip=None ) total_loss += loss total_name_loss += name_loss # Logging total_tokens += this_batch_num_tokens total_name_tokens += this_batch_num_name_tokens print_tokens += this_batch_num_tokens del log_probs, name_log_probs # Short-circuit if we only want to calculate test perplexity if ppx_only: if i % print_every == 0: elapsed = datetime.now() - start print("Epoch Step: {} LM Loss: {:.5f}; Name Loss: {:.5f}; Tok/s: {:.3f}".format( i, loss / this_batch_size, name_loss / this_batch_size, print_tokens / elapsed.seconds )) start = datetime.now() print_tokens = 0 continue ''' Non-teacher-forcing - Generate! ''' # Generates probabilities (log_probs, output_tokens, ingr_attns, prior_tech_attns), \ (name_log_probs, name_output_tokens) = model.forward( device=device, inputs=( batch_map['calorie_level_tensor'], batch_map['name_tensor'], batch_map['ingr_tensor'] ), ingr_masks=batch_map['ingr_mask_tensor'], user_prior_technique_masks=user_prior_technique_masks, targets=batch_map['steps_tensor'][:, :-1], max_len=max_len-1, start_token=START_INDEX, teacher_forcing=False, logit_modifier_fxn=logit_modifier_fxn, token_sampler=token_sampler, visualize=True, max_name_len=max_name_len-1, name_targets=name_targets, ) del log_probs, name_log_probs # Generated recipe calorie_levels, technique_strs, ingredient_strs, gold_strs, generated_strs, \ prior_items, recipe_reprs = get_batch_generated_recipes( batch_users=batch_users, batch_generated=output_tokens, max_ingr=MAX_INGR, max_ingr_tok=MAX_INGR_TOK, names_generated=name_output_tokens, ingr_map=ingr_map, user_items_df=user_items_df, **batch_map ) for ix in range(len(generated_strs)): # Create save location: test_i<item>_u<user> ii = items[ix].data.item() uu = batch_users[ix].data.item() sample_id = 'test_i{}_u{}'.format(ii, uu) trial_save_dir = os.path.join(base_save_dir, sample_id) if not 
os.path.exists(trial_save_dir): os.mkdir(trial_save_dir) # Output tokens for heatmap axes out_indices = output_tokens[ix].detach().cpu().numpy().tolist() out_tokens = decode_ids(out_indices) trunc_indices = out_indices[:out_indices.index(END_INDEX)] \ if END_INDEX in out_indices else out_indices output_len = len(trunc_indices) output_techniques = [t for t in TECHNIQUES_LIST if t in generated_strs[ix]] results_dicts.append({ 'u': uu, 'i': ii, 'generated': generated_strs[ix], 'n_tokens': output_len, 'generated_techniques': output_techniques, 'n_techniques': len(output_techniques) }) # Save output with open(os.path.join(trial_save_dir, 'output.txt'), 'w+', encoding='utf-8') as wf: wf.write(recipe_reprs[ix]) # Ingredient Attention ingr_attentions = np.matrix([ a.squeeze().detach().cpu().numpy().tolist() for a in ingr_attns[ix] ]).T ingr_attn_df = pd.DataFrame( ingr_attentions[:len(ingredient_strs[ix])], index=ingredient_strs[ix], columns=out_tokens ) ingr_attn_df = ingr_attn_df[ingr_attn_df.index != ''] ingr_attn_df.to_pickle( os.path.join(trial_save_dir, 'ingredient_attention.pkl') ) # Prior Technique Attention prior_tech_attention = np.matrix([ a.squeeze().detach().cpu().numpy().tolist() for a in prior_tech_attns[ix] ]).T prior_tech_attn_df = pd.DataFrame( prior_tech_attention, index=TECHNIQUES_LIST + ['PAD'], columns=out_tokens ) prior_tech_attn_df = prior_tech_attn_df[(prior_tech_attn_df.T != 0.0).any()] prior_tech_attn_df.to_pickle( os.path.join(trial_save_dir, 'prior_tech_attention.pkl') ) if i % print_every == 0: elapsed = datetime.now() - start print("Epoch Step: {} LM Loss: {:.5f}; Name Loss: {:.5f}; Tok/s: {:.3f}".format( i, loss / this_batch_size, name_loss / this_batch_size, print_tokens / elapsed.seconds )) print('SAMPLE DECODED RECIPE:\n\n{}\n\n'.format(recipe_reprs[0])) start = datetime.now() print_tokens = 0 # Reshuffle the sampler sampler.renew_indices() if total_name_tokens > 0: print('\nName Perplexity: {}'.format( np.exp(total_name_loss / 
float(total_name_tokens)) )) # Store perplexity ppx = np.exp(total_loss / float(total_tokens)) with open(os.path.join(base_save_dir, 'ppx.pkl'), 'wb') as wf: pickle.dump(ppx, wf) print('PERPLEXITY: {:.5f}'.format( ppx )) if not ppx_only: # Store recipe information -- generated string, # tokens (length), tech, # tech gen_df = pd.DataFrame(results_dicts)[[ 'u', 'i', 'generated', 'n_tokens', 'generated_techniques', 'n_techniques' ]] df_loc = os.path.join(base_save_dir, 'generated_df.pkl') gen_df.to_pickle(df_loc) print('Saved generation DF to {}'.format( df_loc )) print(gen_df.head(3))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_one_epoch(self):\n raise NotImplementedError", "def run_epoch( self ):\n # --- Init Epoch ----\n total_epoch_loss = 0.0\n epoch_batches = self.dataset.dataloader( self.config.neuron.epoch_length )\n progress_bar = qqdm(enumerate(epoch_batches), total=len(epoch_batches...
[ "0.7596425", "0.7297351", "0.71258926", "0.71189123", "0.7038193", "0.70254976", "0.69980335", "0.69980335", "0.6921019", "0.69073707", "0.69073707", "0.69073707", "0.69073707", "0.6906141", "0.6859562", "0.6859317", "0.6823435", "0.6796138", "0.67864937", "0.67839265", "0.67...
0.0
-1
Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line.
def convert_text_to_rouge_format(text, title="dummy title"):
    """Convert *text* to the HTML format ROUGE understands.

    The text is assumed to contain one sentence per line; blank lines are
    dropped. Each remaining sentence is wrapped in a numbered anchor
    element and embedded in a minimal HTML document.

    Parameters
    ----------
    text : str
        Sentences separated by newlines.
    title : str
        Title placed in the HTML head (default: "dummy title").

    Returns
    -------
    str
        The assembled HTML document.
    """
    anchor_tpl = '<a name="{i}">[{i}]</a> <a href="#{i}" id={i}>{text}</a>'
    elems = []
    # ROUGE expects one anchor per non-empty sentence; numbering counts
    # blank lines too (enumerate runs over every line).
    for idx, sentence in enumerate(text.split("\n"), start=1):
        if sentence != '':
            elems.append(anchor_tpl.format(i=idx, text=sentence))
    return (
        "<html>\n<head>\n<title>{title}</title>\n</head>\n"
        "<body bgcolor=\"white\">\n{elems}\n</body>\n</html>"
    ).format(title=title, elems="\n".join(elems))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert(text):\n return NewDocument.from_rst(text).format()", "def preprocess(self, text):\r\n return text", "def preprocess(self, text):\n if self.model_name == \"bert-base-arabert\":\n return self._old_preprocess(\n text,\n do_farasa_tokenization=...
[ "0.66363716", "0.61474895", "0.6118832", "0.6097702", "0.609217", "0.60894656", "0.6057062", "0.60051125", "0.6003964", "0.59820795", "0.59751576", "0.5925597", "0.5902075", "0.5848546", "0.5844513", "0.5837306", "0.58300614", "0.58283144", "0.5815858", "0.5792594", "0.577824...
0.6489249
1
Bin calculation for x and y Calculates the bin edges for the given data arrays x and y.
def get_2D_bins(x, y, bins, same_bins=False):
    """Bin calculation for x and y.

    Calculates the bin edges for the given data arrays ``x`` and ``y``.

    Parameters
    ----------
    x, y : numpy.ndarray
        Data arrays to be binned.
    bins : int, str or list
        Binning specification:

        * ``[np.ndarray, np.ndarray]`` -- precalculated bin edges, returned
          unchanged.
        * ``['uniform_counts', n]`` -- percentile-based edges so that every
          bin holds roughly the same number of observations.
        * int or one of ``'fd'``, ``'doane'``, ``'scott'``, ``'stone'``,
          ``'rice'``, ``'sturges'``, ``'sqrt'`` -- forwarded to
          :func:`numpy.histogram_bin_edges`.
        * ``'unique_values'`` -- one edge per unique value.
    same_bins : bool
        If True, x and y share one common set of bin edges.

    Returns
    -------
    list
        ``[bins_x, bins_y]`` -- two ``numpy.ndarray`` of bin edges.

    Raises
    ------
    ValueError
        If the binning specification is malformed or unknown.
    """
    # precalculated bins [np.ndarray, np.ndarray]: do nothing and return the same bins
    if isinstance(bins, list):
        if isinstance(bins[0], np.ndarray) and isinstance(bins[1], np.ndarray):
            pass
        elif 'uniform_counts' in bins:
            # percentile-based edges: the i-th edge is the (i/n)-quantile.
            # Keep the try minimal: only the n extraction can legitimately
            # fail here. Was a bare ``except`` whose message pointed at the
            # wrong method name ('uniform_bins').
            try:
                n = int(bins[1])
            except (IndexError, TypeError, ValueError):
                raise ValueError(
                    "Please define number of bins for binning method "
                    "uniform_counts: bins = ['uniform_counts', n_bins]")
            bins_x = np.fromiter(
                (np.nanpercentile(x, (i / n) * 100) for i in range(1, n + 1)),
                dtype=float)
            bins_y = np.fromiter(
                (np.nanpercentile(y, (i / n) * 100) for i in range(1, n + 1)),
                dtype=float)
            bins = [bins_x, bins_y]
    else:
        # calculate bins with np.histogram_bin_edges(), even_width option == int
        if bins in ['fd', 'doane', 'scott', 'stone', 'rice', 'sturges', 'sqrt'] or isinstance(bins, int):
            if same_bins:
                bins_xy = np.histogram_bin_edges([x, y], bins)
                bins = [bins_xy, bins_xy]
            else:
                bins = [np.histogram_bin_edges(x, bins),
                        np.histogram_bin_edges(y, bins)]
        elif bins == 'uniform_counts':
            # a bare 'uniform_counts' string lacks the bin count; the old
            # message wrongly called the method 'uniform_bins'
            raise ValueError(
                "Please define number of bins for binning method "
                "uniform_counts: bins = ['uniform_counts', n_bins]")
        elif bins == 'unique_values':
            if same_bins:
                bins_xy = np.unique([x, y])
                bins = [bins_xy, bins_xy]
            else:
                bins = [np.unique(x), np.unique(y)]
        else:
            raise ValueError(f"Binning option {bins} not known.")

    # always return bins as bin edges: [np.ndarray, np.ndarray]
    return bins
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get2DBins(x, y, binSizeX, binSizeY):\n\n result = []\n xlength = len(x)\n ylength = len(y)\n\n i = 0\n xcount = 0\n for i1 in range(0, xlength, binSizeX):\n i2 = i1 + binSizeX\n if i2 >= xlength:\n i2 = xlength - 1\n xcount += 1\n ycount = 0\n for...
[ "0.7028389", "0.69049263", "0.68282115", "0.6756274", "0.65525955", "0.6534234", "0.6517673", "0.65156", "0.6511489", "0.6493547", "0.6453416", "0.63842267", "0.630369", "0.6285076", "0.62552357", "0.62531275", "0.6207189", "0.6184054", "0.6180757", "0.61535704", "0.61472905"...
0.6405554
11
Shannon Entropy Calculates the Shannon Entropy for the given data array x.
def entropy(x, bins, normalize=False, xy_probabilities=False):
    """Shannon Entropy.

    Calculates the Shannon Entropy (in bits) for the given data array x.

    Parameters
    ----------
    x : numpy.ndarray
        Data values, or -- if ``xy_probabilities`` is True -- a vector of
        probabilities summing to 1.
    bins : int, str or numpy.ndarray
        Bin specification forwarded to ``numpy.histogram_bin_edges``;
        ignored when ``xy_probabilities`` is True.
    normalize : bool
        If True, divide by the maximal entropy ``log2(n_bins)`` of a
        uniform distribution.
    xy_probabilities : bool
        If True, interpret ``x`` as probabilities directly.

    Returns
    -------
    float

    Raises
    ------
    ValueError
        If the probabilities do not sum to 1, or the histogram is empty.
    """
    # calculate probabilities if xy_probabilities == False
    if xy_probabilities:
        # if x does not sum up to 1, raise an error
        if not np.isclose(sum(x), 1, atol=0.0001):
            raise ValueError('Probabilities in vector x do not sum up to 1.')

        # add a small number to all probabilities if a zero occurs, so
        # log2 stays finite. (The original tested ``x.any(0)``, which is
        # true whenever ANY element is non-zero -- the opposite check.)
        if (x == 0).any():
            p = x + 1e-15
        else:
            p = x
    else:
        # get the bins
        bins = np.histogram_bin_edges(x, bins)

        # calculate the empirical probabilities
        count = np.histogram(x, bins=bins)[0]

        # if counts should be None, raise an error
        if np.sum(count) == 0:
            raise ValueError('The histogram cannot be empty. Adjust the bins to ' +
                             'fit the data')

        # calculate the probabilities; epsilon keeps log2 finite for empty bins
        p = (count / np.sum(count)) + 1e-15

    # calculate the Shannon Entropy
    if normalize:
        # maximal entropy: uniform distribution over all bins
        normalizer = np.log2(len(p))
        return - p.dot(np.log2(p)) / normalizer
    else:
        return - p.dot(np.log2(p))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ShannonEntropy(self,s):\n e = s[np.nonzero(s)]**2 * np.log(s[np.nonzero(s)]**2)\n return np.sum(e)", "def entropy(x):\n nz = np.nonzero(x)[0]\n return -np.sum(x[nz]*np.log2(x[nz]))", "def H(self, data):\n entropy = 0\n\n if not data:\n return entropy\n\n ...
[ "0.7339232", "0.6906172", "0.67469835", "0.66126776", "0.6580351", "0.6566007", "0.65266234", "0.646055", "0.6361494", "0.6340554", "0.62182045", "0.6194352", "0.6190961", "0.6157335", "0.6148306", "0.61020863", "0.6101131", "0.60966253", "0.6066495", "0.60436225", "0.6029721...
0.6537447
6
Conditional Entropy Calculates the conditional Shannon Entropy for two discrete distributions. This metric gives the entropy of the distribution of x in case the distribution of y is known.
def conditional_entropy(x, y, bins, normalize=False):
    """Conditional Entropy.

    Calculates the conditional Shannon Entropy H(x|y) for two discrete
    distributions: the entropy remaining in x once the distribution of y
    is known, via the identity H(x|y) = H(x, y) - H(y).

    Parameters
    ----------
    x, y : numpy.ndarray
        Data arrays.
    bins : int, str or list
        Bin specification, resolved through ``get_2D_bins``.
    normalize : bool
        If True, normalize the result by H(x).

    Returns
    -------
    float
    """
    # resolve the bin specification into edges for x and y
    bins = get_2D_bins(x, y, bins)

    # H(x|y) = H(x, y) - H(y)
    h_joint = joint_entropy(x, y, bins)
    h_y = entropy(y, bins[1])
    h_cond = h_joint - h_y

    if not normalize:
        return h_cond

    normalizer = entropy(x, bins[0])
    # guard against blowing up when both terms are effectively zero
    # (e.g. -1.3e-12 / -1.6e-12 would yield a huge spurious ratio)
    if h_cond < 1e-4 and normalizer < 1e-4:
        return 0
    return h_cond / normalizer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditional_entropy(f1, f2):\n\n ce = ee.entropyd(f1) - ee.midd(f1, f2)\n return ce", "def entropy(y):\n return -1 * sum(\n [\n pipe(np.sum(y == value) / len(y), lambda ratio: ratio * np.log(ratio))\n for value in set(y)\n ]\n )", "def conditional_entropy(sel...
[ "0.7481201", "0.6875717", "0.6782211", "0.6750487", "0.6720426", "0.6655979", "0.6648373", "0.6570259", "0.6566688", "0.64887154", "0.64597607", "0.64407265", "0.63953054", "0.6383221", "0.63512045", "0.63497704", "0.63459283", "0.63403004", "0.6330119", "0.6322664", "0.62897...
0.71850336
1
Mutual information Calculates the mutual information of a discrete distribution x given a known discrete distribution y. The mutual information is the amount of information that two distributions share.
def mutual_information(x, y, bins, normalize=False):
    """Mutual information.

    Calculates the mutual information I(x;y) = H(x) - H(x|y) of a
    discrete distribution x given a known discrete distribution y: the
    amount of information the two distributions share.

    Parameters
    ----------
    x, y : numpy.ndarray
        Data arrays of equal length.
    bins : int, str or list
        Bin specification, resolved through ``get_2D_bins``.
    normalize : bool
        If True, normalize by min(H(x), H(y)).

    Returns
    -------
    float
    """
    # both samples have to be of equal length
    assert len(x) == len(y)

    # resolve the bin specification into edges for x and y
    bins = get_2D_bins(x, y, bins)

    # I(x;y) = H(x) - H(x|y)
    mi = entropy(x, bins[0]) - conditional_entropy(x, y, bins)

    if not normalize:
        return mi

    normalizer = np.min([entropy(x, bins[0]), entropy(y, bins[1])])
    # guard against blowing up when both terms are effectively zero
    # (avoids huge spurious ratios like -1.3e-12 / -1.6e-12)
    if mi < 1e-4 and normalizer < 1e-4:
        return 0
    return mi / normalizer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n ...
[ "0.8168408", "0.78968424", "0.74779177", "0.73788935", "0.72612417", "0.71773905", "0.7003771", "0.66112494", "0.63962644", "0.6395062", "0.6346539", "0.6204402", "0.61775285", "0.61775285", "0.6012243", "0.59877944", "0.58945143", "0.588451", "0.5869148", "0.5841374", "0.580...
0.7191145
5
Cross Entropy Calculates the cross entropy of two discrete distributions x and y.
def cross_entropy(x, y, bins, xy_probabilities=False):
    """Cross Entropy.

    Calculates the cross entropy -sum(p_x * log2(p_y)) of two discrete
    distributions x and y.

    Parameters
    ----------
    x, y : numpy.ndarray
        Data arrays, or probability vectors when ``xy_probabilities``.
    bins : int, str or list
        Bin specification resolved through ``get_2D_bins`` (joint bins
        for x and y); ignored when ``xy_probabilities`` is True.
    xy_probabilities : bool
        If True, interpret x and y as probabilities directly.

    Returns
    -------
    float

    Raises
    ------
    ValueError
        If a probability vector does not sum to 1.
    """
    # calculate probabilities if probabilities == False
    if xy_probabilities:
        # same bins for x and y -> same length of x and y if xy_probabilities == True
        assert len(x) == len(y)

        # if x does not sum up to 1, raise an error
        if not np.isclose(sum(x), 1, atol=0.0001):
            raise ValueError('Probabilities in vector x do not sum up to 1.')
        # if y does not sum up to 1, raise an error
        if not np.isclose(sum(y), 1, atol=0.0001):
            raise ValueError('Probabilities in vector y do not sum up to 1.')

        # add a small number if a zero occurs, so log2 stays finite.
        # (The original tested ``x.any(0)``, which is true whenever ANY
        # element of x is non-zero, and ignored zeros in y entirely.)
        if (x == 0).any() or (y == 0).any():
            px = x + 1e-15
            py = y + 1e-15
        else:
            px = x
            py = y
    else:
        # get the bins, joint bins for x and y (same_bins=True)
        bins = get_2D_bins(x, y, bins, same_bins=True)

        # calculate unconditioned histograms
        hist_x = np.histogram(x, bins=bins[0])[0]
        hist_y = np.histogram(y, bins=bins[1])[0]

        # empirical probabilities; epsilon keeps log2 finite for empty bins
        px = (hist_x / np.sum(hist_x)) + 1e-15
        py = (hist_y / np.sum(hist_y)) + 1e-15

    return - px.dot(np.log2(py))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_entropy(x, y):\n\n if len(y.shape) == 1:\n return F.cross_entropy(x, y)\n if y.shape[1] == 1:\n y = y.squeeze(1)\n return F.cross_entropy(x, y)\n\n return torch.mean(\n torch.div(\n F.binary_cross_entropy_with_logits(x, y, reduction=\"none\"),\n ...
[ "0.7399793", "0.7246357", "0.71943384", "0.70277774", "0.6669306", "0.6637702", "0.6602384", "0.65894943", "0.65266645", "0.6473503", "0.6437216", "0.642684", "0.63894004", "0.6365542", "0.6353329", "0.63220906", "0.628778", "0.6279644", "0.62760943", "0.6269529", "0.62406224...
0.7348612
1
r"""Joint Entropy Calculates the joint entropy of two discrete distributions x and y. This is the entropy of y added to the conditional entropy of x given y.
def joint_entropy(x, y, bins):
    r"""Joint Entropy.

    Calculates the joint entropy H(x, y) of two discrete distributions
    x and y from their 2D histogram.

    Parameters
    ----------
    x, y : numpy.ndarray
        Data arrays of equal length.
    bins : int, str or list
        Bin specification, resolved through ``get_2D_bins`` (x and y
        each keep their own bins).

    Returns
    -------
    float
    """
    # both samples have to be of equal length
    assert len(x) == len(y)

    # resolve the bin specification; x and y get their own bins here
    bins = get_2D_bins(x, y, bins)

    # empirical joint probabilities from the 2D histogram; the small
    # offset keeps log2 finite for empty cells
    counts = np.histogram2d(x, y, bins)[0]
    joint_p = (counts / np.sum(counts)) + 1e-15

    return - np.sum(joint_p * np.log2(joint_p))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutual_information(x, y):\r\n\r\n # INSERT YOUR CODE HERE\r\n xvalue, xcount = np.unique(x,return_counts = True)\r\n probx = xcount.astype(float)/len(x)\r\n Hyx = 0.0\r\n for pxval,xval in zip(probx,xvalue):\r\n Hyx += (pxval)*entropy(y[x==xval])\r\n \r\n Ixy = entropy(y) - Hyx\r\n ...
[ "0.6370943", "0.63690835", "0.63631195", "0.6334478", "0.6209416", "0.6186507", "0.60127246", "0.5941165", "0.58904195", "0.5855695", "0.5840326", "0.5787013", "0.5770392", "0.57627195", "0.5750125", "0.5739848", "0.5735412", "0.56943065", "0.56919396", "0.5672802", "0.565470...
0.7735844
0
r"""Kullback-Leibler Divergence Calculates the Kullback-Leibler Divergence between two discrete distributions x and y. X is considered to be an empirical discrete distribution while y is considered to be the real discrete distribution of the underlying population.
def kullback_leibler(x, y, bins, xy_probabilities=False):
    r"""Kullback-Leibler Divergence.

    Calculates the Kullback-Leibler Divergence D_KL(x || y) between two
    discrete distributions. x is treated as an empirical distribution
    and y as the real distribution of the underlying population, using
    the identity D_KL(x || y) = H(x, y) - H(x) (cross entropy minus
    entropy).

    Parameters
    ----------
    x, y : numpy.ndarray
        Data arrays, or probability vectors when ``xy_probabilities``.
    bins : int, str or list
        Bin specification resolved through ``get_2D_bins`` (joint bins);
        ignored when ``xy_probabilities`` is True.
    xy_probabilities : bool
        If True, interpret x and y as probabilities directly.

    Returns
    -------
    float

    Raises
    ------
    ValueError
        If a probability vector does not sum to 1.
    """
    if xy_probabilities:
        # if x does not sum up to 1, raise an error
        if not np.isclose(sum(x), 1, atol=0.0001):
            raise ValueError('Probabilities in vector x do not sum up to 1.')
        # if y does not sum up to 1, raise an error
        if not np.isclose(sum(y), 1, atol=0.0001):
            raise ValueError('Probabilities in vector y do not sum up to 1.')

        # add a small number if a zero occurs so the downstream log2
        # stays finite. (The original tested ``x.any(0)``, which is true
        # whenever ANY element of x is non-zero, and ignored zeros in y.)
        if (x == 0).any() or (y == 0).any():
            px = x + 1e-15
            py = y + 1e-15
        else:
            px = x
            py = y
    else:
        # get the bins, joint bins for x and y (same_bins=True)
        bins = get_2D_bins(x, y, bins, same_bins=True)

        # calculate unconditioned histograms
        hist_x = np.histogram(x, bins=bins[0])[0]
        hist_y = np.histogram(y, bins=bins[1])[0]

        # calculate probabilities (epsilon handling happens downstream)
        px = (hist_x / np.sum(hist_x))
        py = (hist_y / np.sum(hist_y))

    # D_KL(x || y) = H(x, y) - H(x)
    hcross = cross_entropy(px, py, bins, xy_probabilities=True)
    hx = entropy(px, bins, xy_probabilities=True)

    return hcross - hx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kl_bern(x, y):\n x = min(max(x, eps), 1-eps)\n y = min(max(y, eps), 1-eps)\n return x*log(x/y) + (1-x)*log((1-x)/(1-y))", "def kl_divergence(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not s...
[ "0.70139825", "0.6948134", "0.6635383", "0.6421419", "0.63527554", "0.6301051", "0.62800944", "0.62187314", "0.6194453", "0.6156323", "0.6144021", "0.61299276", "0.61106426", "0.6105154", "0.6101856", "0.6100986", "0.6093369", "0.6083329", "0.60363257", "0.6034415", "0.601664...
0.7132836
0
r"""Jensen-Shannon Divergence Calculates the Jensen-Shannon Divergence (JSD) between two discrete distributions x and y. JSD quantifies the difference (or similarity) between two probability distributions and uses the KL divergence to calculate a smoothed normalized score [0, 1] that is symmetrical.
def jensen_shannon(x, y, bins, calc_distance=False, xy_probabilities=False):
    r"""Jensen-Shannon Divergence.

    Calculates the Jensen-Shannon Divergence (JSD) between two discrete
    distributions x and y. JSD quantifies the difference (or similarity)
    of the distributions as a smoothed, symmetric score built from the
    Kullback-Leibler divergence of each against the mixture
    m = (x + y) / 2.

    Parameters
    ----------
    x, y : numpy.ndarray
        Data arrays of equal length, or probability vectors when
        ``xy_probabilities``.
    bins : int, str or list
        Bin specification resolved through ``get_2D_bins`` (joint bins);
        ignored when ``xy_probabilities`` is True.
    calc_distance : bool
        If True, return the Jensen-Shannon distance (the square root of
        the divergence) instead.
    xy_probabilities : bool
        If True, interpret x and y as probabilities directly.

    Returns
    -------
    float

    Raises
    ------
    ValueError
        If a probability vector does not sum to 1.
    """
    # assert array length
    assert len(x) == len(y)

    if xy_probabilities:
        # if x does not sum up to 1, raise an error
        if not np.isclose(sum(x), 1, atol=0.0001):
            raise ValueError('Probabilities in vector x do not sum up to 1.')
        # if y does not sum up to 1, raise an error
        if not np.isclose(sum(y), 1, atol=0.0001):
            raise ValueError('Probabilities in vector y do not sum up to 1.')

        # add a small number if a zero occurs so the downstream log2
        # stays finite. (The original tested ``x.any(0)``, which is true
        # whenever ANY element of x is non-zero, and ignored zeros in y.)
        if (x == 0).any() or (y == 0).any():
            px = x + 1e-15
            py = y + 1e-15
        else:
            px = x
            py = y
    else:
        # get the bins, joint bins for x and y (same_bins=True)
        bins = get_2D_bins(x, y, bins, same_bins=True)

        # calculate unconditioned histograms
        hist_x = np.histogram(x, bins=bins[0])[0]
        hist_y = np.histogram(y, bins=bins[1])[0]

        # empirical probabilities; epsilon keeps log2 finite
        px = (hist_x / np.sum(hist_x)) + 1e-15
        py = (hist_y / np.sum(hist_y)) + 1e-15

    # mixture distribution m
    pm = 0.5 * (px + py)

    # JSD = 0.5 * KL(px || pm) + 0.5 * KL(py || pm)
    kl_xm = kullback_leibler(px, pm, bins=bins, xy_probabilities=True)
    kl_ym = kullback_leibler(py, pm, bins=bins, xy_probabilities=True)

    if calc_distance:
        return (0.5 * kl_xm + 0.5 * kl_ym)**0.5
    else:
        return (0.5 * kl_xm + 0.5 * kl_ym)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_js_divergence(df_1, df_2, n_bins=30):\n a = np.concatenate((df_1, df_2), axis=0)\n e, p = prob_mass_fun(df_1, n = n_bins, range = (a.min(), a.max()))\n _, q = prob_mass_fun(df_2, n = e, range = (a.min(), a.max()))\n\n return scipy.spatial.distance.jensenshannon(p, q)", "def js_divergence(...
[ "0.7116401", "0.6758938", "0.6689075", "0.654391", "0.6027384", "0.60244334", "0.6005269", "0.59004337", "0.58797586", "0.58616793", "0.58290076", "0.58030003", "0.57559144", "0.57503366", "0.569734", "0.5691851", "0.5684341", "0.5684016", "0.5652677", "0.5649112", "0.5614351...
0.6525315
4
Method for parsing CLI arguments using argparse.
def parse_parameters():
    """Method for parsing CLI arguments using argparse.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with attributes ``review_id`` (required) and
        ``out_file`` (optional, defaults to None).
    """
    arg_parser = argparse.ArgumentParser(
        description="Get all dependent review IDs")
    arg_parser.add_argument(
        "-r", "--review-id", type=str, required=True,
        help="Review ID")
    arg_parser.add_argument(
        "-o", "--out-file", type=str, required=False,
        help="The out file with the reviews IDs")
    return arg_parser.parse_args()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_cli_arguments():\n parser = argparse.ArgumentParser('Generates a MANIFEST file used by the '\n 'HMP2 AnADAMA2 workflows.')\n parser.add_argument('-b', '--broad-data-sheet', required=True,\n help='Broad data product status spreadsheet. '\n ...
[ "0.80792505", "0.8012675", "0.78432906", "0.78108615", "0.7765357", "0.7751876", "0.77110696", "0.76705295", "0.7647746", "0.7644992", "0.7644233", "0.76380914", "0.75653404", "0.7562569", "0.75440955", "0.7535946", "0.75321084", "0.75190026", "0.75150883", "0.7505613", "0.74...
0.0
-1
Main method to get dependent review IDs of a specific review request on the ReviewBoard.
def main():
    """Fetch the dependent review IDs of a review request on the
    ReviewBoard and write them to a file or print them to stdout.

    NOTE(review): relies on the module-level ``REVIEWBOARD_URL`` constant
    and ``ReviewBoardHandler`` class defined elsewhere in this file.
    """
    args = parse_parameters()

    # look up the review request via the ReviewBoard REST API
    request_url = "%s/api/review-requests/%s/" % (REVIEWBOARD_URL,
                                                  args.review_id)
    handler = ReviewBoardHandler()
    review_request = handler.api(request_url)["review_request"]
    dependent_ids = handler.get_dependent_review_ids(review_request)

    if args.out_file:
        # one ID per line in the output file
        with open(args.out_file, 'w') as out:
            for dep_id in dependent_ids:
                out.write("%s\n" % (str(dep_id)))
    else:
        # printed form keeps the extra blank line per ID (print adds
        # its own newline on top of the "%s\n" format)
        for dep_id in dependent_ids:
            print("%s\n" % (str(dep_id)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_review_request(self, rid):\r\n rsp = self.api_call('api/review-requests/%s/' % rid)\r\n return rsp['review_request']", "def get_review_request(self, request_id, api_root):\n try:\n request = api_root.get_review_request(review_request_id=request_id)\n except APIError, e:\n ...
[ "0.57654417", "0.5477173", "0.5378847", "0.5101541", "0.5080031", "0.4982286", "0.49632528", "0.49589026", "0.49061635", "0.48880798", "0.488128", "0.4835317", "0.48325068", "0.4829637", "0.48200688", "0.48102397", "0.47959515", "0.4773191", "0.4731234", "0.4722772", "0.47219...
0.6459412
0
Initialize with a user-supplied list of segments.
def __init__(self, segments, lemma=None, case=None):
    """Initialize with a user-supplied list of segments.

    Parameters
    ----------
    segments : list or str
        Segment objects, or a string whose characters are converted to
        segments via ``Segment.new_segment``.
    lemma : optional
        Lemma associated with this word form (default: None).
    case : optional
        Case associated with this word form (default: None).
    """
    # a raw string is converted character-by-character into Segments
    if isinstance(segments, str):
        self.segments = [Segment.new_segment(ch) for ch in segments]
    else:
        self.segments = segments
    self.lemma = lemma
    self.case = case
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def form_segment(self, node_oid):\n # init empty segment and stuff\n new_segment = S...
[ "0.6232747", "0.596336", "0.58711517", "0.5702215", "0.55154556", "0.5394213", "0.53759134", "0.53305984", "0.5308964", "0.529063", "0.5197425", "0.51839024", "0.5133967", "0.5053822", "0.50320536", "0.5010542", "0.50038457", "0.4999929", "0.49838173", "0.4962347", "0.4960938...
0.6051361
1
Create a WordForm of the given CV shape with random segments.
def random_segs(cls, shape, lemma=None, case=None):
    """Create a WordForm of the given CV shape with random segments.

    Parameters
    ----------
    shape : iterable
        Sequence of C/V markers describing the consonant/vowel template
        of the word.
    lemma, case : optional
        Forwarded to the constructor.

    Returns
    -------
    An instance of *cls* built from one random Segment of the
    appropriate type per template slot.
    """
    # one random Segment per C or V slot in `shape`
    random_segments = []
    for slot in shape:
        random_segments.append(Segment(seg_type=slot))
    return cls(random_segments, lemma, case)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_word(self):\r\n\r\n template = self.word_constructions.get()\r\n word = \"\"\r\n for c in template:\r\n if c == \"v\":\r\n letter = self.get_letter(100)\r\n else:\r\n letter = self.get_letter(0)\r\n word += letter\r\n\r\...
[ "0.61004114", "0.5693294", "0.55114466", "0.5438077", "0.53612614", "0.5311946", "0.52376354", "0.51894677", "0.5161035", "0.5152379", "0.5143327", "0.5127287", "0.51046485", "0.50831926", "0.50809175", "0.5080614", "0.5072338", "0.5021304", "0.50183684", "0.4984602", "0.4969...
0.72299457
0
Add the suffix vowel.
def add_suffix(self, suffix):
    """Add the suffix vowel.

    Converts *suffix* into a Segment via ``Segment.new_segment`` and
    appends it to this word form's segment list (in place; returns None).
    """
    new_seg = Segment.new_segment(suffix)
    self.segments.append(new_seg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest", "def get_vowel_names():", "def _replace_suffix(self, word, suffix, replacement):\n assert word.endswith(suffix), \"Given word doesn't end with given ...
[ "0.5983021", "0.5954164", "0.59408945", "0.5778313", "0.5686185", "0.5563964", "0.5542913", "0.55272454", "0.54683185", "0.5462567", "0.54531056", "0.5446963", "0.54377186", "0.5427365", "0.53498006", "0.53402376", "0.53384125", "0.5302798", "0.52940315", "0.5291557", "0.5278...
0.7781676
0