query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
calls parse_sheet to each sheet in the given file
def parse_files(self): """ @param name: name of the file """ """ @type name: string """ df = pd.DataFrame() if not self.xlfnames: self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR", 'data source not found or cannot be open') logging.error('error happened: no excel files found') return False for fileName in self.xlfnames: try: xlfname = self.folder + '/' + fileName # xl = pd.ExcelFile(xlfname) except Exception as e: self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR", 'data source not found or cannot be open', e) return False try: # code for one file per sheet # for sheet in xl.sheet_names: # self.parse_sheet(xl,sheet) # code for one file for all for sheet in xl.sheet_names: df_tmp = self.parse_sheet_to_df(xl, sheet, df) df = df.append(df_tmp, ignore_index=True) except Exception as e: self.producer("THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR", 'data source format is not as expected', e) return False return self.write_to_file(df)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_xlsx(self, filename):\n xlsx = pd.ExcelFile(filename)\n for sheet in xlsx.sheet_names:\n table_index_header = cfg.get_list(\"table_index_header\", sheet)\n self.input_data[sheet] = xlsx.parse(\n sheet,\n index_col=list(range(int(table_index...
[ "0.6666606", "0.64332336", "0.6416094", "0.5954307", "0.59148884", "0.5886274", "0.58763015", "0.5842564", "0.57256263", "0.5725077", "0.5706473", "0.57055527", "0.5680622", "0.56387514", "0.563543", "0.5528164", "0.55189323", "0.5491748", "0.54883724", "0.54661864", "0.53742...
0.54823756
19
This function sends data to kafka bus
def producer(self, topic, msg, e=None): producer = KafkaProducer(bootstrap_servers=['HOST_IP', 'HOST_IP', 'HOST_IP'] ,api_version=(2, 2, 1),security_protocol='SSL', ssl_check_hostname=True, ssl_cafile='/home/oulu/certs/ca-cert', ssl_certfile='/home/oulu/certs/cutler-p3-c1-00.crt', ssl_keyfile='/home/oulu/certs/cutler-p3-c1-00.key') msg_b = str.encode(msg) producer.send(topic, msg_b).get(timeout=30) if (e): logging.exception('exception happened')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kafka_publish_message(self, message):\n self.kf_sender = self.kf_producer.send(self.kf_topic, value=message.encode('utf-8'));", "def callback(self, data):\n\n self.connection = pika.BlockingConnection(self.params)\n self.channel = self.connection.channel()\n\n # The fanout exchang...
[ "0.68818235", "0.6817525", "0.6561873", "0.6313053", "0.6285782", "0.62410563", "0.6211618", "0.6198164", "0.61639136", "0.61561596", "0.6113484", "0.60954404", "0.60725266", "0.6064175", "0.60134137", "0.59805125", "0.59797984", "0.5979177", "0.5969394", "0.5968888", "0.5966...
0.58462435
39
Builds and returns (in the form returned by decoderawtransaction) a transaction that spends the given utxo, pays CHI to some output
def build_tx (self, utxo, chiOut, name, nameAddr, value): nameData = self.nodes[0].name_show (name) inputs = [nameData, utxo] outputs = {nameAddr: Decimal ('0.01')} outputs.update (chiOut) tx = self.nodes[0].createrawtransaction (inputs, outputs) nameOp = { "op": "name_update", "name": name, "value": value, } tx = self.nodes[0].namerawtransaction (tx, 0, nameOp) res = self.nodes[0].decoderawtransaction (tx["hex"]) res["hex"] = tx["hex"] return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sochain_utxo_to_xchain_utxo(utxo):\n hash = utxo['txid']\n index = utxo['output_no']\n \n value = round(float(utxo['value']) * 10 ** 8)\n script = bytearray.fromhex(utxo['script_hex']) #utxo['script_hex']\n witness_utxo = Witness_UTXO(value, script)\n return UTXO(hash, index, witness_utxo...
[ "0.65796894", "0.630716", "0.62986237", "0.6173014", "0.60841966", "0.5914946", "0.5743247", "0.5731016", "0.5521235", "0.55083513", "0.5494718", "0.5486861", "0.54759413", "0.54701596", "0.54108584", "0.54077893", "0.53893286", "0.5363596", "0.5303798", "0.52935517", "0.5290...
0.70295274
0
Signs a transaction (in format of build_tx) with the given node, and returns the decoderawtransactiontype result again.
def sign (self, node, tx): signed = node.signrawtransactionwithwallet (tx["hex"]) res = node.decoderawtransaction (signed["hex"]) res.update (signed) return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign_transaction():\n data = request.get_json()\n\n try:\n tx = Transaction.from_dict(data)\n except TypeError:\n response = dict(message='Improper transaction json provided.')\n status_code = 400\n return jsonify(response), status_code\n\n signature = tx.sign(node.walle...
[ "0.6919308", "0.6778266", "0.6380237", "0.627283", "0.61029476", "0.6083491", "0.5987487", "0.58167666", "0.57365465", "0.5712428", "0.56981504", "0.56660604", "0.56095326", "0.55846405", "0.55624753", "0.5552044", "0.54918426", "0.5468147", "0.54531074", "0.54221904", "0.536...
0.77226585
0
Do not return anything, modify root inplace instead.
def recoverTree(self, root: TreeNode) -> None: arr1=[] self.toList(root,arr1) print (arr1) num1=None num2=arr1[-1] l=len(arr1) i=0 arr2=[]+arr1 arr2.sort() for i in range(l): if not arr1[i]==arr2[i]: if num1: num2=arr1[i] break num1=arr1[i] self.replace(root,num1,num2) print(root)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uproot(self):\n self.__root__ = self\n return self", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substit...
[ "0.71675247", "0.7090078", "0.7052984", "0.69619083", "0.67892647", "0.6614594", "0.6551307", "0.65119416", "0.65119416", "0.65119416", "0.65119416", "0.64319086", "0.6413781", "0.6397017", "0.6397017", "0.6370123", "0.63613737", "0.63289756", "0.6325578", "0.630594", "0.6276...
0.0
-1
This a True test to see if the column is selected
def testGetColumnSolution(self): actionlist = [1,2,3,4,5] for action in actionlist: if action == 1: val = getColumnSelection(action) self.assertEqual(val,"bookID") if action == 2: val = getColumnSelection(action) self.assertEqual(val,"bookAuthor") if action == 3: val = getColumnSelection(action) self.assertEqual(val,"ISBN") if action == 4: val = getColumnSelection(action) self.assertEqual(val,"numPurchased") if action == 5: val = getColumnSelection(action) self.assertEqual(val,"numCheckedOut") if action == 6: val = getColumnSelection(action) self.assertEqual(val,"bookTitle") if action == 7: val = getColumnSelection(action) self.assertEqual(val,"bookPrice")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_select_type(self) -> bool:\n row_type = self.get_type()\n return row_type.startswith('select')", "def is_select(self) -> bool:\n return self.statement.is_select", "def is_select_one(self) -> bool:\n select_one_starts = (\n 'select_one ',\n 'select_one_ex...
[ "0.6952466", "0.6665726", "0.6658204", "0.6457513", "0.64384896", "0.6400926", "0.63496137", "0.6185076", "0.61790794", "0.6170231", "0.6149248", "0.6016119", "0.5973125", "0.59719217", "0.59619623", "0.59614056", "0.593753", "0.59325904", "0.59170127", "0.59004337", "0.59003...
0.0
-1
This a False test to see if the column is selected
def testBadGetColumnSolution(self): actionlist = ["ISBN",9,8,10,"5","","1"] for action in actionlist: val = getColumnSelection(action) self.assertFalse(val)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_select_type(self) -> bool:\n row_type = self.get_type()\n return row_type.startswith('select')", "def is_select(self) -> bool:\n return self.statement.is_select", "def requires_selection(self) -> bool:\n return True", "def HasSelection(self):\n sel = super(EditraBase...
[ "0.68835926", "0.6650075", "0.64459693", "0.64009845", "0.63434935", "0.6285487", "0.6278165", "0.6227736", "0.6068364", "0.606204", "0.603624", "0.59898543", "0.59765804", "0.591728", "0.5915179", "0.59128124", "0.5842638", "0.58308476", "0.5822645", "0.58154434", "0.579638"...
0.6274609
7
First concat state `indexes`, `preds` and `target` since they were stored as lists. After that, compute list of groups that will help in keeping together predictions about the same query. Finally, for each group compute the `_metric` if the number of positive targets is at least 1, otherwise behave as specified by `self.empty_target_action`.
def compute(self) -> Tensor: if self.samples: return self.average_precisions.float() / self.total else: # pred_image_indices = torch.cat(self.pred_image_indices, dim=0) pred_probs = torch.cat(self.pred_probs, dim=0) pred_labels = torch.cat(self.pred_labels, dim=0) pred_bboxes = torch.cat(self.pred_bboxes, dim=0) # target_image_indices = torch.cat(self.target_image_indices, dim=0) target_labels = torch.cat(self.target_labels, dim=0) target_bboxes = torch.cat(self.target_bboxes, dim=0) # pred_index = torch.nonzero((pred_labels == 1)) # pred_probs = pred_probs[pred_index] # pred_bboxes = pred_bboxes[pred_index] # target_index = torch.nonzero((target_labels == 1)) # target_bboxes = target_bboxes[target_index] # _, index_sorted = torch.sort(pred_probs) # pred_bboxes = pred_bboxes[index_sorted].cpu().detach().numpy() # target_bboxes = target_bboxes.cpu().detach().numpy() pred_probs = pred_probs.cpu().detach().numpy() pred_labels = pred_labels.cpu().detach().numpy() pred_bboxes = pred_bboxes.cpu().detach().numpy() target_labels = target_labels.cpu().detach().numpy() target_bboxes = target_bboxes.cpu().detach().numpy() pred_probs = pred_probs[pred_labels == 1] pred_bboxes = pred_bboxes[pred_labels == 1] target_bboxes = target_bboxes[target_labels == 1] preds_sorted_idx = np.argsort(pred_probs)[::-1] pred_bboxes = pred_bboxes[preds_sorted_idx] x, y = calculate_precision_recall(target_bboxes, pred_bboxes) if len(x) >= 2: return auc(x, y) else: return 0 # return mean_average_precision( # pred_image_indices, # pred_probs, # pred_labels, # pred_bboxes, # target_image_indices, # target_labels, # target_bboxes, # self.iou_threshold, # self.ap_calculation, # )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_summaries(self):\n max_outputs = 3\n summaries = []\n\n # images\n # ------------------------------------------------\n summary_input_shape = image_utils.get_image_summary_shape(self._input_shape_visualisation)\n\n # input images\n input_summary_reshape = tf.reshape(self._input_valu...
[ "0.55227154", "0.5356214", "0.5345962", "0.53439975", "0.52835166", "0.5247036", "0.5236918", "0.52299756", "0.5213777", "0.5200016", "0.518836", "0.5177476", "0.5166062", "0.51618797", "0.51440424", "0.51244515", "0.5098735", "0.5092885", "0.5085687", "0.50846714", "0.507650...
0.0
-1
Read a speech file by name.
def load_file(self, filename): path = os.path.join(self.path_to_sentences, filename) log.info('Reading file %s', path) _, int_sentence = scipy.io.wavfile.read(path) sent = int_sentence.T / np.iinfo(int_sentence.dtype).min if self.force_mono and sent.ndim == 2: return sent[1] else: return sent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readFile(filename):\r\n speechFile = open(filename, \"r\")\r\n speech = speechFile.read()\r\n speechFile.close()\r\n return speech", "def read(self, path):\n pbase = os.path.splitext(path)[0]\n gsid = pbase.split('/')[-2]\n gender, sid = gsid[0], gsid[1:]\n assert sid ...
[ "0.77624923", "0.6965475", "0.67104244", "0.6667067", "0.66525364", "0.66401553", "0.6530066", "0.64281446", "0.64041317", "0.6391962", "0.6273382", "0.62610954", "0.62465245", "0.6240044", "0.62310964", "0.62310964", "0.6208363", "0.62026536", "0.61815035", "0.61784554", "0....
0.0
-1
Return a list of all the files in the corpus.
def files_list(self): path = os.path.join(self.path_to_sentences) log.info("Listing files from directory: %s", path) all_files = os.listdir(path) wav_files_only = [filename for filename in all_files if filename.lower().endswith('.wav')] return wav_files_only
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def get_corpus_filenames():\n corpus_file_list = []\n corpus_files = csv.reader(open('./...
[ "0.7367868", "0.7332538", "0.7282746", "0.7256595", "0.72560257", "0.7246942", "0.7218564", "0.7056109", "0.7021829", "0.69263995", "0.68938583", "0.6842887", "0.68216294", "0.67668146", "0.6749649", "0.6749649", "0.67369485", "0.67292917", "0.67268735", "0.67252684", "0.6723...
0.6863361
11
Read files from disk, starting from the first one.
def load_files(self, n=None): if not n: n = len(self.files) for _, name in zip(list(range(n)), self.files): yield self.load_file(name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_next_file(self):\n\n if self._file_ptr == len(self.files):\n raise pipeline.PipelineStopIteration\n\n # Collect garbage to remove any prior data objects\n gc.collect()\n\n # Fetch and remove the next item in the list\n file_ = self.files[self._file_ptr]\n ...
[ "0.64000064", "0.6366881", "0.62643075", "0.6083623", "0.60555285", "0.57724005", "0.5749551", "0.5745724", "0.5738006", "0.5731174", "0.5714755", "0.5696059", "0.56833583", "0.5662904", "0.5600594", "0.5582189", "0.5556307", "0.5550086", "0.55438334", "0.5493082", "0.5475053...
0.54052305
23
Pick section of signal
def pick_section(signal, section=None): len_noise = signal.shape[-1] if section is None: len_sig = len_noise ii = 0 elif isinstance(section, int): len_sig = section ii = np.random.randint(0, len_noise - len_sig) else: len_sig = np.asarray(section).shape[-1] ii = np.random.randint(0, len_noise - len_sig) return signal[..., ii:ii + len_sig]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def phasing_signal(self, phasing, r, c):\r\n def signal():\r\n value = phasing.currentIndex()\r\n if value >= 0 and value <= 2:\r\n globals.sections[r, c] = value\r\n return signal", "def onPick(self, event):\n\n modifiers = QtWidgets.QApplication.keyboar...
[ "0.6373907", "0.6050952", "0.5974559", "0.54288924", "0.54075724", "0.5390018", "0.5377656", "0.5331255", "0.5308175", "0.5240958", "0.5233869", "0.51971555", "0.5189088", "0.5158308", "0.5147491", "0.5136177", "0.51356614", "0.50236404", "0.49819353", "0.49715742", "0.492783...
0.6796413
0
Returns the speechshaped noise appropriate for the speech material.
def ssn(self, x=None): section = self.pick_section(self._ssn, x) if self.force_mono and section.ndim > 1: return section[0] return section
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def noise(self):\n return self._noise", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # re...
[ "0.6940478", "0.6876316", "0.67471814", "0.67298526", "0.6711006", "0.6499605", "0.641281", "0.63947856", "0.6361855", "0.624543", "0.6218755", "0.60979575", "0.6082138", "0.6052489", "0.6041792", "0.59897363", "0.59645706", "0.59612185", "0.59013486", "0.5857421", "0.5834204...
0.0
-1
Set level of a sentence, in dB.
def set_level(self, x, level): return x * 10 ** ((level - self.ref_level) / 20)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def loglevel(self, ctx, level):\n level = level.lower()\n assert level in LEVELS\n await self.bot.log.change_level(LEVELS[level], ctx.author.name)\n await ctx.send(f\"Set log level to {level.upper()}\")", "def setLevel(self, level):\n self.lvl = level", "def __change_le...
[ "0.636659", "0.62779075", "0.62480867", "0.62194335", "0.6102128", "0.59673667", "0.5937393", "0.59064263", "0.59064263", "0.59064263", "0.58078915", "0.5776728", "0.57651097", "0.5743857", "0.5737821", "0.5697885", "0.56706774", "0.5657614", "0.5645768", "0.56377774", "0.562...
0.5897673
10
Calculate the average level across all sentences. The levels are calculated according to the toolbox's reference level. Returns
def average_level(self): spl = [utils.dbspl(x) for x in self.load_files()] return np.mean(spl), np.std(spl)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rouge_l_sentence_level(eval_sentences, ref_sentences):\n\n f1_scores = []\n for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):\n m = float(len(ref_sentence))\n n = float(len(eval_sentence))\n lcs = _len_lcs(eval_sentence, ref_sentence)\n f1_scores.append(_f_lcs(lcs, m, n))\n ...
[ "0.69772524", "0.61759144", "0.61583227", "0.61080706", "0.6031079", "0.59640235", "0.57921195", "0.5750104", "0.57479537", "0.5734042", "0.5672754", "0.5670753", "0.566068", "0.5644736", "0.5632639", "0.5631451", "0.5594872", "0.559045", "0.5566701", "0.55663264", "0.5546832...
0.6812981
1
Fixed lines start with an area code enclosed in brackets. The area codes vary in length but always begin with 0.
def isfixline(number): if number[0] == '(': return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def padded_area_code(phone_number):\r\n area_code = grab_area_code(phone_number)\r\n return area_code + \"*******\"", "def area_code(self):\n return self.number[:3]", "def clean_open_close_brace(self):\n # Loop over all lines, check for braces and replace them with \\n{ and \\n}\n br...
[ "0.59898686", "0.59150344", "0.56030583", "0.55835414", "0.5426462", "0.54129606", "0.53990805", "0.5327246", "0.52900195", "0.52449477", "0.51972073", "0.51676416", "0.5162044", "0.515133", "0.51285326", "0.5108765", "0.5095171", "0.50828093", "0.5079937", "0.50754184", "0.5...
0.5302146
8
Telemarketers' numbers have no parentheses or space, but they start with the area code 140.
def ismobile(number): if number[0] in ['7', '8', '9']: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area_code(self):\n return self.number[:3]", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def _remove_area_code(phone):\n\n if not phone.startswith('+46'):\n return phone\n else:\n return '0' + phone[3:]", "def area(lado):\n\treturn...
[ "0.62425196", "0.57021415", "0.56712484", "0.54320395", "0.54288685", "0.5344969", "0.52775437", "0.52460563", "0.5242392", "0.5193709", "0.5157009", "0.51362306", "0.51212054", "0.50575703", "0.50537056", "0.50524485", "0.5049207", "0.5046117", "0.5036556", "0.5035202", "0.4...
0.0
-1
Mobile numbers have no parentheses, but have a space in the middle of the number to help readability. The prefix of a mobile number is its first four digits, and they always start with 7, 8 or 9.
def istele(number): if number[:3] == '140': return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_phone(number):\n numberlist = re.findall(\"\\d\",number)\n new_number = \"\".join(numberlist)\n if len(new_number) == 8:\n \tnew_number = \"010\" + new_number\n\tnew_number = new_number[-11:]\n\tif new_number.startswith('1'):\n\t\tnew_number = \"+86-\" + new_number\n\telse:\n\t\tnew_number = ...
[ "0.7378809", "0.728158", "0.7243709", "0.71238893", "0.6969404", "0.6930221", "0.6905321", "0.6822984", "0.670466", "0.6703273", "0.6692987", "0.6657773", "0.66574764", "0.66568744", "0.6644845", "0.65901685", "0.6581246", "0.656387", "0.6562163", "0.64722866", "0.64605606", ...
0.0
-1
Instantiate a new OperationArgument
def __init__(self, name: str, arg_type_name: str, is_required=False): self.key = name self.value = arg_type_name self.required = is_required
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, operation, constargs, randomargs):\n Operation.__init__(self)\n self.operation = operation\n self.constargs = constargs\n self.randomargs = randomargs\n if type(operation) is str:\n import CCAugmentation.outputs as cca_out\n import CCAugme...
[ "0.6846367", "0.6258833", "0.6123263", "0.6116206", "0.6112215", "0.6097888", "0.60650367", "0.6055484", "0.6029025", "0.5997257", "0.5876527", "0.58544487", "0.5838185", "0.5809771", "0.5809771", "0.5809771", "0.5809771", "0.5809771", "0.57846487", "0.57816875", "0.5754001",...
0.5423515
54
Instantiate a new Operation.
def __init__(self, field: "SchemaTypeField", settings: Settings): from qlient import helpers self.settings = settings self.name = field.name self.description = field.description self.arguments = helpers.adapt_arguments(field.args) self.return_type = field.type self._return_fields: Union[Tuple[SelectedField], None] = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, operation, constargs, randomargs):\n Operation.__init__(self)\n self.operation = operation\n self.constargs = constargs\n self.randomargs = randomargs\n if type(operation) is str:\n import CCAugmentation.outputs as cca_out\n import CCAugme...
[ "0.7434616", "0.7018311", "0.6947794", "0.68689376", "0.68165016", "0.6655647", "0.651799", "0.64971197", "0.6486915", "0.6486268", "0.64711845", "0.6437761", "0.64105827", "0.6380135", "0.63713217", "0.6350577", "0.6326084", "0.6294592", "0.6282748", "0.6282437", "0.6206516"...
0.0
-1
Recursively look up a certain amount of return fields depending on the current recursion depth. The depth can be set via the settings. client = Client("...", settings=Settings(max_recursion_depth=3))
def get_return_fields(self, all_types: "Dict[str, SchemaType]") -> Tuple[SelectedField]: if self._return_fields is None: from qlient import helpers self._return_fields = helpers.adapt_return_fields( self.return_type, all_types, self.settings.max_recursion_depth ) return self._return_fields
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recurse(self):\n url = self._api + '?recursive=1'\n json = self._json(self._get(url), 200)\n return Tree(json, self._session) if json else None", "def depth_limited_search(initial_state, goal_state, limit):\n\n return recursive_dls(createRootNode(initial_state), goal_state, limit)", ...
[ "0.5792529", "0.53290737", "0.5041367", "0.50318784", "0.4979942", "0.4966645", "0.49606967", "0.49531704", "0.4941977", "0.49168393", "0.49085727", "0.48915526", "0.48759022", "0.48753", "0.4858908", "0.4832988", "0.47985405", "0.47869855", "0.47741127", "0.47645083", "0.475...
0.4593388
34
Instantiate a new Directive
def __init__(self, raw_directive: Dict): self.name: str = raw_directive.get("name") self.description: str = raw_directive.get("description") self.locations: List[str] = raw_directive.get("locations", []) self.args: Dict[str, Argument] = Schema.parse_arguments(raw_directive.get("args", []))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_directive(cls, directive, app):\n return cls(directive,\n app,\n arguments=directive.arguments,\n content=directive.content,\n options=directive.options)", "def run(self):\n node = DirectiveNode(\n self.name...
[ "0.7304532", "0.60879993", "0.59436953", "0.5747961", "0.5662996", "0.5511462", "0.52373666", "0.51641417", "0.51499546", "0.51448756", "0.508324", "0.5068346", "0.5061711", "0.50400126", "0.5034657", "0.5017803", "0.50147015", "0.49971962", "0.4973718", "0.49727347", "0.4969...
0.6045261
2
Instantiate a new Argument
def __init__(self, raw_arg: Dict): self.name = raw_arg.get("name") self.description = raw_arg.get("description") self.type = TypeDefer(raw_arg.get("type")) if raw_arg.get("type") is not None else None self.default_value = raw_arg.get("defaultValue")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, name=\"alpha\", attr=None):\n Arg.__init__(self, name, attr)", "def from_param(cls, arg):\n return cls(arg)", "def add_argument(*args, **kwargs):\n return _Argument(args, frozenset(kwargs.items()))", "def __init__(self, name, flags, attr=None):\n Arg.__init__(self, ...
[ "0.6991429", "0.6833005", "0.67898697", "0.6781041", "0.6746932", "0.6634749", "0.65116656", "0.64632064", "0.6431998", "0.64295053", "0.63775975", "0.637756", "0.6214503", "0.6194126", "0.6143888", "0.6139867", "0.61160153", "0.6082575", "0.6075455", "0.6035835", "0.60151345...
0.5699178
68
Instantiate a new TypeDefer
def __init__(self, raw_defer: Dict): self.kind = raw_defer.get("kind") self.name = raw_defer.get("name") self.of_type: TypeDefer = TypeDefer(raw_defer.get("ofType")) if raw_defer.get("ofType") is not None else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def instantiate():\n d = defer.Deferred()", "def __init__(self, type_):\n\n self.type = type_", "def Instance(self) -> TypeManager:", "def __call__(self, *args):\n return TypeCall(self, args)", "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attrib...
[ "0.64138234", "0.54614604", "0.54489744", "0.54139173", "0.5383102", "0.5300838", "0.52134734", "0.5191931", "0.5120031", "0.50648534", "0.49831468", "0.49792293", "0.49551898", "0.49469346", "0.49264267", "0.49214688", "0.48970965", "0.48778772", "0.48720497", "0.48700586", ...
0.7597686
0
Instantiate a new SchemaTypeField
def __init__(self, raw_field: Dict): self.name = raw_field.get("name") self.description = raw_field.get("description") self.args: Dict[str, Argument] = Schema.parse_arguments(raw_field.get("args", [])) self.type: TypeDefer = TypeDefer(raw_field.get("type")) if raw_field.get("type") is not None else None self.is_deprecated: bool = raw_field.get("isDeprecated") self.deprecation_reason: str = raw_field.get("deprecationReason")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n ...
[ "0.7356506", "0.718628", "0.6659049", "0.6587988", "0.6470674", "0.6427356", "0.63884383", "0.6371193", "0.6355834", "0.6309193", "0.62948745", "0.6287319", "0.62753356", "0.62753356", "0.6181912", "0.6081761", "0.6023993", "0.6019796", "0.601481", "0.6001073", "0.59939086", ...
0.5908709
25
Instantiate a new SchemaTypeInputField
def __init__(self, raw_input: Dict): self.name = raw_input.get("name") self.description = raw_input.get("description") self.type: TypeDefer = TypeDefer(raw_input.get("type")) if raw_input.get("type") is not None else None self.default_value = raw_input.get("defaultValue")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_field_schema(col_schema: dict) -> bigquery.SchemaField:\n name = to_safe_name(col_schema['name'])\n return bigquery.SchemaField(\n name,\n col_schema.get('type'),\n col_schema.get('mode', 'NULLABLE'),\n col_schema.get('description', '')\n ...
[ "0.67198443", "0.657528", "0.6384678", "0.62780166", "0.61373305", "0.61338854", "0.61338854", "0.61186373", "0.5996262", "0.5976817", "0.59587806", "0.59354585", "0.58839595", "0.5859686", "0.58279943", "0.5807643", "0.5797024", "0.5765275", "0.5757921", "0.57278675", "0.571...
0.5212429
82
Instantiate a new SchemaTypeInterface
def __init__(self, raw_interface: Dict): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_schema(self, schema: str):\n return", "def __init__(self, schema: GraphQLSchema):\n\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\n f\"DSLSchema needs a schema as parameter. Received: {type(schema)}\"\n )\n\n self._schema: Grap...
[ "0.64936477", "0.64496356", "0.64394224", "0.6386912", "0.62401533", "0.6212064", "0.61844695", "0.60685897", "0.60447806", "0.60024077", "0.5941799", "0.5912843", "0.59004474", "0.5892521", "0.5882622", "0.58692586", "0.584908", "0.58294225", "0.5798403", "0.5789837", "0.577...
0.0
-1
Instantiate a new SchemaTypeEnum
def __init__(self, raw_enum: Dict): self.name: str = raw_enum.get("name") self.description: str = raw_enum.get("description") self.is_deprecated: bool = raw_enum.get("isDeprecated") self.deprecation_reason: str = raw_enum.get("deprecationReason")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _CreateEnumSchema(\n self,\n descriptor: EnumDescriptor,\n ) -> None:\n if self.schema_objs is None: # Check required by mypy.\n raise AssertionError(\"OpenAPI type schemas not initialized.\")\n\n enum_schema_obj: EnumSchema = {\n \"type\": \"string\",\n }\n\n if descriptor....
[ "0.6965221", "0.5952822", "0.5935833", "0.59076416", "0.58250195", "0.57742614", "0.5734689", "0.5734217", "0.5627075", "0.5590269", "0.5535121", "0.55136585", "0.54969114", "0.5492475", "0.5481712", "0.5452931", "0.54466367", "0.54267055", "0.5416778", "0.54110134", "0.54047...
0.0
-1
Instantiate a new SchemaType
def __init__(self, raw_type: Dict): self.kind = raw_type.get("kind") self.name = raw_type.get("name") self.description = raw_type.get("description") self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get("fields") or [] if f] self.input_fields = [SchemaTypeInputField(i) for i in raw_type.get("inputFields") or [] if i] self.interfaces = [SchemaTypeInterface(i) for i in raw_type.get("interfaces") or [] if i] self.enum_values = [SchemaTypeEnum(e) for e in raw_type.get("enumValues") or [] if e] self.possible_types = raw_type.get("possibleTypes")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_schema(self, schema: str):\n return", "def __init__(self, py_dict=None):\n super(TypeSchema, self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def _schema_type(self) -> Optional[type]:\n return MovieSchema", "def __init__(self, schema=None)...
[ "0.7132769", "0.679428", "0.6635432", "0.659345", "0.6592825", "0.6465888", "0.6453353", "0.63999885", "0.6380593", "0.63766026", "0.636462", "0.63383156", "0.6335716", "0.63211817", "0.62860376", "0.6270915", "0.625923", "0.6207403", "0.62045443", "0.62017065", "0.6196828", ...
0.63470966
11
Create a new Schema instance. Firstly the schema will be loaded synchronously from the endpoint and stored as raw json for further processing. Then the request types will be parsed. Those are "Query", "Mutation" and "Subscription". After that the schema types and directives are parsed.
def __init__(self, endpoint: str, transporter: Transporter, settings: Settings, cache: Optional[Cache]): self.endpoint = endpoint self.transport = transporter self.settings = settings self.cache = cache if self.cache is not None: schema_introspection = self.cache.retrieve(self.endpoint, SCHEMA_KEY) if schema_introspection is None: schema_introspection = self.introspect_schema(endpoint, transporter) self.cache.store(self.endpoint, SCHEMA_KEY, schema_introspection) else: schema_introspection = self.introspect_schema(endpoint, transporter) # graphql schema properties self.raw_schema = schema_introspection.get(self.settings.default_response_key, {}).get("__schema", {}) self.query_type: str = self.parse_query_type(self.raw_schema) self.mutation_type: str = self.parse_mutation_type(self.raw_schema) self.subscription_type: str = self.parse_subscription_type(self.raw_schema) self.types: Dict[str, SchemaType] = self.parse_types(self.raw_schema.get("types", [])) self.directives: Dict[str, Directive] = self.parse_directives(self.raw_schema.get("directives", [])) # custom schema properties self.queries: Tuple[Operation] = self.parse_operations(self.query_type) self.mutations: Tuple[Operation] = self.parse_operations(self.mutation_type) self.subscriptions: Tuple[Operation] = self.parse_operations(self.subscription_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, raw_type: Dict):\n\n self.kind = raw_type.get(\"kind\")\n self.name = raw_type.get(\"name\")\n self.description = raw_type.get(\"description\")\n self.fields: List[SchemaTypeField] = [SchemaTypeField(f) for f in raw_type.get(\"fields\") or [] if f]\n self.input...
[ "0.62221414", "0.6016679", "0.5968359", "0.5946103", "0.5910788", "0.59025574", "0.5863402", "0.58183753", "0.5814539", "0.57973915", "0.57765687", "0.5719145", "0.57047343", "0.5694497", "0.565712", "0.56565285", "0.56380844", "0.56073123", "0.55723673", "0.5570385", "0.5504...
0.67564994
0
Make a synchronous request to the endpoint and return the response as json.
def introspect_schema(cls, endpoint: str, transport: Transporter) -> Dict: return request_schema(endpoint, transport.session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n re...
[ "0.69455296", "0.69230396", "0.6754933", "0.67506415", "0.673607", "0.66864365", "0.6583713", "0.6581421", "0.653614", "0.6509615", "0.6475695", "0.64537466", "0.643002", "0.6425151", "0.64179856", "0.636375", "0.6342285", "0.63297737", "0.6329244", "0.6312979", "0.63046056",...
0.0
-1
Parse the query type from the root schema. This can either return a string or None. The latter when the endpoint does not support queries.
def parse_query_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "queryType")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def _schema_type(self) -> Optional[type]:\n return SearchMetaSchema", "def _sch...
[ "0.70593387", "0.5939255", "0.5924762", "0.5911261", "0.5840816", "0.57200265", "0.57065624", "0.5640995", "0.5613772", "0.5587588", "0.55517954", "0.54139596", "0.53744495", "0.5370533", "0.53518116", "0.5295712", "0.52832526", "0.5279426", "0.52663785", "0.52663785", "0.526...
0.8196511
0
Parse the mutation type from the root schema. This can either return a string or None. The latter when the endpoint does not support mutations.
def parse_mutation_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "mutationType")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]:\n query_type = raw_schema.get(op_type, {})\n if not query_type:\n return None\n return query_type.get(\"name\")", "def set_mutation_type(self, mut_type=''):\n if mut_type:\n # specified...
[ "0.6133898", "0.5771993", "0.55856854", "0.55424494", "0.55416554", "0.5471137", "0.52651036", "0.52617633", "0.5190931", "0.5140541", "0.50858825", "0.5052965", "0.5041517", "0.5041517", "0.50238866", "0.50197643", "0.49824637", "0.4972244", "0.4961925", "0.49438107", "0.494...
0.80457693
0
Parse the subscription type from the root schema. This can either return a string or None. The latter when the endpoint does not support subscriptions.
def parse_subscription_type(raw_schema: Dict) -> Union[str, None]: return Schema.parse_operation_type(raw_schema, "subscriptionType")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscription_type(self) -> str:\n return pulumi.get(self, \"subscription_type\")", "def typ(self) -> Optional[str]:\n return self.get(\"/Type\")", "def get_type_from_doc(doc):\n try:\n return doc.replace('\\n',' ').split('-> ')[1].split(' ')[0]\n except:\n return None", ...
[ "0.660987", "0.5427683", "0.5366075", "0.5344622", "0.531378", "0.5235341", "0.52084655", "0.514429", "0.514082", "0.5107135", "0.5022559", "0.4995361", "0.4984606", "0.49678668", "0.49628568", "0.49607036", "0.49584213", "0.4956243", "0.49261507", "0.49177787", "0.49068826",...
0.8100664
0
Parse an operation type from the root schema. This can either return a string or None. The latter when the endpoint does not support the passed by operation.
def parse_operation_type(raw_schema: Dict, op_type: str) -> Union[str, None]: query_type = raw_schema.get(op_type, {}) if not query_type: return None return query_type.get("name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")", "def get_operation_type(self, operation_name):\n # type: (Optional[str]) -> Optional[str]\n operations_map = self.operations_map\n if not operation_name and len(operations_map) ==...
[ "0.67735726", "0.6728999", "0.6351823", "0.63367105", "0.59829354", "0.59829354", "0.59829354", "0.59829354", "0.5836189", "0.5831232", "0.5829081", "0.5590442", "0.5587668", "0.55367893", "0.5521317", "0.55096656", "0.5479498", "0.5452105", "0.5383543", "0.5341075", "0.53038...
0.79709095
0
Parse all operations for a given operation type.
def parse_operations(self, operation_type: str) -> Tuple[Operation]: if operation_type is None: return tuple() query_type: SchemaType = self.types.get(operation_type) if query_type is None: return tuple() return tuple([Operation(f, self.settings) for f in query_type.fields])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_operation(self, data, ip):\n json_decoded = json.loads(data)\n op = json_decoded['OPERATION']\n if op in self._callbacks:\n self.logger.info(\"Got Operation: \" + op)\n self._callbacks[op](json_decoded, ip)\n else:\n self.logger.error(\"Unknown...
[ "0.6407225", "0.5742777", "0.56923646", "0.5603622", "0.55325913", "0.54617524", "0.5408668", "0.5370978", "0.5358051", "0.5347028", "0.5345967", "0.53198713", "0.52810514", "0.5279077", "0.5233153", "0.5233153", "0.5233153", "0.5233153", "0.52281266", "0.52163815", "0.520678...
0.7862727
0
Parse all types from the raw schema response.
def parse_types(schema_types: List[Dict]) -> Dict[str, SchemaType]: result = {} for schema_type in schema_types: new_type = SchemaType(schema_type) result[new_type.name] = new_type return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_types(self):\n for root in self.roots:\n for types in root.iter('types'):\n for node in types.iter('type'):\n type_name = GLGenerator.get_name(node)\n text = GLGenerator.get_text(node).strip()\n if '*' in text and ...
[ "0.6339696", "0.5975076", "0.5879143", "0.58355993", "0.57179266", "0.5602357", "0.55450535", "0.54961365", "0.5487783", "0.5469786", "0.54546714", "0.5419225", "0.54117334", "0.5395699", "0.53922814", "0.5358962", "0.53277004", "0.53108835", "0.5305968", "0.52904135", "0.528...
0.5321174
17
Parse a list of arguments into a dictionary where the key is the name of the argument and the argument itself is the value.
def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]': if not args: return {} result = {} for a in args: if not a: continue arg = Argument(a) result[arg.name] = arg return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def arglist2dict(args):\n arg_dict = {}\n\n if len(args) == 0:\n return arg_dict\n\n if not args[0].startswith('--'):\n raise ValueError(f\"Positional keywords are not supported: {args[0]}\")\n\n i = 0\n while i < len(args):\n arg = args[i]\n ...
[ "0.75726336", "0.7397223", "0.7325528", "0.71652824", "0.7037274", "0.6999478", "0.6991102", "0.69558775", "0.69498295", "0.69380295", "0.6924944", "0.69120455", "0.6848294", "0.6756323", "0.6723195", "0.66975105", "0.6656867", "0.6617072", "0.6584447", "0.6567549", "0.653636...
0.85187536
0
Parse a list of directives into a dictionary where the key is the name of the directive and the value is the directive itself.o
def parse_directives(schema_directives: List[Dict]) -> Dict[str, Directive]: result = {} for schema_directive in schema_directives: new_directive = Directive(schema_directive) result[new_directive.name] = new_directive return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def directives():\n cmd = \"{} -L\".format(_detect_os())\n ret = {}\n out = __salt__[\"cmd.run\"](cmd)\n out = out.replace(\"\\n\\t\", \"\\t\")\n for line in out.splitlines():\n if not line:\n continue\n comps = line.split(\"\\t\")\n desc = \"\\n\".join(comps[1:])\n ...
[ "0.6590887", "0.60470355", "0.60470355", "0.58491236", "0.5815651", "0.57697064", "0.57697064", "0.5671237", "0.5338338", "0.52868664", "0.52477413", "0.5196038", "0.51685214", "0.51405686", "0.5023584", "0.49965042", "0.4984144", "0.49639156", "0.4882775", "0.4877682", "0.48...
0.77479357
0
Creates a neural network that takes as input a batch of images (3 dimensional tensors) and outputs a batch of outputs (1 dimensional tensors)
def __init__( self, input_shape: Tuple[int, int, int], encoding_size: int, output_size: int ): super(VisualQNetwork, self).__init__() height = input_shape[0] width = input_shape[1] initial_channels = input_shape[2] conv_1_hw = self.conv_output_shape((height, width), 8, 4) conv_2_hw = self.conv_output_shape(conv_1_hw, 4, 2) self.final_flat = conv_2_hw[0] * conv_2_hw[1] * 32 self.conv1 = torch.nn.Conv2d(initial_channels, 16, [8, 8], [4, 4]) self.conv2 = torch.nn.Conv2d(16, 32, [4, 4], [2, 2]) self.dense1 = torch.nn.Linear(self.final_flat, encoding_size) self.dense2 = torch.nn.Linear(encoding_size, output_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(networ...
[ "0.7127509", "0.6960329", "0.6916975", "0.67752737", "0.6708114", "0.66940296", "0.66714424", "0.6586228", "0.6581007", "0.6574923", "0.6569222", "0.6543639", "0.6534254", "0.65283036", "0.64970976", "0.6490959", "0.6476524", "0.6472872", "0.6470503", "0.64555347", "0.6444669...
0.0
-1
Computes the height and width of the output of a convolution layer.
def conv_output_shape( h_w: Tuple[int, int], kernel_size: int = 1, stride: int = 1, pad: int = 0, dilation: int = 1, ): h = floor( ((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1 ) w = floor( ((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1 ) return h, w
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conv2d_output_shape(height, width, filter_height, filter_width, out_channels, stride):\n return (out_channels, ((height - filter_height) / stride + 1), ((width - filter_width) / stride + 1))", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + ...
[ "0.7580664", "0.72124845", "0.72124845", "0.7204811", "0.72040206", "0.71981996", "0.71090627", "0.7094966", "0.70632535", "0.6922401", "0.6799323", "0.6729911", "0.66794556", "0.6671254", "0.66672844", "0.66361433", "0.66361433", "0.66361433", "0.660082", "0.6562536", "0.653...
0.7131408
6
Given a Unity Environment and a QNetwork, this method will generate a buffer of Experiences obtained by running the Environment with the Policy derived from the QNetwork.
def generate_trajectories( env: BaseEnv, q_net: VisualQNetwork, buffer_size: int, epsilon: float ): # Create an empty Buffer buffer: Buffer = [] # Reset the environment env.reset() # Read and store the Behavior Name of the Environment behavior_name = list(env.behavior_specs)[0] # Read and store the Behavior Specs of the Environment spec = env.behavior_specs[behavior_name] # Create a Mapping from AgentId to Trajectories. This will help us create # trajectories for each Agents dict_trajectories_from_agent: Dict[int, Trajectory] = {} # Create a Mapping from AgentId to the last observation of the Agent dict_last_obs_from_agent: Dict[int, np.ndarray] = {} # Create a Mapping from AgentId to the last observation of the Agent dict_last_action_from_agent: Dict[int, np.ndarray] = {} # Create a Mapping from AgentId to cumulative reward (Only for reporting) dict_cumulative_reward_from_agent: Dict[int, float] = {} # Create a list to store the cumulative rewards obtained so far cumulative_rewards: List[float] = [] while len(buffer) < buffer_size: # While not enough data in the buffer # Get the Decision Steps and Terminal Steps of the Agents decision_steps, terminal_steps = env.get_steps(behavior_name) # For all Agents with a Terminal Step: for agent_id_terminated in terminal_steps: # Create its last experience (is last because the Agent terminated) last_experience = Experience( obs=dict_last_obs_from_agent[agent_id_terminated].copy(), reward=terminal_steps[agent_id_terminated].reward, done=not terminal_steps[agent_id_terminated].interrupted, action=dict_last_action_from_agent[agent_id_terminated].copy(), next_obs=terminal_steps[agent_id_terminated].obs[0], ) # Clear its last observation and action (Since the trajectory is over) dict_last_obs_from_agent.pop(agent_id_terminated) dict_last_action_from_agent.pop(agent_id_terminated) # Report the cumulative reward cumulative_reward = ( dict_cumulative_reward_from_agent.pop(agent_id_terminated) + 
terminal_steps[agent_id_terminated].reward ) cumulative_rewards.append(cumulative_reward) # Add the Trajectory and the last experience to the buffer buffer.extend(dict_trajectories_from_agent.pop(agent_id_terminated)) buffer.append(last_experience) # For all Agents with a Decision Step: for agent_id_decisions in decision_steps: # If the Agent does not have a Trajectory, create an empty one if agent_id_decisions not in dict_trajectories_from_agent: dict_trajectories_from_agent[agent_id_decisions] = [] dict_cumulative_reward_from_agent[agent_id_decisions] = 0 # If the Agent requesting a decision has a "last observation" if agent_id_decisions in dict_last_obs_from_agent: # Create an Experience from the last observation and the Decision Step exp = Experience( obs=dict_last_obs_from_agent[agent_id_decisions].copy(), reward=decision_steps[agent_id_decisions].reward, done=False, action=dict_last_action_from_agent[agent_id_decisions].copy(), next_obs=decision_steps[agent_id_decisions].obs[0], ) # Update the Trajectory of the Agent and its cumulative reward dict_trajectories_from_agent[agent_id_decisions].append(exp) dict_cumulative_reward_from_agent[agent_id_decisions] += ( decision_steps[agent_id_decisions].reward ) # Store the observation as the new "last observation" dict_last_obs_from_agent[agent_id_decisions] = ( decision_steps[agent_id_decisions].obs[0] ) # Generate an action for all the Agents that requested a decision # Compute the values for each action given the observation actions_values = ( q_net(torch.from_numpy(decision_steps.obs[0])).detach().numpy() ) # Pick the best action using argmax print("ACTION VALS", actions_values) actions_values += epsilon * ( np.random.randn(actions_values.shape[0], actions_values.shape[1]) ).astype(np.float32) actions = np.argmax(actions_values, axis=1) actions.resize((len(decision_steps), 1)) # Store the action that was picked, it will be put in the trajectory later for agent_index, agent_id in 
enumerate(decision_steps.agent_id): dict_last_action_from_agent[agent_id] = actions[agent_index] # Set the actions in the environment # Unity Environments expect ActionTuple instances. action_tuple = ActionTuple() action_tuple.add_discrete(actions) env.set_actions(behavior_name, action_tuple) # Perform a step in the simulation env.step() return buffer, np.mean(cumulative_rewards)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_experiences(self):\n for i in range(self.num_frames_per_proc):\n # Do one agent-environment interaction\n\n preprocessed_obs0 = self.preprocess_obss(self.obs0, device=self.device)\n \n preprocessed_obs1 = self.preprocess_obss(self.obs1, device=self.dev...
[ "0.5506977", "0.5496875", "0.514291", "0.5113934", "0.49886903", "0.4943921", "0.49165386", "0.49162146", "0.48796564", "0.48758185", "0.48588508", "0.48554084", "0.4835768", "0.48339865", "0.4816758", "0.48112303", "0.47756678", "0.47557953", "0.47492647", "0.4732167", "0.47...
0.63224953
0
Performs an update of the QNetwork using the provided optimizer and buffer
def update_q_net( q_net: VisualQNetwork, optimizer: torch.optim, buffer: Buffer, action_size: int ): BATCH_SIZE = 1000 NUM_EPOCH = 3 GAMMA = 0.9 batch_size = min(len(buffer), BATCH_SIZE) random.shuffle(buffer) # Split the buffer into batches batches = [ buffer[batch_size * start : batch_size * (start + 1)] for start in range(int(len(buffer) / batch_size)) ] for _ in range(NUM_EPOCH): for batch in batches: # Create the Tensors that will be fed in the network obs = torch.from_numpy(np.stack([ex.obs for ex in batch])) reward = torch.from_numpy( np.array([ex.reward for ex in batch], dtype=np.float32).reshape(-1, 1) ) done = torch.from_numpy( np.array([ex.done for ex in batch], dtype=np.float32).reshape(-1, 1) ) action = torch.from_numpy(np.stack([ex.action for ex in batch])) next_obs = torch.from_numpy(np.stack([ex.next_obs for ex in batch])) # Use the Bellman equation to update the Q-Network target = ( reward + (1.0 - done) * GAMMA * torch.max(q_net(next_obs).detach(), dim=1, keepdim=True).values ) mask = torch.zeros((len(batch), action_size)) mask.scatter_(1, action, 1) prediction = torch.sum(qnet(obs) * mask, dim=1, keepdim=True) criterion = torch.nn.MSELoss() loss = criterion(prediction, target) # Perform the backpropagation optimizer.zero_grad() loss.backward() optimizer.step()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_optimizer(self, context, optimizer, host):\n pass", "def learn(self):\n ## obtain sample batch using priority based sampling.\n states, actions, rewards, next_states, dones, weights, sample_inds = self.buffer.sample_batch(BETA)\n \n ## obtain the discounted sum of re...
[ "0.6433153", "0.5484779", "0.5369425", "0.53328884", "0.52981716", "0.52732706", "0.5266272", "0.521479", "0.5200708", "0.51710343", "0.5088552", "0.50860375", "0.50787526", "0.507192", "0.507192", "0.50715756", "0.505829", "0.5000706", "0.4999721", "0.4979605", "0.496521", ...
0.62525785
1
Search for lback index self._in_loop becomes true in the second state of the loop
def _get_lback_index(self, model, last) -> int: assert last > 0 # last state cannot be loop-back. assert model.get_value(self.totime(self._in_loop, last)).is_true() assert model.get_value(self.totime(self._in_loop, 0)).is_false() idx = last - 1 while model.get_value(self.totime(self._in_loop, idx)).is_true(): idx -= 1 assert idx >= 0 assert model.get_value(self.totime(self._in_loop, idx + 1)).is_true() assert model.get_value(self.totime(self._in_loop, idx)).is_false() assert model.get_value(self.totime(self.start_loop, idx)).is_true() return idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_loop(self):\n tortoise = self.head\n hare = self.head\n while hare:\n tortoise = tortoise.next\n hare = hare.next.next\n if tortoise == hare:\n return True\n return False", "def bookkeep(self) :\n\t\tself.loopiter += 1", "def KeepAdvancingSolutionLoop(self):\n ...
[ "0.6223293", "0.6218441", "0.6189063", "0.6179153", "0.608254", "0.59855604", "0.5839952", "0.58170766", "0.57790136", "0.57635117", "0.5739355", "0.57139474", "0.57138425", "0.5695078", "0.56928277", "0.56292295", "0.5568077", "0.55579966", "0.55414546", "0.55264825", "0.551...
0.684357
0
returns list of active Hints and sequence of `states`. For each state reports location of each active hint and type of the transition to reach the following state
def _model2hint_comp(self, model, first: int, last: int) \ -> Tuple[List[Hint], List[List[Tuple[int, bool, TransType]]], List[Tuple[RankFun, int, int]]]: assert isinstance(first, int) assert isinstance(last, int) assert hasattr(model, "get_value") assert 0 <= first < last assert all(h.ts_lvals is not None for h in self.hints) assert all(h.ts_loc_symbs is not None for h in self.hints) # set of active hints should be constant in the loop. assert all(all(model.get_value(self.totime(is_active, step)).is_true() for step in range(first, last+1)) or all(model.get_value(self.totime(is_active, step)).is_false() for step in range(first, last+1)) for idx, is_active in enumerate(self.hint_active)) # hint_active predicates should be frozen. assert all(self.totime(act, first) == act for act in self.hint_active) # Filter active hints active_hints = [self.hints[idx] for idx, is_active in enumerate(self.hint_active) if model.get_value(is_active).is_true()] # No hints used in the current trace. if len(active_hints) == 0: return [], [], [] locval2idx_lst = [{val: idx for idx, val in enumerate(h.ts_lvals)} for h in active_hints] x_loc_idxs: List[int] = [] for h, locval2idx in zip(active_hints, locval2idx_lst): val = self.i_mgr.And( s if model.get_value(self.totime(s, first)).is_true() else self.i_mgr.Not(s) for s in h.ts_loc_symbs) assert val in locval2idx x_loc_idxs.append(locval2idx[val]) hints_steps = [[] for _ in range(first, last)] hints_rfs = [] last_rf = None last_rf_start_idx = None for curr, step in zip(hints_steps, range(first, last)): # fill curr with info of active_hints loc_idxs = x_loc_idxs x_loc_idxs = [] assert len(active_hints) == len(locval2idx_lst) assert len(active_hints) == len(loc_idxs) for h, locval2idx, loc_idx in zip(active_hints, locval2idx_lst, loc_idxs): # find location of h at next step val = self.i_mgr.And( s if model.get_value(self.totime(s, step + 1)).is_true() else self.i_mgr.Not(s) for s in h.ts_loc_symbs) assert val in locval2idx x_loc_idx = 
locval2idx[val] assert isinstance(x_loc_idx, int) assert 0 <= x_loc_idx < len(h) x_loc_idxs.append(x_loc_idx) trans_type = None is_ranked = False if model.get_value(self.totime(h.t_is_stutter, step)).is_true(): trans_type = TransType.STUTTER if h[loc_idx].rf is not None: rf_pred = self.totime(h[loc_idx].rf.is_ranked, step) is_ranked = model.get_value(rf_pred).is_true() elif model.get_value(self.totime(h.t_is_ranked, step)).is_true(): trans_type = TransType.RANKED is_ranked = True rf = h[loc_idx].rf assert rf is not None if model.get_value(self.totime(self.i_mgr.Not(rf.is_ranked), step + 1)).is_true(): if not last_rf: assert last_rf_start_idx is None last_rf = rf last_rf_start_idx = step - first assert last_rf is not None assert last_rf_start_idx is not None assert 0 <= last_rf_start_idx <= step - first hints_rfs.append((last_rf, last_rf_start_idx, step - first)) last_rf = None last_rf_start_idx = None else: assert last_rf is None or last_rf == rf last_rf = rf last_rf_start_idx = step - first + 1 else: assert model.get_value(self.totime(h.t_is_progress, step)).is_true() trans_type = TransType.PROGRESS curr.append((loc_idx, is_ranked, trans_type)) if __debug__: assert step < last # check model is in the identified restricted region. formula = self.totime(h[loc_idx].region, step) assert model.get_value(formula).is_true() formula = self.totime(h[loc_idx].assume, step) assert model.get_value(formula).is_true() formula = self.totime(h[x_loc_idx].region, step + 1) assert model.get_value(formula).is_true() formula = self.totime(h[x_loc_idx].assume, step + 1) assert model.get_value(formula).is_true() # check that the identified transition holds in model. 
if trans_type == TransType.STUTTER: assert x_loc_idx == loc_idx trans = h[loc_idx].stutterT formula = self.totime(trans, step) assert model.get_value(formula).is_true() if h[loc_idx].rf is not None: rf = h[loc_idx].rf.expr formula = self.i_mgr.Equals(self.totime(rf, step), self.totime(rf, step + 1)) assert model.get_value(formula).is_true() elif trans_type == TransType.RANKED: assert h[loc_idx].rf is not None assert x_loc_idx == loc_idx trans = h[loc_idx].rankT formula = self.totime(trans, step) assert model.get_value(formula).is_true() formula = self.totime(h[loc_idx].rf.progress_pred(), step) assert model.get_value(formula).is_true() else: assert trans_type == TransType.PROGRESS assert x_loc_idx in h[loc_idx].dsts trans = self.totime(h[loc_idx].progress(x_loc_idx), step) assert model.get_value(trans).is_true() if h[x_loc_idx].rf is not None: ranked = self.totime( self.i_mgr.Not(h[loc_idx].rf.is_ranked), step) assert model.get_value(ranked).is_true() # end debug return active_hints, hints_steps, hints_rfs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_states(self):\n return self.get_next_states()", "def get_all_states(self):\n return tuple(self._transition_probs.keys())", "def next_states(self):\n return self._states[1:]", "def get_reward_states(self):\n state1 = State(7, 7)\n return [state1]", "def get_states...
[ "0.6335304", "0.6253233", "0.6187453", "0.60887307", "0.6078322", "0.606271", "0.6043362", "0.60380375", "0.59895456", "0.59725285", "0.5967622", "0.5963157", "0.59513724", "0.59513724", "0.58921933", "0.58813024", "0.5870557", "0.58609515", "0.58528286", "0.58261985", "0.578...
0.0
-1
Build dictionary from predicates to the corresponding truth assignment as prescribed by the selected hints.
def _hint_comp2assume(self, hints: List[Hint], steps: List[List[Tuple[int, bool, TransType]]], first: int) -> Tuple[FrozenSet[FNode], FrozenSet[FNode]]: assert all(isinstance(h, Hint) for h in hints) assert all(isinstance(s, list) for s in steps) assert all(len(s) == len(hints) for s in steps) assert all(isinstance(s, tuple) for step in steps for s in step) assert all(len(s) == 3 for step in steps for s in step) assert all(isinstance(s[0], int) for step in steps for s in step) assert all(isinstance(s[1], bool) for step in steps for s in step) assert all(isinstance(s[2], TransType) for step in steps for s in step) assert isinstance(first, int) assert first >= 0 if len(hints) == 0: return frozenset(), frozenset() def assign_true(pred: FNode, res: Set[FNode]): assert isinstance(pred, FNode) assert isinstance(res, set) preds = [pred] while preds: pred = preds.pop() if pred.is_and(): preds.extend(pred.args()) elif pred.is_not(): assign_false(pred.arg(0), res) elif not pred.is_true(): assert not pred.is_false() res.add(self.cn(pred)) def assign_false(pred: FNode, res: Set[FNode]): assert isinstance(pred, FNode) assert isinstance(res, set) preds = [pred] while preds: pred = preds.pop() if pred.is_or(): preds.extend(pred.args()) elif pred.is_not(): assign_true(pred.arg(0), res) elif not pred.is_false(): assert not pred.is_true() if pred.is_lt() or pred.is_le(): res.add(self.cn(not_rel(self.i_env, pred))) else: res.add(self.cn(self.i_mgr.Not(pred))) res_regions_trans: Set[FNode] = set() res_assumes: Set[FNode] = set() for step_idx, step in enumerate(steps): c_time = step_idx + first x_step_idx = (step_idx + 1) % len(steps) for hint_idx, (hint, (loc_idx, is_ranked, trans_t)) in enumerate( zip(hints, step)): assert isinstance(hint, Hint) assert isinstance(loc_idx, int) assert isinstance(trans_t, TransType) loc = hint[loc_idx] assign_true(self.totime(loc.region, c_time), res_regions_trans) assign_true(self.totime(loc.assume, c_time), res_assumes) if loc.rf is not None: if 
is_ranked: assign_true(self.totime(loc.rf.is_ranked, c_time), res_regions_trans) else: assign_false(self.totime(loc.rf.is_ranked, c_time), res_regions_trans) x_loc_idx = steps[x_step_idx][hint_idx][0] assert isinstance(x_loc_idx, int) if trans_t == TransType.PROGRESS: trans = loc.progress(x_loc_idx) elif trans_t == TransType.STUTTER: trans = loc.stutterT else: assert trans_t == TransType.RANKED trans = loc.rankT assert trans is not None assert isinstance(trans, FNode) assert not trans.is_false() assert trans in self.i_mgr.formulae.values() assign_true(self.totime(trans, c_time), res_regions_trans) assert all(self.cn(p) == p for p in res_regions_trans) assert all(self.cn(p) == p for p in res_assumes) return frozenset(res_regions_trans), frozenset(res_assumes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildPredicateHash(self, subject):\n properties = {}\n for s,p,o in self.store.triples((subject, None, None)):\n oList = properties.get(p, [])\n oList.append(o)\n properties[p] = oList\n return properties", "def predicate_nodes(self) -> Dict[str, Dict[str...
[ "0.62115085", "0.5905862", "0.54716694", "0.5459201", "0.54483634", "0.5425386", "0.53802824", "0.5355178", "0.5344006", "0.53094596", "0.5296737", "0.52827144", "0.52518475", "0.5238643", "0.5196496", "0.5182272", "0.51804197", "0.51510084", "0.5123071", "0.51040995", "0.508...
0.4805963
52
Stores in a random location in the Linked list
def add(self, item): if self.count == 0: random_location = 0 else: random_location = random.randint(0, self.count - 1) self.insert(Node(item), random_location)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_location(self):\n return random.choice(self.locations_list)", "def random_pos(self, ):\n self.pos_item['needle'] = self.shuffle_pos()\n self.pos_item['ether'] = self.shuffle_pos()\n self.pos_item['tube'] = self.shuffle_pos()", "def randVacantPoint(L):\n pliste = vacant...
[ "0.6553104", "0.6297904", "0.6075966", "0.5918239", "0.58364403", "0.5792438", "0.57132536", "0.57105196", "0.56871873", "0.56744456", "0.56678665", "0.56591904", "0.5640225", "0.5628301", "0.5613032", "0.5598745", "0.5593619", "0.5581534", "0.5567704", "0.55652964", "0.55643...
0.7000383
0
saw online, a lineartimegrowth algorithm
def findRelativeRanks(nums):
    """Replace each score in `nums` (in place) with its relative rank.

    The three highest scores become 'Gold Medal', 'Silver Medal' and
    'Bronze Medal'; every other score becomes its 1-based rank as a string.

    Assumes scores are distinct (the LeetCode 506 guarantee) — with
    duplicates the original index()-based code was ill-defined anyway.

    Returns the mutated list.

    Improvement: builds the rank lookup once from a single sort, giving
    O(n log n) instead of the original O(n^2) repeated list.index() scans
    (two linear scans per element).
    """
    # Map each score to its position in descending sort order.
    rank_of = {score: pos for pos, score in enumerate(sorted(nums, reverse=True))}
    medals = ('Gold Medal', 'Silver Medal', 'Bronze Medal')
    for idx, score in enumerate(nums):
        pos = rank_of[score]
        nums[idx] = medals[pos] if pos < 3 else str(pos + 1)
    return nums
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_trajectory():\n pass", "def M_D_1(arrival_time,max_time,service_time=1/90):\n #conversion in seconds\n max_seconds = max_time*60*60\n sim_time = 0.0 # simulation time\n t_1 = 0.0 # time for next event (arrival)\n t_2 = max_seconds # time for next event (departure)\n t_n = 0.0 #la...
[ "0.5885385", "0.5741723", "0.5741651", "0.5738419", "0.5645377", "0.55954784", "0.5511052", "0.5502609", "0.54691374", "0.5444442", "0.5437097", "0.5412522", "0.53915256", "0.53911185", "0.53891474", "0.5327796", "0.5316556", "0.5316556", "0.52812976", "0.5263162", "0.5256186...
0.0
-1
read in raw data from Matlab file
def read_mat(filename, offset, count_read, init_flag):
    """Read raw ECG / pulse-pressure data from a Matlab .mat file.

    The .mat file is expected to contain 'fs' (sample frequency in Hz),
    'ecg' and 'pp' arrays (scipy.io.loadmat keys).

    Args:
        filename: path to the .mat file.
        offset: byte offset into the interleaved stream; converted to a
            per-channel sample index by dividing by sample_size.
        count_read: number of samples to read from each channel.
        init_flag: 1 -> return metadata np.array([file_size, fs]);
            anything else -> return a block of interleaved samples.

    Returns:
        init_flag == 1: np.array([file_size, fs]) where file_size is the
            total byte count assuming 2 bytes per sample.
        otherwise: 1-D array with ecg at even indices and pp at odd
            indices, NaNs replaced by 0.
    """
    # NOTE(review): function-local imports; harmless but unconventional.
    # `os` is imported here but never used in this function.
    import numpy as np
    import os
    import sys
    from scipy.io import loadmat
    import logging
    # 2 channels * 2 bytes per sample — presumably int16 data; TODO confirm
    sample_size = 2*2
    if (init_flag == 1):
        logging.debug('Reading matlab file with init_flag == 1')
        f = loadmat(filename)
        # Read in the frequency in Hz
        fs = np.squeeze(f['fs'])
        ecg = np.squeeze(f['ecg'])
        pp = np.squeeze(f['pp'])
        # Determine the file size in bytes (1 byte = 8 bits)
        # +1 accounts for the fs value itself sharing the stream
        file_size = (1+len(ecg)+len(pp))*2
        data_info = np.array([file_size, fs])
        return(data_info)
    else:
        logging.debug('Reading matlab file with init_flag == 0')
        # Adjust the offset: bytes -> per-channel sample index
        offset = int(offset/sample_size)
        try:
            f = loadmat(filename)
            ecg = np.squeeze(f['ecg'])
            pp = np.squeeze(f['pp'])
            ecg_data = ecg[offset:offset+count_read]
            pp_data = pp[offset:offset+count_read]
            # Interleave the two channels: ecg on even, pp on odd indices
            data = np.zeros(len(ecg_data) + len(pp_data))
            data[0::2] = ecg_data
            data[1::2] = pp_data
            # plt.plot(ecg)
            # plt.show(block=True)
        except EOFError:
            # NOTE(review): loadmat does not normally raise EOFError;
            # confirm this handler is ever reachable.
            logging.error('Reached end of input file, can not read another '
                          'block')
            print('Finished processing all data...')
            print('Heart Rate Monitor Finished')
            sys.exit()
        # If any Nan's are present, convert to 0
        data = np.nan_to_num(data)
        return(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_raw(rawfile, shape, dtype=np.uint16, kind='middleton'):\n\n # -- alert\n print(\"READ_RAW: reading {0}...\".format(rawfile))\n\n\n # -- read file\n if kind=='middleton':\n return np.fromfile(open(rawfile),dtype) \\\n .reshape(shape[2],shape[0],shape[1])[:,:,::-1] \\\n ...
[ "0.72182655", "0.7005095", "0.6830073", "0.67079604", "0.6699259", "0.66830707", "0.66713506", "0.65715826", "0.6527353", "0.6522386", "0.64666754", "0.6460394", "0.6455985", "0.6454096", "0.6370719", "0.6363364", "0.6317326", "0.6306467", "0.6299659", "0.6298647", "0.6292569...
0.61785805
40
Gets a plane from 3 points
def plane_equation(p1, p2, p3):
    """Return coefficients (a, b, c, d) of the plane a*x + b*y + c*z + d = 0
    passing through three points, or None if the points are collinear.
    """
    # Two edge vectors spanning the plane.
    ux, uy, uz = p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]
    vx, vy, vz = p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]
    # Normal vector = u x v (cross product).
    nx = uy * vz - vy * uz
    ny = vx * uz - ux * vz
    nz = ux * vy - uy * vx
    # A near-zero normal means the points are collinear — no unique plane.
    if (abs(nx) < 1e-6) and (abs(ny) < 1e-6) and (abs(nz) < 1e-6):
        return None
    d = -nx * p1[0] - ny * p1[1] - nz * p1[2]
    return nx, ny, nz, d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def project_3d_points_to_plane(points, p1, p2 ,p3, numpoints):\n p1 = np.asarray(p1)\n p2 = np.asarray(p2)\n p3 = np.asarray(p3)\n\n # get vectors in plane\n v1 = p3 - p1\n v2 = p2 - p1\n\n # compute cross product\n cp = np.cross(v1, v2)\n a, b, c = cp # normal to plane is ax + by + cz\n...
[ "0.75500053", "0.75069356", "0.7475429", "0.7409829", "0.7394293", "0.72587293", "0.70594555", "0.6911756", "0.68775445", "0.6856724", "0.68564415", "0.68380785", "0.68015355", "0.68015355", "0.67951053", "0.67129314", "0.6660467", "0.66463685", "0.6608463", "0.6591655", "0.6...
0.7128078
6
Returns distance from the point to the plane
def distance_to_plane(plane, pt):
    """Return the distance from point `pt` to `plane` (a, b, c, d).

    Returns None when plane is None; returns the sentinel 1e10 when the
    plane's normal is (near) zero, i.e. the plane is degenerate.
    """
    if plane is None:
        return None
    a, b, c, d = plane
    norm = math.sqrt(a * a + b * b + c * c)
    # Not the best assumption, but will work for the task.
    if abs(norm) < 1e-10:
        return 1e10
    return abs(a * pt[0] + b * pt[1] + c * pt[2] + d) / norm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distanceTo(self, point):\n return np.linalg.norm([self.x - point.x, self.y - point.y, self.z - point.z])", "def distance_point_plane(point, plane):\n base, normal = plane\n vector = subtract_vectors(point, base)\n return fabs(dot_vectors(vector, normal))", "def plane_distance(p, plane):\n ...
[ "0.8156893", "0.8134189", "0.7893171", "0.7741359", "0.7599741", "0.759446", "0.749491", "0.7362298", "0.73259467", "0.7189307", "0.7166196", "0.71618855", "0.71567243", "0.7114216", "0.7011797", "0.7011797", "0.7011797", "0.7011797", "0.7011797", "0.7011797", "0.7011797", ...
0.70035726
22
Check if more than 50% of the points match the condition.
def points_match(plane, p, points, threshold):
    """Return True when at least `threshold` of the given points lie
    within distance `p` of `plane` (the 50%-or-more condition)."""
    hits = sum(1 for pt in points if distance_to_plane(plane, pt) <= p)
    return hits >= threshold
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss_check(self):\n if sum(x >= y for x, y in zip(self.elbos[-100:], self.elbos[-99:])) > 50 and\\\n self.elbos[-1] - self.elbos[-100] < 1e-3*abs(self.elbos[-100]):\n return True", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n ...
[ "0.64471173", "0.63462853", "0.6216969", "0.62000114", "0.61590135", "0.6073671", "0.60690844", "0.60135484", "0.5999499", "0.59925807", "0.59915054", "0.5986727", "0.59851974", "0.5972579", "0.5971275", "0.5952998", "0.59379154", "0.5935169", "0.59073", "0.58985734", "0.5869...
0.59552014
15
Gets the exact fraction of the points which fall into the "condition" (50% or more)
def points_percentage(plane, p, points, total):
    """Return the fraction of points lying within distance `p` of `plane`,
    relative to `total` (note: `total`, not len(points))."""
    near = sum(1 for pt in points if distance_to_plane(plane, pt) <= p)
    return near / total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bisecter(func, step=0.1):\n points = list(func.points(step))\n area = sum(map(lambda p: p[1], points))\n\n current = 0.\n for x, y in points:\n current += y\n if current >= area / 2:\n return x", "def condition_bounds(self) -> Tuple[float, float]:\n raise NotImplem...
[ "0.6175329", "0.60130394", "0.5999688", "0.5822206", "0.5822206", "0.5821196", "0.5762961", "0.5731761", "0.572735", "0.5670634", "0.5668073", "0.5665829", "0.56631714", "0.56522", "0.5645953", "0.5621847", "0.5578821", "0.55761683", "0.5570446", "0.55677325", "0.55677325", ...
0.0
-1
Running functional tests for multiple parameters/fixtures.
def test_sequence(self, output, input_):
    """Functional test (parameterized): build Genes from the joined input
    lines, run a Sequence over them, and check the surviving population
    against the expected `output`.
    """
    input_ = "\n".join(input_)
    g = Genes(input_)
    s = Sequence(genes=g, ages=g.size)
    s.run()
    # assertEquals is a deprecated alias; assertEqual is the supported name.
    self.assertEqual(s.population.get_survivor(Sequence.IMPOSSIBLE), output)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tests():", "def run_tests(tests):\n return [test(t) for t in tests]", "def runTests(self):\n \n pass", "def pytest_generate_tests(metafunc):\n for param in ['env', 'browser', 'logging_level', 'env_file', 'name', 'jenkins_url', 'slack', 'output', 'email_retries',\n 'email_...
[ "0.6993551", "0.69358134", "0.6821566", "0.68078095", "0.6793133", "0.6716086", "0.66775966", "0.6666555", "0.6655967", "0.66501856", "0.66326547", "0.66166264", "0.66117173", "0.6609923", "0.6597693", "0.65284544", "0.6498083", "0.6488252", "0.64796436", "0.6477362", "0.6452...
0.0
-1
Returns true if c is a printable character. We do this by checking for ord value above 32 (space), as well as CR (\r), LF (\n) and tab (\t)
def is_printable(c):
    """Return True if character `c` is printable.

    Printable means codepoint >= 32 (space and above), plus the three
    whitespace controls we keep: CR (\\r), LF (\\n) and TAB (\\t).
    """
    keep = "\r\n\t"
    return ord(c) >= 32 or c in keep
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_printable(s):\n for c in s:\n if c not in PRINTABLE_CHARACTERS:\n return False\n return True", "def is_printable(b):\n return b in e(string.printable)", "def is_p4d_printable(c):\n if ord(c) < 0x20:\n return False\n if ord(c) == 0x7F:\n return False\n re...
[ "0.77798957", "0.7536381", "0.7419245", "0.7221991", "0.718354", "0.7084998", "0.7084998", "0.70768076", "0.70703983", "0.6941782", "0.6763601", "0.66158307", "0.66158307", "0.6564988", "0.6561958", "0.6391701", "0.6390051", "0.6385484", "0.63393587", "0.63295555", "0.6323699...
0.89795405
0
Filter control characters out of the string buf, given a list of control codes that represent backspaces, and a regex of escape sequences. backspaces are characters emitted when the user hits backspace. This will probably vary from terminal to terminal, and this list should grow as new terminals are encountered. escape_regex is a Regex filter to capture all escape sequences.
def sanitize(buf, backspaces=['\x08\x1b[K', '\x08 \x08'],
             escape_regex=re.compile(r'\x1b(\[|\]|\(|\))[;?0-9]*[0-9A-Za-z](.*\x07)?')):
    """Filter control characters out of the string `buf`.

    `backspaces` is a list of character sequences a terminal emits when
    the user hits backspace (varies per terminal); each occurrence erases
    the character typed immediately before it.  `escape_regex` captures
    ANSI escape sequences to strip.  Any remaining non-printable ASCII
    characters are dropped.

    Returns the cleaned string.  (The default lists are never mutated.)
    """
    # First, handle the backspaces: remove the preceding character (if
    # any) together with the backspace sequence itself.
    for backspace in backspaces:
        while True:
            ind = buf.find(backspace)
            if ind == -1:
                break
            # Bug fix: the original sliced buf[0:ind-1]; for ind == 0
            # that is buf[0:-1], which keeps the backspace sequence and
            # re-grows the string forever.  Clamp the left edge to 0.
            buf = buf[:max(ind - 1, 0)] + buf[ind + len(backspace):]
    # Bug fix: the bare `except: pass` control flow is gone — str.find
    # never raises, so nothing (KeyboardInterrupt included) is swallowed.
    strip_escapes = escape_regex.sub('', buf)
    # strip non-printable ASCII characters
    clean = ''.join([x for x in strip_escapes if is_printable(x)])
    return clean
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def escape(self, text, escape_chars):\n _bs = \"\\\\\"\n # backslash is always escaped\n text = text.replace(_bs, _bs * 2)\n for _el in escape_chars:\n assert _el != _bs, \"Backslash has been already escaped\"\n text = text.replace(_el, _bs + _el)\n return t...
[ "0.49353287", "0.49150157", "0.49105355", "0.46912605", "0.4635989", "0.4623271", "0.4604767", "0.4595183", "0.45459825", "0.4540293", "0.45323464", "0.450759", "0.44997773", "0.44368735", "0.44090384", "0.44047463", "0.43948525", "0.43837532", "0.43768844", "0.43508714", "0....
0.6139948
0
Signal handler that gets installed
def signal_handler(self,sig,data):
    """Signal handler that gets installed.

    Forwards the event to resize_child_window() so the child pty picks
    up the new terminal geometry.  `sig` and `data` are the standard
    handler arguments and are ignored here.
    # NOTE(review): presumably registered for SIGWINCH — confirm at the
    # signal.signal() call site.
    """
    self.resize_child_window()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_signal_handler():\n signal.signal(signal.SIGUSR1, sig_handler)\n signal.signal(signal.SIGTERM, term_handler)\n #logger.warning(\"Signal handler installed.\")", "def signal(self, args):\n pass", "def _signal_handler(*_: typing.Any) -> None:\n shutdown_event.set()", "def _signal_han...
[ "0.81946003", "0.7705526", "0.76633954", "0.75827134", "0.74916065", "0.742279", "0.71166563", "0.71097434", "0.70967144", "0.70619386", "0.70392495", "0.6911852", "0.69012845", "0.6881658", "0.6872002", "0.6856131", "0.6803024", "0.6799461", "0.6750118", "0.6702888", "0.6686...
0.0
-1
Tells the child process to resize its window
def resize_child_window(self):
    """Tell the child process to resize its window.

    Queries the controlling terminal (fd 0) for its current window size
    via TIOCGWINSZ and pushes the same geometry onto the child's pty fd
    via TIOCSWINSZ, so the child sees the new dimensions.
    """
    # struct winsize: four unsigned shorts (rows, cols, xpixel, ypixel),
    # zeroed here only to size the buffer for the ioctl round-trip.
    s = struct.pack('HHHH', 0, 0, 0, 0)
    x = fcntl.ioctl(0,termios.TIOCGWINSZ,s)
    fcntl.ioctl(self.child_fd,termios.TIOCSWINSZ,x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")", "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def resizeEvent(self, event):\n self.resized.emit()\n return super(PiWndow, self).resizeEvent(event)", "def signal_handler(self...
[ "0.6948274", "0.6908833", "0.67389023", "0.6696905", "0.6490529", "0.6470183", "0.6419227", "0.64030665", "0.6388671", "0.63874537", "0.6313409", "0.6285529", "0.6282814", "0.62690467", "0.62603426", "0.62542343", "0.6253225", "0.6249994", "0.62148416", "0.61682737", "0.61676...
0.7933913
0
Launch the appropriate shell as a login shell It will be either bash or tcsh depending on what the user is currently running. It checks the SHELL variable to figure it out.
def run_shell():
    """Launch the appropriate shell as a login shell (replaces this process).

    The shell is determined by get_shell() (from the SHELL variable);
    only bash and tcsh are supported.  Raises ValueError for anything
    else.  On success os.execvp never returns — the current process
    image is replaced by `shell -l`.
    """
    shell = get_shell()
    if shell not in ['bash','tcsh']:
        # Fix: parenthesized raise — same exception and message as the
        # old Python-2-only `raise ValueError, "..."` statement, but
        # valid syntax on Python 3 as well.
        raise ValueError("Unsupported shell (only works with bash and tcsh)")
    os.execvp(shell,(shell,"-l"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loginShell(self, shell=None):\n\n\t\tif shell is None:\n\t\t\traise exceptions.BadArgumentError(\n\t\t\t\t_(u'You must specify a shell'))\n\n\t\tif shell not in LMC.configuration.users.shells:\n\t\t\t\traise exceptions.BadArgumentError(_(u'Invalid shell \"{0}\". '\n\t\t\t\t\t'Valid shells are {1}.').format(sty...
[ "0.6784456", "0.6614977", "0.6381293", "0.6362046", "0.6199895", "0.6197317", "0.6101484", "0.60623527", "0.60271186", "0.59255606", "0.58501774", "0.5833172", "0.5826648", "0.5798579", "0.57554454", "0.56577605", "0.5635898", "0.56230944", "0.55727273", "0.55245143", "0.5498...
0.69821674
0
Retrieve the name of the directory that will store the logfiles. If the SHELLLOGGERDIR environment variable is set, use that. Otherwise, default to ~/.shelllogger
def get_log_dir():
    """Retrieve the name of the directory that will store the logfiles.

    If the SHELLLOGGERDIR environment variable is set, use that.
    Otherwise, default to ~/.shelllogger.
    """
    env_var = "SHELLLOGGERDIR"
    # Fix: dict.has_key() is Python-2-only; the `in` test is equivalent
    # and works on both Python 2 and 3.
    if env_var in os.environ:
        return os.environ[env_var]
    else:
        return os.path.expanduser('~/.shelllogger')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_log_directory(self):\n\n return self.__config_parser__.get('SETTINGS', 'LOGFILE_DIRECTORY')", "def log_dir():\r\n if LogOptions._LOG_DIR is None:\r\n LogOptions._LOG_DIR = app.get_options().twitter_common_log_log_dir\r\n return LogOptions._LOG_DIR", "def get_logging_dir(self):\n ...
[ "0.7954862", "0.7615036", "0.75734735", "0.7549197", "0.7508237", "0.74224484", "0.7221798", "0.71972513", "0.7154093", "0.7087117", "0.706262", "0.6988383", "0.6986975", "0.69751024", "0.68904305", "0.6860191", "0.6823196", "0.67315704", "0.6727358", "0.6695155", "0.6671013"...
0.88646066
0
Call when session is complete. Returns the name of the XML file
def done(self):
    """Call when session is complete.  Returns the name of the XML file.

    Writes the closing CDATA/root tags to the raw logfile and closes it,
    appends the closing tag to the debug file if debug logging was
    enabled, then converts the .raw log to sanitized XML.
    """
    self.logfile.write("]]></result>\n</cli-logger-entry>\n</cli-logger>\n")
    self.logfile.close()
    if self.debugfilename is not None:
        # NOTE(review): the debug file gets its closing tag but is never
        # close()d here — confirm that is intentional.
        self.debugfile.write("</cli-debug>")
    return self.raw_to_xml()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveSessionToXML(self, filename):\r\n xmlStr = self.createXMLStr()\r\n \r\n #Write to the file\r\n #xml.dom.ext.PrettyPrint(doc, open(filename, 'w'))\r\n xmlFile = open(filename, 'w')\r\n xmlFile.write(xmlStr)\r\n xmlFile.close()", "def get_filename(self) -> s...
[ "0.5961969", "0.55481595", "0.5306143", "0.5272178", "0.52593595", "0.52341264", "0.5226595", "0.51102614", "0.5057698", "0.5023233", "0.49440277", "0.4889504", "0.48876208", "0.48515698", "0.48515698", "0.48138982", "0.47858512", "0.47524884", "0.47405386", "0.47252148", "0....
0.49102354
11
Convert the .raw file, with illegal characters and escape keys, to a proper XML version. Returns the name of the XML file
def raw_to_xml(self):
    """Convert the .raw file, with illegal characters and escape keys,
    to a proper XML version.

    Each line is run through sanitize(); the result is written next to
    the raw file with an .xml extension.  Returns the name of the XML file.
    """
    xmlfilename = self.logfilename.replace('.raw','.xml')
    # Fix: context managers guarantee both handles are closed (and the
    # output flushed) even if sanitize() or a write raises — the
    # original leaked the output handle on error and never closed the
    # input reader at all.
    with codecs.open(self.logfilename, encoding="utf-8") as fin, \
         codecs.open(xmlfilename, encoding="utf-8", mode="w") as fout:
        for line in fin:
            fout.write(sanitize(line))
    return xmlfilename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sanitizeXML(filename):\n #we have to remove all illegal characters from crossref xml\n full_path = os.path.abspath(filename)\n path, filename = os.path.split(full_path)\n with open(full_path, 'r') as in_file:\n with open(os.path.join(path,\"tmp\"+filename), 'w') as out_file:\n for...
[ "0.63195187", "0.59571916", "0.590457", "0.5790034", "0.56660265", "0.5458497", "0.53709275", "0.5369985", "0.5312219", "0.5296952", "0.5284404", "0.52644795", "0.52433074", "0.5239629", "0.5220292", "0.5200755", "0.5199187", "0.51793045", "0.5175041", "0.51562613", "0.514544...
0.76060027
0
Record to the debug log
def debug_log(self, buf, shell):
    """Record `buf` to the debug log.

    Shell output is wrapped in a <shell> element, user input in a <user>
    element; both carry an HH:MM:SS time attribute and CDATA-wrap the
    payload.  Output bytes are identical to the old two-branch version.
    """
    # Preserve the legacy `== True` test exactly: only values equal to
    # True take the shell branch; everything else counts as user input.
    tag = "shell" if shell == True else "user"  # noqa: E712
    stamp = datetime.datetime.now().strftime("%H:%M:%S ")
    # Fix: the two branches differed only in the tag name — deduplicated.
    self.debugfile.write("<" + tag + " time=\" " + stamp + "\" >")
    self.debugfile.write("<![CDATA[" + buf + "]]></" + tag + ">\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug(self, tag, message, exc_info=False): \n \n self.log(logging.debug,tag, message, exc_info)", "def debug(self, msg):\r\n self.logger.debug(msg)", "def debug_log(self, msg, *args, **kwargs):\n if self.debug:\n self.log.debug(msg, *args, **kwargs)", "def logdebug(s...
[ "0.77450716", "0.7598348", "0.7553728", "0.7524138", "0.75175226", "0.7485843", "0.7438288", "0.7433718", "0.7425045", "0.7416185", "0.74065304", "0.73936254", "0.7383948", "0.7310336", "0.72510886", "0.7248811", "0.7204248", "0.7197199", "0.71952343", "0.71625316", "0.715215...
0.0
-1
Strip all control characters and nonUTF8 characters from a file. Prints the output to standard out
def sanitize_file(infilename, outfilename):
    """Strip all control characters and non-UTF8 characters from a file.

    Reads `infilename`, runs every line through sanitize(), and writes
    the result to `outfilename`.  (The old docstring claimed output went
    to standard out — it never did.)
    """
    # Fix: context managers close both handles; the original never
    # closed the output file, risking unflushed data.
    with codecs.open(infilename, encoding="utf-8") as fin, \
         codecs.open(outfilename, encoding="utf-8", mode="w") as fout:
        for line in fin:
            fout.write(sanitize(line))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sanitize_characters(raw_input_file, clean_output_file):\n input_file = codecs.open(raw_input_file, 'r', encoding='ascii', errors='ignore')\n output_file = open(clean_output_file, 'w', encoding='ascii', errors='ignore')\n\n for line in input_file:\n # removes extra newline\n line = line.r...
[ "0.73079276", "0.68050337", "0.638918", "0.6264593", "0.6186959", "0.61030376", "0.6031194", "0.59303033", "0.5906998", "0.5861738", "0.58548003", "0.5850855", "0.58266735", "0.58138204", "0.5802653", "0.5782039", "0.57783765", "0.572407", "0.57045716", "0.5688748", "0.568763...
0.6622482
2
get the report template
def _report_template():
    """Load and minify the bundled HTML report template.

    Reads report_template.html from the directory containing this module
    and collapses insignificant whitespace between tags.
    """
    template_path = Path(__file__).parent / "report_template.html"
    with open(template_path, "r") as fh:
        html = fh.read()
    # Collapse whitespace runs, drop newlines, then close inter-tag gaps
    # (order matters: each substitution feeds the next).
    for pattern, replacement in ((r"\s{2,}", " "), (r"\n", ""), (r"> <", "><")):
        html = re.sub(pattern, replacement, html)
    return html
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_template(self):\n try:\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n template_code = Path(f'{template_path}/{self._get_template_filename()}').read_text()\n # substitute template parts\n template_code = self._substitute_template_parts(...
[ "0.76926655", "0.73982304", "0.7120592", "0.70143956", "0.6962399", "0.6962399", "0.6962399", "0.6943154", "0.68456966", "0.6840729", "0.6796202", "0.6777228", "0.67390454", "0.6727346", "0.6701621", "0.6578594", "0.6480725", "0.6409205", "0.6401734", "0.63737816", "0.6356212...
0.74037963
1
Render exception_data as an html report
def render_exception_html(exception_data, report_template=None):
    """Render exception_data as an html report.

    Args:
        exception_data: dict produced by get_exception_data().
        report_template: optional Jinja2 template source string; falls
            back to the bundled report_template.html.

    Returns:
        The rendered HTML string.
    """
    report_template = report_template or _report_template()
    jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), extensions=["jinja2.ext.autoescape"])
    # NOTE(review): this mutates the caller's dict, exposing the builtin
    # repr() to the template — confirm callers don't reuse the dict.
    exception_data["repr"] = repr
    return jinja_env.from_string(report_template).render(exception_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_exception(self, exception_trace=''):\n txt = [80 * '*', '\\n', BANNER, '\\n', 80 * '*', '\\n', '\\n', '\\n']\n txt.extend(exception_trace)\n cherrypy.response.headers['Content-Type'] = 'text/plain'\n return as_bytes(txt)", "def create_exception_report(exc_type, exc_value, ...
[ "0.6812304", "0.6662681", "0.6421623", "0.6406252", "0.6321645", "0.63113713", "0.61840075", "0.6142265", "0.6085713", "0.6048839", "0.59619063", "0.5947996", "0.5916094", "0.5889431", "0.58686614", "0.5866581", "0.5849284", "0.58273417", "0.5797272", "0.57954353", "0.5734677...
0.7821494
0
Render exception_data as a json object
def render_exception_json(exception_data):
    """Render exception_data as a JSON document.

    Values json can't encode natively (datetimes, traceback objects,
    arbitrary objects) are handled by _json_serializer.
    """
    return json.dumps(exception_data, default=_json_serializer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exceptionhandler(e):\n response = e.get_response()\n response.data = json.dumps({\n \"code\" : e.code,\n \"name\": e.name,\n \"description\": e.description\n })\n response.content_type = \"application/json\"\n\n return response", "def jsonif...
[ "0.72541064", "0.67585784", "0.6713111", "0.6690408", "0.66427636", "0.66312206", "0.65327764", "0.65234625", "0.6517418", "0.6510992", "0.64126235", "0.6402634", "0.63840944", "0.63827544", "0.63217765", "0.6246733", "0.62432253", "0.61892617", "0.6140482", "0.6116676", "0.6...
0.9000337
0
JSON serializer for objects not serializable by default json code
def _json_serializer(obj):
    """JSON fallback serializer for objects not serializable by default.

    - datetime / date -> ISO-8601 string with a space separator
    - traceback objects and Jinja2 traceback frame proxies -> a fixed
      placeholder (they are not meaningfully serializable)
    - everything else -> its saferepr() string
    """
    if isinstance(obj, (datetime, date)):
        return obj.isoformat(sep=" ")
    if isinstance(obj, (types.TracebackType, TracebackFrameProxy)):
        return "<Traceback object>"
    return saferepr(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def json_serialize(self):\n raise NotImplementedError('json_serialize must be overriden')", "def jsonify(obj):\n raise NotImplementedError", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def serialize(self, obj):\n return json.dumps(obj)", "def json_fr...
[ "0.8023734", "0.79166824", "0.7667077", "0.7573799", "0.7536935", "0.7529485", "0.74354947", "0.73595595", "0.73340595", "0.73321056", "0.7332048", "0.7322426", "0.7223231", "0.722149", "0.72154963", "0.72111374", "0.72008944", "0.72008944", "0.7184688", "0.7184688", "0.71846...
0.6736821
71
Return a dictionary containing exception information. if exc_type, exc_value, and tb are not provided they will be supplied by sys.exc_info()
def get_exception_data(exc_type=None, exc_value=None, tb=None, get_full_tb=False, max_var_length=4096 + 2048):
    """Return a dictionary containing exception information.

    If exc_type, exc_value and tb are not provided they are taken from
    sys.exc_info().  Frame-local variables are pretty-printed, decoded,
    trimmed to max_var_length and HTML-escaped; a unicode_hint snippet is
    extracted for UnicodeError; interpreter/platform metadata is added.

    Returns a dict with keys: unicode_hint, frames, sys_executable,
    sys_version_info, server_time, sys_path, platform, and (when
    available) exception_type, exception_value, lastframe.
    """
    # Split the budget: half for the head of an oversized value, the
    # remainder for its tail.
    head_var_length = int(max_var_length / 2)
    tail_var_length = max_var_length - head_var_length
    if not tb:
        exc_type, exc_value, tb = sys.exc_info()
    frames = get_traceback_frames(exc_value=exc_value, tb=tb, get_full_tb=get_full_tb)
    for i, frame in enumerate(frames):
        if "vars" in frame:
            frame_vars = []
            for k, v in frame["vars"]:
                try:
                    v = pformat(v)
                except Exception as e:
                    # pformat itself can blow up on exotic objects; fall
                    # back to a safe repr of the failure.
                    try:
                        v = saferepr(e)
                    except Exception:
                        v = "An error occurred rendering the exception of type: " + repr(e.__class__)
                # The force_escape filter assume unicode, make sure that works
                if isinstance(v, bytes):
                    v = v.decode("utf-8", "replace")  # don't choke on non-utf-8 input
                # Trim large blobs of data
                if len(v) > max_var_length:
                    v = f"{v[0:head_var_length]}... \n\n<trimmed {len(v)} bytes string>\n\n ...{v[-tail_var_length:]}"
                frame_vars.append((k, escape(v)))
            frame["vars"] = frame_vars
        frames[i] = frame
    unicode_hint = ""
    if exc_type and issubclass(exc_type, UnicodeError):
        # Show a small window of the offending text around the failure.
        start = getattr(exc_value, "start", None)
        end = getattr(exc_value, "end", None)
        if start is not None and end is not None:
            unicode_str = exc_value.args[1]
            unicode_hint = force_text(unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))], "ascii", errors="replace")
            try:
                unicode_hint.encode("utf8")
            except UnicodeEncodeError:
                unicode_hint = unicode_hint.encode("utf8", "surrogateescape")
    c = {
        "unicode_hint": unicode_hint,
        "frames": frames,
        "sys_executable": sys.executable,
        "sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
        "server_time": datetime.now(timezone.utc),
        "sys_path": sys.path,
        "platform": platform.uname()._asdict(),
    }
    # Check whether exception info is available
    if exc_type:
        c["exception_type"] = exc_type.__name__
    if exc_value:
        c["exception_value"] = force_text(exc_value, errors="replace")
    if frames:
        c["lastframe"] = frames[-1]
    return c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exc_info(self):\n return self._exc_info", "def __exc_info(self):\n exctype, excvalue, tb = sys.exc_info()\n if sys.platform[:4] == 'java': ## tracebacks look different in Jython\n return (exctype, excvalue, tb)\n return (exctype, excvalue, tb)", "def exc_info(self):\n...
[ "0.77512074", "0.7694684", "0.76201236", "0.72803456", "0.70208013", "0.668976", "0.66021264", "0.64593", "0.64557695", "0.6422902", "0.6302152", "0.6271954", "0.61457765", "0.60565907", "0.60373074", "0.60152197", "0.5867774", "0.5857275", "0.58364946", "0.58230966", "0.5801...
0.6381562
10
Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context).
def get_lines_from_file(filename, lineno, context_lines, loader=None, module_name=None):
    """Return context_lines before and after `lineno` from `filename`.

    Tries the loader's get_source() first (when available), then falls
    back to reading the file directly in binary mode.

    Returns a 4-tuple (pre_context_lineno, pre_context, context_line,
    post_context); (None, [], None, []) when no source can be read.
    """
    source = None
    if loader is not None and hasattr(loader, "get_source"):
        with suppress(ImportError):
            source = loader.get_source(module_name)
        if source is not None:
            source = source.splitlines()
    if source is None:
        with suppress(OSError, IOError):
            with open(filename, "rb") as fp:
                source = fp.read().splitlines()
    if source is None:
        return None, [], None, []
    try:
        # If we just read the source from a file, or if the loader did not
        # apply tokenize.detect_encoding to decode the source into a Unicode
        # string, then we should do that ourselves.
        if isinstance(source[0], bytes):
            encoding = "ascii"
            for line in source[:2]:
                # File coding may be specified. Match pattern from PEP-263
                # (http://www.python.org/dev/peps/pep-0263/)
                match = re.search(br"coding[:=]\s*([-\w.]+)", line)
                if match:
                    encoding = match.group(1).decode("ascii")
                    break
            source = [str(sline, encoding, "replace") for sline in source]
        lower_bound = max(0, lineno - context_lines)
        upper_bound = lineno + context_lines
        pre_context = source[lower_bound:lineno]
        context_line = source[lineno]
        post_context = source[lineno + 1 : upper_bound]
        return lower_bound, pre_context, context_line, post_context
    except Exception as e:
        # Rendering the source failed (bad index, decode error, ...);
        # degrade to a placeholder context line rather than propagating.
        try:
            context_line = f'<There was an error displaying the source file: "{repr(e)}" The loaded source has {len(source)} lines.>'
        except Exception:
            context_line = "<There was an error displaying the source file. Further, there was an error displaying that error>"
        return lineno, [], context_line, []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_lines_from_file(filename, lineno, context_lines):\n\n try:\n source = open(filename).readlines()\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = \\\n [line.strip('\\n') for line in source[lower_bound:lineno]]...
[ "0.7550956", "0.66815025", "0.635524", "0.60288763", "0.56448764", "0.5592623", "0.55494016", "0.55332536", "0.5494302", "0.5492887", "0.5485184", "0.5472898", "0.5435706", "0.54235774", "0.54235774", "0.54235774", "0.54235774", "0.54235774", "0.54235774", "0.5400774", "0.539...
0.68244785
1
Create an exception report and return its location
def create_exception_report(exc_type, exc_value, tb, output_format, storage_backend, data_processor=None, get_full_tb=False):
    """Create an exception report and return its storage location.

    Collects exception data, optionally post-processes it with
    `data_processor`, renders it as 'html' or 'json' (any other
    output_format raises TypeError), and writes it to `storage_backend`
    under a generated error filename.
    """
    data = get_exception_data(exc_type, exc_value, tb, get_full_tb=get_full_tb)
    if data_processor:
        data = data_processor(data)
    if output_format == "html":
        rendered = render_exception_html(data)
    elif output_format == "json":
        rendered = render_exception_json(data)
    else:
        raise TypeError("Exception report format not correctly specified")
    report_name = gen_error_filename(extension=output_format)
    return storage_backend.write(report_name, rendered)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_exception(self, msg: str):", "def formatReport(cls, instance, trcback, context=1):\n\n\theader = []\n\theader.append(\"Exception in '{0}'.\".format(getInnerMostFrame(trcback).f_code.co_name))\n\theader.append(\"Exception class: '{0}'.\".format(cls.__name__))\n\theader.append(\"Exception description: '...
[ "0.61463684", "0.5946372", "0.59041274", "0.577588", "0.57041526", "0.569233", "0.5647634", "0.56435865", "0.5620892", "0.5555634", "0.54271317", "0.5426168", "0.5407387", "0.5407387", "0.538084", "0.53600556", "0.53557533", "0.533768", "0.5310631", "0.5296912", "0.5202553", ...
0.6724956
0
Create the root node of the BST.
def __init__(self, name):
    """Create an empty BST.

    The tree starts with no root node; `size` tracks the node count and
    is incremented by insert().
    """
    debug.printMsg("We Initiated a BST with no root node")
    self.name = name  # identifying label for this tree
    self.root = None  # root Node; None while the tree is empty
    self.size = 0  # number of nodes (kept in sync by insert())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_root(self):\n try:\n _check_call(_LIB.TreeliteTreeBuilderSetRootNode(\n self.tree.handle,\n ctypes.c_int(self.node_key)))\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node mu...
[ "0.7032419", "0.6934794", "0.68248075", "0.67966664", "0.6757742", "0.6755082", "0.67382634", "0.67275465", "0.67275465", "0.6668411", "0.6649478", "0.66437757", "0.6606337", "0.6603094", "0.6603094", "0.65813065", "0.65738034", "0.6492516", "0.6490673", "0.6471958", "0.64572...
0.6229294
29
Returns the length of the BST
def length(self):
    """Return the number of nodes in the BST.

    Bug fix: the original `return self.length` returned the bound method
    object itself, never a count.  The node count is maintained in
    `self.size` (set by __init__, incremented by insert, and already
    used by __len__), so return that.
    """
    return self.size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def total_len(BST):\r\n if isinstance(BST,tuple):\r\n return total_len(BST[0]) + total_len(BST[1])\r\n else:\r\n return len(BST)", "def size(self) -> int:\n #binary search tree == empty\n if self.root is None:\n return 0\n\n #recursive helper count nodes\n ...
[ "0.78509456", "0.7732251", "0.76259017", "0.7583946", "0.7449236", "0.744216", "0.7385411", "0.7341175", "0.72633183", "0.7180082", "0.7144463", "0.71312356", "0.71181273", "0.7093196", "0.7090866", "0.7089732", "0.70622516", "0.70397943", "0.701053", "0.7006638", "0.6984938"...
0.0
-1
overload the in operator. credit.org
def __contains__(self,key):
    """Support the `in` operator: True when a recursive lookup from the
    root finds `key` (i.e. returns a truthy result)."""
    return bool(self.recursiveLookup(key,self.root))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def regular(self):", "def CL(self):", "def exo2():", "def operation(self):\n pass", "def idealOpAmp():", "def two(self):", "def __pow__(self,*args):\r\n pass", "def express(self):\n raise NotImplementedError", "def __pow__(self, *args, **kwargs): # real signature unknown\n pas...
[ "0.6408418", "0.6184584", "0.61502206", "0.6102789", "0.60980195", "0.6068953", "0.59730977", "0.5938999", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", "0.5932693", ...
0.0
-1
internal function returns length
def __len__(self):
    """Support len(tree): the node count maintained by insert()."""
    return self.size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__():", "def __len__():", "def __len__():", "def Length(self) -> int:", "def Length(self) -> int:", "def length(self):\n ...", "def size(self) -> int:", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def total_length():\n return...
[ "0.8433408", "0.8433408", "0.8433408", "0.8313579", "0.8313579", "0.8182767", "0.8016835", "0.79593396", "0.793603", "0.789175", "0.7891721", "0.7867253", "0.7867253", "0.7867253", "0.7857477", "0.78541964", "0.78541964", "0.78541964", "0.78541964", "0.78541964", "0.78541964"...
0.0
-1
Allows overriding how we insert things
def __setitem__(self,k,v):
    """Support tree[k] = v as sugar for insert(k, v)."""
    self.insert(k,v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self):\n pass", "def before_insert(self, obj, st):\n pass", "def on_insert(self) -> None:", "def insert(self, data):\r\n pass", "def after_insert(self, obj, st):\n pass", "def insert_data(self):\n\n pass", "def _insert_op(self, op):", "def DocumentElement...
[ "0.7783864", "0.7460584", "0.7289235", "0.71610194", "0.7091829", "0.6844495", "0.6827948", "0.6589303", "0.65811783", "0.6558969", "0.65067995", "0.63486093", "0.6267669", "0.62151104", "0.6206384", "0.6192331", "0.6132293", "0.60734904", "0.604549", "0.60312724", "0.6014594...
0.5370425
100
This function will insert data into the BST using a log_2(n) algorithm
def insert(self, key, data): debug.printMsg('Insert for "' + key + '" With data: ' + str(data) ) # if there is no root node if not self.root: debug.printMsg("No root was found, create one") self.root = Node(key, data) else: debug.printMsg("Root was found, starting recursive insert") self.recursiveInsert(key, data, self.root) # increment the size of the BST debug.printMsg("Incrementing size of BST") self.size = self.size + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bst_insert(root, data):\n if root is None:\n root = Tree(d=data)\n elif data > root.data:\n root.right = bst_insert(root.right, data)\n else:\n root.left = bst_insert(root.left, data)\n return root", "def _recursive_insert(self, data, node):\n\n\t\t#NOTE - this line prevents ...
[ "0.72828513", "0.71956825", "0.71459913", "0.70616907", "0.69816333", "0.69398624", "0.6857214", "0.6762853", "0.6751353", "0.6740436", "0.672814", "0.6700522", "0.6673687", "0.66709936", "0.6667268", "0.6640333", "0.66368526", "0.6587889", "0.65469646", "0.65461093", "0.6486...
0.7049252
4
This is the main algorithm for insert
def recursiveInsert(self, key, data, curr): debug.printMsg("Entered recursiveInsert") # check if the key is greater than current node key # we will go right debug.printMsg("checking whether we go right or left") if key > curr.key: debug.printMsg("we go right") # now check if there is a right node already debug.printMsg("checking if we have available space") if curr.hasRightChild(): debug.printMsg("nope, calling recursiveInsert again") # well, we're shit out of luck and need to go further self.recursiveInsert(key, data, curr.right) else: debug.printMsg("yep, we'll insert it here") # we found an empty spot curr.right = Node(key, data, curr) else: debug.printMsg("we go left") # now check if there is a left node already if curr.hasLeftChild(): debug.printMsg("checking if we have available space") # well, we're shit out of luck and need to go further self.recursiveInsert(key, data, curr.left) else: # we found an empty spot debug.printMsg("yep, we'll insert it here") curr.left = Node(key, data, curr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _insert_op(self, op):", "def insert(self):\n pass", "def insert(self, data):\r\n pass", "def insert(self, i, x) -> None:\n pass", "def testInsert(self):\n\n for i in xrange(randint(50,150)):\n self.s.insert(i, None)", "def insert_values():\n pass", "def test_insert(s...
[ "0.7743201", "0.7556854", "0.72071356", "0.717223", "0.71497864", "0.697614", "0.6849138", "0.67920786", "0.6714414", "0.6667265", "0.6652461", "0.6634596", "0.65935534", "0.6587159", "0.658314", "0.64836735", "0.6482253", "0.64740515", "0.64148915", "0.6397918", "0.6386229",...
0.0
-1
Gets a specific key from the BST
def lookup(self, key): # check that this tree actually has a root node debug.printMsg("Call made to Lookup") debug.printMsg("checking if we have a BST") if self.root: debug.printMsg("Calling Recursive Lookup") (result, err) = self.recursiveLookup(key, self.root) # if we did not find anything if err: debug.printMsg("Oops, we couldn't find anything") return None else: # we found a result debug.printMsg("we found: ") return result else: debug.printMsg("Oops, the BST seems to not exist") # root doesnt exist return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, key):\n if key is None:\n return None # None is not a valid key\n return get_from_subtree(self.root, key)", "def search(self, key):\n x = self.root\n\n while x is not self.nil:\n if key == x.key:\n break\n\n if key < x.key:...
[ "0.75607723", "0.7343995", "0.7311506", "0.7305017", "0.7293168", "0.7194743", "0.7154004", "0.7130973", "0.71049505", "0.7073313", "0.704876", "0.70440394", "0.70387834", "0.7038038", "0.70353025", "0.70056677", "0.69532704", "0.6952328", "0.6924428", "0.69083416", "0.687940...
0.7251455
5
Recusrisvely searched the BST using log_2(n) algorithm to find the key is there is
def recursiveLookup(self, key, curr): # basically repeat insert debug.printMsg("Entered recursiveLookup") # if we found a match break debug.printMsg('Checking base condition: ' + key + ' = ' + curr.key) if key == curr.key: debug.printMsg("Success, found") return (curr, None) # if the key is larger than curr elif key > curr.key: debug.printMsg("Nope, now checking if we should go right") debug.printMsg("yep") debug.printMsg("Check if we still have room to search") if curr.hasRightChild(): debug.printMsg("Moving further right") # move onto the next node along the search path return self.recursiveLookup(key, curr.right) else: debug.printMsg("Nope, ran out of search path. bummer") # hit the end and there was no match return (None, True) else: debug.printMsg("Nope, we're going left") debug.printMsg("Check if we still have room to search") if curr.hasLeftChild(): debug.printMsg("Moving further left") return self.recursiveLookup(key, curr.left) else: debug.printMsg("Shit balls, we ran out of search path") return (None, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_search_tree():\n\n class Node(object):\n def __init__(self, key):\n self.left = None\n self.right = None\n self.key = key\n\n def insert(node, key):\n \"\"\" Insertion method for a binary search tree \"\"\"\n # If the tree is empty, return a ne...
[ "0.7437438", "0.7111154", "0.69640565", "0.69203466", "0.6868692", "0.6824069", "0.6769169", "0.6727306", "0.67060405", "0.667904", "0.6678879", "0.65389585", "0.65307343", "0.6508008", "0.6506147", "0.649953", "0.6497162", "0.64918184", "0.6488997", "0.6484715", "0.6478493",...
0.6536112
12
Publish a registration to the core, listing the API commands.
def register_to_core(self): self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cli(ctx, input, output):\n print(\"Registering...\")\n ctx.obj = dict(\n component=Registration,\n input=input,\n output=output,\n stack=ImageStack.from_path_or_url(input),\n )", "def register_routes(self, api):\n # Device Registration\...
[ "0.6083113", "0.6011665", "0.59751356", "0.5912697", "0.5612715", "0.55548126", "0.5551435", "0.5506128", "0.54874337", "0.53894794", "0.5374884", "0.5348596", "0.5333862", "0.53083724", "0.52938396", "0.52787286", "0.52458644", "0.5237489", "0.5214423", "0.52052796", "0.5195...
0.6798816
0
Subscribe to the queue matching the instance's name. Pass the command to the process_command function.
def subscribe_to_commands(self): self.basic_consume(self.process_command, queue=self.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_command(self, cmd):\n self.command_queue.put(cmd)", "def Enqueue(self, command):\n\n self.queue.put(command)", "def command(self, command_string):\n self.__command_queue.append(command_string)", "def subscribe(self, queue, action):\n self.channel.queue_declare(queue=queue)...
[ "0.63098824", "0.62418693", "0.6216443", "0.6078142", "0.6037645", "0.5994807", "0.59508395", "0.5937547", "0.5917672", "0.5872839", "0.58456135", "0.58215594", "0.5785163", "0.5764144", "0.5762315", "0.5658088", "0.55472076", "0.55459744", "0.5537582", "0.5512194", "0.550374...
0.75445706
0
Call the command(s) that correspond to the message
def process_command(self, ch, method, properties, body): body_json = json.parse(body) for key in body_json: if self.commands.get(key) is not None: self.commands[key](body_json[key])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd(self, message):\n pass", "def execute(self, irc_c, msg, cmd):", "def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the dev...
[ "0.77365535", "0.7425924", "0.70829546", "0.69705456", "0.6875241", "0.6782992", "0.67032224", "0.6695595", "0.6694858", "0.66467553", "0.664355", "0.66403383", "0.6640279", "0.6631685", "0.6593153", "0.65646577", "0.655279", "0.65517825", "0.6549944", "0.6543722", "0.6538904...
0.0
-1
Publishes a created event object to the core.
def publish_event(self, event): self.channel.basic_publish(exchange='', routing_key='peripheral_event', body=json.dumps({self.name: dict(event)}))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_event(self):\n pass", "def signal_creation_event(bb_object):\n global EVENTS\n EVENTS.append( (creation_event, bb_object.data) )", "def publishEvent(eventName,publisher, msg):", "async def createEvent(self, event: Event) -> None:", "def create_event(self, **kwargs):\n eve...
[ "0.71787494", "0.698229", "0.692631", "0.66697675", "0.66172796", "0.6506185", "0.6419534", "0.6369318", "0.6355999", "0.63556665", "0.6323316", "0.63051665", "0.6303744", "0.6295329", "0.6221811", "0.6215113", "0.6196844", "0.61157924", "0.6028132", "0.60246474", "0.6015976"...
0.60949755
18
Run the chat client application loop. When this function exists, the application will stop
def run_chat_client(): while must_run: print_menu() action = select_user_action() perform_user_action(action) print("Thanks for watching. Like and subscribe! 👍")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MainLoop(self):\n self.pleaseQuit=0\n\n self.logger.info(\"Starting main eventloop\")\n try:\n self.irc.process_forever(1)\n except KeyboardInterrupt:\n self.logger.warn(\"Received interrupt, disconnecting from irc\")\n #self.irc.disconnect_all(\"^C ...
[ "0.6931434", "0.688994", "0.68419933", "0.6826242", "0.6802301", "0.67320764", "0.6727489", "0.668575", "0.6628602", "0.6600071", "0.656014", "0.6525707", "0.6512084", "0.6486234", "0.6479219", "0.64297473", "0.6423571", "0.64088786", "0.6408701", "0.6398141", "0.6387875", ...
0.78608006
0
Print the menu showing the available options
def print_menu(): print("==============================================") print("What do you want to do now? ") print("==============================================") print("Available options:") i = 1 for a in available_actions: if current_state in a["valid_states"]: # Only hint about the action if the current state allows it print(" %i) %s" % (i, a["description"])) i += 1 print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _print_menu(self):\n # Create header line.\n header = \"%s Menu:\" % (self.__name)\n header = header.title()\n print(header)\n\n # Show the iterations counter.\n iterations = self._status.get_value(\"iterations\")\n print(\"(Iteration %d)\" % (iterations))\n\n ...
[ "0.83484006", "0.80100524", "0.79167485", "0.79120266", "0.7911782", "0.7821441", "0.7816", "0.7809606", "0.7785762", "0.77756435", "0.77276397", "0.7672742", "0.7655172", "0.75879997", "0.7581685", "0.7477574", "0.74568194", "0.73829365", "0.73222256", "0.7274187", "0.722757...
0.828377
1
Ask the user to choose and action by entering the index of the action
def select_user_action(): number_of_actions = len(available_actions) hint = "Enter the number of your choice (1..%i):" % number_of_actions choice = input(hint) # Try to convert the input to an integer try: choice_int = int(choice) except ValueError: choice_int = -1 if 1 <= choice_int <= number_of_actions: action = choice_int - 1 else: action = None return action
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_action(self):\r\n pass", "def get_action(player):\n print_action(player)\n chosen_action_index = int(input('Please indicate your selection from the following list by inputting the number: '))\n return player.available_actions[chosen_action_index]", "def select_action(self):\n ...
[ "0.77464926", "0.7433682", "0.72635955", "0.7202781", "0.71235913", "0.70796835", "0.70719695", "0.70426613", "0.69958633", "0.69911724", "0.69026655", "0.69006485", "0.68855083", "0.6853275", "0.68034893", "0.67918736", "0.67697215", "0.6714882", "0.6669915", "0.66696125", "...
0.77177477
1
Perform the desired user action
def perform_user_action(action_index): if action_index is not None: print() action = available_actions[action_index] if current_state in action["valid_states"]: function_to_run = available_actions[action_index]["function"] if function_to_run is not None: function_to_run() else: print("Internal error: NOT IMPLEMENTED (no function assigned for the action)!") else: print("This function is not allowed in the current system state (%s)" % current_state) else: print("Invalid input, please choose a valid action") print() return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perform_action(self, action_data):\n pass", "def _do_action(self):\n pass", "def _do_action(self):\n pass", "def take_action(self, *args, **kwargs):\r\n pass", "def do_action_for_input(self, user_input):\n if user_input == CommandLineProgram.ACTION.HELP:\n ...
[ "0.77129984", "0.7649776", "0.7649776", "0.7479478", "0.7274967", "0.71987295", "0.7164105", "0.70513093", "0.70129836", "0.69782907", "0.69473976", "0.6920116", "0.6919698", "0.69076014", "0.6898974", "0.6837431", "0.6837431", "0.6832801", "0.6799796", "0.6762275", "0.673282...
0.7465346
4
Expects a `config` with the settings found in pertestcoverage/configs/config_fixed_by_commit_rawdata.yml Throws errors if something is missing, all the settings are listed at the top of the script.
def run(args=None, config=None): if args: parser = AnalysisParser('config') args = parser.parse_analysis_args(args) config = args.config if not config: raise Exception("Missing `config` dict argument.") numpatches = config['numpatches'] changesets_list = config['changesets'] outputdir = config['outputdir'] analyze_all = config['analyze_all'] if 'analyze_all' in config else False mozcentral_path = config['mozcentral_path'] if 'mozcentral_path' in config else None runname = config['runname'] if 'runname' in config else None include_guaranteed = config['include_guaranteed'] if 'include_guaranteed' in config else False use_active_data = config['use_active_data'] if 'use_active_data' in config else False skip_py = config['skip_py'] if 'skip_py' in config else True suites_to_analyze = config['suites_to_analyze'] platforms_to_analyze = config['platforms_to_analyze'] from_date = config['from_date'] timestr = str(int(time.time())) custom_script = config['custom_scheduling'] custom_classname = config['custom_classname'] custom_class = import_class(custom_script, custom_classname) custom_class_obj = custom_class(config) failed_tests_query_json = { "from":"unittest", "where":{ "and":[ {"eq":{"repo.changeset.id12":None}}, {"eq":{"repo.branch.name":None}}, {"eq":{"task.state":"failed"}}, {"eq":{"result.ok":"false"}}, {"or":[ {"regex":{"job.type.name":".*%s.*" % suite}} for suite in suites_to_analyze ]}, {"or": [ {"regex":{"job.type.name":".*%s.*" % platform}} for platform in platforms_to_analyze ]}, ] }, "limit":100000, "select":[{"name":"test","value":"result.test"}] } log.info("Getting FBC entries...") changesets = get_fixed_by_commit_entries( localdata=not use_active_data, activedata=use_active_data, suites_to_analyze=suites_to_analyze, platforms_to_analyze=platforms_to_analyze, from_date=from_date, local_datasets_list=changesets_list, save_fbc_entries=outputdir ) # For each patch histogram1_datalist = [] tests_for_changeset = {} changesets_counts = {} 
count_changesets_processed = 0 all_changesets = [] for count, tp in enumerate(changesets): if count_changesets_processed >= numpatches: continue if len(tp) == 4: changeset, suite, repo, test_fixed = tp else: continue orig_test_fixed = test_fixed test_fixed = test_fixed.split('ini:')[-1] if 'mochitest' not in suite and 'xpcshell' not in suite: test_fixed = format_testname(test_fixed) changeset = changeset[:12] log.info("") log.info("On changeset " + "(" + str(count) + "): " + changeset) log.info("Running analysis: %s" % str(runname)) log.info("Test name: %s" % test_fixed) # Get patch currhg_analysisbranch = hg_branch(repo) files_url = HG_URL + currhg_analysisbranch + "json-info/" + changeset data = get_http_json(files_url) files_modified = data[changeset]['files'] orig_files_modified = files_modified.copy() # Get tests that use this patch failed_tests_query_json['where']['and'][0] = {"eq": {"repo.changeset.id12": changeset}} failed_tests_query_json['where']['and'][1] = {"eq": {"repo.branch.name": repo}} log.info("Checking for test failures...") all_tests = [] failed_tests = [] try: failed_tests = query_activedata(failed_tests_query_json) except Exception as e: log.info("Error running query: " + str(failed_tests_query_json)) all_failed_tests = [] if 'test' in failed_tests: all_failed_tests = [test for test in failed_tests['test']] if pattern_find(test_fixed, all_failed_tests): log.info("Test was not completely fixed by commit: " + str(test_fixed)) continue log.info("Test was truly fixed. 
Failed tests: " + str(all_failed_tests)) # Perform scheduling all_tests_not_run = [] returned_data = custom_class_obj.analyze_fbc_entry( (changeset, suite, repo, orig_test_fixed), test_fixed ) if 'skip' in returned_data and returned_data['skip']: continue if not returned_data['success']: all_tests_not_run.append(test_fixed) log.info("Number of tests: " + str(len(all_tests))) log.info("Number of failed tests: " + str(len([test_fixed]))) log.info("Number of files: " + str(len(files_modified))) log.info("Number of tests not scheduled by per-test: " + str(len(all_tests_not_run))) log.info("Tests not scheduled: \n" + str(all_tests_not_run)) cset_count = 1 if changeset not in changesets_counts: changesets_counts[changeset] = cset_count else: changesets_counts[changeset] += 1 cset_count = changesets_counts[changeset] changeset_name = changeset + "_" + str(cset_count) tests_for_changeset[changeset_name] = { 'patch-link': HG_URL + currhg_analysisbranch + "rev/" + changeset, 'numfiles': len(files_modified), 'numtests': len(all_tests), 'numtestsfailed': 1, 'numtestsnotrun': len(all_tests_not_run), 'files_modified': files_modified, 'suite': suite, 'runname': runname, 'orig-test-related': orig_test_fixed, 'test-related': test_fixed, 'testsnotrun': all_tests_not_run, } for entry in returned_data: tests_for_changeset[entry] = returned_data[entry] all_changesets.append(changeset) histogram1_datalist.append((1, 1-len(all_tests_not_run), changeset)) count_changesets_processed += 1 numchangesets = len(all_changesets) total_correct = sum([ 1 if not tests_for_changeset[cset + "_" + str(cset_count)]['testsnotrun'] else 0 for cset in all_changesets ]) log.info("Running success rate = {:3.2f}%".format(float((100 * (total_correct/numchangesets))))) log.info("") ## Save results (number, and all tests scheduled) if outputdir: log.info("\nSaving results to output directory: " + outputdir) timestr = str(int(time.time())) save_json(tests_for_changeset, outputdir, timestr + 
'_per_changeset_breakdown.json') f = plt.figure() numchangesets = len(all_changesets) total_correct = sum([ 1 if not tests_for_changeset[cset + "_1"]['testsnotrun'] else 0 for cset in all_changesets ]) total_incorrect = sum([ 1 if tests_for_changeset[cset + "_1"]['testsnotrun'] else 0 for cset in all_changesets ]) b2 = plt.pie( [ 100 * (total_correct/numchangesets), 100 * (total_no_coverage_data/numchangesets) ], colors=['green', 'red'], labels=[ 'Successfully scheduled', 'Not successfully scheduled' ], autopct='%1.1f%%' ) plt.legend() log.info("Completed analysis for run: %s" % str(runname)) log.info("Total number of changesets in pie chart: " + str(numchangesets)) log.info("Close figures to end analysis.") log.info("Changesets analyzed (use these in other analysis types if possible): \n" + str(all_changesets)) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_root_values_on_existing_file(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [\".pre-commit-config.yaml\"]\n fail_fast = true\n blabla = \"what\"\n something = true\n another_thing = \"yep\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos...
[ "0.6037803", "0.60280555", "0.59878725", "0.5981646", "0.5943651", "0.56515247", "0.5617633", "0.5590721", "0.5510498", "0.54906505", "0.54795974", "0.54218674", "0.5404527", "0.5396753", "0.53920645", "0.5391988", "0.53679043", "0.5367287", "0.53581583", "0.53472453", "0.532...
0.5162899
41
Return the contents of the "output" div on the page. The fixtures are configured to update this div when the user interacts with the page.
def output(self): text_list = self.q(css='#output').text if len(text_list) < 1: return None return text_list[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_output(self):\n\n EmptyPromise(self.q(css='div#ready').is_present, \"Click ready\").fulfill()\n self.q(css='div#fixture button').first.click()\n EmptyPromise(self.q(css='div#output').is_present, \"Output available\").fulfill()", "def output(self):\n\t\tif (self.isLoaded()):\n\t\t...
[ "0.65045804", "0.63367486", "0.61806214", "0.6148067", "0.6148067", "0.5885983", "0.5870854", "0.5743912", "0.571378", "0.5711744", "0.56615424", "0.5633449", "0.56062645", "0.5534683", "0.5534136", "0.54999006", "0.5468458", "0.5467302", "0.54517037", "0.5444819", "0.544434"...
0.6102804
5
Click the button on the page, which should cause the JavaScript to update the output div.
def click_button(self): self.q(css='div#fixture input').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def trigger_output(self):\n self.q(css='div#fixture button').first.click()", "def click_button(self):\n self.widgets.get('button').click()", "def display(self):\n\t\tprint('The button in the window was clicked!...
[ "0.7255249", "0.7255249", "0.71749187", "0.6997265", "0.6930125", "0.6644868", "0.65429854", "0.6372312", "0.63093776", "0.6224634", "0.6215617", "0.61377794", "0.61235803", "0.6058383", "0.6054273", "0.60490453", "0.6046816", "0.6021236", "0.60204387", "0.6019323", "0.600221...
0.6700987
5
Input `text` into the text field on the page.
def enter_text(self, text): self.q(css='#fixture input').fill(text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generic_input_text(self, element_id, text):\n self._generic_input(element_id, text)", "def input(self, text):\n self.element.clear()\n self.element.send_keys(text)", "def input_text(self, element: Union[WebElement, Tuple[By, str]], text: str):\n element = self.find_element(eleme...
[ "0.7562028", "0.74759895", "0.74647695", "0.7401078", "0.73765755", "0.7364279", "0.7364279", "0.7364279", "0.7364279", "0.7364279", "0.7364279", "0.7331148", "0.72840446", "0.72499853", "0.72281253", "0.7209747", "0.7139128", "0.7137083", "0.7108322", "0.70740104", "0.703111...
0.78288025
0
Select the car with ``car_value`` in the dropdown list.
def select_car(self, car_value): self.q(css=u'select[name="cars"] option[value="{}"]'.format(car_value)).first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_car_selected(self, car):\n return self.q(css=u'select[name=\"cars\"] option[value=\"{}\"]'.format(car)).selected", "def the_option_with_value(value: str) -> \"SelectByValue\":\n return SelectByValue(value)", "def select_option(self, selector, value):\n from selenium.webdriver.common...
[ "0.6586301", "0.6283673", "0.624815", "0.5832125", "0.5829928", "0.56058097", "0.55932057", "0.5486654", "0.5476527", "0.54241836", "0.54193985", "0.5406573", "0.5401394", "0.52843726", "0.5263898", "0.52331984", "0.51806915", "0.5153212", "0.5153212", "0.5153212", "0.5129445...
0.8951591
0
Return ``True`` if the given ``car`` is selected, ``False`` otherwise.
def is_car_selected(self, car): return self.q(css=u'select[name="cars"] option[value="{}"]'.format(car)).selected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_selected(self) -> bool:\n return self.proto.is_selected", "def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False", "def is_selected(self) -> bool:\r\n retu...
[ "0.6459931", "0.64561963", "0.64533645", "0.5970567", "0.5970567", "0.5941123", "0.59283936", "0.5885889", "0.58118576", "0.5765825", "0.57478505", "0.5745376", "0.55844533", "0.5575426", "0.5548363", "0.5547362", "0.5532758", "0.5526049", "0.55059886", "0.5495182", "0.548899...
0.86627126
0
Toggle the box for the pill with `pill_name` (red or blue).
def toggle_pill(self, pill_name): self.q(css=u"#fixture input#{}".format(pill_name)).first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toggle_color_picker(self, wid, color_name='square_fill_ink'):\n print(\"TOGGLE COLOR PICKER\", getattr(wid, color_name), self.color_picker)\n is_open = self.color_dropdown and self.color_dropdown.attach_to\n if is_open:\n self.color_dropdown.dismiss()\n ...
[ "0.5491586", "0.5272434", "0.51687783", "0.5140479", "0.5102771", "0.50949675", "0.49874344", "0.49179575", "0.48726612", "0.4838384", "0.4801307", "0.47595447", "0.47443792", "0.47273305", "0.4704383", "0.4703406", "0.46984416", "0.4670263", "0.46450222", "0.46340665", "0.46...
0.8160374
0
Click the ``Confirm`` button and confirm the dialog.
def confirm(self): with self.handle_alert(confirm=True): self.q(css='button#confirm').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirm_dialog(self, title, message):\n return self._impl.confirm_dialog(title, message)", "def _tap_on_confirm_button(self, yes=True, msg=\"Confirm dialog button\"):\n btn = self.UTILS.element.getElement(DOM.DownloadManager.download_confirm_yes if\n y...
[ "0.7253925", "0.7246785", "0.72352403", "0.7205", "0.716402", "0.7038067", "0.70259655", "0.700242", "0.6850886", "0.6840978", "0.682001", "0.6809211", "0.6784496", "0.67724127", "0.6758107", "0.6704055", "0.65794057", "0.6546828", "0.65444165", "0.6512818", "0.6483923", "0...
0.85760707
0
Click the ``Confirm`` button and cancel the dialog.
def cancel(self): with self.handle_alert(confirm=False): self.q(css='button#confirm').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()", "def confirm_action(message):\n if not click.confirm(message + \" Continue?\"):\n logger.info(\"User cancels action. Exiting...\")\n exit(0)\n else: return", "def ask_...
[ "0.80490994", "0.7658619", "0.75887465", "0.718055", "0.7045372", "0.6991206", "0.6899526", "0.6881284", "0.6846061", "0.683484", "0.681738", "0.68133414", "0.6789782", "0.6765807", "0.6764634", "0.6748562", "0.6728313", "0.67058414", "0.6674141", "0.66671133", "0.6665826", ...
0.8330856
0
Click the ``Alert`` button and confirm the alert.
def dismiss(self): with self.handle_alert(): self.q(css='button#alert').first.click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_alert_pop_up(self):\n\n # locators\n alert_button = 'alertbtn'\n\n # steps\n locate_alert_button = WebDriverWait(self.driver, 10).until(\n ec.visibility_of_element_located((By.ID, alert_button))\n )\n locate_alert_button.click()\n alert = self.dr...
[ "0.80544215", "0.79212785", "0.6950813", "0.6772512", "0.67127967", "0.6550232", "0.6544324", "0.6449943", "0.6366633", "0.6313416", "0.6296438", "0.62774825", "0.6252475", "0.6232385", "0.61924887", "0.6176421", "0.615149", "0.6099906", "0.60878664", "0.5922482", "0.5895606"...
0.6783705
3
Count the number of div.test elements.
def num_divs(self): return len(self.q(css='div.test').results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_number_of_testcase_elements(self):\n testcases = self.root.findall('testcase')\n self.assertEqual(len(testcases), 4)", "def test_count(self):\n return len(self.tests) + sum(suite.test_count for suite in self.suites)", "def test_element_count(self):\n\t\ts = Student_Analytics()\n\t...
[ "0.72423947", "0.6702707", "0.66566426", "0.6639454", "0.6568075", "0.6378697", "0.6344244", "0.6308958", "0.6286621", "0.62182474", "0.61914384", "0.61866164", "0.617343", "0.6128987", "0.6103108", "0.6089674", "0.60860085", "0.60737354", "0.6061457", "0.604047", "0.6024079"...
0.83748364
0
Return list of text for each div.test element.
def div_text_list(self): return self.q(css='div.test').text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def div_html_list(self):\n return self.q(css='div.test').html", "def texts(self):\n return [elem.text for elem in self.web_elements]", "def _get_text(self, element):\n # for text in element.itertext():\n for text in self.iter_main_text(element):\n yield text.strip()", "...
[ "0.7000479", "0.6322341", "0.61989576", "0.6173835", "0.6019634", "0.59161645", "0.5873848", "0.58302426", "0.57565117", "0.5753218", "0.57449764", "0.57032067", "0.56822264", "0.5670111", "0.5669928", "0.56666976", "0.5641307", "0.5638164", "0.5599844", "0.55976945", "0.5587...
0.8607301
0