query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Load healthchecks from name.
def loadTestsFromName(self, name, module=None): suite = super(HealthCheckLoader, self).loadTestsFromName(name, module) return self.filter_suite(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadTestsFromNames(self, names, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromNames(names,\n module)\n return self.filter_suite(suite)", "def load(name):\n return []", "def deserialize(data):\n health...
[ "0.6285826", "0.5823373", "0.5733951", "0.56802326", "0.56788814", "0.56367904", "0.5605188", "0.5547318", "0.5484213", "0.5442093", "0.5427454", "0.53923076", "0.53107464", "0.5194388", "0.51722986", "0.51498437", "0.5135828", "0.511639", "0.5114361", "0.50697315", "0.504881...
0.65243924
0
Load healthchecks from names.
def loadTestsFromNames(self, names, module=None): suite = super(HealthCheckLoader, self).loadTestsFromNames(names, module) return self.filter_suite(suite)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadTestsFromName(self, name, module=None):\n suite = super(HealthCheckLoader, self).loadTestsFromName(name, module)\n return self.filter_suite(suite)", "def deserialize(data):\n healthchecks = []\n if data is None:\n return []\n for k, v in data.iteritems():\n hc = Healt...
[ "0.6379476", "0.59809893", "0.58267564", "0.5661063", "0.56127495", "0.5603886", "0.5571648", "0.55102366", "0.5353344", "0.5339407", "0.53231657", "0.5272784", "0.5235252", "0.5182474", "0.5097241", "0.5064218", "0.5057405", "0.504696", "0.5042437", "0.5037706", "0.49992388"...
0.7157677
0
Validate the public key if it is related to the given EC curve and formats the public key to a uncompressed byte string. Afterwards the function create a hash value of the uncompressed public key value
def get_public_key_fingerprint(curve: object, temp_public_key: object) \ -> object: vk = VerifyingKey.from_string(bytes.fromhex(temp_public_key), curve=curve) uncompressed_pub_key = vk.to_string('uncompressed') pub_key_hash_fingerprint = hashlib.sha256(uncompressed_pub_key) return pub_key_hash_fingerprint.hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forge_public_key(value) -> bytes:\n prefix = value[:4]\n res = base58.b58decode_check(value)[4:]\n\n if prefix == 'edpk':\n return b'\\x00' + res\n elif prefix == 'sppk':\n return b'\\x01' + res\n elif prefix == 'p2pk':\n return b'\\x02' + res\n\n raise ValueError(f'Unrec...
[ "0.70852506", "0.69108117", "0.6854899", "0.68222594", "0.6751205", "0.65805644", "0.65697986", "0.65697986", "0.64765847", "0.6456603", "0.6373267", "0.6364895", "0.6352023", "0.63497204", "0.6304584", "0.6302101", "0.6292437", "0.620116", "0.6189922", "0.6148473", "0.611294...
0.70131516
1
Raised when the paramter u is given
def format_public_key(unformated_pk): return unformated_pk.replace(':', '')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def h(self, u=None, v=None):\n pass", "def error(self, *args, **kwargs):", "async def handle_user_input_error(self, ctx: Context, e: errors.UserInputError) -> None:\n if isinstance(e, errors.MissingRequiredArgument):\n embed = self._get_error_embed(\"Missing required argument\", e.para...
[ "0.5714732", "0.5587336", "0.5585282", "0.5577008", "0.5479378", "0.54428875", "0.5332927", "0.52931833", "0.5276477", "0.52281755", "0.5210547", "0.5198676", "0.51869273", "0.5171002", "0.51632804", "0.515921", "0.51172", "0.5115", "0.5114484", "0.51063645", "0.5105348", "...
0.0
-1
Create a directory to write output to.
def make_output_dir(experiment_dir, identifier): output_dir = Path(experiment_dir, identifier).resolve() output_dir.mkdir(parents=True, exist_ok=True) return output_dir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_output_dir(self):\n out_dir = os.path.dirname(self._out_format)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n LOG.info('Created output directory: %s', out_dir)", "def create_output_dir(self):\n if self.output_dir is None:\n new_path = datet...
[ "0.8209889", "0.8200543", "0.81390864", "0.80648106", "0.79630965", "0.7929463", "0.78599143", "0.77202404", "0.76051563", "0.7550654", "0.7502131", "0.73690355", "0.73690355", "0.73546755", "0.72924715", "0.7292125", "0.7285806", "0.7255322", "0.7252684", "0.72381806", "0.72...
0.75952923
9
Construct a filename from varying experimental parameters.
def construct_filename(output_dir, file_descriptor, extension, *args, **kwargs): if len(args) == 0 and len(kwargs) == 0: return Path(output_dir, '{}{}'.format(file_descriptor, extension)) elif len(args) == 0: return Path(output_dir, '{}_{}{}'.format('_'.join([f'{k}{v}' for k, v in kwargs.items() if v is not None]), file_descriptor, extension)) elif len(kwargs) == 0: return Path(output_dir, '{}_{}{}'.format('_'.join([ar for ar in args if ar is not None]), file_descriptor, extension)) else: return Path(output_dir, '{}_{}_{}{}'.format('_'.join([ar for ar in args if ar is not None]), '_'.join([f'{k}{v}' for k, v in kwargs.items() if v is not None]), file_descriptor, extension))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_filename(self):\n expansion_string = '_'.join(sorted(args.exp)) if args.exp else 'noexp'\n return 'quad--{}--{}{}{}_{}{}_{}{}_{}{}{}{}{}_{}{}--{:02}_{:02}--{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}_{:02}--{}.log'.format(self.pts_total, hex(self.cnt_T)[-1:], self.cnt_S, self.cnt_U, self....
[ "0.7097925", "0.66764444", "0.6525119", "0.64324546", "0.62918216", "0.6271143", "0.62647027", "0.6250804", "0.6229975", "0.6168056", "0.6167285", "0.61520386", "0.6138422", "0.6122441", "0.61129963", "0.6110849", "0.6091121", "0.60664004", "0.6061048", "0.60583425", "0.59809...
0.59908104
20
Save model hyperparameters/metadata to output directory. model_options is an argparse Namespace, and is converted to a dictionary and pickled.
def save_model_options(output_dir, model_options, predictor='classify'): if not isinstance(model_options.training_data, str): training_data = '.'.join(model_options.training_data) else: training_data = model_options.training_data output_file = construct_filename(output_dir, 'model_options', '.pkl', training_data, predictor, model_options.model, s=model_options.seed) with open(output_file, 'wb') as f: pkl.dump(vars(model_options), f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(output_dir, model, gene, model_options, predictor='classify'):\n\n if not isinstance(model_options.training_data, str):\n training_data = '.'.join(model_options.training_data)\n else:\n training_data = model_options.training_data\n\n output_file = construct_filename(output_dir...
[ "0.7630544", "0.7089318", "0.7027596", "0.6957321", "0.6957321", "0.6957321", "0.6866317", "0.68396884", "0.6824873", "0.6792376", "0.6774526", "0.66822237", "0.6664956", "0.6664538", "0.66604686", "0.6630448", "0.66119593", "0.6600588", "0.6593003", "0.65876275", "0.6582033"...
0.7801818
0
Save serialized (pickled) classifier to output directory.
def save_model(output_dir, model, gene, model_options, predictor='classify'): if not isinstance(model_options.training_data, str): training_data = '.'.join(model_options.training_data) else: training_data = model_options.training_data output_file = construct_filename(output_dir, 'model', '.pkl', gene, training_data, model_options.model, predictor, s=model_options.seed) with open(output_file, 'wb') as f: pkl.dump(model, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump(cls, classifier, filename=None):\n filename = filename or Configuration.get_instance().classifier_file\n with open(filename, 'w') as output_file:\n pickle.dump(classifier, output_file)", "def save(self):\n joblib.dump(\n self.classifier, \"data/models/badlymapp...
[ "0.76594853", "0.74418044", "0.73540056", "0.73168766", "0.7297143", "0.7217382", "0.7135482", "0.71012557", "0.6987071", "0.69320947", "0.69144404", "0.68949693", "0.67491394", "0.67209446", "0.654902", "0.6546712", "0.6524628", "0.65131825", "0.6482549", "0.6466641", "0.645...
0.61762613
54
Save parameter info to output directory.
def save_best_params(output_dir, best_params, gene, model_options, predictor='classify'): if not isinstance(model_options.training_data, str): training_data = '.'.join(model_options.training_data) else: training_data = model_options.training_data output_file = construct_filename(output_dir, 'params', '.pkl', gene, training_data, model_options.model, predictor, s=model_options.seed) with open(output_file, 'wb') as f: pkl.dump(best_params, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _save_params(self, output_folder: str, checkpoint: int):\n arg_params, aux_params = self.module.get_params() # sync aux params across devices\n self.module.set_params(arg_params, aux_params)\n self.params = arg_params\n params_base_fname = C.PARAMS_NAME % checkpoint\n self.s...
[ "0.72130847", "0.6946201", "0.6819849", "0.67463195", "0.66937596", "0.664994", "0.65934867", "0.6579225", "0.64846045", "0.6477415", "0.6457938", "0.64488935", "0.6443824", "0.640003", "0.6393134", "0.63881093", "0.6384607", "0.6339176", "0.6293206", "0.62779415", "0.6267205...
0.5662651
89
Check if results already exist for a given experiment identifier. If the file does not exist, return the filename.
def check_output_file(output_dir, identifier, shuffle_labels, model_options, predictor='classify', fold_no=None, titration_ratio=None): signal = 'shuffled' if shuffle_labels else 'signal' if not isinstance(model_options.training_data, str): training_data = '.'.join(model_options.training_data) else: training_data = model_options.training_data if isinstance(model_options.n_dim, list): n_dim = '.'.join(map(str, model_options.n_dim)) else: n_dim = model_options.n_dim check_file = construct_filename(output_dir, 'coefficients', '.tsv.gz', identifier, training_data, model_options.model, signal, predictor, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) if check_file.is_file(): raise ResultsFileExistsError( 'Results file already exists for identifier: {}\n'.format( identifier) ) return check_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resultExist(probName,algoName,fitName,inst,s,c,n,k,q,w,m,t,e):\n if probName == 'NKQ':\n nameOfF = './result/'+probName+'-'+algoName+'-F'+fitName+'-M'+m+'-I'+str(inst)+'-S'+str(s)+'-W'+str(w)+'-N'+str(n)+'-K'+str(k)+'-C'+str(c)+'-Q'+str(q)+'-T'+str(t)+'-E'+str(e)+'.txt'\n elif probName == 'NK' or ...
[ "0.65933585", "0.65233624", "0.63628626", "0.62309206", "0.6212523", "0.5945689", "0.5904068", "0.58523875", "0.58294135", "0.5818082", "0.5772402", "0.5763797", "0.5763763", "0.5752898", "0.5715975", "0.57131594", "0.57079446", "0.57061094", "0.57015955", "0.569571", "0.5647...
0.5988968
5
Save results of a single experiment for a single identifier.
def save_results(output_dir, check_file, results, exp_string, identifier, shuffle_labels, model_options, predictor='classify', fold_no=None, titration_ratio=None): signal = 'shuffled' if shuffle_labels else 'signal' if not isinstance(model_options.training_data, str): training_data = '.'.join(model_options.training_data) else: training_data = model_options.training_data if isinstance(model_options.n_dim, list): n_dim = '.'.join(map(str, model_options.n_dim)) else: n_dim = model_options.n_dim if predictor == 'classify': auc_df = pd.concat(results[ '{}_auc'.format(exp_string) ]) output_file = construct_filename(output_dir, 'auc_threshold_metrics', '.tsv.gz', identifier, training_data, model_options.model, signal, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) auc_df.to_csv( output_file, sep="\t", index=False, float_format="%.5g" ) aupr_df = pd.concat(results[ '{}_aupr'.format(exp_string) ]) output_file = construct_filename(output_dir, 'aupr_threshold_metrics', '.tsv.gz', identifier, training_data, model_options.model, signal, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) aupr_df.to_csv( output_file, sep="\t", index=False, float_format="%.5g" ) if '{}_coef'.format(exp_string) in results: coef_df = pd.concat(results[ '{}_coef'.format(exp_string) ]) coef_df.to_csv( check_file, sep="\t", index=False, float_format="%.5g" ) metrics_df = pd.concat(results[ '{}_metrics'.format(exp_string) ]) if '{}_preds'.format(exp_string) in results: preds_df = pd.concat(results[ '{}_preds'.format(exp_string) ]) else: preds_df = None if '{}_param_grid'.format(exp_string) in results: params_df = pd.concat(results[ '{}_param_grid'.format(exp_string) ]) else: params_df = None output_file = construct_filename(output_dir, 'metrics', '.tsv.gz', identifier, training_data, model_options.model, signal, predictor, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) metrics_df.to_csv( output_file, sep="\t", index=False, float_format="%.5g" ) if preds_df is not 
None: output_file = construct_filename(output_dir, 'preds', '.tsv.gz', identifier, training_data, model_options.model, signal, predictor, s=model_options.seed, n=n_dim, f=fold_no, t=titration_ratio) preds_df.to_csv( output_file, sep="\t", float_format="%.5g" ) if params_df is not None: output_file = construct_filename(output_dir, 'param_grid', '.tsv.gz', identifier, training_data, model_options.model, signal, predictor, s=model_options.seed, n=n_dim, f=fold_no) params_df.to_csv(output_file, sep="\t")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_result(self, file_id, data):\n filename = path.join(\n self._ext_config['dirresults'],\n \"{0}.{1}\".format(file_id, self.type_file)\n )\n with open(filename, 'w') as file:\n file.write(data)\n logging.info(\"File %s has beed saved!\" % filename...
[ "0.69104457", "0.6841078", "0.66724694", "0.65627193", "0.64784026", "0.64572316", "0.6454891", "0.64234656", "0.6373616", "0.63578653", "0.6343226", "0.6339512", "0.63307554", "0.6286586", "0.6275107", "0.62282044", "0.61683846", "0.616206", "0.61609125", "0.61108345", "0.60...
0.5672618
61
Generate and format log output.
def generate_log_df(log_columns, log_values): return pd.DataFrame(dict(zip(log_columns, log_values)), index=[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory:...
[ "0.7084474", "0.67634064", "0.6613596", "0.64954156", "0.64463615", "0.64409524", "0.6432035", "0.63565314", "0.6338335", "0.6240202", "0.6228313", "0.6208995", "0.616877", "0.6147516", "0.6111839", "0.6076145", "0.6073458", "0.6070906", "0.6070906", "0.60508394", "0.60479283...
0.0
-1
Append log output to log file.
def write_log_file(log_df, log_file): if log_file.is_file(): # if log file already exists append to it, without the column headers log_df.to_csv(log_file, mode='a', sep='\t', index=False, header=False) else: # if log file doesn't exist create it, with column headers log_df.to_csv(log_file, sep='\t', index=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def append_to_logfile(self):\n with open(self.path, \"a+\") as f:\n for item in self.logs:\n f.write(item)\n self.logs.clear()", "def write_log(self, log_output):\r\n with open(self.log_link, \"a\") as log_file:\r\n log_file.writelines(log_output + \"...
[ "0.8269278", "0.77975404", "0.7335851", "0.73168206", "0.72050554", "0.71168935", "0.6989999", "0.696974", "0.6952153", "0.69020504", "0.68942434", "0.6814251", "0.68042284", "0.67517656", "0.6726408", "0.66658777", "0.6634301", "0.6611948", "0.65880245", "0.65626615", "0.651...
0.60229826
53
Create a new credit card instance. The initial balance is zero. customer the name of the customer (e.g., 'John Bowman') bank the name of the bank acnt the account identifier (eg., '5391 0375 9387 5309') limit credit card (measured in dollars)
def __init__(self,customer, bank, acnt,limit): self._customer=customer self._bank=bank self._account=acnt self._limit=limit self._balance=0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(customer, **data):\n if isinstance(customer, resources.Customer):\n customer = customer.id\n\n http_client = HttpClient()\n response, _ = http_client.post(routes.url(routes.CARD_RESOURCE, customer_id=customer), data)\n return resources.Card(**response)", "def __i...
[ "0.74948144", "0.73887783", "0.72569084", "0.7231468", "0.68172675", "0.6624415", "0.65744996", "0.6494921", "0.6418747", "0.6410303", "0.63047266", "0.6296109", "0.6277576", "0.62600785", "0.62094873", "0.6208367", "0.61989254", "0.61363536", "0.61312497", "0.61093074", "0.6...
0.7120257
4
Return name of the customer
def get_customer(self): return self._customer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def customer_name(self):\n return self._customer_name", "def customer(self):\n return self.__customer", "def getCustomer(self):\n return self.base.get(\"customer\", [])", "def __str__(self):\n\n return self.customer.first_name + \" \" + self.payment_name", "def getCustomer(self)...
[ "0.8773322", "0.72585183", "0.72349936", "0.715536", "0.7111342", "0.7096749", "0.684566", "0.68219835", "0.6775382", "0.67625487", "0.67317456", "0.6717967", "0.6713337", "0.6651998", "0.6633621", "0.66315216", "0.66025484", "0.6575484", "0.65680176", "0.6505166", "0.6465378...
0.7327407
2
Return the bank's name
def get_bank(self): return self._bank
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bank_name():\r\n\r\n with open(\"config.json\") as f:\r\n config = json.loads(f.read())\r\n\r\n return config[\"BANK_NAME\"]", "def get_bank_name_by_id(bank_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank where id = '{}...
[ "0.7939867", "0.76192105", "0.76158357", "0.7009449", "0.68444806", "0.6841892", "0.6825423", "0.681393", "0.67982596", "0.67663383", "0.67662084", "0.6721667", "0.65943986", "0.6575735", "0.6546608", "0.6546608", "0.65403897", "0.65403897", "0.65403897", "0.65403897", "0.654...
0.67452586
11
Return the bank's name
def get_account(self): return self._account
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bank_name():\r\n\r\n with open(\"config.json\") as f:\r\n config = json.loads(f.read())\r\n\r\n return config[\"BANK_NAME\"]", "def get_bank_name_by_id(bank_id: int) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank where id = '{}...
[ "0.7939867", "0.76192105", "0.76158357", "0.7009449", "0.68444806", "0.6841892", "0.6825423", "0.681393", "0.67982596", "0.67663383", "0.67662084", "0.67452586", "0.6721667", "0.65943986", "0.6575735", "0.6546608", "0.6546608", "0.65403897", "0.65403897", "0.65403897", "0.654...
0.0
-1
Return current credit limit
def get_limit(self): return self._limit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_plan_limit(self, source):\n commitment = getattr(self.get_subscription(), 'commitment', {})\n return self.get_plan().get_price_data(source, commitment)[1]", "def get_limit(self):\n return self.limit", "def charge_limit(self, limit=None):\n if limit is None:\n done...
[ "0.70615613", "0.6857322", "0.68410796", "0.6831366", "0.6759597", "0.661983", "0.6546747", "0.63908046", "0.63815755", "0.6301675", "0.6296434", "0.6276246", "0.626394", "0.6231881", "0.62091506", "0.62080514", "0.6149896", "0.6149896", "0.6126138", "0.61131227", "0.6094338"...
0.6619479
7
Return the current balancr
def get_balance(self): return self._balance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def BB ( self ) :\n return self.__bb", "def BB ( self ) :\n return self.__bb", "def _branch(self):\n printer = Printer(None)\n ci_manager = CIManager(printer)\n return ci_manager.get_branch()", "def get_bribe(self):\r\n return self.bribe", "def get_bolsa(self):\n ...
[ "0.689818", "0.689818", "0.6664285", "0.6529631", "0.64657265", "0.63538027", "0.63538027", "0.63459975", "0.6342656", "0.6154038", "0.6110734", "0.60647565", "0.6034983", "0.59981585", "0.59864616", "0.5886264", "0.5817673", "0.58071756", "0.57708025", "0.5752032", "0.573413...
0.0
-1
Charge given price to the card, assuming sufficient card limit Return True if charge was processed;False if charge was denied
def charge(self,price): if price + self._balance> self._limit: return False else: self._balance+=price return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError('Price must be numeric')\n if price + self._balance > self._limit: # if charge would exceed limit\n return False # cannot accept charge\n self._balance += price\n...
[ "0.8322736", "0.8296711", "0.7829243", "0.6570418", "0.6408199", "0.612218", "0.61032504", "0.60590565", "0.6037275", "0.60188115", "0.5950383", "0.59281176", "0.59213865", "0.5874455", "0.58660394", "0.5860026", "0.5825693", "0.5783239", "0.57501924", "0.5738126", "0.5724947...
0.8432956
0
Process customer payment that reduces balance
def make_payment(self,amount): self._balance-=amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_payment(self, payment):\n self._balance -= payment", "def make_payment(self, amount):\n if not isinstance(amount, (int, float)):\n raise TypeError('Amount must be numeric')\n self._balance -= amount", "def make_payment(self, amount):\n if not isinstance(amount, (...
[ "0.66964227", "0.6128591", "0.61197674", "0.5983254", "0.5962717", "0.59586847", "0.5945208", "0.59426755", "0.59344083", "0.59324026", "0.5862457", "0.58423245", "0.5832974", "0.58243793", "0.58243793", "0.58215094", "0.582141", "0.5816743", "0.58033234", "0.5802297", "0.577...
0.6793116
0
Generate random bytes to use as csrf secret
def gen_csrf_secret(): return Random.new().read(csrf_secret_len)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_password():\n return urlsafe_b64encode(urandom(32)).decode('utf-8')", "def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\...
[ "0.751621", "0.7423662", "0.7333086", "0.7329716", "0.7236352", "0.7232258", "0.71953297", "0.71489805", "0.7082189", "0.70771956", "0.7053713", "0.70158905", "0.70001644", "0.6955148", "0.69448227", "0.6937748", "0.69271857", "0.692591", "0.69196445", "0.6904558", "0.6870434...
0.87860125
0
Read csrf secret from session if it exists; otherwise generate it and store in session
def get_csrf_secret(): sess = managers.request_manager.get_request().session() secret = sess.get(csrf_secret_sess_var_name, None) if not secret: secret = gen_csrf_secret() sess[csrf_secret_sess_var_name] = secret return secret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def getcsrf(session):\n session.get(\"http://anichart.net\")", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure ra...
[ "0.7415861", "0.74125195", "0.72343606", "0.69048315", "0.686525", "0.6681955", "0.65113086", "0.64043695", "0.6368639", "0.6332918", "0.6305956", "0.625523", "0.6235409", "0.6150814", "0.6095577", "0.60392517", "0.6010801", "0.59956753", "0.59555525", "0.59422135", "0.590692...
0.7971118
0
Generate csrf token based on existing/new csrf secret and provided/new salt
def create_csrf_token(salt=''): if not salt: salt = Random.new().read(csrf_salt_len).encode('hex') h = SHA256.new() h.update(get_csrf_secret() + salt) return h.hexdigest() + salt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_csrf_secret():\n\treturn Random.new().read(csrf_secret_len)", "def generate_csrf_token() -> int:\r\n ...", "def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print...
[ "0.768297", "0.7293141", "0.70243514", "0.67706597", "0.6689844", "0.6654554", "0.6647972", "0.66283864", "0.6550067", "0.6501651", "0.64808244", "0.6338237", "0.6312519", "0.6307513", "0.6307513", "0.6238732", "0.62366146", "0.6235569", "0.6225915", "0.6221798", "0.6175029",...
0.80260617
0
Verify csrf token against csrf secret from the session; if token is not provided it's read from request arguments
def verify_csrf_token(token=''): if not token: token = managers.request_manager.get_request().arguments().arguments().get(csrf_token_arg_name, "") if token: token = token[0] if len(token) != 2 * digest_size + 2 * csrf_salt_len: debug('Incorrect csrf token length') raise VDOM_csrf_exception() salt = token[2*digest_size:] if token != create_csrf_token(salt): debug('Incorrect csrf token value') raise VDOM_csrf_exception()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_t...
[ "0.75995755", "0.73856825", "0.73174137", "0.71832883", "0.7028827", "0.6969976", "0.69217205", "0.68938845", "0.68345594", "0.68069303", "0.66359556", "0.66284615", "0.6612116", "0.644829", "0.63917625", "0.6361629", "0.6360583", "0.63481784", "0.6308884", "0.6281791", "0.62...
0.79013884
0
list starter arguments that must be applied conditionally based on version
def get_version_specific_arguments(self, version: str): result = [] semversion = semver.VersionInfo.parse(version) # Extended database names were introduced in 3.9.0 if self.supports_extended_names: result += ["--args.all.database.extended-names-databases=true"] # Telemetry was introduced in 3.11.0 if (semversion.major == 3 and semversion.minor >= 11) or (semversion.major > 3): result += ["--all.server.telemetrics-api=false"] # Column cache if ( self.cfg.enterprise and semver.compare(version, "3.9.5") >= 0 and semver.compare(version, "3.10.0") != 0 and semver.compare(version, "3.10.1") != 0 ): result += ["--args.all.arangosearch.columns-cache-limit=10000"] return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_prelim_opts_args(application):\n opts, args = application.parse_preliminary_options(\n ['--foo', '--verbose', 'src', 'setup.py', '--statistics', '--version'])\n\n assert opts.verbose\n assert args == ['--foo', 'src', 'setup.py', '--statistics', '--version']", "def full_args():\n retur...
[ "0.6652257", "0.6436489", "0.64220834", "0.624852", "0.601735", "0.59371823", "0.5928165", "0.5906079", "0.59029114", "0.5887085", "0.58200306", "0.58132803", "0.57993174", "0.5784199", "0.57690525", "0.5752837", "0.573312", "0.57010204", "0.56947035", "0.5688802", "0.5674345...
0.74655694
0
serialize the instance info compatible with testing.js
def get_structure(self): instances = [] urls = [] leader_name = "" if self.is_leader: leader_name = self.name for arangod in self.all_instances: struct = arangod.get_structure() struct["JWT_header"] = self.get_jwt_header() urls.append(struct["url"]) instances.append(struct) return { "protocol": self.get_http_protocol(), "options": "", "addArgs": "", "rootDir": str(self.basedir), "leader": leader_name, "agencyConfig": "", "httpAuthOptions": "", "urls": str(urls), "arangods": instances, "JWT_header": self.get_jwt_header(), # 'url': self.url, # 'endpoints': self.endpoints, # 'endpoint': self.endpoint, # 'restKeyFile': self.restKeyFile, # 'tcpdump': self.tcpdump, # 'cleanup': self.cleanup }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self):\n cls = self.__class__\n return {\n \"spawn_prob\": self.spawn_prob,\n \"agent_locs\": self.agent_locs.copy(),\n \"agent_names\": self.agent_names.copy(),\n \"board\": self.board.copy(),\n \"class\": \"%s.%s\" % (cls.__module...
[ "0.7045984", "0.70322937", "0.7007658", "0.6789255", "0.6712205", "0.66500646", "0.66200024", "0.65715665", "0.6565833", "0.6550571", "0.6525462", "0.6522684", "0.6512101", "0.64929324", "0.6474648", "0.6463481", "0.64474213", "0.6426796", "0.64142424", "0.6408753", "0.640858...
0.0
-1
name of this starter
def name(self): return str(self.name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name():\n pass", "def name():\n pass", "def name() -> str:\n pass", "def step_name(self):\n return \"main\"", "def get_name():\n return \"SVM Idea\"", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():...
[ "0.715454", "0.715454", "0.6939584", "0.686997", "0.68043464", "0.6709708", "0.6709708", "0.6709708", "0.6709708", "0.6709708", "0.6709708", "0.6678551", "0.6673218", "0.6657183", "0.6657183", "0.6643718", "0.66373086", "0.66373086", "0.66373086", "0.66373086", "0.66373086", ...
0.0
-1
get the frontend URLs of this starter instance
def get_frontends(self): ret = [] for i in self.all_instances: if i.is_frontend(): ret.append(i) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urls(self):\n return lambda : self.config.urls(active_only=True)", "def getURLs():", "def get_urls():\r\n return []", "def frontend_endpoint_ids(self) -> Sequence[str]:\n return pulumi.get(self, \"frontend_endpoint_ids\")", "def urls(self) -> list[str]:\r\n ...", "def urls...
[ "0.7023718", "0.6961256", "0.6898301", "0.6606221", "0.6593883", "0.65911895", "0.63784796", "0.63564336", "0.6309869", "0.62928236", "0.6277014", "0.6192597", "0.6181306", "0.61599916", "0.6134845", "0.6119912", "0.61194247", "0.6110606", "0.6094108", "0.60856676", "0.605543...
0.6309068
9
get the list of dbservers managed by this starter
def get_dbservers(self): ret = [] for i in self.all_instances: if i.is_dbserver(): ret.append(i) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_servers(self):\n\t\treturn self.__servers", "def get_all_servers(self) -> List[Server]:\n pass", "def servers(self):\n return self._servers", "def get_databases(self):\n pass", "def databases(self):\n return self._databases", "def list_databases():\n config = load_c...
[ "0.7709002", "0.73207456", "0.7205934", "0.72003657", "0.70308435", "0.701856", "0.70004505", "0.69486785", "0.6863328", "0.6820925", "0.6803907", "0.6767398", "0.67585117", "0.67491573", "0.6748919", "0.6736712", "0.67257047", "0.6720556", "0.6672018", "0.6666036", "0.655856...
0.85010105
0
get the list of agents managed by this starter
def get_agents(self): ret = [] for i in self.all_instances: if i.instance_type == InstanceType.AGENT: ret.append(i) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def manager_agents(self):\n return self.get(\"manager_agents\")", "def get_agents(self):\n if self.retrieved:\n raise errors.IllegalState('List has already been retrieved.')\n self.retrieved = True\n return objects.AgentList(self._results, runtime=self._runtime)", "def li...
[ "0.77033234", "0.7416487", "0.7300729", "0.7252941", "0.72281355", "0.7117161", "0.70339304", "0.6690089", "0.666582", "0.66602963", "0.6639476", "0.6470405", "0.6329325", "0.62738615", "0.6230014", "0.61977404", "0.61977404", "0.61977404", "0.61977404", "0.6167947", "0.61341...
0.76709455
1
get the list of arangosync masters managed by this starter
def get_sync_masters(self): ret = [] for i in self.all_instances: if i.instance_type == InstanceType.SYNCMASTER: ret.append(i) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ListMasters(cue='master.cfg', include_public=True, include_internal=True):\n # Look for \"internal\" masters first.\n path_internal = os.path.join(\n BUILD_DIR, os.pardir, 'build_internal', 'masters/*/' + cue)\n path = os.path.join(BUILD_DIR, 'masters/*/' + cue)\n filenames = []\n if include_public:\...
[ "0.7359204", "0.7229313", "0.72076976", "0.67820305", "0.63927853", "0.63927853", "0.6004608", "0.5842549", "0.5835515", "0.5802098", "0.5770662", "0.57044643", "0.56874114", "0.564752", "0.5564717", "0.5553358", "0.55246377", "0.55142653", "0.5474071", "0.52970237", "0.52841...
0.6722324
4
get the first frontendhost of this starter
def get_frontend(self): servers = self.get_frontends() assert servers, "starter: don't have instances!" return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFrontend(self):\n return self.header['FRONTEND']", "def head_host(self) -> str:\n return self.head_args.host if self.head_args else None", "def get_host(self):\r\n return self.host", "def getHost():", "def getHost():", "def get_host(self):\n return self.host", "def ma...
[ "0.6992638", "0.6583565", "0.65169436", "0.6433285", "0.6433285", "0.6352371", "0.63019204", "0.63019204", "0.63019204", "0.62645936", "0.6246942", "0.62244636", "0.61690867", "0.6142307", "0.6142307", "0.61053056", "0.61049336", "0.6103162", "0.60861087", "0.60844123", "0.60...
0.7496468
0
get the first dbserver of this starter
def get_dbserver(self): servers = self.get_dbservers() assert servers, "starter: don't have instances!" return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stored_primary_server_name(db):\n if \"last_primary_server\" in db.collection_names():\n stored_primary_server = db.last_primary_server.find_one()[\"server\"]\n else:\n stored_primary_server = None\n\n return stored_primary_server", "def get_sync_master(self):\n servers = se...
[ "0.7175073", "0.68898433", "0.6632575", "0.6597116", "0.65586126", "0.6534296", "0.6407233", "0.63766783", "0.63378835", "0.6261345", "0.6257781", "0.61878526", "0.6167295", "0.6121482", "0.61021817", "0.61021364", "0.6087449", "0.6070265", "0.6051888", "0.599604", "0.5983254...
0.8454486
0
get the first agent of this starter
def get_agent(self): servers = self.get_agents() assert servers, "starter: have no instances!" return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent(self):\n return self.__agent", "def agent(self) -> Entity:\n return self.__agent", "def getfirstbot(self):\n\n return self.bots[0]", "def get_first(self):\n raise NotImplementedError(\"get_first: You should have implemented this method!\")", "def test_get_agent_name(self):...
[ "0.66546506", "0.6309816", "0.61794835", "0.5938089", "0.5912412", "0.5894221", "0.58619547", "0.58138776", "0.57892704", "0.57603174", "0.5742176", "0.57214665", "0.5696438", "0.568106", "0.5669796", "0.5664195", "0.5583705", "0.55532354", "0.55385774", "0.5535078", "0.55320...
0.76360667
0
get the first arangosync master of this starter
def get_sync_master(self): servers = self.get_sync_masters() assert servers, "starter: don't have instances!" return servers[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def master(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master\")", "def master(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"master\")", "def master(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"master\")", "def getMaster(sel...
[ "0.6829638", "0.6829638", "0.6791485", "0.6693696", "0.6673793", "0.6628041", "0.657018", "0.63935375", "0.62487674", "0.61431384", "0.6016861", "0.5940178", "0.5940178", "0.58904886", "0.5833857", "0.5826365", "0.57743245", "0.5721033", "0.57085377", "0.5699552", "0.56985486...
0.72852707
0
detect whether this manager manages instance
def have_this_instance(self, instance): for i in self.all_instances: if i == instance: print("YES ITS ME!") return True print("NO S.B. ELSE") return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active(self):\n return self in manager.handler", "def private_instance(self) -> bool:\n return pulumi.get(self, \"private_instance\")", "def is_running_manager(self) -> bool:\n return self.get_value(self._manager_running_attribute) == '1'", "def HasPerInstancePropertyProviders(self) ...
[ "0.678732", "0.6508173", "0.6324279", "0.6299621", "0.6208846", "0.62065923", "0.6194654", "0.6182245", "0.6136288", "0.6049219", "0.6000728", "0.5990074", "0.59753895", "0.59658927", "0.59321904", "0.59208906", "0.59013605", "0.5900331", "0.58949554", "0.5874805", "0.5874114...
0.58744586
20
get the essentials of all instances controlled by this starter
def get_instance_essentials(self): ret = [] for instance in self.all_instances: ret.append(instance.get_essentials()) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEssentialList(self):\n return self.essentials", "def show_all_instances(self):\n if not self.all_instances:\n logging.error(\"%s: no instances detected\", self.name)\n return\n instances = \"\"\n for instance in self.all_instances:\n instances +...
[ "0.64952594", "0.61609066", "0.6024103", "0.57624537", "0.5747617", "0.5703971", "0.56971", "0.5676145", "0.56370413", "0.5501353", "0.547217", "0.54002124", "0.53892356", "0.53773415", "0.5376807", "0.53593886", "0.5353485", "0.5344247", "0.5336788", "0.5326446", "0.53020716...
0.8199698
0
print all instances of this starter to the user
def show_all_instances(self): if not self.all_instances: logging.error("%s: no instances detected", self.name) return instances = "" for instance in self.all_instances: instances += " - {0.name} (pid: {0.pid})".format(instance) logging.info("arangod instances for starter: %s - %s", self.name, instances)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_out():\n pass", "def print_results(self):\n pass", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n ...
[ "0.67142516", "0.6560021", "0.636873", "0.63599074", "0.6321213", "0.62887406", "0.6230574", "0.62254745", "0.6211867", "0.621094", "0.6190211", "0.6186281", "0.61838835", "0.61825544", "0.61808974", "0.6177476", "0.6169501", "0.6163866", "0.61381304", "0.61379516", "0.613546...
0.7568982
0
launch the starter for this instance
def run_starter(self, expect_to_fail=False): logging.info("running starter " + self.name) args = [self.cfg.bin_dir / "arangodb"] + self.hotbackup_args + self.default_starter_args + self.arguments lh.log_cmd(args) self.instance = psutil.Popen(args) logging.info("my starter has PID:" + str(self.instance.pid)) if not expect_to_fail: self.wait_for_logfile() self.wait_for_port_bind()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def launch(self):", "def start_sml():\n launchfile = basepath + '/launch/teststarter.launch'\n\n uuid = roslaunch.rlutil.get_or_generate_uuid(None, False)\n #print roslaunch.rlutil.check_roslaunch(launchfile)\n #roslaunch....
[ "0.7489495", "0.7253172", "0.692011", "0.6630553", "0.65272653", "0.6522638", "0.6460976", "0.64253443", "0.6424377", "0.6397502", "0.6396763", "0.6396763", "0.63844043", "0.63730854", "0.6352418", "0.6323603", "0.6323603", "0.6323603", "0.6323603", "0.6323603", "0.6323603", ...
0.6387958
12
somebody else is running the party, but we also want to have a look
def attach_running_starter(self): # pylint disable=broad-except match_str = "--starter.data-dir={0.basedir}".format(self) if self.passvoidfile.exists(): self.passvoid = self.passvoidfile.read_text(errors="backslashreplace", encoding="utf-8") for process in psutil.process_iter(["pid", "name"]): try: name = process.name() if name.startswith("arangodb"): process = psutil.Process(process.pid) if any(match_str in s for s in process.cmdline()): print(process.cmdline()) print("attaching " + str(process.pid)) self.instance = process return except psutil.NoSuchProcess as ex: logging.error(ex) raise Exception("didn't find a starter for " + match_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def party(ctx):\n if ctx.invoked_subcommand is None:\n return await show_party(ctx)", "def like_to_party(msg):\n if message.rate_limit(msg.settings, 'like_to_party'):\n return\n return \"%s, I know for a fact you don't party. You do *not* party.\" \\\n % msg...
[ "0.6360574", "0.60688215", "0.6012925", "0.59863085", "0.59315145", "0.589366", "0.5702806", "0.56775403", "0.56753576", "0.5657366", "0.5642449", "0.5641897", "0.5616109", "0.56132597", "0.56030726", "0.5584753", "0.555659", "0.5550408", "0.55344415", "0.5533722", "0.5528718...
0.0
-1
some scenarios don't want to use the builtin jwt generation from the manager
def set_jwt_file(self, filename): self.jwtfile = filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_jwt_token(self):\n payload = jwt_payload_handler(self)\n token = jwt_encode_handler(payload)\n return token", "def _generate_jwt_token(self):\n import jwt\n from datetime import datetime, timedelta\n from django.conf import settings\n\n dt = datetime...
[ "0.736411", "0.70779586", "0.705047", "0.69427395", "0.6859739", "0.676506", "0.6741483", "0.67354095", "0.66105115", "0.65517974", "0.65479994", "0.6544189", "0.6534447", "0.65264636", "0.6466637", "0.6452001", "0.6433748", "0.6410727", "0.6399224", "0.6341143", "0.62983555"...
0.0
-1
retrieve token from the JWT secret file which is cached for the future use
def get_jwt_token_from_secret_file(self, filename): # pylint: disable=consider-iterating-dictionary if filename in self.jwt_tokens.keys(): # token for that file was checked already. return self.jwt_tokens[filename] cmd = [ self.cfg.bin_dir / "arangodb", "auth", "header", "--auth.jwt-secret", str(filename), ] print(cmd) jwt_proc = psutil.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) logging.info("JWT starter has PID:" + str(jwt_proc.pid)) (header, err) = jwt_proc.communicate() jwt_proc.wait() if len(str(err)) > 3: raise Exception("error invoking the starter " "to generate the jwt header token! " + str(err)) if len(str(header).split(" ")) != 3: raise Exception("failed to parse the output" " of the header command: " + str(header)) self.jwt_tokens[filename] = str(header).split(" ")[2].split("\\")[0] return self.jwt_tokens[filename]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peek_app_token():\n if not os.path.exists(_token_storage_path):\n return None\n\n try:\n with open(_token_storage_path) as secret_file:\n return json.loads(secret_file.read())\n\n except Exception as exc:\n log.error(f'Could not read secret file.\\n{exc}')\n trac...
[ "0.77592856", "0.7595998", "0.75481737", "0.7526818", "0.69495", "0.69054925", "0.6903875", "0.6800849", "0.6775595", "0.6755065", "0.6746886", "0.6740278", "0.67240804", "0.6659499", "0.66390276", "0.6621728", "0.6608063", "0.659404", "0.6589575", "0.65768105", "0.6572895", ...
0.8024519
0
return jwt header from current installation
def get_jwt_header(self): if self.jwt_header: return self.jwt_header self.jwt_header = self.get_jwt_token_from_secret_file(str(self.jwtfile)) return self.jwt_header
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n header = json.load(file)\n return header", "def get_authorization_header(self):\n return {\"Authorization\": \"Bearer {}\".format(self.get_jwt())}", "def get_jwt(self, request):\n auth_header_prefix = self.auth_head...
[ "0.7406444", "0.7215993", "0.7196089", "0.7186894", "0.7012713", "0.6979103", "0.6942568", "0.69397306", "0.69034165", "0.6893624", "0.6858153", "0.68385124", "0.6749719", "0.6743572", "0.6729134", "0.6724721", "0.6700563", "0.6690566", "0.6663717", "0.66617864", "0.663666", ...
0.8041909
0
set the passvoid to the managed instance
def set_passvoid(self, passvoid, write_to_server=True): if write_to_server: print("Provisioning passvoid " + passvoid) self.arangosh.js_set_passvoid("root", passvoid) self.passvoidfile.write_text(passvoid, encoding="utf-8") self.passvoid = passvoid for i in self.all_instances: if i.is_frontend(): i.set_passvoid(passvoid) self.cfg.passvoid = passvoid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def instance(self, instance):\n self._instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, instance):\n self.instance = instance", "def set_instance(self, ins...
[ "0.6201083", "0.614531", "0.614531", "0.614531", "0.614531", "0.614531", "0.6030137", "0.5798542", "0.57683265", "0.5758265", "0.5750186", "0.57295257", "0.567436", "0.56572455", "0.56152976", "0.5603632", "0.5594542", "0.5575087", "0.55718654", "0.55426687", "0.54812384", ...
0.6298621
0
get the passvoid to the managed instance
def get_passvoid(self): return self.passvoid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n pass", "def get(self):\n pass", "def get(self):\n pass", "def get(self):\n pass", "def object(self):", "def retrieve(self):\n pass", "def context(self) -> Any:\n ...", "def context(self) -> CONTEXT:", "def get_transfer(self):\n return se...
[ "0.59444445", "0.59444445", "0.59444445", "0.59444445", "0.58766246", "0.5858545", "0.5811761", "0.5790257", "0.57571363", "0.57146895", "0.5700108", "0.5700108", "0.5633125", "0.5611686", "0.55938387", "0.55938387", "0.5585385", "0.55678827", "0.55678827", "0.55592895", "0.5...
0.6621574
0
send an http request to the instance
def send_request(self, instance_type, verb_method, url, data=None, headers=None, timeout=None): if headers is None: request_headers = {} else: request_headers = dict(headers) http_client.HTTPConnection.debuglevel = 1 results = [] for instance in self.all_instances: if instance.instance_type == instance_type: if instance.detect_gone(): print("Instance to send request to already gone: " + repr(instance)) else: request_headers["Authorization"] = "Bearer " + str(self.get_jwt_header()) base_url = instance.get_public_plain_url() full_url = self.get_http_protocol() + "://" + base_url + url attach_http_request_to_report(verb_method.__name__, full_url, request_headers, data) reply = verb_method( full_url, data=data, headers=request_headers, allow_redirects=False, timeout=timeout, verify=False, ) attach_http_response_to_report(reply) results.append(reply) http_client.HTTPConnection.debuglevel = 0 return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request(self, url, *args, **kwargs):\n raise NotImplementedError", "def send(self, request: Request, **requests_kwargs) -> Response:", "def call(self):\n\n self.url = self._prepare_url()\n status_code, response = self._do_request(self.url)\n return self._process_resp...
[ "0.7022592", "0.69352275", "0.69292426", "0.6814427", "0.6778195", "0.6773092", "0.67646056", "0.672339", "0.6689945", "0.667453", "0.667453", "0.66193897", "0.65896803", "0.6585336", "0.65369105", "0.65295655", "0.6499505", "0.6499505", "0.6468787", "0.64431155", "0.642135",...
0.5976503
75
make all managed instances plus the starter itself crash.
def crash_instances(self): try: if self.instance.status() == psutil.STATUS_RUNNING or self.instance.status() == psutil.STATUS_SLEEPING: print("generating coredump for " + str(self.instance)) gcore = psutil.Popen(["gcore", str(self.instance.pid)], cwd=self.basedir) print("launched GCORE with PID:" + str(gcore.pid)) gcore.wait() self.kill_instance() else: print("NOT generating coredump for " + str(self.instance)) except psutil.NoSuchProcess: logging.info("instance already dead: " + str(self.instance)) for instance in self.all_instances: instance.crash_instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n cause_a_bunch_of_exceptions_to_happen()", "def detect_fatal_errors(self):\n for instance in self.all_instances:\n instance.detect_fatal_errors()", "def test_too_many_cores(self):\n compute1 = self.start_service('compute', host='host1')\n compute2 = self.start_se...
[ "0.6183212", "0.6157061", "0.5949658", "0.5940772", "0.58803564", "0.5872182", "0.58227324", "0.57449096", "0.573384", "0.5643141", "0.5623912", "0.5593848", "0.5592989", "0.559106", "0.55775046", "0.5566891", "0.5562831", "0.555892", "0.55581", "0.5557331", "0.5549696", "0...
0.70966077
0
check whether this is still running
def is_instance_running(self): try: self.instance.wait(timeout=1) except psutil.TimeoutExpired: pass return self.instance.is_running()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_running(self):\n\t\treturn self in _running", "def running(self):\n return not self._kill_event.is_set()", "def is_running(self):\n\t\treturn self._running", "def check_finish(self):\r\n return not self.proc.is_alive()", "def is_running(self) -> bool:\r\n return self.__running",...
[ "0.81097966", "0.79656595", "0.7808091", "0.7757858", "0.77343863", "0.77087873", "0.76777333", "0.76591086", "0.7655416", "0.76325166", "0.76325166", "0.76325166", "0.76198006", "0.76070553", "0.7606519", "0.75803435", "0.75677806", "0.7559066", "0.75572735", "0.7554629", "0...
0.7342232
42
wait for our instance to create a logfile
def wait_for_logfile(self): counter = 0 keep_going = True logging.info("Looking for log file.\n") while keep_going: self.check_that_instance_is_alive() if counter == 20: raise Exception("logfile did not appear: " + str(self.log_file)) counter += 1 logging.info("counter = " + str(counter)) if self.log_file.exists(): logging.info("Found: " + str(self.log_file) + "\n") keep_going = False time.sleep(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_log_file_created(self, mock_parsing_handler, mock_api_handler, mock_progress):\n\n directory = path.join(path_to_module, \"fake_ngs_data\")\n directory_status = DirectoryStatus(directory)\n log_file = path.join(directory, \"irida-uploader.log\")\n # Check that log file does not...
[ "0.6342506", "0.6339533", "0.632533", "0.62805986", "0.61698097", "0.6142417", "0.60563993", "0.5989396", "0.58973587", "0.58555603", "0.5847372", "0.5842015", "0.5833766", "0.58280855", "0.58130676", "0.5812685", "0.5794679", "0.57772213", "0.5774569", "0.57051474", "0.56672...
0.7679139
0
wait for our instance to bind its TCPports
def wait_for_port_bind(self): if self.starter_port is not None: count = 0 while count < 10: for socket in self.instance.connections(): if socket.status == "LISTEN" and socket.laddr.port == self.starter_port: print("socket found!") return count += 1 time.sleep(1) raise Exception(f"starter didn't bind {self.starter_port} on time!") print("dont know port")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_open_ports(self, instance_name=\"\"):\n ports = None\n if instance_name in wellknownports:\n ports = wellknownports[instance_name]\n else:\n elements = self.systemd_name.split(\"@\")\n if elements[0] in wellknownports:\n ports = well...
[ "0.7288536", "0.70136964", "0.699281", "0.669659", "0.663772", "0.66184366", "0.6514401", "0.6453551", "0.6413905", "0.63941497", "0.63894254", "0.6319579", "0.6308585", "0.6289045", "0.6239562", "0.6228363", "0.6223435", "0.62091696", "0.62067133", "0.6188757", "0.6175939", ...
0.8042169
0
in single server mode the 'upgrade' commander exits before the actual upgrade is finished. Hence we need to look into the logfile of the managing starter if it thinks its finished.
def wait_for_upgrade_done_in_log(self, timeout=120): keep_going = True logging.info('Looking for "Upgrading done" in the log file.\n') while keep_going: text = self.get_log_file() pos = text.find("Upgrading done.") keep_going = pos == -1 if keep_going: time.sleep(1) progress(".") timeout -= 1 if timeout <= 0: raise TimeoutError("upgrade of leader follower not found on time") for instance in self.all_instances: instance.wait_for_shutdown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command_upgrade(self):\n args = [\n self.cfg.bin_dir / \"arangodb\",\n \"upgrade\",\n \"--starter.endpoint\",\n self.get_http_protocol() + \"://127.0.0.1:\" + str(self.get_my_port()),\n ]\n logging.info(\"StarterManager: Commanding upgrade:\")\n ...
[ "0.69765425", "0.66025215", "0.64390016", "0.5890137", "0.5890137", "0.5887266", "0.5863103", "0.5858503", "0.57888055", "0.5738239", "0.572192", "0.5695196", "0.5680773", "0.5659436", "0.5635215", "0.5601957", "0.5586011", "0.5567595", "0.5544726", "0.5493787", "0.5493098", ...
0.53627825
28
check whether all spawned arangods are fully bootet
def is_instance_up(self): logging.debug("checking if starter instance booted: " + str(self.basedir)) if not self.instance.is_running(): message = "Starter Instance {0.name} is gone!".format(self) logging.error(message) raise Exception(message) # if the logfile contains up and running we are fine lfs = self.get_log_file() regx = re.compile(r"(\w*) up and running ") for line in lfs.splitlines(): match = regx.search(line) if match: groups = match.groups() if len(groups) == 1 and groups[0] == "agent": continue return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_all_systems_ready(self):\n self.check_joint_states()\n self.check_contact_1()\n self.check_contact_2()\n self.check_collision()\n # self.check_rgb_camera()\n # self.check_rgbd_camera()\n # self.check_gripper_state()\n rospy.logdebug(\"ALL SYSTEMS R...
[ "0.6885012", "0.6798123", "0.6798123", "0.67851114", "0.6722148", "0.6583287", "0.6581712", "0.63905674", "0.62930745", "0.6158512", "0.61108637", "0.6062468", "0.6051119", "0.59692967", "0.59578604", "0.5939921", "0.59331906", "0.5890832", "0.5845868", "0.5824541", "0.580896...
0.55383885
49
terminate the instance of this starter (it should kill all its managed services)
def terminate_instance(self, keep_instances=False): lh.subsubsection("terminating instances for: " + str(self.name)) logging.info( "StarterManager: Terminating starter instance: %s", str(self.default_starter_args + self.arguments) ) logging.info("This should terminate all child processes") self.instance.terminate() logging.info("StarterManager: waiting for process to exit") exit_code = self.instance.wait() self.add_logfile_to_report() # workaround BTS-815: starter exits 15 on the wintendo: if IS_WINDOWS and exit_code == 15: exit_code = 0 if exit_code != 0: raise Exception("Starter %s exited with %d" % (self.basedir, exit_code)) old_log = self.basedir / "arangodb.log.old" logging.info( "StarterManager: done - moving logfile from %s to %s", str(self.log_file), str(old_log), ) if old_log.exists(): old_log.unlink() self.log_file.rename(old_log) for instance in self.all_instances: instance.rename_logfile() if not instance.detect_gone(): print("Manually terminating instance!") instance.terminate_instance(False) if keep_instances: for i in self.all_instances: i.pid = None i.ppid = None return False # Clear instances as they have been stopped and the logfiles # have been moved. ret = False for instance in self.all_instances: print("u" * 80) if instance.search_for_warnings(True): ret = True self.is_leader = False self.all_instances = [] return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill_instance(self):\n logging.info(\"StarterManager: Killing: %s\", str(self.default_starter_args + self.arguments))\n self.instance.kill()\n try:\n logging.info(str(self.instance.wait(timeout=45)))\n self.add_logfile_to_report()\n except Exception as ex:\n ...
[ "0.7309345", "0.71649116", "0.7040128", "0.6952882", "0.6948429", "0.69379115", "0.69061065", "0.68995476", "0.6895416", "0.6874266", "0.6873975", "0.6823828", "0.68103665", "0.6805319", "0.6787299", "0.6752569", "0.6720022", "0.66706365", "0.6654536", "0.6638244", "0.6632896...
0.6931023
6
kill the instance of this starter (it won't kill its managed services)
def kill_instance(self): logging.info("StarterManager: Killing: %s", str(self.default_starter_args + self.arguments)) self.instance.kill() try: logging.info(str(self.instance.wait(timeout=45))) self.add_logfile_to_report() except Exception as ex: raise Exception("Failed to KILL the starter instance? " + repr(self)) from ex logging.info("StarterManager: Instance now dead.") self.instance = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill(self):\n # Prevent a weird behavior: when STOPPED and kill() is called, app crashes (FIXME)\n if self.__state is not ServiceState.STOPPED:\n os.kill(int(self.__properties['MainPID']), signal.SIGKILL)\n # Not nice but simple and currently working (FIXME)\n # TODO: Cha...
[ "0.7303505", "0.7108762", "0.70873225", "0.70490146", "0.69961184", "0.6964912", "0.6952806", "0.69508827", "0.6916327", "0.688939", "0.686323", "0.681936", "0.6806005", "0.6758173", "0.6745181", "0.6741397", "0.6677145", "0.6674688", "0.6668985", "0.6658009", "0.6651729", ...
0.7906417
0
replace the parts of the installation with information after an upgrade kill the starter processes of the old version revalidate that the old arangods are still running and alive replace the starter binary with a new one. this has not yet spawned any children
def replace_binary_for_upgrade(self, new_install_cfg, relaunch=True): # On windows the install prefix may change, # since we can't overwrite open files: old_version = self.cfg.version self.default_starter_args = new_install_cfg.default_starter_args.copy() self.enterprise = new_install_cfg.enterprise self.replace_binary_setup_for_upgrade(new_install_cfg) with step("kill the starter processes of the old version"): logging.info("StarterManager: Killing my instance [%s]", str(self.instance.pid)) self.kill_instance() with step("revalidate that the old arangods are still running and alive"): self.detect_instance_pids_still_alive() if relaunch: with step("replace the starter binary with a new one," + " this has not yet spawned any children"): self.respawn_instance(new_install_cfg.version) logging.info("StarterManager: respawned instance as [%s]", str(self.instance.pid)) self.arangosh = None self.detect_arangosh_instances(new_install_cfg, old_version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # ...
[ "0.63134474", "0.5887858", "0.5887858", "0.58364576", "0.57861626", "0.57441574", "0.57368964", "0.57141185", "0.57095736", "0.56810385", "0.56625885", "0.56388724", "0.5615103", "0.5612521", "0.5526397", "0.55071336", "0.5483531", "0.5466706", "0.54461366", "0.5432789", "0.5...
0.7012954
0
kill specific instances of this starter (it won't kill starter itself)
def kill_specific_instance(self, which_instances): for instance_type in which_instances: for instance in self.all_instances: if instance.instance_type == instance_type: instance.terminate_instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill_instance(self):\n logging.info(\"StarterManager: Killing: %s\", str(self.default_starter_args + self.arguments))\n self.instance.kill()\n try:\n logging.info(str(self.instance.wait(timeout=45)))\n self.add_logfile_to_report()\n except Exception as ex:\n ...
[ "0.70153147", "0.68872017", "0.68046296", "0.6784485", "0.6766652", "0.6715903", "0.6670214", "0.660583", "0.660212", "0.6543471", "0.65000117", "0.65000117", "0.64805627", "0.6475557", "0.6455918", "0.6429913", "0.6328956", "0.6207044", "0.6205789", "0.62014425", "0.6166641"...
0.7285985
0
launch the instances of this starter with optional arguments
def manually_launch_instances(self, which_instances, moreargs, waitpid=True, kill_instance=False): for instance_type in which_instances: for instance in self.all_instances: if instance.instance_type == instance_type: if kill_instance: instance.kill_instance() instance.launch_manual_from_instance_control_file( self.cfg.sbin_dir, self.old_install_prefix, self.cfg.install_prefix, self.cfg.version, self.enterprise, moreargs, waitpid, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(self, **kwargs) -> None:\n ...", "def start( *args, **kwargs ):", "def launch(self):", "def run(self, args):\n\n return", "def run(self, args):\n pass", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def main(cls, *args, **kwargs):\n a...
[ "0.7010809", "0.68640345", "0.68409586", "0.678276", "0.6754254", "0.6719668", "0.6687372", "0.66585034", "0.66585034", "0.66445416", "0.66290414", "0.6617419", "0.6617419", "0.6592643", "0.65777135", "0.65322596", "0.6527857", "0.65144086", "0.6507485", "0.64961606", "0.6448...
0.0
-1
launch the instances of this starter with optional arguments
def manually_launch_instances_for_upgrade(self, which_instances, moreargs, waitpid=True, kill_instance=False): for instance_type in which_instances: for i in self.all_instances: if i.instance_type == instance_type: if kill_instance: i.kill_instance() i.launch_manual_from_instance_control_file( self.cfg.sbin_dir, self.old_install_prefix, self.cfg.install_prefix, self.cfg.version, self.enterprise, moreargs, waitpid, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(self, **kwargs) -> None:\n ...", "def start( *args, **kwargs ):", "def launch(self):", "def run(self, args):\n\n return", "def run(self, args):\n pass", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def main(cls, *args, **kwargs):\n a...
[ "0.7010809", "0.68640345", "0.68409586", "0.678276", "0.6754254", "0.6719668", "0.6687372", "0.66585034", "0.66585034", "0.66445416", "0.66290414", "0.6617419", "0.6617419", "0.6592643", "0.65777135", "0.65322596", "0.6527857", "0.65144086", "0.6507485", "0.64961606", "0.6448...
0.0
-1
kill, launch the instances of this starter with optional arguments and restart
def upgrade_instances(self, which_instances, moreargs, waitpid=True, force_kill_fatal=True):
    """terminate the selected instance types and relaunch them for an upgrade

    Each matching instance is stopped, launched once with `moreargs` and
    waited upon (presumably the upgrade run itself — TODO confirm), then
    launched a second time without extra arguments, detached.

    NOTE(review): the `waitpid` and `force_kill_fatal` parameters are
    accepted but never used by this body — confirm whether that is intended.

    :param which_instances: iterable of instance types to upgrade
    :param moreargs: extra arguments for the first (waited-upon) launch
    """
    for instance_type in which_instances:
        for i in self.all_instances:
            if i.instance_type == instance_type:
                i.terminate_instance()
                # first launch: with moreargs, blocking until it exits
                i.launch_manual_from_instance_control_file(
                    self.cfg.sbin_dir,
                    self.old_install_prefix,
                    self.cfg.install_prefix,
                    self.cfg.version,
                    self.enterprise,
                    moreargs,
                    True,
                )
                # second launch: plain arguments, not waited upon
                i.launch_manual_from_instance_control_file(
                    self.cfg.sbin_dir,
                    self.old_install_prefix,
                    self.cfg.install_prefix,
                    self.cfg.version,
                    self.enterprise,
                    [],
                    False,
                )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop_and_restart():\n U.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "def stop_and_restart():\n updater.stop()\n os.execl(sys.executable, sys.executable, *sys.argv)", "async def kill(self, restart: bool = False) -> None:\n pass", "def stop_and_restart():\n updater.stop...
[ "0.69379675", "0.66118777", "0.659606", "0.6544262", "0.6499166", "0.6498725", "0.64666843", "0.6448177", "0.64149165", "0.6343142", "0.6260202", "0.6211733", "0.6203442", "0.614886", "0.6116213", "0.6090153", "0.6088133", "0.60815567", "0.60769475", "0.6035262", "0.6033425",...
0.5673631
47
Terminate arangod(s). Let the starter restart them.
def restart_arangods(self):
    """Terminate arangod(s). Let the starter restart them."""
    for arangod in self.all_instances:
        arangod.kill_instance()
        # keep the old log around under a new name for later inspection
        arangod.rename_logfile()
    self.detect_instances()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terminate():\n dislin.disfin()", "def terminate():\n sys.exit()", "def terminate(self):", "def terminate_all(self):\n self._stop_all('terminate')", "async def terminate(self, restart=False) -> None:\n pass", "def terminate(self) -> None:\n self.robot.terminate_all()", "de...
[ "0.71201587", "0.6880036", "0.6720558", "0.6588962", "0.64856535", "0.63402265", "0.63243175", "0.62685204", "0.62685204", "0.6242662", "0.62372035", "0.61764723", "0.61611766", "0.61569375", "0.6126655", "0.61082095", "0.60830575", "0.6070568", "0.60174894", "0.6013085", "0....
0.63488686
5
replace the parts of the installation with information after an upgrade
def replace_binary_setup_for_upgrade(self, new_install_cfg):
    """replace the parts of the installation with information after an upgrade

    :param new_install_cfg: installer config of the freshly installed version
    """
    # On windows the install prefix may change,
    # since we can't overwrite open files:
    self.cfg.set_directories(new_install_cfg)
    if not self.cfg.hot_backup_supported:
        return
    # point rclone at the freshly installed binary
    self.hotbackup_args = [
        "--all.rclone.executable",
        self.cfg.real_sbin_dir / "rclone-arangodb",
    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self):", "def upgrade(self):", "def updates_check(self,request):\n\t\tp0 = subprocess.Popen(['LC_ALL=C apt-get update'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\t\t(stdout,stderr) = p0.communicate()\n\n\t\tp1 = subprocess.Popen(['LC_ALL=C apt-get -u dist-upgrade -s'], stdout=s...
[ "0.68529916", "0.68529916", "0.6102295", "0.6096292", "0.60911036", "0.60778755", "0.60303754", "0.59639597", "0.59076554", "0.59010935", "0.58665204", "0.5826871", "0.57834613", "0.57735384", "0.5736566", "0.572258", "0.5700653", "0.56856245", "0.568505", "0.56655", "0.56608...
0.5961917
8
kill all arangosync instances we possess
def kill_sync_processes(self, force, rev):
    """kill all arangosync instances we possess

    :param force: kill even when a pid file exists on a new enough starter
    :param rev: semver of the starter binary under test
    """
    for i in self.all_instances:
        if i.is_sync_instance():
            # Starters >= 0.15.0 with a pid file presumably manage the
            # syncer shutdown themselves — TODO confirm. Note this
            # `return` aborts the whole loop, not just this instance.
            if not force and i.pid_file is not None and rev >= semver.VersionInfo.parse("0.15.0"):
                print("Skipping manual kill")
                return
            logging.info("manually killing syncer: " + str(i.pid))
            i.terminate_instance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kill_all():\n compose_kill_all()", "def kill_all(self):\n self._stop_all('kill')", "def killAll(controller=False):", "def restart_arangods(self):\n for instance in self.all_instances:\n instance.kill_instance()\n instance.rename_logfile()\n self.detect_instan...
[ "0.7347588", "0.71552086", "0.7108252", "0.7059267", "0.6879104", "0.6862512", "0.6845278", "0.6730945", "0.66946965", "0.6599361", "0.65697455", "0.6569324", "0.6562354", "0.65175766", "0.64793795", "0.644303", "0.644055", "0.64267015", "0.6424852", "0.6371221", "0.6370731",...
0.6927068
4
we use a starter, to tell daemon starters to perform the rolling upgrade
def command_upgrade(self):
    """we use a starter, to tell daemon starters to perform the rolling upgrade

    Spawns `arangodb upgrade` pointed at this starter's HTTP endpoint and
    stores the process handle in `self.upgradeprocess`; it is waited upon
    later by `wait_for_upgrade`.
    """
    args = [
        self.cfg.bin_dir / "arangodb",
        "upgrade",
        "--starter.endpoint",
        self.get_http_protocol() + "://127.0.0.1:" + str(self.get_my_port()),
    ]
    logging.info("StarterManager: Commanding upgrade:")
    lh.log_cmd(" ".join([str(arg) for arg in args]))
    # pipes deliberately not captured (left commented) so output goes to
    # the test run's console:
    self.upgradeprocess = psutil.Popen(
        args,
        # stdout=subprocess.PIPE,
        # stdin=subprocess.PIPE,
        # stderr=subprocess.PIPE,
        universal_newlines=True,
    )
    print("Upgrade commander has PID:" + str(self.upgradeprocess.pid))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # ...
[ "0.5944614", "0.5928757", "0.57852423", "0.5737201", "0.57358366", "0.57358366", "0.5688907", "0.5653342", "0.5614044", "0.56099707", "0.5584282", "0.55671465", "0.5554055", "0.5543296", "0.5511669", "0.5501744", "0.5501293", "0.54920185", "0.54645705", "0.5462174", "0.543319...
0.5740626
3
wait for the upgrade commanding starter to finish
def wait_for_upgrade(self, timeout=60):
    """wait for the upgrade commanding starter to finish

    :param timeout: seconds to wait before declaring the upgrade stuck
    :raises TimeoutError: if the upgrade command does not exit in time
    :raises Exception: if the upgrade command exits with a non-zero code
    """
    try:
        exit_code = self.upgradeprocess.wait(timeout=timeout)
    except psutil.TimeoutExpired as timeout_ex:
        raise TimeoutError(
            "StarterManager: Upgrade command [%s] didn't finish in time: %d"
            % (
                str(self.basedir),
                timeout,
            )
        ) from timeout_ex
    logging.info(
        "StarterManager: Upgrade command [%s] exited: %s",
        str(self.basedir),
        str(exit_code),
    )
    if exit_code != 0:
        raise Exception("Upgrade process exited with non-zero reply")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wait_for_upgrade(self):\n self.run_test_suites(self.wait_for_upgrade_test_suite_list)", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def test_do_upgrade(self):\n with self.with_config_update():\n result ...
[ "0.77359796", "0.7126432", "0.7126432", "0.7126432", "0.7126432", "0.6678276", "0.66306156", "0.6630208", "0.66240835", "0.64890206", "0.6419722", "0.640707", "0.640707", "0.64049435", "0.63792473", "0.6337039", "0.63248074", "0.62579197", "0.6122265", "0.6120902", "0.6113800...
0.73207533
1
tries to wait for the server to restart after the 'restore' command
def wait_for_restore(self):
    """tries to wait for the server to restart after the 'restore' command"""
    for node in self.all_instances:
        # only data-bearing instance types need to come back up
        if node.instance_type not in (
            InstanceType.RESILIENT_SINGLE,
            InstanceType.SINGLE,
            InstanceType.DBSERVER,
        ):
            continue
        node.detect_restore_restart()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")", "def continue_server():\n update_server_status({'ready': True})", "async def async_restore(self):\n await self._client.restore()\n self.async_write_ha_s...
[ "0.69105613", "0.6523955", "0.63679016", "0.62714815", "0.6169232", "0.6149965", "0.61382735", "0.61187077", "0.61115396", "0.61071575", "0.606816", "0.6046619", "0.6045591", "0.6022971", "0.6000961", "0.59798926", "0.59677154", "0.5965609", "0.59406596", "0.5929384", "0.5921...
0.7891546
0
tries to wait for the server to restart after the 'restore' command
def tcp_ping_nodes(self, timeout=20.0):
    """ping the data-bearing nodes via their version endpoint

    :param timeout: per-node timeout in seconds for the version request
    """
    for node in self.all_instances:
        if node.instance_type not in (
            InstanceType.RESILIENT_SINGLE,
            InstanceType.SINGLE,
            InstanceType.DBSERVER,
        ):
            continue
        node.check_version_request(timeout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait_for_restore(self):\n for node in self.all_instances:\n if node.instance_type in [\n InstanceType.RESILIENT_SINGLE,\n InstanceType.SINGLE,\n InstanceType.DBSERVER,\n ]:\n node.detect_restore_restart()", "def finished...
[ "0.7891546", "0.69105613", "0.6523955", "0.63679016", "0.62714815", "0.6169232", "0.6149965", "0.61382735", "0.61187077", "0.61115396", "0.61071575", "0.606816", "0.6046619", "0.6045591", "0.6022971", "0.6000961", "0.59798926", "0.59677154", "0.5965609", "0.59406596", "0.5929...
0.0
-1
restart the starter instance after we killed it eventually, maybe command manual upgrade (and wait for exit)
def respawn_instance(self, version, moreargs=None, wait_for_logfile=True):
    """restart the starter instance after we killed it

    :param version: version string the respawned starter is expected to run
    :param moreargs: optional extra command-line arguments
    :param wait_for_logfile: if True, wait for logfile + port bind;
        otherwise block until the starter process exits
    """
    assert version is not None
    self.cfg.version = version
    args = [self.cfg.bin_dir / "arangodb"] + self.hotbackup_args + self.default_starter_args + self.arguments
    if moreargs is not None:
        args.extend(moreargs)
    logging.info("StarterManager: respawning instance %s", str(args))
    self.instance = psutil.Popen(args)
    self.pid = self.instance.pid
    self.ppid = self.instance.ppid()
    print("respawned with PID:" + str(self.instance.pid))
    if wait_for_logfile:
        self.wait_for_logfile()
        self.wait_for_port_bind()
    else:
        # manual-upgrade mode: the starter is expected to terminate
        print("Waiting for starter to exit")
        print("Starter exited %d" % self.instance.wait())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()", "def restart():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('fig -f prod.yml stop')\n run('fig -f prod.yml up -d')", "def _restart(self):\n pass", "def stop_and_restart():\n up...
[ "0.762096", "0.7359066", "0.7356461", "0.7342839", "0.7295235", "0.7272434", "0.7151316", "0.7120452", "0.703074", "0.7012595", "0.6993744", "0.69693404", "0.6928476", "0.6927464", "0.692301", "0.6919504", "0.6894615", "0.6894615", "0.68921846", "0.68630123", "0.6854615", "...
0.0
-1
wait for the SUT reply with a 200 to /_api/version
def wait_for_version_reply(self):
    """wait for the SUT reply with a 200 to /_api/version"""
    for frontend in self.get_frontends():
        # we abuse the AFO-state probe as a readiness check:
        while frontend.get_afo_state() != AfoServerState.LEADER:
            progress(".")
            time.sleep(0.1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gets_to_version_page(self):\n\n response = self.client.get('/version')\n\n self.assertEqual(response.status_code, 200)", "def test_server_details_ok(self):\n response = self.call_api('server_details', {}, 200).json\n self.assertEqual(utils.get_app_version(), response['server_version'...
[ "0.68522596", "0.6699962", "0.6616843", "0.66101503", "0.6607136", "0.6591218", "0.65614545", "0.6492159", "0.6463067", "0.6431189", "0.639559", "0.6369794", "0.62930536", "0.6274485", "0.6269045", "0.62181973", "0.6199127", "0.6190027", "0.6177709", "0.6155891", "0.6152194",...
0.6028883
23
use arangosh to run a command on the frontend arangod
def execute_frontend(self, cmd, verbose=True):
    """use arangosh to run a command on the frontend arangod

    :param cmd: the command to run via arangosh
    :param verbose: forwarded to the arangosh executor
    :return: whatever the arangosh executor returns
    """
    result = self.arangosh.run_command(cmd, verbose)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_command(self, args):\n pass", "def command():\n pass", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():...
[ "0.6615906", "0.6522063", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "0.6375979", "...
0.79413956
0
get the port of the arangod which is coordinator etc.
def get_frontend_port(self):
    """get the port of the arangod which is coordinator etc.

    Prefers the cached/overridden `frontend_port` if set (truthy),
    otherwise asks the first frontend instance.
    """
    return self.frontend_port if self.frontend_port else self.get_frontend().port
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_port(self):\n return self.__port", "def get_port(self):\n return self.port", "def port(self) -> int:", "def get_port(self):\n return self.__port", "def port():", "def port(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n ...
[ "0.78841996", "0.78097165", "0.7728278", "0.7653651", "0.7631629", "0.7624739", "0.7596705", "0.750416", "0.7466592", "0.74258894", "0.74075156", "0.7370084", "0.7345154", "0.7341591", "0.7341591", "0.7341591", "0.7341591", "0.7341591", "0.7341591", "0.7284412", "0.7284412", ...
0.6660332
89
find out my frontend port
def get_my_port(self):
    """find out my frontend port

    Scrapes the starter's logfile for the "ArangoDB Starter listening on"
    line and caches the port it announces; retries up to 10 times while
    the logfile is still being written.

    :return: the port as a string (cached in `self.starter_port`)
    :raises Exception: if the port cannot be found in the logfile
    """
    if self.starter_port is not None:
        return self.starter_port

    where = -1
    tries = 10
    while where == -1 and tries:
        tries -= 1
        lfcontent = self.get_log_file()
        where = lfcontent.find("ArangoDB Starter listening on")
        if where != -1:
            # the port follows the last ":" of the address on that line
            where = lfcontent.find(":", where)
            if where != -1:
                end = lfcontent.find(" ", where)
                port = lfcontent[where + 1 : end]
                self.starter_port = port
                # sanity check: must be numeric (and non-zero, since
                # int("0") would be falsy)
                assert int(port), "port cannot be converted to int!"
                return port
        logging.info("retrying logfile")
        time.sleep(1)
    message = "could not get port form: " + self.log_file
    logging.error(message)
    raise Exception(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_frontend_port(self):\n if self.frontend_port:\n return self.frontend_port\n return self.get_frontend().port", "def port():", "def port(self) -> int:", "def port(self):\r\n _, port = self.server_address\r\n return port", "def get_port():\n return int(os.gete...
[ "0.77191985", "0.76183486", "0.7364856", "0.7049408", "0.70435005", "0.7038148", "0.69965345", "0.6989152", "0.6982898", "0.68437135", "0.6827302", "0.6827302", "0.6827302", "0.6827302", "0.6827302", "0.68235147", "0.6803366", "0.67924094", "0.678412", "0.6752687", "0.672235"...
0.0
-1
get the port of a syncmaster arangosync
def get_sync_master_port(self):
    """get the port of a syncmaster arangosync

    Waits until three "syncworker up and running" lines appear in the
    starter log, then picks the third "Starting syncmaster on port"
    occurrence and parses its (4-digit) port number.

    :return: the syncmaster port as int, or None if not found
    """
    self.sync_master_port = None
    pos = None
    sm_port_text = "Starting syncmaster on port"
    sw_text = "syncworker up and running"
    worker_count = 0
    logging.info("detecting sync master port")
    # wait until all three workers have announced themselves
    while worker_count < 3 and self.is_instance_running():
        progress("%")
        lfs = self.get_log_file()
        npos = lfs.find(sw_text, pos)
        if npos >= 0:
            worker_count += 1
            pos = npos + len(sw_text)
        else:
            time.sleep(1)
    lfs = self.get_log_file()
    # skip to the third syncmaster announcement
    pos = lfs.find(sm_port_text)
    pos = lfs.find(sm_port_text, pos + len(sm_port_text))
    pos = lfs.find(sm_port_text, pos + len(sm_port_text))
    if pos >= 0:
        pos = pos + len(sm_port_text) + 1
        # NOTE(review): assumes the port is exactly 4 digits — confirm
        self.sync_master_port = int(lfs[pos : pos + 4])
    return self.sync_master_port
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")", "def master_port(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"master_port\")", "def masterPort(self):\r\n return self._masterPort", "def port(self) -> int:", "def get_slave_port():\n ...
[ "0.70510185", "0.70510185", "0.70024353", "0.6852463", "0.68338513", "0.679988", "0.6775076", "0.6759334", "0.66923726", "0.6612166", "0.6540162", "0.6490325", "0.64330757", "0.6365352", "0.63405347", "0.6295224", "0.61943614", "0.6190745", "0.6190299", "0.6183827", "0.617589...
0.7340527
0
fetch the logfile of this starter
def get_log_file(self):
    """fetch the logfile of this starter

    :return: full logfile contents, undecodable bytes backslash-escaped
    """
    contents = self.log_file.read_text(errors="backslashreplace")
    return contents
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logfile(self):\n return self._get('logfile')", "def getLogs():", "def getLogs():", "def get_log(self):\n\n open_lf = open(self.logfile, 'r')\n log_str = open_lf.read()\n sys.stdout.write(log_str)\n\n return log_str", "def getLogFile(self):\r\n return LOG.getLog...
[ "0.75987124", "0.734955", "0.734955", "0.69272965", "0.6812943", "0.67958313", "0.6760511", "0.6727943", "0.67024195", "0.6673078", "0.6650076", "0.6613002", "0.66086835", "0.6594247", "0.6592443", "0.6579317", "0.6579317", "0.65463054", "0.6543485", "0.6533908", "0.6504604",...
0.63419765
31
get the logfile of the dbserver instance
def read_db_logfile(self):
    """get the logfile of the dbserver instance

    :return: full logfile contents, undecodable bytes backslash-escaped
    """
    dbserver = self.get_dbserver()
    assert dbserver.logfile.exists(), "don't have logfile?"
    return dbserver.logfile.read_text(errors="backslashreplace")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logfile(self):\n return self._get('logfile')", "def log_db():\n return pymongo.MongoClient(SCITRAN_PERSISTENT_DB_LOG_URI).get_database()", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def get_log(self):\n\n open_lf = open(self.logfile, 'r')\n log_str = open_lf...
[ "0.7550862", "0.7224536", "0.6880102", "0.6592017", "0.6583927", "0.6482713", "0.6482713", "0.6469686", "0.6463902", "0.63621795", "0.63040406", "0.6289661", "0.6283953", "0.6207564", "0.6207564", "0.6109618", "0.6103252", "0.6098555", "0.6073537", "0.60647166", "0.6054933", ...
0.7439977
1
get the agent logfile of this instance
def read_agent_logfile(self):
    """get the agent logfile of this instance

    :return: full logfile contents, undecodable bytes backslash-escaped
    """
    agent = self.get_agent()
    assert agent.logfile.exists(), "don't have logfile?"
    return agent.logfile.read_text(errors="backslashreplace")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logfile(self):\n return self._get('logfile')", "def get_log(self):\n\n open_lf = open(self.logfile, 'r')\n log_str = open_lf.read()\n sys.stdout.write(log_str)\n\n return log_str", "def getLogFile(self):\r\n return LOG.getLogFile().name", "def getLog(self):\n ...
[ "0.79751503", "0.73155695", "0.72758114", "0.72477007", "0.72477007", "0.70513636", "0.6913755", "0.68768865", "0.68585616", "0.6792453", "0.6792453", "0.6751899", "0.6743848", "0.6700573", "0.66865516", "0.66694623", "0.66520137", "0.66206974", "0.66192824", "0.66146636", "0...
0.7742073
1
see which arangods where spawned and inspect their logfiles
def detect_instances(self):
    """see which arangods where spawned and inspect their logfiles

    Walks the starter's base directory looking for per-instance
    subdirectories (agent0, dbserver1, syncmaster…), builds an Instance
    object for each, and retries until the detected set matches
    `self.expect_instances` and at least one frontend is present.

    :raises Exception: if no frontend instance could be detected
    """
    lh.subsection("Instance Detection for {0.name}".format(self))
    jwt = self.get_jwt_header()
    self.all_instances = []
    logging.debug("waiting for frontend")
    logfiles = set()  # logfiles that can be used for debugging

    # the more instances we expect to spawn the more patient:
    tries = 10 * self.expect_instance_count

    # Wait for frontend to become alive.
    all_instances_up = False
    while not all_instances_up and tries:
        # start from scratch on every retry
        self.all_instances = []
        detected_instances = []
        sys.stdout.write(".")
        sys.stdout.flush()

        for root, dirs, files in os.walk(self.basedir):
            # remember every *.log file for diagnostics on failure
            for onefile in files:
                # logging.debug("f: " + root + os.path.sep + onefile)
                if onefile.endswith("log"):
                    logfiles.add(str(Path(root) / onefile))

            for name in dirs:
                # logging.debug("d: " + root + os.path.sep + name)
                match = None
                instance_class = None
                # directory name encodes the instance type + ordinal
                if name.startswith("sync"):
                    match = re.match(r"(syncmaster|syncworker)(\d*)", name)
                    instance_class = SyncInstance
                else:
                    match = re.match(
                        r"(agent|coordinator|dbserver|resilientsingle|single)(\d*)",
                        name,
                    )
                    instance_class = ArangodInstance
                # directory = self.basedir / name
                if match and len(match.group(2)) > 0:
                    # we may see a `local-slave-*` directory inbetween,
                    # hence we need to choose the current directory not
                    # the starter toplevel dir for this:
                    instance = instance_class(
                        match.group(1),
                        match.group(2),
                        self.cfg.localhost,
                        self.cfg.publicip,
                        Path(root) / name,
                        self.passvoid,
                        self.cfg.ssl,
                        self.cfg.version,
                        self.enterprise,
                        jwt=jwt,
                    )
                    instance.wait_for_logfile(tries)
                    instance.detect_pid(
                        ppid=self.instance.pid,
                        full_binary_path=self.cfg.real_sbin_dir,
                        offset=0,
                    )
                    detected_instances.append(instance.instance_type)
                    self.all_instances.append(instance)

        print(self.expect_instances)
        detected_instances.sort()
        print(detected_instances)
        attach(str(self.expect_instances), "Expected instances")
        attach(str(detected_instances), "Detected instances")
        # retry unless the detected set matches and a frontend is up
        if (self.expect_instances != detected_instances) or (not self.get_frontends()):
            tries -= 1
            time.sleep(5)
        else:
            all_instances_up = True

    if not self.get_frontends():
        print()
        logging.error("STARTER FAILED TO SPAWN ARANGOD")
        self.show_all_instances()
        logging.error("can not continue without frontend instance")
        logging.error("please check logs in" + str(self.basedir))
        for logf in logfiles:
            logging.debug(logf)
        message = "if that does not help try to delete: " + str(self.basedir)
        logging.error(message)
        raise Exception(message)
    self.show_all_instances()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLogs():", "def getLogs():", "def getArchLogs(self):\n\n # Implement checkFiles() for archs?\n\n # Pull log file\n if self.nbDetails['proc']['archLog'] is not None:\n result = self.c.get(self.nbDetails['proc']['archLog'])\n print(f\"Pulled archive creation log {result.remote} to {r...
[ "0.64035773", "0.64035773", "0.61854243", "0.6003146", "0.57708687", "0.5767324", "0.57531023", "0.57415503", "0.56718665", "0.5575059", "0.5452616", "0.54122627", "0.53984296", "0.53840137", "0.5376799", "0.5361503", "0.53584874", "0.53567886", "0.53554016", "0.53292704", "0...
0.5462737
10
detect the arangod instance PIDs
def detect_instance_pids(self):
    """detect the arangod instance PIDs

    Re-detects the PID of every known instance (children of this
    starter's process), then refreshes the arangosh helpers.
    """
    for arangod in self.all_instances:
        arangod.detect_pid(
            ppid=self.instance.pid,
            full_binary_path=self.cfg.real_sbin_dir,
            offset=0,
        )
    self.show_all_instances()
    self.detect_arangosh_instances(self.cfg, self.cfg.version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_instances(self):\n lh.subsection(\"Instance Detection for {0.name}\".format(self))\n jwt = self.get_jwt_header()\n self.all_instances = []\n logging.debug(\"waiting for frontend\")\n logfiles = set() # logfiles that can be used for debugging\n\n # the more inst...
[ "0.6319383", "0.61950517", "0.6192594", "0.61528647", "0.6111595", "0.6052284", "0.605194", "0.59694993", "0.59247625", "0.5862299", "0.5763337", "0.5762358", "0.5724921", "0.5709404", "0.5638399", "0.56117487", "0.5553753", "0.55345756", "0.54887575", "0.54887056", "0.545983...
0.77291846
0
scan all instances for `FATAL` statements
def detect_fatal_errors(self):
    """scan all instances for `FATAL` statements"""
    for arangod in self.all_instances:
        arangod.detect_fatal_errors()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors_fatal(self) -> List[Error]:", "def fatal_error_processor(self):\n while True:\n _ = (yield)\n self.failed = True\n self.converged = False\n self.solve_completed = False", "def getFatalErrors(self):\n global hadFatalErrors\n if hadFatal...
[ "0.61938083", "0.6015998", "0.59813035", "0.5981033", "0.5679613", "0.55990446", "0.550483", "0.54907763", "0.5387474", "0.5358392", "0.5331162", "0.53206944", "0.5311066", "0.5309457", "0.5303004", "0.5290556", "0.5203423", "0.5195811", "0.51785743", "0.517628", "0.51631856"...
0.69902325
0
gets the arangosh instance to speak to the frontend of this starter
def detect_arangosh_instances(self, config, old_version):
    """gets the arangosh instance to speak to the frontend of this starter

    Lazily builds the arangosh / arangoimport / arangorestore executors
    the first time it is called; subsequent calls are no-ops for those.
    Hot-backup helpers are (re)built whenever hot backup is supported.

    :param config: the installer/runner config; its port and passvoid
        are pointed at this starter's frontend
    :param old_version: version string handed to the arangosh executor
    """
    if self.arangosh is None:
        config.port = self.get_frontend_port()
        config.passvoid = self.passvoid
        self.arangosh = ArangoshExecutor(config, self.get_frontend(), old_version)
        self.arango_importer = ArangoImportExecutor(config, self.get_frontend())
        self.arango_restore = ArangoRestoreExecutor(config, self.get_frontend())
    if config.hot_backup_supported:
        self.hb_instance = HotBackupManager(
            config,
            self.raw_basedir,
            config.base_test_dir / self.raw_basedir,
            self.get_frontend(),
        )
        self.hb_config = HotBackupConfig(
            config,
            self.raw_basedir,
            config.base_test_dir / self.raw_basedir,
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(approot, instance):\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()",...
[ "0.5918849", "0.5901219", "0.57980865", "0.5683462", "0.55928767", "0.55773854", "0.55701894", "0.5559227", "0.54972243", "0.53930515", "0.5382782", "0.5344606", "0.5341296", "0.53286755", "0.5324452", "0.5287806", "0.52836734", "0.52671164", "0.52298224", "0.52198905", "0.52...
0.53324765
13
launch an arangobench instance to the frontend of this starter
def launch_arangobench(self, testacse_no, moreopts=None):
    """launch an arangobench instance to the frontend of this starter

    :param testacse_no: testcase number forwarded to arangobench
    :param moreopts: optional extra arangobench options
    :return: the launched ArangoBenchManager
    """
    bench = ArangoBenchManager(self.cfg, self.get_frontend())
    bench.launch(testacse_no, moreopts)
    return bench
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n tng.api.runner()", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def launch_test():\n import sys\n from kothrak.envs.KothrakEnv import KothrakEnv\n from kothrak.envs.game.MyApp import style\n from PyQt5.QtWidgets import QApplication, QWidget\n\n q...
[ "0.6429091", "0.6309221", "0.62545246", "0.61604047", "0.6152724", "0.60954607", "0.6065459", "0.6057383", "0.60311", "0.59965384", "0.59894437", "0.5950306", "0.58948225", "0.5882779", "0.5840209", "0.5838446", "0.5817836", "0.58095634", "0.5783761", "0.5774965", "0.57688946...
0.78506184
0
detecting whether the processes the starter spawned are still there
def detect_instance_pids_still_alive(self):
    """detecting whether the processes the starter spawned are still there

    :raises Exception: listing the instances whose PID is gone
    """
    missing_instances = []
    running_pids = psutil.pids()
    for instance in self.all_instances:
        if instance.pid not in running_pids:
            missing_instances.append(instance)

    if len(missing_instances) > 0:
        logging.error(
            "Not all instances are alive. The following are not running: %s",
            str(missing_instances),
        )
        # dump the full process tree to help post-mortem analysis
        logging.error(get_process_tree())
        raise Exception("instances missing: " + str(missing_instances))
    instances_table = get_instances_table(self.get_instance_essentials())
    logging.info("All arangod instances still running: \n%s", str(instances_table))
    attach_table(instances_table, "Instances table")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_started(self):\n return bool(self._processes)", "def check_parent_processes_alive():\n cur_process = psutil.Process()\n parent = cur_process.parent()\n while True:\n time.sleep(1)\n if not parent.is_running():\n break\n\n logger.warni...
[ "0.70939624", "0.70884717", "0.7061167", "0.7040123", "0.6903951", "0.68766594", "0.6748461", "0.66941243", "0.6645801", "0.6616109", "0.6574495", "0.6571236", "0.6525034", "0.64792585", "0.6474497", "0.6439188", "0.64198804", "0.6415135", "0.64056075", "0.63858753", "0.63682...
0.6200317
37
enables / disables maintainance mode
def maintainance(self, on_off, instance_type):
    """enables / disables maintainance mode

    Sends PUT /_admin/cluster/maintenance to instances of the given type,
    retrying (with 3 s sleeps) until a 200 reply or the retry budget of
    60 attempts is exhausted.

    :param on_off: True to enable, False to disable maintenance mode
    :param instance_type: which instance type to send the request to
    :raises Exception: if no 200 reply arrives within the retry budget
    """
    print(("enabling" if on_off else "disabling") + " Maintainer mode")
    tries = 60
    while True:
        reply = self.send_request(
            instance_type,
            requests.put,
            "/_admin/cluster/maintenance",
            # body must be the JSON string "on"/"off" (quotes included)
            '"on"' if on_off else '"off"',
        )
        if len(reply) > 0:
            print("Reply: " + str(reply[0].text))
            if reply[0].status_code == 200:
                return
            print(f"Reply status code is {reply[0].status_code}. Sleeping for 3 s.")
            time.sleep(3)
            tries -= 1
        else:
            print("Reply is empty. Sleeping for 3 s.")
            time.sleep(3)
            tries -= 1
        if tries <= 0:
            action = "enable" if on_off else "disable"
            raise Exception(f"Couldn't {action} maintainance mode!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_enable_maintence_mode(self):\n pass", "def test_enable_maintence_mode1(self):\n pass", "def maintenance_mode():\n pass", "def check_manual_mode_change(self, event):\n if self.vehicle.get_manual_mode_change(reset=True):\n data = lambda: None\n data.mode_t...
[ "0.7372797", "0.73507535", "0.67397463", "0.6203601", "0.6188932", "0.6090768", "0.59693944", "0.5953735", "0.5913658", "0.5828994", "0.5818898", "0.5807375", "0.57376415", "0.5717043", "0.57043606", "0.5703792", "0.56936777", "0.56936777", "0.56642973", "0.5659629", "0.56460...
0.7124232
2
in active failover detect whether we run the leader
def detect_leader(self):
    """in active failover detect whether we run the leader

    First inspects the dbserver logfile for leadership messages, then
    double-checks via an HTTP probe (a 503 means not leader after all).

    :return: True if this starter's instance is the leader
    """
    # Should this be moved to the AF script?
    lfs = self.read_db_logfile()

    became_leader = lfs.find("Became leader in") >= 0
    took_over = lfs.find("Successful leadership takeover:" + " All your base are belong to us") >= 0
    self.is_leader = became_leader or took_over
    if self.is_leader:
        # verify: a 503 from the frontend overrides the logfile verdict
        url = self.get_frontend().get_local_url("")
        reply = requests.get(url, auth=requests.auth.HTTPBasicAuth("root", self.passvoid), timeout=120)
        print(f"{url} => {str(reply)}")
        if reply.status_code == 503:
            self.is_leader = False
    return self.is_leader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def active_failover_detect_host_now_follower(self):\n self.check_that_instance_is_alive()\n lfs = self.get_log_file()\n if lfs.find(\"resilientsingle up and running as follower\") >= 0:\n self.is_master = False\n return True\n return False", "def probe_leader(sel...
[ "0.73940504", "0.7367778", "0.72519314", "0.724043", "0.7167702", "0.70824057", "0.69250184", "0.69241196", "0.67583", "0.65203124", "0.65203124", "0.6361834", "0.6304819", "0.6293747", "0.6229859", "0.6190928", "0.61597836", "0.604024", "0.60138893", "0.5951151", "0.58495647...
0.7651802
0
talk to the frontends to find out whether its a leader or not.
def probe_leader(self):
    """talk to the frontends to find out whether its a leader or not.

    Probes every frontend (deliberately no early exit) and records the
    result in `self.is_leader`.

    :return: True if any frontend reports leadership
    """
    # Should this be moved to the AF script?
    leader_seen = False
    for frontend in self.get_frontends():
        leader_seen = frontend.probe_if_is_leader() or leader_seen
    self.is_leader = leader_seen
    return leader_seen
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_leader(self):\n # Should this be moved to the AF script?\n lfs = self.read_db_logfile()\n\n became_leader = lfs.find(\"Became leader in\") >= 0\n took_over = lfs.find(\"Successful leadership takeover:\" + \" All your base are belong to us\") >= 0\n self.is_leader = bec...
[ "0.7640331", "0.74470776", "0.74470776", "0.74285924", "0.713586", "0.6762263", "0.6751401", "0.6734137", "0.6681536", "0.65391123", "0.64464325", "0.629198", "0.6276058", "0.62618124", "0.61972916", "0.61910975", "0.6164121", "0.59911203", "0.5944456", "0.5918337", "0.587292...
0.78123844
0
detect hosts for the active failover
def active_failover_detect_hosts(self):
    """detect hosts for the active failover

    Reads this starter's logfile to learn whether it became the master
    and which port its resilientsingle listens on.

    :raises Exception: if the resilientsingle port line cannot be found
    """
    self.check_that_instance_is_alive()
    # this is the way to detect the master starter...
    lfs = self.get_log_file()
    self.is_master = lfs.find("Just became master") >= 0
    match = re.search(r"Starting resilientsingle on port (\d*) .*", lfs)
    if match is None:
        # BUG FIX: self.basedir is a Path (see str(self.basedir) usage in
        # sibling methods) — the old code concatenated it to a str, which
        # raises TypeError instead of the intended Exception.
        raise Exception(timestamp() + "Unable to get my host state! " + str(self.basedir) + " - " + lfs)
    self.frontend_port = match.groups()[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sniff_hosts(self):\n previous_sniff = self.last_sniff\n hosts = []\n try:\n # reset last_sniff timestamp\n self.last_sniff = time.time()\n try:\n hosts = self.get_es_node_addresses()\n except Exception:\n raise Trans...
[ "0.6949691", "0.6628178", "0.6534761", "0.6527784", "0.64729506", "0.64609885", "0.64203495", "0.64133173", "0.63669676", "0.6323245", "0.6245519", "0.6221742", "0.6206262", "0.62053776", "0.61899376", "0.61697066", "0.61608654", "0.6157352", "0.6157352", "0.61356586", "0.612...
0.8014416
0
detect whether we successfully respawned the instance, and it became a follower
def active_failover_detect_host_now_follower(self):
    """detect whether we successfully respawned the instance, and it became a follower

    :return: True if the logfile shows the follower announcement
    """
    self.check_that_instance_is_alive()
    if self.get_log_file().find("resilientsingle up and running as follower") < 0:
        return False
    self.is_master = False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n self.center_x = SCREEN_WIDTH / 2\n self.center_y = 600", "def respawn(self):\n # If we are in the middle of respawning, this is non-zero.\n self.respawning = 1\n ...
[ "0.62327015", "0.61734205", "0.5913024", "0.58508074", "0.58208185", "0.58022654", "0.5687446", "0.56421936", "0.56058353", "0.55823416", "0.55794746", "0.55337363", "0.5507955", "0.5464997", "0.54603153", "0.54300016", "0.5409948", "0.540742", "0.54065454", "0.54039454", "0....
0.6564329
0
dump out instance args, and what could be fishy in my log
def search_for_warnings(self):
    """Print the starter arguments and any WARN/ERROR lines from its log.

    The matching lines are also attached to the allure report via
    ``attach`` (project helper — TODO confirm it is in scope at call time).
    """
    log = str()
    print(self.default_starter_args + self.arguments)
    if not self.log_file.exists():
        print(str(self.log_file) + " not there. Skipping search")
        return
    print(str(self.log_file))
    with self.log_file.open(errors="backslashreplace") as log_f:
        # BUG FIX: the original looped over log_f.readline(), which walks
        # the CHARACTERS of the first line only, so "WARN"/"ERROR" could
        # never match. Iterate the file object to visit every line.
        for line in log_f:
            if "WARN" in line or "ERROR" in line:
                print(line.rstrip())
                log += line.rstrip()
    attach(log, "WARN or ERROR lines from starter log")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug(self, *args, **kwargs):", "def __init__(self, args, logger):\n super().__init__(args, logger)", "def dump(self, args):\n if self.stru:\n self.stru.dump(args)\n if self.index:\n self.index.dump(args)\n if self.bank:\n self.bank.dump(args)\n ...
[ "0.6568053", "0.62609863", "0.6245112", "0.6137647", "0.61293983", "0.6093716", "0.60875756", "0.60433614", "0.5990159", "0.5985708", "0.59716463", "0.59541845", "0.5896174", "0.58789164", "0.58609366", "0.58609366", "0.58546025", "0.5845228", "0.5839964", "0.582042", "0.5813...
0.0
-1
Add starter log to allure report
def add_logfile_to_report(self):
    """Attach the starter log file to the allure report as plain text."""
    attach.file(str(self.log_file), "Starter log file", AttachmentType.TEXT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logStarted(build, step, log):", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def write_terraform_apply_log_header(self):\n with open(self.terraform_install_log, 'a+') as logfile:\n logfile...
[ "0.6069437", "0.5960698", "0.59574604", "0.5930421", "0.5885275", "0.5810344", "0.57965165", "0.5733731", "0.5677679", "0.567086", "0.56483895", "0.5602861", "0.5598323", "0.5581187", "0.5545258", "0.5526509", "0.55119", "0.5511117", "0.5473258", "0.5438435", "0.54359597", ...
0.6250697
0
get HTTP protocol for this starter(http/https)
def get_http_protocol(self):
    """Return the URL scheme for this starter: "https" if SSL is on, else "http"."""
    return "https" if self.cfg.ssl else "http"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_protocol():\n if https():\n protocol = 'https'\n else:\n protocol = 'http'\n return protocol", "def get_protocol(self):\n if self.ssl:\n return \"https\"\n else:\n return \"http\"", "def protocol(self):\n return 'https' if self.allow_htt...
[ "0.85699964", "0.8152591", "0.7916452", "0.74209046", "0.7311242", "0.7305693", "0.7218571", "0.7194539", "0.70896435", "0.69592017", "0.68593144", "0.68068993", "0.6778132", "0.6735765", "0.67253834", "0.65918314", "0.6591383", "0.6480884", "0.6470097", "0.64394146", "0.6407...
0.8308429
1
Check that starter instance is alive
def check_that_instance_is_alive(self):
    """Assert that the starter process exists and has not become a zombie.

    Raises:
        Exception: when the process is gone or is in zombie state.
    """
    process = self.instance
    if not process.is_running():
        raise Exception(f"Starter instance is not running. Base directory: {str(self.basedir)}")
    if process.status() == psutil.STATUS_ZOMBIE:
        raise Exception(f"Starter instance is a zombie. Base directory: {str(self.basedir)}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_alive(self):\n pass", "def is_alive(self):\n return True", "def is_alive(self):", "def alive(self):\n return True", "def is_instance_up(self):\n logging.debug(\"checking if starter instance booted: \" + str(self.basedir))\n if not self.instance.is_running():\n ...
[ "0.77166504", "0.7615171", "0.74846673", "0.7292565", "0.7082333", "0.69093466", "0.69083273", "0.687419", "0.6822564", "0.66718936", "0.66718936", "0.66681343", "0.6643307", "0.6634902", "0.6621218", "0.6621218", "0.66178095", "0.66134834", "0.6612024", "0.6602468", "0.65610...
0.820519
0
check whether substring is present in the starter log
def check_that_starter_log_contains(self, substring: str):
    """Raise unless *substring* occurs at least once in the starter log.

    Raises:
        Exception: when the substring is absent from the log file.
    """
    if self.count_occurances_in_starter_log(substring) == 0:
        raise Exception(
            f"Expected to find the following string: {substring}\n in this log file:\n{str(self.log_file)}"
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_contains(self, s: str) -> bool:\n return len(list(filter(lambda str: s in str, self.logs))) > 0", "def hasSubstring(self, s):\n node, off = self.followPath(s)\n return node is not None", "def _is_substring(s1, s2):\n\treturn s1.find(s2) != -1", "def match_substring(self, str):\n ...
[ "0.68737906", "0.6771097", "0.6496623", "0.6410618", "0.63869303", "0.63129365", "0.61710656", "0.61576027", "0.6072304", "0.6054336", "0.6003412", "0.5982007", "0.596408", "0.576148", "0.5692559", "0.5674016", "0.5639905", "0.563524", "0.56121904", "0.56079257", "0.55621016"...
0.82817847
0
count occurrences of a substring in the starter log
def count_occurances_in_starter_log(self, substring: str):
    """Return the number of times *substring* occurs in the starter log.

    (The misspelling "occurances" is kept — the name is part of the
    public interface and callers depend on it.)
    """
    return self.get_log_file().count(substring)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_substring(string, sub_string):\n return string.count(sub_string)", "def count_sub(sub, s):\n count = 0\n for i in range(len(s) - len(sub) + 1):\n if s[i:i + len(sub)] == sub:\n count += 1\n return count", "def recCountString():\r\n target = raw_input(\"Enter target st...
[ "0.69256353", "0.6731468", "0.6658297", "0.6571659", "0.6564363", "0.6540324", "0.6493924", "0.6485271", "0.64711386", "0.6334691", "0.63335073", "0.6273863", "0.626735", "0.6240192", "0.62162554", "0.6199468", "0.6196827", "0.61422545", "0.6131402", "0.6095801", "0.6036622",...
0.8001489
0
fake run starter method
def run_starter(self, expect_to_fail=False):
    """Fake run-starter hook; intentionally does nothing.

    Args:
        expect_to_fail: kept for interface compatibility with real
            starter implementations — TODO confirm against callers.
    """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startTestRun(self):", "def test_get_run(self):\n pass", "def run(_):\n pass", "def Run():\r\n pass", "def runtest(self):", "def run_experiment():\n pass", "def run():\n main()", "def runTests(self):\n \n pass", "def test_run_started(self):", "def run_test(self...
[ "0.74126184", "0.73602825", "0.7261935", "0.72602695", "0.72600466", "0.7242399", "0.72393954", "0.7110345", "0.71020657", "0.7089417", "0.70837325", "0.7037372", "0.7013169", "0.70076424", "0.6999183", "0.6999183", "0.6967566", "0.6933449", "0.6933449", "0.6933449", "0.69334...
0.8099833
0
Test case for basketballteams_get
def test_basketballteams_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basketballteams_id_get(self):\n pass", "def test_get_teams(self):\n pass", "def test_get_teams(self):\n pass", "def test_teams_get_teams_v2(self):\n pass", "def test_retrieve_team(self):\n pass", "def test_teams_get_teams_v1(self):\n pass", "def test_t...
[ "0.8449335", "0.84178495", "0.84178495", "0.81079006", "0.81024987", "0.7863653", "0.7826981", "0.7781774", "0.7720299", "0.7501402", "0.7489745", "0.7379568", "0.7356525", "0.72275877", "0.7195928", "0.71437454", "0.71002096", "0.70476884", "0.69918925", "0.6990166", "0.6847...
0.93158627
0
Test case for basketballteams_id_get
def test_basketballteams_id_get(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_basketballteams_get(self):\n pass", "def test_workflows_id_team_get(self):\n pass", "def test_gridironfootballplayers_id_get(self):\n pass", "def test_data_source_soaps_id_team_get(self):\n pass", "def test_brains_id_get(self):\n pass", "def test_cyclingleagues...
[ "0.7799063", "0.7685305", "0.7607886", "0.74427456", "0.72060305", "0.6990276", "0.6840412", "0.68341905", "0.68341905", "0.6772549", "0.6731343", "0.67035055", "0.6620671", "0.66105074", "0.6533537", "0.6475667", "0.645218", "0.6435485", "0.63834304", "0.6345525", "0.630249"...
0.939899
0
Initialize the matplotlib figure.
def initialize_plot(self, ranges=None):
    """Initialize the backend figure; concrete plot classes must override.

    Raises:
        NotImplementedError: always, on this abstract base implementation.
    """
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setup_plot(x: float, y: float) -> plt.figure:\n LOG.debug(\"Initializing plot.\")\n plt.ion()\n fig = plt.figure(figsize=(x, y), num=\"GlacierFlowModel\")\n fig.patch.set_facecolor(\"black\")\n return fig", "def init_plot(self):\n self.dpi = 100\n self.fig = ...
[ "0.79452497", "0.79369026", "0.77807647", "0.77807647", "0.7650264", "0.75176746", "0.7469306", "0.7419887", "0.728096", "0.7241806", "0.722278", "0.7206917", "0.71901965", "0.7183423", "0.70679206", "0.704637", "0.7034995", "0.69461167", "0.68985367", "0.6886135", "0.6879203...
0.63938004
46
Update the internal state of the Plot to represent the given key tuple (where integers represent frames). Returns this state.
def update(self, key):
    """Update the plot for *key* and return the current plotting state.

    NOTE(review): this base implementation ignores *key* and simply
    returns ``self.state`` unchanged — confirm subclasses override it.
    """
    return self.state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_frame(self, key, ranges=None):", "def _get_frame(self, key):\n layout_frame = self.layout.clone(shared_data=False)\n keyisint = isinstance(key, int)\n if not isinstance(key, tuple): key = (key,)\n nthkey_fn = lambda x: zip(tuple(x.name for x in x.kdims),\n ...
[ "0.6366824", "0.57968193", "0.57502097", "0.5714252", "0.56339014", "0.5459689", "0.54484665", "0.54372066", "0.5365949", "0.5340403", "0.5326292", "0.5320078", "0.5305696", "0.52860016", "0.52631617", "0.52473605", "0.5218694", "0.52184427", "0.5185107", "0.51785284", "0.515...
0.64864296
0
The plotting state that gets updated via the update method and used by the renderer to generate output.
def state(self):
    """The backend plotting state used by the renderer; abstract here.

    Raises:
        NotImplementedError: always, on this abstract base implementation.
    """
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_plot():\n pass", "def store(self, state):\n if self.interactive:\n self._fig.clear()\n fig = self._fig\n else:\n fig = plt.figure()\n\n self._plot_function(fig, copy_state(state))\n\n fig.canvas.draw()\n if not self.interactive...
[ "0.78376055", "0.7461222", "0.7259287", "0.7013627", "0.69416213", "0.6845786", "0.68316853", "0.6825293", "0.6825293", "0.6825293", "0.6825293", "0.6825293", "0.67640865", "0.67583346", "0.6713076", "0.67130053", "0.666491", "0.66637933", "0.66528654", "0.66066337", "0.65670...
0.57563335
92
Returns the total number of available frames.
def __len__(self):
    """Return the total number of available frames; abstract here.

    Raises:
        NotImplementedError: always, on this abstract base implementation.
    """
    raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_frames(self) -> int:\n return self.num_frames", "def size(self):\n if self.frames is None:\n return 0\n return self.frames.size", "def frames(self):\n frame_count = 0\n if self.is_video() or self.is_audio():\n if self.__dict__['nb_frames']:...
[ "0.8600426", "0.77835464", "0.7621566", "0.75558245", "0.7531837", "0.7526261", "0.7391324", "0.7189588", "0.71559983", "0.70775414", "0.70692587", "0.7036695", "0.7004031", "0.69709444", "0.6951622", "0.688492", "0.68131065", "0.6812733", "0.6789151", "0.66947013", "0.664887...
0.0
-1