query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Set ruleset state sid
def set_state_sid_request(ruleset_name, sid):
    """Replace the state for session *sid* in the named ruleset.

    The JSON request body is decoded, tagged with the session id, and
    handed to the host's patch_state call; the host's result is returned
    as a JSON response.
    """
    raw_body = request.stream.read().decode('utf-8')
    state = json.loads(raw_body)
    state['sid'] = sid
    return jsonify(host.patch_state(ruleset_name, state))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sid(self, sid):\n self._sid = sid", "def set_state(self,s):\n self.state = s", "def set_state(self, state: int):", "def __setstate__(self, state):\n\n self.set(DER = state)", "def set_rule(self, rule):\n self.rule.load_state_dict(rule, strict=True)", "def _set_state(self, ...
[ "0.6317392", "0.6268615", "0.62445796", "0.60649145", "0.58590347", "0.5837428", "0.580806", "0.58021194", "0.57980675", "0.5752198", "0.5752198", "0.5744414", "0.57234263", "0.5718662", "0.5679742", "0.5645187", "0.5636659", "0.5628161", "0.5618529", "0.5560293", "0.5513871"...
0.74748975
0
Get ruleset state sid
def get_state_sid_request(ruleset_name, sid):
    """Fetch the state for session *sid* from the named ruleset as a JSON response."""
    state = host.get_state(ruleset_name, sid)
    return jsonify(state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_id(self):\n return self._state_id", "def get_rule_id(self):\n from .osid_errors import IllegalState\n # Someday I'll have a real implementation, but for now I just:\n raise IllegalState()", "def sid(self):\n return self._sid", "def sid(self):\n return self....
[ "0.6522904", "0.6476181", "0.6398606", "0.6354551", "0.60375524", "0.60295653", "0.60295653", "0.59568083", "0.5888084", "0.58808523", "0.58517295", "0.58414584", "0.58183634", "0.5815065", "0.57778585", "0.5670262", "0.5668822", "0.56557137", "0.56524223", "0.56524223", "0.5...
0.6936951
0
Post events to the ruleset
def post_events(ruleset_name):
    """Post a batch of events (the JSON request body) to the named ruleset."""
    body = request.stream.read().decode('utf-8')
    events = json.loads(body)
    outcome = host.post(ruleset_name, events)
    return jsonify(outcome)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, event):\n post_event(event, self.baseUrl, self.filterName)", "def _do_rule_processing(self, line, events):\n\n for rule in self.rules:\n match = rule.regexp.search(line)\n if match:\n events.append(Event(self, rule.handler, LogMatch(line, matc...
[ "0.63994485", "0.6042524", "0.6003626", "0.5981115", "0.5941807", "0.5918527", "0.5845204", "0.5819378", "0.58176184", "0.58072335", "0.57101154", "0.5693851", "0.5638689", "0.56246656", "0.55693597", "0.5526446", "0.55139947", "0.54291743", "0.54178923", "0.5412167", "0.5411...
0.6678471
0
Post sid events to the ruleset
def post_sid_events(ruleset_name, sid):
    """Post events for a specific session *sid* to the named ruleset.

    The session id is stamped onto the decoded JSON payload before it is
    forwarded to the host.
    """
    payload = json.loads(request.stream.read().decode('utf-8'))
    payload['sid'] = sid
    return jsonify(host.post(ruleset_name, payload))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_events(ruleset_name):\n message = json.loads(request.stream.read().decode('utf-8'))\n result = host.post(ruleset_name, message)\n return jsonify(result)", "def set_state_sid_request(ruleset_name, sid):\n message = json.loads(request.stream.read().decode('utf-8'))\n message['sid'] = sid\n ...
[ "0.560759", "0.5351545", "0.5286287", "0.5215918", "0.50854534", "0.50759035", "0.5052492", "0.5019985", "0.49917015", "0.4915208", "0.4852344", "0.48465505", "0.48308286", "0.47611645", "0.47459525", "0.47393727", "0.47084105", "0.46966222", "0.46946904", "0.46800652", "0.46...
0.7941506
0
Post facts to the ruleset
def default_facts_request(ruleset_name):
    """Assert the facts in the JSON request body against the named ruleset."""
    decoded = request.stream.read().decode('utf-8')
    facts = json.loads(decoded)
    outcome = host.assert_fact(ruleset_name, facts)
    return jsonify(outcome)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refactor_post(self,post_name):\n for name in list(self.rules):\n related_post = \"{}.post.{}\".format(name,post_name)\n if related_post in self.rules:\n parts = [self.MakeSymbolName(x) for x in [post_name, related_post]]\n self.rules[name] = self.MakeC...
[ "0.59369296", "0.5716443", "0.56727445", "0.56607735", "0.56607735", "0.5658255", "0.55677813", "0.5550459", "0.550259", "0.5496113", "0.5428811", "0.53964937", "0.53963", "0.53746647", "0.534213", "0.53217864", "0.53138274", "0.53119683", "0.5304553", "0.5288513", "0.5285464...
0.0
-1
Post sid facts to the ruleset
def facts_request(ruleset_name, sid):
    """Assert facts for a specific session *sid* against the named ruleset.

    The decoded JSON payload is tagged with the session id before the
    host assertion; the host's result is returned as JSON.
    """
    facts = json.loads(request.stream.read().decode('utf-8'))
    facts['sid'] = sid
    result = host.assert_fact(ruleset_name, facts)
    return jsonify(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post_sid_events(ruleset_name, sid):\n message = json.loads(request.stream.read().decode('utf-8'))\n message['sid'] = sid\n result = host.post(ruleset_name, message)\n return jsonify(result)", "def set_state_sid_request(ruleset_name, sid):\n message = json.loads(request.stream.read().decode('ut...
[ "0.681913", "0.601957", "0.6000734", "0.512934", "0.50864774", "0.50735605", "0.5069272", "0.49133524", "0.4802121", "0.47750175", "0.4747155", "0.47424155", "0.46992487", "0.46838996", "0.4672943", "0.4652938", "0.46246898", "0.46226344", "0.46224123", "0.46183434", "0.46069...
0.55748254
3
Convert network's sigmoid output into depth prediction The formula for this conversion is given in the 'additional considerations' section of the paper.
def disp_to_depth(disp, min_depth=0.1, max_depth=100):
    """Convert a network's sigmoid disparity output into a depth prediction.

    Implements the disparity-to-depth mapping from the 'additional
    considerations' section of the paper: the raw sigmoid output in [0, 1]
    is linearly rescaled to the inverse-depth range
    [1/max_depth, 1/min_depth] and then inverted to obtain depth.

    Args:
        disp: raw sigmoid output (scalar or array-like; numpy arrays work
            element-wise). Expected in [0, 1] — TODO confirm with caller.
        min_depth: smallest representable depth. Defaults to 0.1, the
            constant previously hard-coded in this function.
        max_depth: largest representable depth. Defaults to 100.

    Returns:
        (scaled_disp, depth): the rescaled disparity and its reciprocal.
    """
    min_disp = 1 / max_depth
    max_disp = 1 / min_depth
    scaled_disp = min_disp + (max_disp - min_disp) * disp
    depth = 1 / scaled_disp
    return scaled_disp, depth
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_depth(self, input_path, output_dir):\n try:\n result = real_predict_depth(input_path, output_dir)\n reset_default_graph()\n return result\n except Exception as e:\n return '!ERROR' + str(e)", "def sigmoid2predictions(output: torch.Tensor) -> t...
[ "0.6572657", "0.65554243", "0.59933084", "0.5872708", "0.57708555", "0.570335", "0.5695602", "0.56596303", "0.5657298", "0.5650019", "0.56448084", "0.56245506", "0.5608685", "0.559613", "0.5580954", "0.5546335", "0.55431604", "0.55408496", "0.5537363", "0.5534928", "0.5526947...
0.0
-1
Computation of error metrics between predicted and ground truth depths
def compute_errors(gt, pred):
    """Standard depth-estimation error metrics between ground truth and prediction.

    Both arguments are numpy arrays of (strictly positive — log is taken)
    depth values of the same shape.

    Returns:
        (abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3) where a1..a3 are the
        threshold accuracies at ratios 1.25, 1.25**2 and 1.25**3.
    """
    ratio = np.maximum(gt / pred, pred / gt)
    a1, a2, a3 = [(ratio < 1.25 ** k).mean() for k in (1, 2, 3)]

    diff = gt - pred
    rmse = np.sqrt(np.mean(diff ** 2))
    rmse_log = np.sqrt(np.mean((np.log(gt) - np.log(pred)) ** 2))

    abs_rel = np.mean(np.abs(diff) / gt)
    sq_rel = np.mean(diff ** 2 / gt)

    return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_error(self, params):\n return self.endog - self.predict(params)", "def error_in_assigned_energy(predictions, ground_truth):\n errors = {}\n both_sets_of_meters = iterate_through_submeters_of_two_metergroups(\n predictions, ground_truth)\n for pred_meter, ground_truth_meter in both_...
[ "0.67500764", "0.6546798", "0.65235454", "0.65183836", "0.6474383", "0.64611065", "0.6404243", "0.6374521", "0.6370064", "0.6316882", "0.6293244", "0.6250774", "0.62246156", "0.62059015", "0.62025636", "0.617991", "0.61761516", "0.6161752", "0.61567384", "0.614145", "0.613276...
0.6518734
3
Evaluates a pretrained model using a specified test set
# NOTE(review): this block was flattened by a dataset/export step — the code
# tokens below are preserved byte-for-byte (only comments were added or
# translated to English). Because the original indentation is lost in this
# flattened form, do not re-run the block as-is.
#
# evaluate(params, dataloader): evaluates a monodepth checkpoint (TF1
# graph/session API) against KITTI-style ground-truth depths:
#   * rebuilds a MonodepthDataloader + MonodepthModel on a single GPU,
#   * restores the hard-coded checkpoint_path and runs the model over every
#     test sample, converting disparities via disp_to_depth,
#   * loads gt_depths.npz, applies an Eigen-style crop plus a
#     [MIN_DEPTH, MAX_DEPTH] validity mask,
#   * median-scales predictions (mono) or applies STEREO_SCALE_FACTOR (stereo),
#   * clamps depths to [MIN_DEPTH, MAX_DEPTH] and prints the mean
#     compute_errors metrics.
# NOTE(review): './log_diretory/...' is a (presumably intentional) directory
# name in a runtime string — left untouched.
def evaluate(params,dataloader): MIN_DEPTH = 1e-3 MAX_DEPTH = 80 num_gpus = 1 pred_depth_scale_factor = 1 checkpoint_path = './log_diretory/mono_depth2-102000/model-97060'#'./log_diretory/kitti_resnet_MS2_nbn_1epoch_pose_fix/model-189107' gt_path = './utils/gt/eigen_zhou' eval_stereo = False with tf.Graph().as_default(), tf.device('/cpu:0'): dataloader = MonodepthDataloader(dataloader.data_path, dataloader.filenames_file, params, dataloader.dataset, dataloader.mode) reference = dataloader.reference_image_batch param = dataloader.param_path_batch # split for each gpu reference_splits = tf.split(reference, num_gpus,0) param_splits = tf.split(param,num_gpus,0) reuse_variables = None with tf.variable_scope(tf.get_variable_scope()): for i in range(num_gpus): with tf.device('/gpu:%d' % i): with tf.name_scope('%d' % i) as scope: print(i) model = MonodepthModel(params, dataloader.mode, reference_splits[i],None,None,None,param_splits[i], #param_path=param_path_splits[i], reuse_variables=reuse_variables, model_index=i) config = tf.ConfigProto(allow_soft_placement=True) # allow_soft_placement falls back to an available device when the requested one is missing
sess = tf.Session(config=config) # Saver train_saver = tf.train.Saver() # Init sess.run(tf.global_variables_initializer()) sess.run(tf.local_variables_initializer()) coordinator = tf.train.Coordinator() ## coordinator manages the queue-runner threads threads = tf.train.start_queue_runners(sess=sess, coord=coordinator) # Restore print("Restore") if checkpoint_path != '': print('----------------------------------------------') print(checkpoint_path) print('\n') print(checkpoint_path.split(".")[0]) print('----------------------------------------------') train_saver.restore(sess, checkpoint_path) print("Restore OK") with tf.variable_scope(tf.get_variable_scope()): for i in range(num_gpus): with tf.device('/gpu:%d' % i): with tf.name_scope('%d' % i) as scope: bn_updates_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope) num_test_samples = count_text_lines(dataloader.filenames_file) pred_disps = [] print('Start') for step in range(num_test_samples): pred_disp = sess.run(model.disp_reference_est[0]) pred_disp = pred_disp.squeeze() pred_disp,_ = disp_to_depth(pred_disp) # print(pred_disp.shape) # plt.imshow(pred_disp) # plt.show() pred_disp = np.expand_dims(pred_disp,0) pred_disps.append(pred_disp) pred_disps = np.concatenate(pred_disps) print(pred_disps.shape) gt_path = gt_path+ '/gt_depths.npz' gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1')["data"] print(gt_depths[0].shape) print("-> Evaluating") disable_median_scaling=False if eval_stereo: print(" Stereo evaluation - " "disabling median scaling, scaling by {}".format(STEREO_SCALE_FACTOR)) disable_median_scaling = True pred_depth_scale_factor = STEREO_SCALE_FACTOR else: print(" Mono evaluation - using median scaling") errors = [] ratios = [] for i in range(pred_disps.shape[0]): gt_depth = gt_depths[i] gt_height, gt_width = gt_depth.shape[:2] pred_disp = pred_disps[i] pred_disp = cv2.resize(pred_disp, (gt_width, gt_height)) pred_depth = 1 / pred_disp print(pred_depth[0,0]) mask = np.logical_and(gt_depth > 
MIN_DEPTH, gt_depth < MAX_DEPTH) crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height, 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32) crop_mask = np.zeros(mask.shape) crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1 mask = np.logical_and(mask, crop_mask) print(mask) #if i ==pred_disps.shape[0]-3: # plt.imshow(pred_depth / 100) # pred_depth[mask]/100) # plt.show() # plt.imshow(np.where(mask,pred_depth,np.zeros_like(pred_depth))/100)#pred_depth[mask]/100) # plt.show() # plt.imshow(np.where(mask,gt_depth,np.zeros_like(gt_depth))/100) # plt.show() print("pred_depth[mask]", pred_depth[mask]) print("gt_depth[mask]", gt_depth[mask]) pred_depth = pred_depth[mask] gt_depth = gt_depth[mask] pred_depth *= pred_depth_scale_factor if not disable_median_scaling: print('?') ratio = np.median(gt_depth) / np.median(pred_depth) ratios.append(ratio) pred_depth *= ratio pred_depth[pred_depth < MIN_DEPTH] = MIN_DEPTH pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH print("pred_depth={}".format(pred_depth)) print("pred_depth < MIN_DEPTH",pred_depth < MIN_DEPTH) print(" pred_depth[pred_depth < MIN_DEPTH] ", pred_depth[pred_depth < MIN_DEPTH] ) print("pred_depth > MAX_DEPTH",pred_depth > MAX_DEPTH) print("pred_depth[pred_depth > MAX_DEPTH]",pred_depth[pred_depth > MAX_DEPTH]) print("pred_depth_shape={}".format(pred_depth.shape)) print("gt_depth_shape={}".format(gt_depth.shape)) errors.append(compute_errors(gt_depth, pred_depth)) if not disable_median_scaling: ratios = np.array(ratios) med = np.median(ratios) print(" Scaling ratios | med: {:0.3f} | std: {:0.3f}".format(med, np.std(ratios / med))) mean_errors = np.array(errors).mean(0) print("\n " + ("{:>8} | " * 7).format("abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3")) print(("&{: 8.3f} " * 7).format(*mean_errors.tolist()) + "\\\\") print("\n-> Done!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_model(model, testset):\n\n # Sort data by top level label to ease inspection\n testset = testset.sort_using_layer(-1, reverse=True)\n\n # Feed the samples to the model to obtain each layers' activations\n v = testset.get_layer(0)\n hs = model.transform(v)[1:]\n\n # Read model weights...
[ "0.74473107", "0.741568", "0.72438216", "0.71786034", "0.7145973", "0.69723153", "0.68649215", "0.683188", "0.6808273", "0.67838573", "0.67193323", "0.6700262", "0.6699694", "0.6683093", "0.6668421", "0.6642181", "0.66375136", "0.6633465", "0.6606388", "0.6606388", "0.6606388...
0.0
-1
This function populates an instance of DeadlineTab with the UI controls that make up the submission dialog. This tab is instantiated by Katana every time the user selects "Tabs > Thinkbox > Submit to Deadline" from the menu bar in Katana. Essentially, this function serves as a deferred __init__ implementation for the tab class that can be easily updated via the Deadline repository.
# NOTE(review): this block was flattened by a dataset/export step — the code
# below (including line breaks that fall inside string literals) is preserved
# byte-for-byte; only this comment header was added.
#
# PopulateSubmitter(gui): deferred __init__ for the Katana DeadlineTab
# ("Tabs > Thinkbox > Submit to Deadline"). It:
#   * fetches submitter info (Pools, Groups, MaxPriority, repo dirs) from
#     Deadline via CallDeadlineCommand and caches it in the module-global
#     `submissionInfo`, raising if the command reports a bad result;
#   * builds the submission UI inside a QGridLayout/QScrollArea: job
#     description fields, job options (pool, group, priority, timeouts,
#     machine list, limits, dependencies, on-complete action), Katana
#     options (frame range, frames per task, render-node selection), and
#     the pipeline-tools / submit button row;
#   * loads sticky settings, retrieves the pipeline tool status (handling
#     subprocess.CalledProcessError), populates the render-node drop-down,
#     and only then wires the includeImageWrite.stateChanged handler so the
#     drop-down is not rebuilt repeatedly during sticky-settings load;
#   * finally resizes the enclosing window when the tab lives in a floating
#     pane rather than the main window.
def PopulateSubmitter( gui ): global submissionInfo print( "Grabbing submitter info..." ) try: stringSubInfo = CallDeadlineCommand( [ "-prettyJSON", "-GetSubmissionInfo", "Pools", "Groups", "MaxPriority", "UserHomeDir", "RepoDir:submission/Katana/Main", "RepoDir:submission/Integration/Main", ], useDeadlineBg=True ) output = json.loads( stringSubInfo, encoding="utf-8" ) except: print( "Unable to get submitter info from Deadline:\n\n" + traceback.format_exc() ) raise if output[ "ok" ]: submissionInfo = output[ "result" ] else: print( "DeadlineCommand returned a bad result and was unable to grab the submitter info.\n\n" + output[ "result" ] ) raise ValueError( output[ "result" ] ) # Create a widget with a vertical box layout as a container for widgets to include in the tab scrollWidget = QWidget() scrollLayout = QGridLayout(scrollWidget) scrollLayout.setSpacing(4) scrollLayout.setContentsMargins(4, 4, 4, 4) buttonLayout = QHBoxLayout() # First layout: General options scrollLayout.addWidget(CreateSeparator( "Job Description" ),0,0,1,3) jobNameLabel = QLabel( "Job Name" ) jobNameLabel.setToolTip("The name of your job. This is optional, and if left blank, it will default to 'Untitled'.") scrollLayout.addWidget(jobNameLabel,1,0) gui.jobNameWidget = QLineEdit( os.path.basename(FarmAPI.GetKatanaFileName()).split('.')[0] ) scrollLayout.addWidget(gui.jobNameWidget, 1, 1, 1, 1 ) commentLabel = QLabel( "Comment" ) commentLabel.setToolTip("A simple description of your job. This is optional and can be left blank.") scrollLayout.addWidget(commentLabel,2,0) gui.commentWidget = QLineEdit( "" ) scrollLayout.addWidget(gui.commentWidget, 2, 1, 1, 1 ) departmentLabel = QLabel( "Department" ) departmentLabel.setToolTip( "The department you belong to. This is optional and can be left blank." 
) scrollLayout.addWidget(departmentLabel, 3, 0) gui.departmentWidget = QLineEdit( "" ) scrollLayout.addWidget(gui.departmentWidget, 3, 1, 1, 1 ) # Second layout: Job options scrollLayout.addWidget(CreateSeparator( "Job Options" ),4,0,1,3) pools = submissionInfo["Pools"] poolLabel = QLabel( "Pool" ) poolLabel.setToolTip( "The pool that your job will be submitted to." ) scrollLayout.addWidget(poolLabel, 5, 0) gui.poolsWidget = QComboBox() gui.poolsWidget.addItems(pools) scrollLayout.addWidget(gui.poolsWidget, 5, 1 ) secondPoolLabel = QLabel( "Secondary Pool" ) secondPoolLabel.setToolTip( "The secondary pool lets you specify a pool to use if the primary pool does not have any available Slaves." ) scrollLayout.addWidget(secondPoolLabel, 6, 0 ) gui.secondPoolsWidget = QComboBox() gui.secondPoolsWidget.addItems(pools) scrollLayout.addWidget(gui.secondPoolsWidget, 6, 1 ) groups = submissionInfo[ "Groups" ] groupLabel = QLabel( "Group" ) groupLabel.setToolTip( "The group that your job will be submitted to." ) scrollLayout.addWidget(groupLabel, 7, 0) gui.groupWidget = QComboBox() gui.groupWidget.addItems(groups) scrollLayout.addWidget(gui.groupWidget, 7, 1) priorityLabel = QLabel( "Priority" ) priorityLabel.setToolTip( "A job can have a numeric priority from 0 to 100, where 0 is the lowest priority and 100 is the highest." ) scrollLayout.addWidget(priorityLabel, 8, 0) maxPriority = submissionInfo["MaxPriority"] gui.priorityBox = QSpinBox() gui.priorityBox.setMinimum(0) gui.priorityBox.setMaximum( maxPriority ) scrollLayout.addWidget(gui.priorityBox, 8, 1) taskTimeoutLabel = QLabel( "Task Timeout" ) taskTimeoutLabel.setToolTip( "The number of minutes a Slave has to render a task for this job before it requeues it. Specify 0 for no limit." 
) scrollLayout.addWidget(taskTimeoutLabel, 9, 0) gui.taskTimeoutBox = QSpinBox() gui.taskTimeoutBox.setMinimum(0) gui.taskTimeoutBox.setMaximum(10000) scrollLayout.addWidget(gui.taskTimeoutBox, 9, 1) concurrentTasksLabel = QLabel( "Concurrent Tasks" ) concurrentTasksLabel.setToolTip("The number of tasks that can render concurrently on a single Slave. This is useful if the rendering application only uses one thread to render and your Slaves have multiple CPUs.") scrollLayout.addWidget(concurrentTasksLabel, 10, 0 ) gui.concurrentTasksWidget = QSpinBox( ) scrollLayout.addWidget(gui.concurrentTasksWidget, 10, 1) gui.concurrentTasksWidget.setMinimum(1) gui.concurrentTasksWidget.setMaximum(16) gui.limitTasksSlaveLimit = QCheckBox( "Limit Tasks To Slave's Task Limit" ) gui.limitTasksSlaveLimit.setToolTip( "If you limit the tasks to a Slave's task limit, then by default, the Slave won't dequeue more tasks then it has CPUs. This task limit can be overridden for individual Slaves by an administrator." ) scrollLayout.addWidget(gui.limitTasksSlaveLimit, 10, 2) machineLimitLabel = QLabel( "Machine Limit" ) machineLimitLabel.setToolTip("Use the Machine Limit to specify the maximum number of machines that can render your job at one time. 
Specify 0 for no limit.") scrollLayout.addWidget( machineLimitLabel, 11, 0 ) gui.machineLimitWidget = QSpinBox() scrollLayout.addWidget(gui.machineLimitWidget, 11, 1) gui.isBlackListWidget = QCheckBox( "Machine List Is Blacklist" ) gui.isBlackListWidget.setToolTip("You can force the job to render on specific machines by using a whitelist, or you can avoid specific machines by using a blacklist.") scrollLayout.addWidget(gui.isBlackListWidget, 11, 2) machineListLabel = QLabel( "Machine List" ) machineListLabel.setToolTip("The whitelisted or blacklisted list of machines.") scrollLayout.addWidget( machineListLabel, 12, 0 ) machineListLayout = QHBoxLayout() gui.machineListWidget = QLineEdit( "" ) machineListLayout.addWidget(gui.machineListWidget) getMachineListWidget = QPushButton( "..." ) getMachineListWidget.pressed.connect( lambda: BrowseMachineList(gui.machineListWidget) ) machineListLayout.addWidget(getMachineListWidget) scrollLayout.addLayout( machineListLayout, 12, 1, 1, 2 ) limitsLabel = QLabel( "Limits" ) limitsLabel.setToolTip("The Limits that your job requires.") scrollLayout.addWidget( limitsLabel, 13, 0 ) limitsLayout = QHBoxLayout() gui.limitsWidget = QLineEdit( "" ) limitsLayout.addWidget(gui.limitsWidget) getLimitsWidget = QPushButton( "..." ) getLimitsWidget.pressed.connect( lambda: BrowseLimitList(gui.limitsWidget) ) limitsLayout.addWidget(getLimitsWidget) scrollLayout.addLayout( limitsLayout, 13, 1, 1, 2 ) dependenciesLabel = QLabel( "Dependencies" ) dependenciesLabel.setToolTip("Specify existing jobs that this job will be dependent on. This job will not start until the specified dependencies finish rendering.") scrollLayout.addWidget( dependenciesLabel, 14, 0 ) dependenciesLayout = QHBoxLayout() gui.dependenciesWidget = QLineEdit( "" ) dependenciesLayout.addWidget(gui.dependenciesWidget) getDependenciesWidget = QPushButton( "..." 
) getDependenciesWidget.pressed.connect( lambda: BrowseDependencyList(gui.dependenciesWidget) ) dependenciesLayout.addWidget(getDependenciesWidget) scrollLayout.addLayout( dependenciesLayout, 14, 1, 1, 2 ) onJobCompleteLabel = QLabel( "On Job Complete" ) onJobCompleteLabel.setToolTip("If desired, you can automatically archive or delete the job when it completes.") scrollLayout.addWidget( onJobCompleteLabel, 15, 0 ) gui.onJobCompleteWidget = QComboBox( ) gui.onJobCompleteWidget.addItems(["Nothing", "Archive", "Delete"]) scrollLayout.addWidget(gui.onJobCompleteWidget, 15, 1) gui.submitSuspendedWidget = QCheckBox( "Submit Job as Suspended" ) gui.submitSuspendedWidget.setToolTip( "If enabled, the job will submit in the suspended state. This is useful if you don't want the job to start rendering right away. Just resume it from the Monitor when you want it to render.") scrollLayout.addWidget(gui.submitSuspendedWidget, 15, 2) # Third layout: Katana options scrollLayout.addWidget(CreateSeparator( "Katana Options" ),16,0,1,3) frameRangeLabel = QLabel( "Frame Range" ) frameRangeLabel.setToolTip("The list of frames to render.") scrollLayout.addWidget( frameRangeLabel, 17, 0 ) gui.frameRangeWidget = QLineEdit( "" ) # Populate based on frame range scrollLayout.addWidget( gui.frameRangeWidget, 17, 1, 1, 1 ) frameRange = FarmAPI.GetSceneFrameRange() gui.frameRangeWidget.setText( str(frameRange['start']) + "-" + str(frameRange['end']) ) gui.submitSceneBox = QCheckBox( "Submit Katana Scene File" ) gui.submitSceneBox.setToolTip( "If this option is enabled, the scene file will be submitted with the job, and then copied locally to the Slave machine during rendering." ) scrollLayout.addWidget(gui.submitSceneBox, 17, 2 ) framesPerTaskLabel = QLabel( "Frames Per Task" ) framesPerTaskLabel.setToolTip( "This is the number of frames that will be rendered at a time for each job task." 
) scrollLayout.addWidget( framesPerTaskLabel, 18, 0 ) gui.framesPerTaskWidget = QSpinBox( ) gui.framesPerTaskWidget.setMinimum(1) scrollLayout.addWidget( gui.framesPerTaskWidget, 18, 1, 1, 1 ) gui.useWorkingDirectory = QCheckBox( "Use Working Directory" ) gui.useWorkingDirectory.setToolTip( "If enabled, the current working directory will be used during rendering. This is required if your Katana project file contains relative paths." ) gui.useWorkingDirectory.setChecked(True) scrollLayout.addWidget( gui.useWorkingDirectory, 18, 2 ) renderNodeSelectLabel = QLabel( "Render Node Submission" ) renderNodeSelectLabel.setToolTip( "Choose to render the whole scene, render all nodes as separate jobs, or render separate nodes" ) scrollLayout.addWidget( renderNodeSelectLabel, 19, 0 ) gui.renderSelectBox = QComboBox() gui.renderSelectBox.addItems( ["Submit All Render Nodes As Separate Jobs", "Select Render Node"] ) scrollLayout.addWidget( gui.renderSelectBox, 19, 1 ) gui.includeImageWrite = QCheckBox( "Include ImageWrite Nodes" ) gui.includeImageWrite.setToolTip( "If enabled, ImageWrite nodes will be included for submission." ) scrollLayout.addWidget( gui.includeImageWrite, 19, 2 ) renderNodeLabel = QLabel( "Render Node" ) renderNodeLabel.setToolTip( "Set the render node to render with, or leave blank to use the node already set." ) scrollLayout.addWidget( renderNodeLabel, 20, 0 ) gui.frameDependent = QCheckBox( "Submit Jobs As Frame Dependent" ) gui.frameDependent.setToolTip( "If enabled, the Katana Job(s) will have Frame Dependencies. If your scene contains static content, do not use!" 
) scrollLayout.addWidget( gui.frameDependent, 20, 2 ) gui.renderNodeBox = QComboBox() gui.renderSelectBox.currentIndexChanged.connect( lambda: RenderSelectionChanged( gui.renderSelectBox, gui.renderNodeBox ) ) scrollLayout.addWidget( gui.renderNodeBox, 20, 1) gui.renderNodeBox.setDisabled(True) # Submit button buttonLayoutSpacer = QSpacerItem( 0, 0, QSizePolicy.MinimumExpanding, QSizePolicy.Minimum ) buttonLayout.addItem( buttonLayoutSpacer ) gui.pipelineToolStatusLabel = QLabel( "No Pipeline Tools Set" ) gui.pipelineToolStatusLabel.setAlignment( QtCore.Qt.AlignCenter ) buttonLayout.addWidget( gui.pipelineToolStatusLabel ) pipelineToolsButton = QPushButton( "Pipeline Tools" ) pipelineToolsButton.pressed.connect( lambda: PipelineToolsClicked( gui ) ) buttonLayout.addWidget( pipelineToolsButton ) submitButton = QPushButton( "Submit" ) submitButton.pressed.connect( lambda: SubmitPressed(gui) ) buttonLayout.addWidget( submitButton ) scrollLayout.addLayout( buttonLayout,21,0,1,3 ) verticalStretchLayout = QVBoxLayout() verticalStretchLayout.addStretch() scrollLayout.addLayout( verticalStretchLayout, 22, 0 ) scrollArea = QScrollArea() scrollArea.setWidget(scrollWidget) scrollArea.setWidgetResizable(True) scrollArea.setFrameStyle(QFrame.NoFrame + QFrame.Plain) vLayout = QVBoxLayout() vLayout.setObjectName('vLayout') vLayout.addWidget(scrollArea) gui.setLayout(vLayout) LoadStickySettings( gui ) try: pipelineToolStatusMessage = RetrievePipelineToolStatus( raiseOnExitCode=True ) except subprocess.CalledProcessError as e: pipelineToolStatusMessage = HandlePipelineToolsCalledProcessError( e ) UpdatePipelineToolStatusLabel( gui, pipelineToolStatusMessage ) # Populate the render node drop down based on the effective check state # of the "Include ImageWrite Nodes" checkbox after sticky settings are applied PopulateRenderNodeDropDown(gui.includeImageWrite.isChecked(), gui.renderNodeBox) # We delay wiring up this signal handler until after the sticky settings are applied to avoid # 
rebuilding the drop-down list multiple times unnecessarily gui.includeImageWrite.stateChanged.connect(lambda checked: PopulateRenderNodeDropDown(checked, gui.renderNodeBox)) # Check if this tab is part of a pane in the main window, or if it is contained in a floating pane if gui.window() != UI4.App.MainWindow.CurrentMainWindow(): # Resize the floating pane's window to accommodate the tab's widgets requiredSize = scrollWidget.sizeHint() gui.window().resize(max(requiredSize.width() + 20, 200), min(requiredSize.height() + 40, 1000))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populateUI():\n \n # Main form layout\n form = cmds.formLayout()\n\n # Tab Layout\n tabs = cmds.tabLayout(innerMarginWidth=5, innerMarginHeight=5)\n # Form attachment config\n cmds.formLayout( form, edit=True, attachForm=((tabs, 'top', 0), (tabs, 'left', 0), (tabs, 'bottom', 0), (tabs, 'ri...
[ "0.6382394", "0.5868262", "0.5825627", "0.5785208", "0.5766149", "0.57174546", "0.568647", "0.56794524", "0.5625228", "0.5619884", "0.55831283", "0.5565874", "0.5505238", "0.5494764", "0.54784214", "0.5467494", "0.54137725", "0.5401807", "0.5315046", "0.5295689", "0.52889675"...
0.65244144
0
Augments a staged job info submission file with the appropriate properties for the Pipeline Tool settings.
def ConcatenatePipelineSettingsToJob( jobInfoPath, batchName ):
    """Augment a staged job info submission file with the Pipeline Tool settings.

    Invokes the repository JobWriter script through deadline command with
    --write so that the pipeline tool properties for the current Katana
    scene are appended to the job info file at *jobInfoPath*.
    """
    global submissionInfo

    writerScript = os.path.join( submissionInfo["RepoDirs"]["submission/Integration/Main"], "JobWriter.py" )
    currentScene = NodegraphAPI.GetSourceFile()

    CallDeadlineCommand(
        [ "-ExecuteScript", writerScript, "Katana",
          "--write", "--scene-path", currentScene,
          "--job-path", jobInfoPath, "--batch-name", batchName ],
        False,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_job_info(run, seqno, slices):\n inset = {\"job_info\": [\"workscript.stdout\", \"workscript.stderr\"],\n }\n outset = {\"job_info\": [\"std_{0:06d}_{1:03d}.out\", \"std_{0:06d}_{1:03d}.err\"],\n }\n tarset = {\"job_info\": \"job_info_{0:06d}_{1:03d}.tgz\",\n }\n ba...
[ "0.54600435", "0.5365229", "0.49939448", "0.4906341", "0.48872775", "0.48601264", "0.48544395", "0.48313162", "0.48072532", "0.47882256", "0.4777496", "0.47465393", "0.47256604", "0.46529025", "0.4646404", "0.46430737", "0.46245492", "0.4616829", "0.45886663", "0.4572362", "0...
0.61063117
0
Grabs a status message from the JobWriter that indicates which pipeline tools have settings enabled for the current scene.
def RetrievePipelineToolStatus( raiseOnExitCode=False ):
    """Return the JobWriter status message describing which pipeline tools
    have settings enabled for the current scene.

    Args:
        raiseOnExitCode: forwarded to CallDeadlineCommand; when True a
            non-zero deadline command exit code raises instead of being
            swallowed.
    """
    global submissionInfo

    currentScene = NodegraphAPI.GetSourceFile()
    writerScript = os.path.join(submissionInfo["RepoDirs"]["submission/Integration/Main"], "JobWriter.py")
    statusArgs = ["-ExecuteScript", writerScript, "Katana", "--status", "--scene-path", currentScene]
    return CallDeadlineCommand(statusArgs, hideWindow=False, raiseOnExitCode=raiseOnExitCode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tools_state(self):\n\t\treturn Job(SDK.PrlVm_GetToolsState(self.handle)[0])", "def status(self):\n return STATUSES.get(self._mower_status, {}).get('message', self._mower_status)", "def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return se...
[ "0.5826462", "0.5679577", "0.56522906", "0.5546779", "0.55216604", "0.5513174", "0.54827136", "0.5467698", "0.5463971", "0.5463971", "0.5463971", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", "0.5425974", ...
0.7234952
0
Modifies the Pipeline Tool status label UI element with the supplied message
def UpdatePipelineToolStatusLabel( gui, statusMessage ):
    """Show *statusMessage* in the submitter's pipeline tool status label."""
    statusLabel = gui.pipelineToolStatusLabel
    statusLabel.setText( statusMessage )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_status(self, msg):\n self.status_lbl.config(text=msg)", "def status_display(self, message, level=0, field=0):\n #print(message)\n self.statusbar_txt.set(message)", "def updateStatus(self, message):\r\n self.statusBar().showMessage(message, 5000)\r\n if self.kinfile...
[ "0.79712987", "0.7420736", "0.7228758", "0.7161831", "0.70710754", "0.69951653", "0.6985524", "0.6982464", "0.6789776", "0.6676902", "0.6615886", "0.6576708", "0.6549624", "0.653623", "0.6525615", "0.648638", "0.6450446", "0.64447117", "0.6439653", "0.6434152", "0.6397675", ...
0.8840854
0
Generic error handling for when a pipeline tools script run via deadline command returns a non-zero exit code. Generates a technical error message for a given subprocess.CalledProcessError instance and displays it in the Katana console. Similarly, a human-readable error message is presented to the user in a modal dialog. The technical error message contains the full command-line arguments, exit code, and standard output from the called process. Returns a user-friendly error message that can be presented to the user in the pipeline tools status label
def HandlePipelineToolsCalledProcessError( exc ): errorMsg = StringIO() errorMsg.write( "Pipeline Tools encountered an error - the command:" ) errorMsg.write( os.linesep * 2 ) errorMsg.write( exc.cmd ) errorMsg.write( os.linesep * 2 ) errorMsg.write( "return a non-zero (%d) exit code" % exc.returncode ) if exc.output: errorMsg.write( " and the following output:" ) errorMsg.write( os.linesep * 2 ) errorMsg.write( exc.output ) errorMsg = errorMsg.getvalue() # On Windows, print statements output to the console window that is created minimized when Katana launches print( errorMsg ) # Display a human-readable generic error message ShowModalDialog( "Pipeline Tools Error", "Pipeline Tools encountered an error. Check the Katana console for more detailed information." ) return "Pipeline Tools Error"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_build_error(error):\n sys.stderr.write('Error running command `%s`. Returned %s.\\n' % (\n ' '.join(error.argv), str(error.error_code)))", "def print_unable_to_run(exc: \"CalledProcessError\"):\n _print(str(exc), level=MessageLevel.QUIET)", "def error(text, exitcode=1):\n\n # If we g...
[ "0.5881938", "0.5847716", "0.5779613", "0.5747992", "0.5723428", "0.5697356", "0.56824833", "0.5620372", "0.55943125", "0.5581835", "0.5550394", "0.55393744", "0.5523604", "0.55169374", "0.551156", "0.54878414", "0.54639775", "0.54484504", "0.54443103", "0.5437845", "0.542948...
0.75752896
0
Opens the a dialog for viewing and modifying the job's pipeline tool settings. The dialog is launched in a deadline command subprocess. All settings are maintained by the JobWriter using a combination of the application name and the scene path.
def OpenIntegrationWindow( raiseOnExitCode=False ): global submissionInfo integrationPath = os.path.join( submissionInfo["RepoDirs"]["submission/Integration/Main"], "IntegrationUIStandAlone.py" ) scenePath = NodegraphAPI.GetSourceFile() if not scenePath: raise SceneNotSavedError() argArray = ["-ExecuteScript", integrationPath, "-v", "2", "-d", "Katana", "Draft", "Shotgun", "FTrack", "--path", scenePath] try: pipelineToolStatus = CallDeadlineCommand(argArray, hideWindow=False, raiseOnExitCode=True) except subprocess.CalledProcessError as e: pipelineToolStatus = HandlePipelineToolsCalledProcessError( e ) return pipelineToolStatus
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shotWinUI(*args):\n### ---------- should check for current project\n if cmds.window(\"shotWin\", exists = True):\n cmds.deleteUI(\"shotWin\")\n\n widgets[\"win\"] = cmds.window(\"shotWin\", t= \"Charlex Shot Manager\", w=1000, h=560, s=False)\n widgets[\"mainCLO\"] = cmds.columnLayout(w=1000, h...
[ "0.5803222", "0.5764439", "0.5632621", "0.5579987", "0.55609196", "0.5531322", "0.5498198", "0.5456681", "0.5440068", "0.54032004", "0.54000485", "0.5395688", "0.5395272", "0.53947246", "0.5367619", "0.5364008", "0.53474754", "0.5346228", "0.53405243", "0.5282741", "0.5273741...
0.6004767
0
Returns the path to DeadlineCommand.
def GetDeadlineCommand( useDeadlineBg=False ): deadlineBin = "" try: deadlineBin = os.environ[ 'DEADLINE_PATH' ] except KeyError: # if the error is a key error it means that DEADLINE_PATH is not set. however Deadline command may be in the PATH or on OSX it could be in the file /Users/Shared/Thinkbox/DEADLINE_PATH pass # On OSX, we look for the DEADLINE_PATH file if the environment variable does not exist. if deadlineBin == "" and os.path.exists( "/Users/Shared/Thinkbox/DEADLINE_PATH" ): with io.open( "/Users/Shared/Thinkbox/DEADLINE_PATH", encoding="utf-8" ) as f: deadlineBin = f.read().strip() exeName = "deadlinecommand" if useDeadlineBg: exeName += "bg" deadlineCommand = os.path.join( deadlineBin, exeName ) return deadlineCommand
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_deadline_command_path():\n\n deadline_bin = os.environ.get('DEADLINE_PATH', '')\n\n # On Linux, the Deadline Client installer creates a system-wide script to set the DEADLINE_PATH environment\n # variable. Cloud-init does not load system environment variables. Cherry-pick the\n ...
[ "0.753901", "0.6118858", "0.6027574", "0.58908194", "0.5830067", "0.5762068", "0.570046", "0.5663638", "0.56532186", "0.56523234", "0.5645256", "0.5634848", "0.56305516", "0.5628725", "0.5616503", "0.5605415", "0.5578746", "0.55568534", "0.55538136", "0.5526211", "0.5518902",...
0.73081684
1
Creates a utf8 encoded file with each argument in arguments on a separate line.
def CreateArgFile( arguments, tmpDir ): tmpFile = os.path.join( tmpDir, "args.txt" ) with io.open( tmpFile, 'w', encoding="utf-8-sig" ) as fileHandle: fileHandle.write( "\n".join( arguments ) ) return tmpFile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_i18n_data_file(cls, filename, encoding):\n cls.cluster.fs.setuser(cls.cluster.superuser)\n f = cls.cluster.fs.open(filename, \"w\")\n for x in range(256):\n f.write(\"%d\\t%s\\n\" % (x, chr(x).encode(encoding)))\n f.close()", "def output_file(data, filename):\n with open(filename + ...
[ "0.6052145", "0.57538974", "0.567268", "0.55836433", "0.55042565", "0.5475151", "0.54015994", "0.5380762", "0.5356525", "0.5350646", "0.5287505", "0.5250849", "0.52459705", "0.5193831", "0.51840913", "0.51814663", "0.5180244", "0.5170334", "0.51647687", "0.5129749", "0.506740...
0.7158849
0
Run DeadlineCommand with the specified arguments returning the standard out
def CallDeadlineCommand(arguments, hideWindow=True, useArgFile=False, useDeadlineBg=False, raiseOnExitCode=False): deadlineCommand = GetDeadlineCommand( useDeadlineBg ) tmpdir = None if useArgFile or useDeadlineBg: tmpdir = tempfile.mkdtemp() if useDeadlineBg: arguments = [ "-outputfiles", os.path.join( tmpdir, "dlout.txt" ), os.path.join( tmpdir, "dlexit.txt" ) ] + arguments startupinfo = None creationflags = 0 if os.name == 'nt': if hideWindow: # Python 2.6 has subprocess.STARTF_USESHOWWINDOW, and Python 2.7 has subprocess._subprocess.STARTF_USESHOWWINDOW, so check for both. if hasattr( subprocess, '_subprocess' ) and hasattr( subprocess._subprocess, 'STARTF_USESHOWWINDOW' ): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW elif hasattr( subprocess, 'STARTF_USESHOWWINDOW' ): startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW else: # still show top-level windows, but don't show a console window CREATE_NO_WINDOW = 0x08000000 # MSDN process creation flag creationflags = CREATE_NO_WINDOW if useArgFile: arguments = [ CreateArgFile( arguments, tmpdir ) ] arguments.insert( 0, deadlineCommand ) # Specifying PIPE for all handles to workaround a Python bug on Windows. The unused handles are then closed immediatley afterwards. 
proc = subprocess.Popen( arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, creationflags=creationflags ) output, errors = proc.communicate() if raiseOnExitCode and proc.returncode != 0: try: # The quote function was moved to shutil in python 3 from shutil import quote as shell_quote except ImportError: # In python 2, quote lived in the pipes module from pipes import quote as shell_quote cmd = ' '.join([shell_quote(arg) for arg in arguments]) raise subprocess.CalledProcessError(proc.returncode, cmd, output) if useDeadlineBg: with io.open( os.path.join( tmpdir, "dlout.txt" ), 'r', encoding='utf-8' ) as fileHandle: output = fileHandle.read() if tmpdir: try: shutil.rmtree( tmpdir ) except: print( 'Failed to remove temp directory: "%s"' % tmpdir ) return output.strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _call_deadline_command_raw(self, arguments):\n # make a copy so we don't mutate the caller's reference\n arguments = list(arguments)\n arguments.insert(0, self._deadline_command_path)\n try:\n proc = subprocess.Popen(\n arguments,\n stdin=sub...
[ "0.6623097", "0.6296079", "0.6084702", "0.5997185", "0.59120613", "0.5874816", "0.57683825", "0.5742775", "0.57391727", "0.5712437", "0.56482756", "0.5596094", "0.558642", "0.553605", "0.553605", "0.5521694", "0.55093294", "0.54556483", "0.5436485", "0.54264593", "0.54078025"...
0.65875596
1
Get the path to the file where we will store sticky settings
def GetStickySettingsFilePath(): global submissionInfo deadlineHome = submissionInfo[ "UserHomeDir" ].strip() return os.path.join( deadlineHome, "settings", "katana_sticky.json" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def settingsFilePath(self):\n return self._settingsFilePath", "def get_preference_file():\n\n return \"{}/{}\".format(_MANAGER_PREFERENCE_PATH, _MANAGER_PREFERENCE_FILE)", "def get_preference_file_cache_destination_path():\n\n return read_preference_key(search_key=\"cache_manager_cache_path\")", ...
[ "0.72601885", "0.7198174", "0.69512", "0.6910759", "0.69085604", "0.68241256", "0.67362624", "0.6648517", "0.66195136", "0.6618425", "0.6611885", "0.65249866", "0.6479099", "0.64735585", "0.64711976", "0.6452971", "0.638629", "0.6381435", "0.63718975", "0.63349026", "0.631905...
0.8301903
0
Writes the current settings from Submitter UI to the sticky settings file.
def WriteStickySettings( gui ): global stickySettingWidgets, stickyWidgetSaveFunctions print( "Writing sticky settings..." ) configFile = GetStickySettingsFilePath() stickySettings = {} for setting, widgetName in stickySettingWidgets.iteritems(): try: widget = getattr( gui, widgetName ) stickySettings[setting] = stickyWidgetSaveFunctions[ type( widget ) ]( widget ) except AttributeError: print( traceback.format_exc() ) try: fileContents = json.dumps( stickySettings, encoding="utf-8" ) with io.open( configFile, "w", encoding="utf-8" ) as fileHandle: fileHandle.write( fileContents.decode("utf-8") ) except IOError: print( "Could not write sticky settings" ) print( traceback.format_exc() )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_settings(self):\n logger.info(f'Saving settings: {self.settings_dict}')\n for k, section in self.settings_dict.items():\n for setting_name in section.keys():\n value = self.get_control_value(setting_name)\n if value is not None:\n s...
[ "0.71699524", "0.7144108", "0.6855974", "0.68193734", "0.66913515", "0.66821957", "0.64933175", "0.64606106", "0.6453299", "0.63580054", "0.63520503", "0.63510686", "0.6333627", "0.6321657", "0.6306876", "0.62875223", "0.6263997", "0.62562144", "0.62060374", "0.61840034", "0....
0.7189759
0
Reads in settings from the sticky settings file, then update the UI with the new settings
def LoadStickySettings( gui ): global stickySettingWidgets, stickyWidgetLoadFunctions configFile = GetStickySettingsFilePath() print( "Reading sticky settings from: %s" % configFile ) stickySettings = None try: with io.open( configFile, "r", encoding="utf-8" ) as fileHandle: stickySettings = json.load( fileHandle, encoding="utf-8" ) except IOError: print( "No sticky settings found. Using default settings." ) except ValueError: print( "Invalid sticky settings. Using default settings." ) print( traceback.format_exc() ) except Exception: print( "Could not read sticky settings. Using default settings." ) print( traceback.format_exc() ) if stickySettings: for setting, value in stickySettings.iteritems(): widgetName = stickySettingWidgets.get(setting) if widgetName: try: widget = getattr(gui, widgetName) stickyWidgetLoadFunctions[ type( widget ) ]( widget, value ) except AttributeError: print( traceback.format_exc() )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateSettings(self):\n self.parser.read(self.file)\n self.showTicker = self.parser.getboolean('Settings', 'showTicker')\n self.verbose = self.parser.getboolean('Settings', 'verbose')\n self.sleepTime = self.parser.getint('Settings', 'sleeptime')\n self.saveGraph = self.parse...
[ "0.686287", "0.67737114", "0.6748555", "0.6717438", "0.6571519", "0.6435366", "0.63965124", "0.6376148", "0.6361373", "0.62330866", "0.621005", "0.62071073", "0.61983466", "0.61828625", "0.61292857", "0.6116447", "0.59903854", "0.5935539", "0.5932085", "0.58979905", "0.584024...
0.69833964
0
Converts a url patternesque string into a path, given a context dict, and splits the result.
def pathify(urlpattern, **context): repl = lambda match: context[match.group(1)] path = re.sub(r':([a-z]+)', repl, urlpattern) return tuple(path[1:].split('/'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def resolveContext(self, context):\n if context is None:\n return context\n elif isinstance(contex...
[ "0.59003174", "0.5704174", "0.5683664", "0.5584328", "0.55201805", "0.546162", "0.5402494", "0.535743", "0.53368884", "0.5284471", "0.5279856", "0.52473545", "0.5235247", "0.52138245", "0.51656365", "0.5129309", "0.5124352", "0.5093053", "0.5055723", "0.5051632", "0.5041933",...
0.7681451
0
init cluster_temp for all the center point
def __initCluster(self): data_size, cluster_center = self.data_size, self.cluster_center self.cluster_temp = np.zeros(data_size, dtype=int) self.cluster_upper_bound = np.full(len(cluster_center), float('inf'), dtype=float) for center in cluster_center: self.cluster_temp[center] = center
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initClusters(self):\n if len(self.labelList) != len(self.pointList):\n \traise ValueError(\"Label List and Point List not the same length!\")\n for i in range(len(self.labelList)):\n self.centroids[self.labelList[i]] = self.pointList[i]\n self.pointcounts[self.labelLi...
[ "0.69504863", "0.6859036", "0.67012495", "0.6668851", "0.6667392", "0.6468853", "0.6415132", "0.64095896", "0.63832414", "0.6361127", "0.63474107", "0.6336359", "0.62062657", "0.62016225", "0.61754805", "0.61420494", "0.6140045", "0.6138546", "0.6138051", "0.6124449", "0.6099...
0.825215
0
load data to memory
def load_dis_data(self, filename): logger.info('load data') self.distance, self.data_size = {}, 1 for line in open(path + filename, 'r'): x1, x2, d = line.strip().split(' ') x1, x2, d = int(x1), int(x2), float(d) self.data_size = max(x2 + 1, self.data_size) self.max_dis = max(self.max_dis, d) self.min_dis = min(self.min_dis, d) self.distance[(x1, x2)] = d self.master = np.zeros(self.data_size, dtype=int) logger.info('load accomplish')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self) -> None:", "def load_data(self):", "def load_data(self):\n raise NotImplementedError()", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def load_data(self):\n if self.debug:\n print(\"Loading data\"...
[ "0.8095919", "0.7828865", "0.7052371", "0.6787303", "0.66700774", "0.66354895", "0.6628009", "0.66243476", "0.6613523", "0.6586045", "0.65813166", "0.6575096", "0.6461651", "0.6460657", "0.64506775", "0.64494663", "0.64268064", "0.64058614", "0.6364637", "0.6342839", "0.63403...
0.0
-1
select the distance ranked if not auto, we will choose the distance at 1.8% top position as dc
def get_dc(self, auto=False, percent=0.018): data_size, distance = self.data_size, self.distance if not auto: position = int((data_size * (data_size + 1) / 2 - data_size) * percent) dc = sorted(distance.items(), key=lambda item: item[1])[position][1] logger.info("dc - " + str(dc)) return dc else: min_range, max_range = self.min_dis, self.max_dis dc = (min_range + max_range) / 2 while True: avg_rho_percent = sum([1 for d in distance.values() if d < dc]) / data_size ** 2 * 2 if 0.01 <= avg_rho_percent <= 0.02: break if avg_rho_percent < 0.01: min_range = dc else: max_range = dc dc = (min_range + max_range) / 2 if max_range - min_range < 0.01: break return dc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distances(self):", "def location_of_stops(self, choice, distance):\r\n avg_dist = 0\r\n min_dist = 1000\r\n max_dist = 0\r\n\r\n if choice == 1:\r\n #for dist_ in distance:\r\n # if int(dist_) < min_dist:\r\n # min_dist = dist_\r\n ...
[ "0.6012625", "0.60071933", "0.6004598", "0.58783704", "0.5852285", "0.57596016", "0.57278866", "0.5697758", "0.56681937", "0.56577826", "0.56478626", "0.5647478", "0.5602821", "0.5586023", "0.55812424", "0.55765444", "0.55755365", "0.55707633", "0.556011", "0.5559879", "0.555...
0.56715935
8
calculate the density of each vector and get the max_pos
def calculate_density(self, dc, cut_off=False): data_size, distance = self.data_size, self.distance logger.info('calculate density begin') func = lambda dij, dc: math.exp(- (dij / dc) ** 2) if cut_off: func = lambda dij, dc: 1 if dij < dc else 0 max_density = -1 for index in range(data_size): density = 0 for front in range(index): density += func(distance[(front, index)], dc) for later in range(index + 1, data_size): density += func(distance[(index, later)], dc) self.result.append([density, float("inf")]) max_density = max(max_density, density) if max_density == density: self.max_pos = index self.max_density = max_density self.result = np.array(self.result) self.rho_des_index = np.argsort(-self.result[:, 0]) logger.info('calculate density end')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_max_density(self):\n max_density = str(self.density.index(min(self.density)) + 1)\n print(max_density)\n return max_density", "def _calc_density(x: np.ndarray, y: np.ndarray):\n from scipy.stats import gaussian_kde\n\n # Calculate the point density\n xy = np.vstack([x, y])\n...
[ "0.6800876", "0.6359699", "0.6188986", "0.6152179", "0.6099716", "0.60784084", "0.5983489", "0.58815664", "0.58410376", "0.58110356", "0.58088136", "0.57901305", "0.577701", "0.575839", "0.57013303", "0.5689801", "0.56889397", "0.56295735", "0.562879", "0.562879", "0.562879",...
0.5624752
21
calculate the delta of each vector save the delta point as master
def calculate_delta(self): rho_des_index, distance, data_size = self.rho_des_index, self.distance, self.data_size self.result[rho_des_index[0]][1] = -1 for i in range(1, data_size): for j in range(0, i): old_i, old_j = rho_des_index[i], rho_des_index[j] min_pos, max_pos = min(old_j, old_i), max(old_j, old_i) if distance[(min_pos, max_pos)] < self.result[old_i][1]: self.result[old_i][1] = distance[(min_pos, max_pos)] self.master[old_i] = old_j self.result[rho_des_index[0]][1] = max(self.result[:, 1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_velocities(self):\n Ddemo_trajs = []\n\n for demo_traj in self._demo_trajs:\n d_traj = np.diff(demo_traj, axis=0)/self._dt\n #append last element to adjust the length\n d_traj = np.hstack([d_traj, d_traj[-1]])\n #add it to the list\n ...
[ "0.65990263", "0.6551683", "0.6391416", "0.6347046", "0.6311589", "0.6305406", "0.6271971", "0.62243825", "0.61688155", "0.6113892", "0.6110934", "0.6104737", "0.6018288", "0.5975151", "0.5968072", "0.592978", "0.59040904", "0.584247", "0.57922715", "0.57737917", "0.5765913",...
0.673058
0
use the multiplication of normalized rho and delta as gamma to determine cluster center
def calculate_gamma(self): result = self.result # scaler = preprocessing.StandardScaler() # train_minmax = scaler.fit_transform(result) # st_rho, st_delta = train_minmax[:, 0], train_minmax[:, 1] # self.gamma = (st_delta + st_rho) / 2 self.gamma = result[:, 0] * result[:, 1] self.gamma_des_index = np.argsort(-self.gamma)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_cluster_center(self, threshold):\n gamma = self.gamma\n self.cluster_center = np.where(gamma >= threshold)[0]", "def M_step(X, gamma):\n N = X.shape[0] # number of objects\n C = gamma.shape[1] # number of clusters\n d = X.shape[1] # dimension of each object\n\n ### YOUR CO...
[ "0.6678306", "0.6379675", "0.6162399", "0.59755903", "0.5975015", "0.59482974", "0.59137064", "0.58591443", "0.5844926", "0.5769318", "0.5760845", "0.5734815", "0.5687573", "0.56765157", "0.56329596", "0.56303257", "0.5629205", "0.558961", "0.55850154", "0.5579336", "0.557140...
0.64044535
1
Intercept a point with gamma greater than 0.2 as the cluster center
def calculate_cluster_center(self, threshold): gamma = self.gamma self.cluster_center = np.where(gamma >= threshold)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def predict_center(point):\n point_cluster_num = predict_cluster(point)\n center = centers[point_cluster_num]\n return center", "def center(x):\n return x - x.mean()", "def gauss_spot(s...
[ "0.6103443", "0.5330922", "0.529988", "0.52296835", "0.52157253", "0.5153788", "0.51438296", "0.51432735", "0.5122229", "0.5116896", "0.51106155", "0.509807", "0.50852835", "0.50819665", "0.5080408", "0.50785637", "0.50589377", "0.50488997", "0.504817", "0.50294673", "0.50135...
0.57451344
1
Initial configuration. Used to specify your username, password and domain. Configuration is stored in ~/.accountable/config.yaml.
def configure(username, password, domain): art = r''' Welcome! __ ___. .__ _____ ____ ____ ____ __ __ _____/ |______ \_ |__ | | ____ \__ \ _/ ___\/ ___\/ _ \| | \/ \ __\__ \ | __ \| | _/ __ \ / __ \\ \__\ \__( <_> ) | / | \ | / __ \| \_\ \ |_\ ___/ (____ /\___ >___ >____/|____/|___| /__| (____ /___ /____/\___ > \/ \/ \/ \/ \/ \/ \/ ''' click.secho(art, fg='blue') Config(username=username, password=password, domain=domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure(self):\n configurations = config.Configurations()\n self.credentials = configurations.credentials\n self.config = configurations.config", "def configure(self, conf):\n self.openam_base_url = conf.get('url')\n self.username = conf.get('user')\n self.__passwo...
[ "0.6725636", "0.65860176", "0.6572466", "0.6524407", "0.6381888", "0.625799", "0.62230504", "0.6196215", "0.6191912", "0.61845404", "0.6112712", "0.60647243", "0.6038883", "0.6033728", "0.60200953", "0.60005546", "0.5989566", "0.598916", "0.59883714", "0.5980305", "0.5948604"...
0.6898006
0
List all issue types. Optional parameter to list issue types by a given project.
def issuetypes(accountable, project_key): projects = accountable.issue_types(project_key) headers = sorted(['id', 'name', 'description']) rows = [] for key, issue_types in sorted(projects.items()): for issue_type in issue_types: rows.append( [key] + [v for k, v in sorted(issue_type.items()) if k in headers] ) rows.insert(0, ['project_key'] + headers) print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list(self, request):\n bug_types = BugType.objects.all()\n\n # Note the additional `many=True` argument to the\n # serializer. It's needed when you are serializing\n # a list of objects instead of a single object.\n serializer = BugTypeSerializer(\n bug_types, many...
[ "0.580682", "0.57997316", "0.55276394", "0.53734636", "0.53584605", "0.53383344", "0.5332019", "0.5323111", "0.53199", "0.52730525", "0.5229358", "0.5195646", "0.51418656", "0.51354766", "0.50994647", "0.5088411", "0.50732434", "0.5071402", "0.50672746", "0.5037107", "0.50092...
0.715429
0
Returns a list of all a project's components.
def components(accountable, project_key): components = accountable.project_components(project_key) headers = sorted(['id', 'name', 'self']) rows = [[v for k, v in sorted(component.items()) if k in headers] for component in components] rows.insert(0, headers) print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p i...
[ "0.7027259", "0.68796676", "0.6818949", "0.6719623", "0.67192936", "0.6683855", "0.66704535", "0.66671795", "0.66467714", "0.6640229", "0.65798545", "0.6556899", "0.65495473", "0.65493363", "0.65490365", "0.6516722", "0.6468194", "0.64657927", "0.64543396", "0.64378566", "0.6...
0.63358796
31
Create a new issue and checkout a branch named after it.
def checkoutbranch(accountable, options): issue = accountable.checkout_branch(options) headers = sorted(['id', 'key', 'self']) rows = [headers, [itemgetter(header)(issue) for header in headers]] print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_branch_from_issue(jira_url, jira_username, jira_api_key, project_key, source_branch_name, issue_key):\n click.echo('Branch \"{}\" was created'.format(\n create_branch_func(\n source_branch_name, get_branch_name(jira_url, jira_username, jira_api_key, issue_key, project_key)\n ...
[ "0.76392406", "0.68339527", "0.669128", "0.6574816", "0.64436364", "0.641573", "0.6397651", "0.6355058", "0.633265", "0.63172853", "0.62088567", "0.61402905", "0.59608126", "0.59550846", "0.59550846", "0.59512776", "0.592472", "0.59242016", "0.5866487", "0.58594525", "0.58388...
0.51225036
67
Checkout a new branch or checkout to a branch for a given issue.
def checkout(accountable, issue_key): issue = accountable.checkout(issue_key) headers = issue.keys() rows = [headers, [v for k, v in issue.items()]] print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkout(connection, branch, rid=None, repo=None):\n\n if repo is None:\n repo = Repository(connection, rid)\n\n return repo.checkout(branch)", "def checkout2(repo, branch, overwrite=True):\n cmd = 'git checkout %s' % (branch,)\n out = repo.issue(cmd, error='return')\n if ov...
[ "0.6947186", "0.67593735", "0.6722136", "0.668372", "0.6598679", "0.6580118", "0.657252", "0.6368997", "0.6331378", "0.6195333", "0.6167681", "0.6159897", "0.6148435", "0.61310816", "0.61085874", "0.6107166", "0.61063325", "0.6099721", "0.5997142", "0.5974702", "0.5951594", ...
0.5194389
62
List metadata for a given issue key.
def issue(ctx, accountable, issue_key): accountable.issue_key = issue_key if not ctx.invoked_subcommand: issue = accountable.issue_meta() headers = issue.keys() rows = [headers, [v for k, v in issue.items()]] print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_metadata(key=''):\n response, content = httplib2.Http().request(\n '%s/%s' % (METADATA_BASE_URL, key),\n headers={'Metadata-Flavor': 'Google'},\n method='GET',\n )\n if response['status'] == '404':\n raise NotFoundError(response, content)\n return content", "def get_metadata_keys (a...
[ "0.61033624", "0.5903859", "0.5889419", "0.578731", "0.56787604", "0.56540203", "0.56326365", "0.5542737", "0.5526142", "0.54738116", "0.5469202", "0.5457471", "0.5391811", "0.53859067", "0.5359557", "0.5359515", "0.5340263", "0.5308705", "0.5304822", "0.5293067", "0.52539086...
0.57509816
4
Update an existing issue.
def update(accountable, options): issue = accountable.issue_update(options) headers = issue.keys() rows = [headers, [v for k, v in issue.items()]] print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n\n params = {\n \"title\": self.title,\n \"body\": self.body,\n \"state\": self.state,\n \"labels\": self.labels,\n \"assignees\": self.assignees,\n }\n\n if self.milestone:\n params[\"milestone\"] = self.mile...
[ "0.7507036", "0.6419542", "0.6255618", "0.62028265", "0.6197646", "0.6125686", "0.60750145", "0.6069679", "0.6039978", "0.5961434", "0.5903089", "0.5882173", "0.58697164", "0.58686197", "0.5837822", "0.5791016", "0.5773482", "0.57148165", "0.56193185", "0.5503607", "0.5480972...
0.59853673
9
Lists all comments for a given issue key.
def comments(accountable): comments = accountable.issue_comments() headers = sorted(['author_name', 'body', 'updated']) if comments: rows = [[v for k, v in sorted(c.items()) if k in headers] for c in comments] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho('No comments found for {}'.format( accountable.issue_key ), fg='red')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_comments(self, issue_id):\n data = self._get(\"/issues/{}/comments\".format(issue_id))\n comments = []\n for item in data:\n comments.append(\n Comment(item['user']['login'], item['body'])\n )\n return comments", "def problem_comments(self...
[ "0.6735813", "0.63723326", "0.6294947", "0.62622994", "0.61518073", "0.6067908", "0.5866216", "0.5846625", "0.5813753", "0.58126175", "0.5763556", "0.5763556", "0.56536496", "0.5609131", "0.55674785", "0.55547565", "0.55169636", "0.54941475", "0.547762", "0.5461454", "0.54559...
0.68489426
0
Add a comment to the given issue key. Accepts a body argument to be used as the comment's body.
def addcomment(accountable, body): r = accountable.issue_add_comment(body) headers = sorted(['author_name', 'body', 'updated']) rows = [[v for k, v in sorted(r.items()) if k in headers]] rows.insert(0, headers) print_table(SingleTable(rows))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_comment_to_issue(repo, issue_number, body, allow_duplicates):\n found = False\n issue = repo.issue(issue_number)\n\n if not allow_duplicates:\n for comment in issue.iter_comments():\n if comment.body == body:\n found = True\n break\n\n if allow_du...
[ "0.682299", "0.6740081", "0.6561953", "0.6297364", "0.6274821", "0.6229835", "0.61394274", "0.5977267", "0.5953699", "0.5946078", "0.58701116", "0.5741862", "0.57191175", "0.56251615", "0.56233865", "0.5619574", "0.5502269", "0.5478731", "0.54059154", "0.5405601", "0.5395265"...
0.7143064
0
List all worklogs for a given issue key.
def worklog(accountable): worklog = accountable.issue_worklog() headers = ['author_name', 'comment', 'time_spent'] if worklog: rows = [[v for k, v in sorted(w.items()) if k in headers] for w in worklog] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho( 'No worklogs found for {}'.format(accountable.issue_key), fg='red' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_worklog(self, emp_id=None):\n\n query = \"select * from worklog\"\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_logs(job_key):\n job = Job.fet...
[ "0.6104521", "0.5450961", "0.5429597", "0.54215986", "0.53740776", "0.53375506", "0.5177321", "0.51320475", "0.509252", "0.50396067", "0.50299364", "0.50039464", "0.49709633", "0.49424547", "0.49327973", "0.49153993", "0.4895989", "0.4895989", "0.48916838", "0.48571062", "0.4...
0.68508613
0
List all possible transitions for a given issue.
def transitions(accountable): transitions = accountable.issue_transitions().get('transitions') headers = ['id', 'name'] if transitions: rows = [[v for k, v in sorted(t.items()) if k in headers] for t in transitions] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho( 'No transitions found for {}'.format(accountable.issue_key), fg='red' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transitions(self) -> List[Dict]:\n return []", "def transitions(self, from_state=None):\n return list(self.iter_transitions(from_state))", "def setup_transition_list():\n xn_list = []\n\n xn_list.append( Transition(3, 4, 2., 'left ejection') )\n xn_list.append( Transition(12, 2, 2., ...
[ "0.6658203", "0.64597243", "0.609803", "0.5973659", "0.59435755", "0.5658975", "0.56407136", "0.5377166", "0.5376197", "0.5366969", "0.53484374", "0.5304934", "0.52682185", "0.5264709", "0.5251384", "0.5251384", "0.5246294", "0.52343994", "0.5204525", "0.5165543", "0.5150279"...
0.7419396
0
Transition the given issue to the provided ID. The API does not return a JSON response for this call.
def dotransition(accountable, transition_id): t = accountable.issue_do_transition(transition_id) if t.status_code == 204: click.secho( 'Successfully transitioned {}'.format(accountable.issue_key), fg='green' )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _apply_issue(self, issue):\n data = {\n \"title\" : issue._title,\n \"body\" : issue._desc,\n \"labels\" : issue._labels\n }\n state = issue._state\n resp = self._post(\n self._base + \"/issues\", data=self._format_data(data))\n iss...
[ "0.6096263", "0.5963492", "0.5913452", "0.5913452", "0.5844219", "0.563173", "0.5610131", "0.55846214", "0.5549557", "0.55340236", "0.53562486", "0.52944756", "0.52351904", "0.51000565", "0.5072759", "0.5057223", "0.504402", "0.49940005", "0.49697727", "0.49525893", "0.487762...
0.5930258
2
Executes a user search for the given query.
def users(accountable, query): users = accountable.users(query) headers = ['display_name', 'key'] if users: rows = [[v for k, v in sorted(u.items()) if k in headers] for u in users] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho('No users found for query {}'.format( query ), fg='red')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_user(request: Request) -> Response:\n if not request.query_params.get('query'):\n return Response({'type': 'error', 'data': {'message': 'Invalid username query'}})\n\n users = User.objects.filter(\n username__contains=request.query_params.get('query'))\n return Response(UserSerial...
[ "0.6919763", "0.6878182", "0.68767464", "0.6873332", "0.674729", "0.66318774", "0.6590094", "0.65592164", "0.65443397", "0.65341306", "0.653293", "0.65029144", "0.6447364", "0.6444566", "0.63871574", "0.63288534", "0.6322284", "0.6304574", "0.6282313", "0.6281736", "0.6269824...
0.0
-1
Debug breakpoint while in curses mode
def _D(stdscr): curses.nocbreak() stdscr.keypad(0) curses.echo() curses.endwin() import pdb; pdb.set_trace()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __exit__(self, exc_type, exc_val, exc_tb):\n self.stdscr.keypad(False)\n self.stdscr.nodelay(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()", "def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()", "def _debug_trace():\n from PyQt4.QtCore i...
[ "0.6429854", "0.64178854", "0.6243146", "0.62248564", "0.6194091", "0.610298", "0.6085449", "0.5991171", "0.5902381", "0.58755255", "0.5865251", "0.5808573", "0.5802128", "0.57814217", "0.57667226", "0.5760795", "0.5739943", "0.572808", "0.5721066", "0.5709992", "0.5702061", ...
0.75144726
0
Simple reader for CSV files
def csv_reader(filepath): with open(filepath) as f: for row in f: row = row.strip() r = list() part = '' is_double_quoted = False for c in row: if c == ',': if is_double_quoted is False: r.append(part) part = '' else: part += c elif c == '\"': is_double_quoted = not is_double_quoted else: part += c if part != '': r.append(part) yield r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv():", "def read_csv_file(self):\n pass", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n ...
[ "0.8346554", "0.78787494", "0.7780344", "0.7574572", "0.7152502", "0.71060395", "0.71036065", "0.71036065", "0.70984864", "0.7079543", "0.7066068", "0.7056243", "0.7056243", "0.7051177", "0.70340747", "0.702111", "0.70198274", "0.7002565", "0.699316", "0.6969437", "0.69495463...
0.6942765
21
Simple writer for CSV files
def csv_writer(filepath, seqs): with open(filepath, 'w') as f: f.write('\n'.join([','.join( ['"{}"'.format(r) if (' ' in r) or (',' in r) else r for r in s]) for s in seqs]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def write_csv(self, filelike):\r\n items = self.rows()\r\n ...
[ "0.73990387", "0.73866165", "0.72805005", "0.7275408", "0.71544623", "0.71080923", "0.70806223", "0.7071333", "0.70384103", "0.7035223", "0.7030914", "0.7019498", "0.7012548", "0.69920474", "0.6981219", "0.6955778", "0.6939745", "0.6935824", "0.69006646", "0.6868536", "0.6852...
0.645615
78
Return the n answers.
def search(): question = request.get_json() question = question['questions'] prediction = pipe.run(query=question[0], top_k_retriever=3, top_k_reader=3) answer = [] for res in prediction['answers']: answer.append(res['answer']) result = {"results":[prediction]} return json.dumps(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def answers(self):\n assert self._answer_count\n for ii in self._answer_count:\n yield ii", "def get_n_solutions(self, n):\n return [self.get_solution() for _ in range(n)]", "def get_n_answers(self):\n return len(self.df)", "def get_top_answers(self, N):\n return sorted(\n ...
[ "0.6906924", "0.68668944", "0.67589056", "0.6748318", "0.6720697", "0.6720697", "0.6720697", "0.6613341", "0.6404832", "0.63191617", "0.6287063", "0.61694974", "0.6149776", "0.6117479", "0.6075899", "0.60121405", "0.59699076", "0.59674805", "0.5928417", "0.59035397", "0.59035...
0.0
-1
Retrieve yaml data from a given path if file not exist, return False
def get_yaml_data(path): yaml_path = "%s%s.yml" % (CONTENT_FILE_DIR, path[:-5]) if os.path.isfile(yaml_path): f = open(yaml_path, 'r') template_data = yaml.load(f) return template_data else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n # This should maybe throw an exception or something\n return {}", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\...
[ "0.7641903", "0.7460384", "0.69069195", "0.6766211", "0.6622035", "0.6619555", "0.6472961", "0.6431809", "0.630646", "0.6232994", "0.620704", "0.620435", "0.617769", "0.6173353", "0.6155012", "0.6154364", "0.6134782", "0.6125133", "0.6101209", "0.6087636", "0.6067218", "0.6...
0.80198294
0
Try and determine the correct _ (underscore) template matching the files directory structure
def determine_template_by_path(path): path = path.lstrip('/') path_chunks = re.split('\/', path) if len(path_chunks) <= 1: return path else: """ For now be ignorant and just return the first entry of the list as the possible template name, so in fact we only have a 1 level deep structure """ return '_%s.html' % path_chunks[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _FindTemplateFile(self, topdir):\n if topdir.endswith('..'):\n topdir = '/'.join(topdir.split('/')[:-2])\n fnames = os.listdir(topdir)\n for fname in fnames:\n filename = '%s/%s' % (topdir, fname)\n if filename.endswith('.yaml') and not os.path.isdir(filena...
[ "0.68880016", "0.65661573", "0.6463607", "0.6450778", "0.6291978", "0.6183937", "0.6181306", "0.6174625", "0.61639", "0.60519874", "0.6047972", "0.6023817", "0.60039794", "0.5952855", "0.5941049", "0.5935887", "0.5918789", "0.5912781", "0.5902953", "0.5882798", "0.5849363", ...
0.6831351
1
constructor instantiate a Document with a term_list to be converted into dict
def __init__(self, term_list, links=[]): # do type check if not isinstance(term_list, list): raise TypeError('term_list must be of type list') if not isinstance(links, list): raise TypeError('links must be of type list') self.term_dict = {x: term_list.count(x) for x in term_list} self.links = copy.deepcopy(links)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, docs, n):\n self.n = n\n self.dict = {}\n self.vocab = set()\n self.sum_index = \"*sum*\"\n regex = re.compile(\"\\s+\")\n count = 0\n for doc in docs:\n terms = re.split(regex, doc)\n for term in terms:\n if t...
[ "0.66287744", "0.65560615", "0.64975613", "0.63856316", "0.63014597", "0.6102487", "0.6062859", "0.60123897", "0.59738135", "0.597112", "0.5929631", "0.58660865", "0.5838759", "0.5802222", "0.5794235", "0.5776823", "0.57711035", "0.5759174", "0.57271045", "0.5723895", "0.5695...
0.6789457
0
constructor instantiate a Document with a dict of word count
def __init__(self, t_dict, links=[]): # do type check if not isinstance(t_dict, dict): raise TypeError('t_dict must be of type dict') if not isinstance(links, list): raise TypeError('links must be of type list') self.term_dict = copy.deepcopy(t_dict) self.links = copy.deepcopy(links)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, docs, n):\n self.n = n\n self.dict = {}\n self.vocab = set()\n self.sum_index = \"*sum*\"\n regex = re.compile(\"\\s+\")\n count = 0\n for doc in docs:\n terms = re.split(regex, doc)\n for term in terms:\n if t...
[ "0.7427657", "0.7263681", "0.69983685", "0.6996699", "0.6939715", "0.66831976", "0.6674372", "0.667245", "0.6642106", "0.66213447", "0.6589039", "0.6532153", "0.64735675", "0.643729", "0.64181757", "0.63333464", "0.6331262", "0.63129574", "0.6303807", "0.6298997", "0.62720144...
0.0
-1
init Construct a DocumentSet with main document
def __init__(self, main_doc): if not isinstance(main_doc, Document): raise TypeError('term must be of type Document') self.main_doc = main_doc self.env_docs = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(init_document: 'Document') -> 'DocumentArray':", "def build_document(self):\n pass", "def new_document(self) -> nodes.document:\n document = super().new_document()\n document.__class__ = addnodes.document # replace the class with patched version\n\n # substitute transfor...
[ "0.6253416", "0.60488045", "0.5982539", "0.59698325", "0.5956928", "0.59276694", "0.58543664", "0.5842508", "0.5783739", "0.5778808", "0.57767564", "0.5767244", "0.57607514", "0.57084924", "0.5701809", "0.5619672", "0.56058586", "0.56026864", "0.55988246", "0.5564519", "0.556...
0.6229792
1
Add Env Page append a new env_page to env_docs
def add_env_page(self, env_page): if not isinstance(env_page, Document): raise TypeError('env_page must be of type Document') self.env_docs.append(env_page)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_env(self, env):\n pass", "def addPage(self, name, page, **attrs):\n page.globalConfig = self.globalConfig\n page.pageConfig['pageName'] = name\n self.globalConfig.pageList.append(name)\n self.globalConfig.pageAttributes[name] = dict(attrs)\n setattr(self,name,pag...
[ "0.5903957", "0.5708109", "0.5389904", "0.5385481", "0.52170116", "0.5199296", "0.51268643", "0.51034814", "0.5072406", "0.50699824", "0.49988046", "0.49757445", "0.4973589", "0.49586692", "0.49433592", "0.4912121", "0.4901298", "0.48945105", "0.4888683", "0.48753846", "0.487...
0.83140147
0
Count term in environment calculate idf of a term in main doc
def __count_term_in_env(self, term): # type check if not isinstance(term, str): raise TypeError('term must be of type str') total_cnt = float(len(self.env_docs)) + 1.0 if total_cnt == 1.0: return 1.0 cnt = 1.0 for doc in self.env_docs: if term in doc.term_dict: cnt += 1.0 return math.log(total_cnt / cnt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_tf(doc):\r\n tf = {}\r\n for term in doc:\r\n if term not in tf:\r\n tf[term] = doc.count(term)\r\n return tf", "def term_idf(self, term):\n idf = math.log(2 + self.count_term_distinct_documents(ANY))\\\n - math.log(1 + self.count_term_distinct_documents(term...
[ "0.729734", "0.71593374", "0.7090254", "0.6939883", "0.6922164", "0.66782546", "0.6643847", "0.65991753", "0.6548294", "0.6533882", "0.6521299", "0.6515126", "0.6509364", "0.65010506", "0.64998555", "0.6493106", "0.64863795", "0.6480846", "0.6379101", "0.6369765", "0.63460505...
0.7381723
0
Statistic TF calculate and sort terms in main doc by tf
def statistic_tf(self): return sorted(self.main_doc.term_dict.items(), key=operator.itemgetter(1), reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_tf(doc):\r\n tf = {}\r\n for term in doc:\r\n if term not in tf:\r\n tf[term] = doc.count(term)\r\n return tf", "def compute_TF(doc_info):\n tf_scores = []\n\n for idx, doc in enumerate(doc_info):\n tf_score_table = {}\n for word in doc['freq_dict'].keys():...
[ "0.7448172", "0.7352905", "0.7274512", "0.719932", "0.70984966", "0.70313746", "0.69458485", "0.68545496", "0.66986537", "0.6692026", "0.6671128", "0.662987", "0.6564009", "0.65638477", "0.65546054", "0.6543071", "0.6411876", "0.6403338", "0.6386789", "0.631672", "0.62851524"...
0.805907
0
Statistic TFIDF calculate and sort terms in main doc by tfidf
def statistic_tfidf(self): # calculate df-idf for all words count_dict = {x: self.main_doc.term_dict[x] * self.__count_term_in_env(x) for x in self.main_doc.term_dict} # sort them by df and idf return sorted(count_dict.items(), key=operator.itemgetter(1), reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tf_idf_score():\n\n global final_doc_set\n global final_dictionary\n final_score = []\n\n for doc_id in final_doc_set:\n score = 0\n for query_term in final_dictionary.keys():\n if final_dictionary[query_term][1].get(doc_id):\n tf = final_dictionary[query_ter...
[ "0.7582223", "0.7435247", "0.73491657", "0.73197037", "0.7230406", "0.7206604", "0.71902233", "0.71717143", "0.71699125", "0.7125617", "0.702574", "0.7018102", "0.700898", "0.69296885", "0.6926581", "0.6906138", "0.68636584", "0.68492436", "0.68379414", "0.6832129", "0.682651...
0.8353365
0
Show the menu and return either None (if an exit key was pressed) or FindTweetMenu.BACK_INDEX
def showAndGet(self): keywords = TerminalInterface.getSearchKeywords() # If user did not enter any keywords, return FindUserMenu.BACK_INDEX if keywords is None: return FindTweetMenu.BACK_INDEX tweetGeneratorMethod = lambda: TweetsTableTools.findTweets( self._connection, keywords) menu = TweetsMenu(self._connection, self._userID, tweetGeneratorMethod, emptyMessage = FindTweetMenu._EMPTY_MESSAGE) choice = menu.showAndGet() if choice == TweetsMenu.BACK_INDEX: return FindTweetMenu.BACK_INDEX return choice
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def return_menu(self):\n while True:\n number = pyip.inputNum(\"0. Back to the main menu: \")\n if number == 0:\n # Clean up the console\n self.clear_console()\n # back to the main menu\n self.run()\n else:\n ...
[ "0.6956989", "0.63773394", "0.6250696", "0.6249938", "0.6121402", "0.6083828", "0.6070606", "0.6057965", "0.6057965", "0.6047459", "0.60398436", "0.60209143", "0.5979282", "0.59761137", "0.59599715", "0.59599715", "0.59599715", "0.5945133", "0.59189636", "0.5891626", "0.58872...
0.75444674
0
Update Plex by sending signal and jumping ahead by debounce timeout.
async def trigger_plex_update(hass, server_id): async_dispatcher_send(hass, PLEX_UPDATE_PLATFORMS_SIGNAL.format(server_id)) await hass.async_block_till_done() next_update = dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT) async_fire_time_changed(hass, next_update) await hass.async_block_till_done()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_update(self, event, signal):\n t = ppb.get_time() - self.start_time\n if t >= self.duration:\n signal(ppb.events.Quit())", "def pulley_activate(self):\n self.pulley(\"up\")\n time.sleep(5 * 0.7)\n self.pulley(\"stop\")\n time.sleep(2)\n self.pull...
[ "0.56693584", "0.5572143", "0.524882", "0.52033556", "0.5159521", "0.51439494", "0.5112212", "0.50803035", "0.50585544", "0.50414705", "0.49876454", "0.4983399", "0.49722165", "0.49537098", "0.49445942", "0.49259475", "0.49010918", "0.48859626", "0.48851368", "0.48832425", "0...
0.5235737
3
Uses an index array to obtain indices using an index array along an axis.
def select_indices(arr,index_arr,axis=-1): shape_list=(lambda x,y: [ 1 if dim!=x else y for dim in range(len(arr.shape))] ) indices_list=[np.reshape(np.arange(length),shape_list(length_id,length)) for length_id,length in enumerate(arr.shape)] indices_list[axis]=index_arr return arr.ravel()[np.ravel_multi_index(indices_list,dims=arr.shape)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pndindex(*args):\r\n return np.ndindex(*args)", "def pndindex(*args):\n return np.ndindex(*args)", "def _index(tensor_3d, tensor_2d):\n x, y, z = tensor_3d.size()\n t = tensor_3d.reshape(x * y, z)\n tt = tensor_2d.reshape(x * y)\n v = t[torch.arange(x * y), tt]\n v = v.reshape(x, y)\n ...
[ "0.7302368", "0.7263272", "0.69995314", "0.6984675", "0.68649966", "0.68557614", "0.6626734", "0.6612736", "0.64494765", "0.63717943", "0.6355618", "0.6344733", "0.6259788", "0.62565714", "0.62565714", "0.6241369", "0.62404037", "0.62190133", "0.62045544", "0.61014456", "0.60...
0.7482163
0
Load model with saved parameters
def __init__(self,data_path=None,load_quant=True,use_cuda=False): data_path = os.path.join("data", "Transformer_500k_UNK") if data_path==None else data_path if not os.path.isdir(data_path) or len(os.listdir(data_path)) == 0: raise FileNotFoundError(f"No such file or directory: {data_path}, set the path to your model " "directory or download the pre-trained one from https://github.com/Rvbens/Chatbot-en-Espanol " "and uncompress on ./data.") #download('transformer',load_quant) with open(data_path + '/voc.pkl', 'rb') as f: self.voc = pickle.load(f) self.device = torch.device("cuda" if use_cuda else "cpu") self.model = self.from_checkpoint(data_path,load_quant,use_cuda) self.searcher = self.beam_search
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self) -> Any:", "def load_model(self):\n pass", "def load(path_to_model):\n pass", "def load_model(self, model_path: str):", "def _load_model_from_trained_params(self):\n self.ent_emb = tf.constant(self.trained_model_params[0])\n self.rel_emb = tf.constant(self.tr...
[ "0.82873255", "0.8127506", "0.7947357", "0.78214943", "0.7764831", "0.772304", "0.76998776", "0.7674535", "0.7609661", "0.75857383", "0.75005114", "0.74642503", "0.7394183", "0.7352217", "0.73177934", "0.7304131", "0.7298603", "0.7293181", "0.7292386", "0.7279392", "0.7239284...
0.0
-1
Give an answer to the input sentence using the model
def evaluateOneInput(self, input_sentence): input_sentence = process_punct(input_sentence.encode()) # Evaluate sentence output_words = self.evaluate(input_sentence) # Format and print response sentence output_words[:] = [x for x in output_words if not (x =='SOS' or x == 'EOS' or x == 'PAD')] raw_ans = ' '.join(output_words) ans = reformatString(raw_ans) return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insult_me(\n message : str \n ):\n \n #load model\n model = Detoxify('original')\n \n #predict toxicity\n results = model.predict(message)\n \n #echo results\n click.echo(pd.Series(results))", "def example_single(args, model, word2idx):\n #在命令行中加载和分段<目标、(推特内容)>配对\n ...
[ "0.6981685", "0.67661905", "0.67217624", "0.6701556", "0.6554767", "0.6553322", "0.6489698", "0.641607", "0.6358076", "0.6350352", "0.6321193", "0.62813115", "0.6276418", "0.6273281", "0.6234738", "0.61554706", "0.61365837", "0.61111104", "0.61026496", "0.6102064", "0.6094713...
0.6599026
4
Continous loop of inputs and answers
def evaluateCycle(self): print("Enter q or quit to exit") input_sentence = '' while(1): # Get input sentence input_sentence = input('> ') # Check if it is quit case if input_sentence == 'q' or input_sentence == 'quit': break ans = self.evaluateOneInput(input_sentence) print('Bot:', ans)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_loop():\n while(True):\n decision = raw_input(\"enter some mathematical operations\")\n if(decision == \"done\"):\n break\n print eval(decision)", "def main():\n min_random = 10 #keeping constant for the min random number range\n max_random = 99 #keeping constant...
[ "0.6230928", "0.6096894", "0.6076145", "0.5999656", "0.5885386", "0.5863726", "0.5846322", "0.5829764", "0.5800593", "0.57979757", "0.5749408", "0.57334924", "0.5704362", "0.5703528", "0.56611365", "0.5657158", "0.56488985", "0.564698", "0.5644468", "0.5608744", "0.5578464", ...
0.6725195
0
Primary method to play the game & checking the solution. It is not used in solving)
def shift(self, direction): direct, pos = tuple(direction) board = {'L': self.rows, 'R': self.rows, 'D': self.cols, 'U': self.cols}[direct] board[int(pos)].shift(direction=self.direct[direct])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play_game():\n pass", "def play(self):\n print(\"Game is starting!!\")\n self.generate_secret_number()\n while True:\n self.get_guess_from_user()\n self.ans = self.compare_results()\n if self.ans:\n print(f\"Right Guess!! , the numbe...
[ "0.75411516", "0.7212041", "0.71931386", "0.7155727", "0.7048633", "0.7033831", "0.7021678", "0.70061886", "0.7005463", "0.6991841", "0.69794375", "0.69777775", "0.69694954", "0.6947417", "0.6946482", "0.68822587", "0.68286717", "0.68259066", "0.6824033", "0.6817702", "0.6791...
0.0
-1
method to create random tests
def shuffle(self, steps): from random import sample for s in range(steps): direction = sample('LRUD', 1)[0] if direction in 'LR': stepsize = str(sample(range(self.cdim), 1)[0]) else: stepsize = str(sample(range(self.rdim), 1)[0]) self.shift(direction + stepsize) return '\n'.join([''.join([node.value for node in row]) for row in self.rows])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_generate_all_testing(self):\n pass", "def create_scenarios(self, params, num_scenarios, random_seed):\n return None", "def tests():", "def random_test(self):\r\n return 1", "def random_test(self):\r\n return 1", "def test_create10(self):\n pass", "def setUp(self)...
[ "0.7360971", "0.71221584", "0.7059438", "0.697786", "0.697786", "0.683397", "0.65771455", "0.6566908", "0.6540432", "0.6537697", "0.6506476", "0.6506476", "0.6487945", "0.6480973", "0.6466432", "0.64281267", "0.6419093", "0.6400205", "0.6394195", "0.6386315", "0.6373467", "...
0.0
-1
Run all test scenario and then execute reporter if html flag exist.
def run(self): list_test_scenarios = self.__get_list_scenarios_in_folder() if not list_test_scenarios: utils.print_error( "\n{}\n".format(constant.ERR_CANNOT_FIND_ANY_TEST_SCENARIOS)) exit(1) (tests_pass, tests_fail) = self.__execute_tests(list_test_scenarios) complete_message = constant.INFO_TEST_PASS_FAIL.format( tests_pass, tests_fail) print(complete_message) self.__execute_reporter()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __execute_reporter(self):\n if not self.__args.report:\n return\n reporter.HTMLReporter().generate_report_from_file(\n self.__lst_json_files)", "def pytest_runtest_makereport(item, call): # pylint: disable=unused-argument\n pytest_html = item.config.pluginmanager.getpl...
[ "0.6686276", "0.65002805", "0.6378646", "0.6340691", "0.6270522", "0.62139416", "0.6112857", "0.60783136", "0.6054379", "0.60486007", "0.60120285", "0.5982267", "0.5931877", "0.59273463", "0.5872965", "0.58716303", "0.5847251", "0.5843916", "0.583857", "0.58348507", "0.583448...
0.62751275
4
Catch args for TestRunner in sys.argv.
def __catch_arg(self): arg_parser = argparse.ArgumentParser() arg_parser.add_argument("-d", "--directory", dest="directory", default="", nargs="?", help="directory of test " "scenarios (not recursive)") arg_parser.add_argument("-rd", "--recur_directory", dest="recur_directory", default="", nargs="?", help="directory of test scenarios (recursive)") arg_parser.add_argument("-t", "--timeout", dest="timeout", type=float, help="timeout for each " "scenario (default: 300s)", default=300, nargs="?") arg_parser.add_argument("-html", "--html_report", dest="report", action="store_true", default=False, help="if this flag is missing, html " "report would not be generated") arg_parser.add_argument("-l", "--keep_log", action="store_true", help="keep all log file") self.__args = arg_parser.parse_args() if self.__args.timeout <= 0.0: print("Invalid timeout!") exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_invalidargs(clickrunner):\n for args in maincli.invalid_args:\n result = clickrunner.invoke(maincli.entrypoint, args)\n assert result.exit_code == 2\n assert \"no such option\" in result.output", "def test_validargs(clickrunner):\n for args in maincli.valid_args:\n resu...
[ "0.7225798", "0.70116264", "0.69673175", "0.6952749", "0.69509286", "0.6940995", "0.69118005", "0.69118005", "0.68684554", "0.6853725", "0.67827827", "0.67526424", "0.6737822", "0.6723002", "0.670661", "0.6685841", "0.66617244", "0.66590375", "0.6636994", "0.66235673", "0.657...
0.6579239
20
Execute all test case and collect the number of tests and pass.
def __execute_tests(self, lst_tests): tests_pass = tests_fail = 0 queue_of_result = multiprocessing.Queue() for test in lst_tests: process = multiprocessing.Process( target=TestRunner.__helper_execute_test, kwargs={"test_cls": test, "time_out": self.__args.timeout, "channel": queue_of_result}) process.start() process.join() temp_result = {} if not queue_of_result.empty(): temp_result = queue_of_result.get_nowait() if "status" in temp_result: if temp_result["status"] == result.Status.PASSED: tests_pass += 1 else: tests_fail += 1 if "json_path" in temp_result: self.__lst_json_files.append(temp_result["json_path"]) if "log_path" in temp_result: self.__lst_log_files.append(temp_result["log_path"]) return tests_pass, tests_fail
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n ...
[ "0.8066305", "0.7679223", "0.7588624", "0.75611496", "0.7493511", "0.7447741", "0.7365203", "0.73429716", "0.7243645", "0.71807855", "0.71580714", "0.7156753", "0.7147598", "0.7142445", "0.71408165", "0.71127534", "0.70493865", "0.70469147", "0.70359755", "0.70329404", "0.703...
0.7079473
16
Execute html_reporter if html flag is exist in sys.argv.
def __execute_reporter(self): if not self.__args.report: return reporter.HTMLReporter().generate_report_from_file( self.__lst_json_files)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(args):\n p = OptionParser()\n p.add_option('-d', '--debug',\n action='store_true', default=False, dest='debug',\n help='debug')\n p.add_option('-w', '--w3c',\n action='store_true', default=False, dest='w3c',\n help='send file to vali...
[ "0.61923707", "0.6103338", "0.57461756", "0.5729902", "0.5687042", "0.5661867", "0.5577024", "0.5549909", "0.5545881", "0.5544282", "0.5523945", "0.5516386", "0.5506726", "0.5482997", "0.545956", "0.541977", "0.5402856", "0.5398641", "0.5351573", "0.53477114", "0.5338802", ...
0.65210193
0
Get all scenario in folder. Recursive to sub folder if "rd" argument appear in sys.argv.
def __get_list_scenarios_in_folder(self): # If both directory and recur_directory are exist # then show "Invalid command" and exit. if self.__args.directory is not "" \ and self.__args.recur_directory is not "": utils.print_error("\n{}\n".format(constant.ERR_COMMAND_ERROR)) exit(1) recursive = False start_directory = "" if self.__args.directory is not "": start_directory = self.__args.directory elif self.__args.recur_directory is not "": start_directory = self.__args.recur_directory recursive = True if not start_directory: start_directory = TestRunner.__test_script_dir if not os.path.exists(start_directory): utils.print_error( "\n{}\n".format(constant.ERR_PATH_DOES_NOT_EXIST. format(start_directory))) exit(1) list_files = [] if start_directory.endswith(".py"): list_files = [start_directory] else: try: if recursive: for directory, _, _ in os.walk(start_directory): list_files.extend(glob.glob(os.path.join(directory, "*.py"))) else: list_files.extend(glob.glob(os.path.join(start_directory, "*.py"))) except OSError: pass list_test_scenarios = [] for file in list_files: sys.path.append(os.path.dirname(os.path.abspath(file))) test_module = \ importlib.import_module(os.path.basename(file).replace(".py", "")) for name, cls in inspect.getmembers(test_module, inspect.isclass): if cls is not TestScenarioBase \ and issubclass(cls, TestScenarioBase): list_test_scenarios.append(cls) return list_test_scenarios
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getImmediateSubdirectories(dir):", "def open_run_list(base_path, filter=None):\n dir_list = listdir(base_path)\n if not dir_list:\n return []\n if filter is not None:\n filter_list = glob(path.join(base_path, filter))\n filter_list = [path.basename(x) for x in filter_list]\n ...
[ "0.63146096", "0.5627409", "0.558369", "0.55248845", "0.5507381", "0.54794055", "0.54717195", "0.541972", "0.5409548", "0.5396662", "0.53879046", "0.5381494", "0.53524697", "0.534816", "0.53151697", "0.5314741", "0.5300426", "0.5292671", "0.5287332", "0.52773887", "0.5202164"...
0.6236096
1
Execute test case in a subprocess and send result to parent process
def __helper_execute_test(test_cls, channel, time_out): test_case = test_cls() test_case.execute_scenario(time_out=time_out) temp = {} if hasattr(test_case, "test_result"): temp["status"] = test_case.test_result.get_test_status() temp["json_path"] = test_case.test_result.get_json_file_path() temp["log_path"] = test_case.logger.get_log_file_path() channel.put_nowait(temp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subprocess_run(self, *args):\n return self.testdir.runpytest_subprocess(*args)", "def exec_test_command(cmd):\n process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)\n result = process.communicate()\n return (\n process.returncode,\n bytes(result[0]).dec...
[ "0.7415895", "0.7005816", "0.6814329", "0.68095565", "0.6792471", "0.6724851", "0.67234045", "0.671659", "0.6664277", "0.66287655", "0.66093504", "0.6543628", "0.6516597", "0.6505767", "0.64967453", "0.649315", "0.64489895", "0.64397633", "0.63724536", "0.6358078", "0.6355825...
0.0
-1
Takes a tuple representing a circle as (x,y,radius) and returns a tuple with the x,y coordinates and width,size (x,y,w,h)
def circle_2_tuple(circle): assign_coord = lambda x,y: x - y if x > y else 0 x = assign_coord(circle[0],circle[2]) y = assign_coord(circle[1],circle[2]) assign_size = lambda x,y : y*2 if x > y else y*2 - (y-x) w = assign_size(circle[0],circle[2]) h = assign_size(circle[1],circle[2]) return (x,y,w,h)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def circle_2_bbox(circle):\n x,y,w,h = circle_2_tuple(circle)\n return ((x,y),(x+w,y+h))", "def circleInfo(r):\n c = 2 * 3.14159 * r\n a = 3.14159 * r * r\n return (c, a)", "def _resolve_size(self, width, height, center_x, center_y):\n if self.size_type == 'explicit':\n size_x,...
[ "0.6815439", "0.67740446", "0.6597744", "0.64084023", "0.63581634", "0.6175593", "0.6125594", "0.6088099", "0.6076769", "0.60566986", "0.6024376", "0.5960171", "0.5957911", "0.5952948", "0.59458065", "0.5938926", "0.5935301", "0.59228104", "0.59222513", "0.5917145", "0.588294...
0.82615507
0
Takes a tuple representing a circle as (x,y,radius) and returns a tuple represeting a bbox ((x,y),(x',y'))
def circle_2_bbox(circle): x,y,w,h = circle_2_tuple(circle) return ((x,y),(x+w,y+h))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def circle_2_tuple(circle):\n assign_coord = lambda x,y: x - y if x > y else 0\n x = assign_coord(circle[0],circle[2])\n y = assign_coord(circle[1],circle[2])\n\n assign_size = lambda x,y : y*2 if x > y else y*2 - (y-x) \n w = assign_size(circle[0],circle[2])\n h = assign_size(circle[1],circle[2...
[ "0.7212098", "0.6748863", "0.6743238", "0.6730478", "0.67082477", "0.66678756", "0.66592455", "0.66318727", "0.6586817", "0.65842336", "0.6532223", "0.6481017", "0.6468795", "0.6422326", "0.6373362", "0.63589585", "0.635091", "0.6347281", "0.6332991", "0.63162756", "0.6307187...
0.87923753
0
Takes a tuple of tuples represeting a bbox ((x,y),(x',y')) and returns
def fix_bbox(bbox,img_shape): x = min(bbox[1][0],img_shape[1]) y = min(bbox[1][1],img_shape[0]) return ((bbox[0]),(x,y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def circle_2_bbox(circle):\n x,y,w,h = circle_2_tuple(circle)\n return ((x,y),(x+w,y+h))", "def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)", "def bbox2points(bbox):\r\n l, x, y, w, h = bbox\r\n xmin = int(ro...
[ "0.74169517", "0.7330232", "0.73051816", "0.7260692", "0.72117823", "0.71556735", "0.711998", "0.70630515", "0.6968945", "0.6965542", "0.6959953", "0.68821084", "0.68737143", "0.68725014", "0.6858501", "0.68244123", "0.67616284", "0.67497444", "0.67070234", "0.66811466", "0.6...
0.75615424
0
Draws bboxes in a image given an array of circles [(x,y,radius)]
def bbox_from_circle(img, circles): seg_imgs = [] bboxes = [] aux = img.copy() for i,el in enumerate(circles): bbox = circle_2_bbox(el['coord']) bbox = fix_bbox(bbox,aux.shape) cv.rectangle(aux,bbox[0],bbox[1],(0,255,0)) bboxes.append(bbox) return bboxes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bboxes(img, bboxes, color=(0, 0, 255), thick=6):\n draw_img = np.copy(img)\n # Draw rectangles given bbox coordinates as opposing coordinates\n # bboxes = opposing coordinates: (x1,y1), (x2,y2)\n [cv2.rectangle(draw_img, bbox[0], bbox[1], color, thick) for bbox in bboxes]\n return draw_img"...
[ "0.68533266", "0.68072176", "0.6805508", "0.6788925", "0.676972", "0.6738393", "0.67133397", "0.664385", "0.66165227", "0.6587222", "0.6578446", "0.65585065", "0.6551722", "0.65482426", "0.6528621", "0.65220505", "0.64468735", "0.64413995", "0.6400262", "0.6379862", "0.637677...
0.73344976
0
Calculate heterozygosity samples = list of sample names vcf = VCF file
def calHet( inFile, varType ): names = [] print("Sample\tfracHet\thetCt\thomCt") # print header with open( inFile, 'r') as files: # open sample name file for i in files: i = i.rstrip() vcf = i + "." + varType + ".vcf" with open( vcf, 'r' ) as data: hom = 0.0 # count homozygous sites het = 0.0 # count heterozygous sites fractionHet = 0.0 # fraction heterozygous for var in data: if var.startswith("#"): # skip header continue else: var = var.rstrip() line = var.split("\t") stats = line[9].split(':') # alleles = list( map( int, stats[1].split(',') ) ) # create list of allele counts check = [ i for i in alleles if i > 0] # put any counts > 0 into a list if not check: # if all allele counts == 0 continue # all alleles are set to zero wtf? Result of a quality score that is low. elif len(check) > 1: # multiple allele counts , must be heterozygous het += 1 # more than one allele elif len(check) == 1: # only one allele has a count hom += 1 #print("%s\t%s\t%s\t%s\t%s\t%s" %(i, line[0], line[1], stats[0], stats[1], check ) ) if hom == 0: fractionHet = 100 else: fractionHet = het/(hom + het) # calculate fraction heterozygous print("%s\t%f\t%f\t%f" %(i, fractionHet, het,hom )) files.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vcf_samples(vcffile):\n try:\n vcf_reader = vcf.Reader(open(vcffile, 'r'))\n return vcf_reader.samples\n except Exception as error:\n print(f\"Could not read vcffile {vcffile}: continuing without vcf data: {str(error)}\")\n\n return []", "def calculate_mixture_features(args):\n ...
[ "0.59488827", "0.5802758", "0.58007336", "0.57328737", "0.56093895", "0.55808663", "0.5559472", "0.5527993", "0.55187845", "0.5462843", "0.54014647", "0.5394218", "0.53905374", "0.53511345", "0.53466797", "0.5334894", "0.5314936", "0.5283237", "0.5243797", "0.5231647", "0.523...
0.6672723
0
Set up SMHI forecast as config entry.
async def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool: opp.config_entries.async_setup_platforms(entry, PLATFORMS) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, config_file_name):\n configs = io.read_yaml(PATH_CONFIG, config_file_name)\n Logger.info('Loaded future forecasts configs from file',\n os.path.join(PATH_CONFIG, config_file_name), self.__class__.__name__)\n\n self.is_sell_in_model = configs['model'] == 's...
[ "0.61663395", "0.5952376", "0.574808", "0.5706627", "0.57054985", "0.56703043", "0.5660496", "0.5641424", "0.5638844", "0.56216913", "0.5614565", "0.55751806", "0.55385953", "0.547761", "0.54702574", "0.54657686", "0.5461015", "0.5460905", "0.5451515", "0.54444194", "0.544236...
0.0
-1
Unload a config entry.
async def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry) -> bool: return await opp.config_entries.async_unload_platforms(entry, PLATFORMS)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_unload_entry(hass, config_entry):\n unload_ok = await hass.config_entries.async_forward_entry_unload(\n config_entry, \"climate\"\n )\n return unload_ok", "async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:\n hass.data.pop(DOMAIN)\n return True", ...
[ "0.697284", "0.6888074", "0.6779855", "0.6747459", "0.6689002", "0.6657831", "0.66162205", "0.6603433", "0.65925974", "0.65595686", "0.65411645", "0.6507643", "0.6507643", "0.6507643", "0.6507643", "0.64977276", "0.64931643", "0.6486601", "0.6486601", "0.6486601", "0.6486601"...
0.58642447
78
A convenience function for getting a single suggestion.
def get_suggestion(): global _suggestions_iterator while True: try: return next(_suggestions_iterator) except StopIteration: _suggestions_iterator = iter(suggestions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def suggestion(self, suggestion_id):\r\n return suggestions.Suggestion(self, suggestion_id)", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.sp...
[ "0.70957506", "0.7064316", "0.6983561", "0.6963836", "0.6963836", "0.6800833", "0.6749406", "0.6550867", "0.6436159", "0.6428319", "0.6357224", "0.62608695", "0.62456524", "0.6239825", "0.6186077", "0.60764414", "0.6011701", "0.5944827", "0.5927803", "0.582557", "0.5824507", ...
0.7540617
0
Builds game board by retrieving a sudoku puzzle preset from a sudoku dataset and then sets up the game board. Also calls a backtracking algorithm to derive a solution for the sudoku puzzle.
def build_game_board(self): # retrieves new sudoku puzzle from dataset sudoku_set = self.data.get_sudoku_set() sudoku_problem, sudoku_solution = sudoku_set[0], sudoku_set[1] # removes old game boards self.board = [] self.puzzle = [] self.alg_solution = [] self.data_solution = [] # sets up sudoku puzzle to array format segment = [] for num in sudoku_problem: segment.append(int(num)) if len(segment) == 9: self.board.append(segment) self.puzzle.append(segment[:]) segment = [] self.alg_solution = alg.solve_sudoku(self.puzzle) # uses sudoku backtracking algorithm to solve puzzle # sets up the provided sudoku puzzle solution from dataset to array format for num in sudoku_solution: segment.append(int(num)) if len(segment) == 9: self.data_solution.append(segment) segment = [] self.game_state = "Not Solved, Keep Trying!"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solveSudoku(board):\n # represents all numbers in a specific row, col, box\n # format: if (5,9) is in rows, that means row 5 contains digit 9\n\t\t# format: if (3, 2) is in cols, that means col 3 contains digit 2\n\t\t# format: if (0,2,8) is in boxes, that means box (0,2) contains 8\n\t\t# cellsT...
[ "0.70219713", "0.6696299", "0.6669564", "0.665291", "0.6652331", "0.6648256", "0.6491506", "0.6417593", "0.64122254", "0.6406946", "0.64067495", "0.6398763", "0.6398049", "0.6371711", "0.63527167", "0.6332174", "0.63301975", "0.6281087", "0.6276849", "0.62565374", "0.62547344...
0.8129647
0
Requests user input for the row column and number input they would like to enter as the next entry to the Sudoku puzzle. Has some lightweight data validation through a try / except format and asks for another input attempt if invalid inputs were provided.
def request_number_input(self): try: self.print_board(self.board) row = int(input("Please enter row to add number to (0-8): ")) col = int(input("Please enter column to add number to (0-8): ")) num = int(input("Please enter number you wish to add (1-9): ")) response = self.set_number(col, row, num) print(response) # verifies if move was valid or if invalid inputs were provided. except: print("Invalid input, try again!") self.request_number_input()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input(self):\n while True:\n try:\n self.rows = int(input(\"Number of rows: \"))\n while self.rows < 2 or self.rows > 30:\n self.rows = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except Valu...
[ "0.7249653", "0.7011725", "0.68896455", "0.65655696", "0.64116174", "0.63925433", "0.62278056", "0.6168719", "0.6091157", "0.6074832", "0.604573", "0.6043378", "0.5990928", "0.5926154", "0.5902846", "0.58928376", "0.5879233", "0.5877413", "0.5809618", "0.57965463", "0.5755129...
0.71602094
1
Checks that inputs are valid and returns informative messages if not. If input is valid, updates the game board and returns an updated game state.
def set_number(self, col, row, num): if col > 8 or row > 8 or num > 9 or num < 0: return "Invalid input, try again!" elif self.new_input_does_not_overlap_original_board(col, row): if num == 0: self.board[row][col] = 0 else: self.board[row][col] = num return self.update_game_state() # return alg.check_solution(self.board) else: return "Cannot change this number, try again!"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_game_state(self):\n # if board is not filled out, returns a valid move message\n for row in self.board:\n if 0 in row:\n return \"Valid input\"\n\n # if board is filled out, verifies if solution is valid and updates game state\n self.game_state = alg...
[ "0.7908367", "0.62731576", "0.6187185", "0.6057185", "0.59081376", "0.583079", "0.58031124", "0.57425433", "0.57258314", "0.5701646", "0.56752145", "0.56365675", "0.562094", "0.5612027", "0.5599031", "0.5581615", "0.5548244", "0.55428475", "0.5534015", "0.5533693", "0.5515880...
0.0
-1
Checks if the requested square to change is an original input for the puzzle, which cannot be changed.
def new_input_does_not_overlap_original_board(self, col, row): return self.puzzle[row][col] == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_solved(self):\n # Iterate through each square of the puzzle\n for row in range(self.sl):\n for col in range(self.sl):\n val = self.puzzle[row][col]\n\n # If any square value is blank (0), not solved, return False\n if val == 0:\n ...
[ "0.68145555", "0.66621006", "0.65014184", "0.6457396", "0.64046955", "0.6342213", "0.6310124", "0.630704", "0.6286575", "0.62758124", "0.62362766", "0.6218367", "0.62178296", "0.61827266", "0.61717474", "0.61584324", "0.61545163", "0.61536086", "0.6134026", "0.6131081", "0.61...
0.710153
0
Checks to see if the sudoku puzzle has been filed out and if it has, checks if solution is valid.
def update_game_state(self): # if board is not filled out, returns a valid move message for row in self.board: if 0 in row: return "Valid input" # if board is filled out, verifies if solution is valid and updates game state self.game_state = alg.check_solution(self.board) return self.game_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_solvable(self):\n\n self.solvable=True #status of sudoku\n for i in range(0, 9):\n for j in range(0, 9):\n if self.a[i][j]==0:\n continue\n if self.check(i, j)[self.a[i][j]]==0:\n self.solvable=False\n return False", "def test_is_solved_when_p...
[ "0.7875301", "0.73579586", "0.73001826", "0.7293104", "0.7169292", "0.70995307", "0.70965403", "0.7090685", "0.70901805", "0.70878255", "0.70585865", "0.7052431", "0.69659704", "0.69108915", "0.69038904", "0.6884344", "0.6804607", "0.6775108", "0.677041", "0.67370665", "0.669...
0.0
-1
Method for retrieving game state.
def get_game_state(self): return self.game_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_game_state(self):\r\n return self._game_state", "def get_game_state(self):\n return self._game_state", "def get_game_state(self):\n return self._game_state", "def get_game_state(self):\n return self._game_state", "def get_game_state(self):\n return self._current_s...
[ "0.8740767", "0.86155", "0.86155", "0.86155", "0.8482095", "0.84146124", "0.84146124", "0.8371279", "0.8281865", "0.82611275", "0.7860468", "0.7752739", "0.7565724", "0.75503594", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294", "0.75414294"...
0.87546504
0
Method for retrieving current puzzle board.
def get_game_board(self): return self.board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_board(self):\r\n return self.board", "def get_board(self):\n return self.board", "def get_board(self):\n pass", "def getBoard(self):\n return self.board", "def get_board(self):\n return self._board", "def get_board(self):\n return self._board", "def get...
[ "0.81792754", "0.8089178", "0.8084012", "0.80593", "0.8017772", "0.8017772", "0.7993326", "0.78803223", "0.7876486", "0.775667", "0.7621218", "0.72338516", "0.72066325", "0.70492387", "0.6822986", "0.68192434", "0.681123", "0.6792606", "0.6792606", "0.6792606", "0.6792606", ...
0.7823289
9
Method for printing a puzzle board, given a board input. Adds separators for readability.
def print_board(self, board): print("Sudoku Board:") count = 0 for row in board: string = "" for num in range(len(row)): if row[num] != 0: string += str(row[num]) else: string += "_" if num != len(row) - 1: string += " " if (num+1) % 3 == 0 and num != len(row) - 1: string += "| " print(string) count += 1 if count % 3 == 0 and count < 9: print("_______________________________")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_puzzle(board):\n\n row_size = get_row_size(board)\n output = '\\n'\n\n for idx, val in enumerate(board):\n output += \" {} \".format(val)\n if idx % row_size == row_size - 1:\n output += \"\\n\"\n\n return output", "def print_board(self):\n num_rows = len(self.bo...
[ "0.78815925", "0.7831809", "0.7829072", "0.7808938", "0.7672224", "0.76157385", "0.759019", "0.7571991", "0.7562879", "0.7542402", "0.7536376", "0.7534153", "0.75278115", "0.7526931", "0.7525567", "0.75204974", "0.75150675", "0.7491006", "0.7489625", "0.7483982", "0.7483982",...
0.7443866
27
Nethod for playing a game of sudoku. Prints out rules and instructions and asks for user inputs. If current puzzle is solved, asks player if they would like to play again and provides a new puzzle.
def play_sudoku(puzzle): print_instructions() print("For review and grading purposes purposes, here is a sample solution:") puzzle.print_board(puzzle.alg_solution) # while puzzle is not solved, continues to ask user for their next input while puzzle.get_game_state() != "Solved!": puzzle.request_number_input() puzzle.print_board(puzzle.get_game_board()) # if puzzle is solved, asks user if they would like to play again play_again = input("Would you like to play again? Y/N: ") play_again = play_again.lower() if play_again == 'y': puzzle.build_game_board() play_sudoku(puzzle) else: print("Thanks for playing!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n print(WELCOME_MESSAGE)\r\n\r\n playing = True\r\n while playing:\r\n\r\n # Valid inputs that the user can use\r\n move_actions = (UP, DOWN, LEFT, RIGHT)\r\n other_actions = (GIVE_UP, HELP)\r\n\r\n grid_size = int(input(BOARD_SIZE_PROMPT))\r\n\r\n # Get th...
[ "0.7092953", "0.689259", "0.68871856", "0.6726647", "0.6684823", "0.6676533", "0.6589806", "0.6555856", "0.6482301", "0.63967913", "0.6358735", "0.6353301", "0.6346815", "0.63003695", "0.6269608", "0.6250014", "0.6243339", "0.62017316", "0.61864555", "0.6185452", "0.61633503"...
0.82174706
0
Prints to console a set of instructions for how to play a game of Sudoku.
def print_instructions(): print("Welcome to the game of Sudoku!") print("--------------------------------") print("The goal of the game is to fill every 'square' here with a number.") print("The rules of the game are simple:") print(" Rule No 1: You can only enter numbers 1-9 in each square.") print(" Rule No 2: You cannot repeat the use of a number within a row, column or 3x3 segment.") print("--------------------------------") print("Instructions:") print(" - You will be prompted to enter a row, a column, and then a number input.") print(" - The rows and column inputs are 0-indexed, meaning it goes from 0-8.") print(" - The number input is expected to be 1-9. Any other inputs will not be accepted.") print(" - Once you've filled out every square, the game will automatically check to see if your solution is valid!") print(" - If not, it will prompt you to try again, and you can continue to change your inputs or even write") print(" over your original entries.") print("Good luck, have fun!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_instructions(self):\n\t\tprint('\\n\\n==========================================================================')\n\t\tprint('==========================================================================\\n')\n\t\tprint('Welcome to Tic Tac Toe, the came you know and love. \\nThe rules are the same ones you...
[ "0.75848573", "0.71088547", "0.703432", "0.6988165", "0.69866717", "0.6634064", "0.6410708", "0.6397539", "0.6355258", "0.63541543", "0.6338774", "0.63357323", "0.6296808", "0.6284995", "0.6267385", "0.62419635", "0.6191577", "0.6185726", "0.6172115", "0.61716187", "0.6133821...
0.74090517
1
Generate a automatic configuration for Home Assistant.
def gen_ha_config(self, mqtt_base_topic): json_config = { "name": self.friendly_name, "unique_id": "DALI2MQTT_LIGHT_{}".format(self.device_name), "state_topic": MQTT_STATE_TOPIC.format(mqtt_base_topic, self.device_name), "command_topic": MQTT_COMMAND_TOPIC.format( mqtt_base_topic, self.device_name ), "payload_off": MQTT_PAYLOAD_OFF.decode("utf-8"), "brightness_state_topic": MQTT_BRIGHTNESS_STATE_TOPIC.format( mqtt_base_topic, self.device_name ), "brightness_command_topic": MQTT_BRIGHTNESS_COMMAND_TOPIC.format( mqtt_base_topic, self.device_name ), "brightness_scale": self.max_level, "on_command_type": "brightness", "availability_topic": MQTT_DALI2MQTT_STATUS.format(mqtt_base_topic), "payload_available": MQTT_AVAILABLE, "payload_not_available": MQTT_NOT_AVAILABLE, "device": { "identifiers": "dali2mqtt", "name": "DALI Lights", "sw_version": f"dali2mqtt {__version__}", "model": "dali2mqtt", "manufacturer": f"{__author__} <{__email__}>", }, } return json.dumps(json_config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"lo...
[ "0.603333", "0.5879031", "0.58355165", "0.58121914", "0.58121914", "0.57822585", "0.57382154", "0.57061666", "0.57061666", "0.5690142", "0.56779516", "0.5659888", "0.5630428", "0.56107765", "0.5600595", "0.5599627", "0.55983096", "0.55772704", "0.55772704", "0.55755526", "0.5...
0.0
-1
returns the number of combinations of size k that can be made from n items. >>> nchoosek(5,3) 10 >>> nchoosek(1,1) 1 >>> nchoosek(4,2) 6
def nchoosek(n, k): if (n, k) in known: return known[(n,k)] if k == 0: return 1 if n == k: return 1 if n < k: return "n must be greater than k" result = nchoosek(n - 1, k - 1) + nchoosek(n - 1, k) known[(n,k)] = result return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nchoosek(n, k):\n if n < k:\n return 0\n return partition(n, [k, n - k])", "def n_choose_k(N,K):\n return factorial(N) // (factorial(N - K) * factorial(K))", "def n_choose_k(n: int, k: int) -> int:\n # Edge case, no possible way to choose.\n if k > n or k < 0 or n < 0: return 0\n #...
[ "0.8256239", "0.7943082", "0.77849466", "0.77155447", "0.75553346", "0.75553226", "0.7516673", "0.7516673", "0.7516148", "0.74694544", "0.73904", "0.7390276", "0.7361454", "0.72738117", "0.72258496", "0.7099127", "0.7037198", "0.7019336", "0.70099944", "0.7001311", "0.6997127...
0.7859963
2
Creates four plotly visualizations using the New York Times Archive API
def return_figures(): # Add New York Times API Key nyt = NYTAPI("AsjeHhqDYrePA2GMPpYoY1KAKAdG7P99") # Select Year and Month of articles data = nyt.archive_metadata( date = datetime.datetime(2020, 7, 1) ) def data_to_df(data): # Initiate list for restructured information data_list = [] # Collect Data from API dictionary for article in data: new_data = [article.get("section_name"), article.get("news_desk"), article.get("pub_date"), article.get("headline").get("main"), article.get("abstract"), article.get("lead_paragraph"), article.get("type_of_material"), article.get("word_count")] # Append list of information from article to data list data_list.append(new_data) # Convert data list to DataFrame df = pd.DataFrame(data_list, columns=["section_name","news_desk", "pub_date", "headline", "abstract", "lead_paragraph", "type_of_material", "word_count"]) return df df = data_to_df(data) # first chart plots section distribution # as a pie chart graph_one = [] df_one = df.copy() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values labels = df_one.section_name.value_counts().index values = df_one.section_name.value_counts().values graph_one.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_one = dict(title = 'Distribution of sections of this months New York Times articles') # second chart plots section distribution # as a pie chart graph_two = [] df_two = df.copy() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values labels = df_two.news_desk.value_counts().index values = df_two.news_desk.value_counts().values graph_two.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_two = dict(title = 'Distribution of news desk of this months articles') # third chart plots section distribution # as a pie chart graph_three = [] df_three = df.copy() # filter and sort values for the visualization # 
filtering plots the articles in decreasing order by their values labels = df_three.type_of_material.value_counts().index values = df_three.type_of_material.value_counts().values graph_three.append( go.Pie( labels=labels, values=values, hole=.6, textposition="inside" ) ) layout_three = dict(title = 'Distribution for type of material of this months articles') # fourth chart plots section distribution # as a pie chart graph_four = [] # Convert publishing date columns to datetime format df["pub_date"] = pd.to_datetime(df["pub_date"]).dt.date df_four = df.copy() df_four = df_four.pub_date.value_counts().to_frame().sort_index() # filter and sort values for the visualization # filtering plots the articles in decreasing order by their values x_val = df_four.index y_val = df_four.values graph_four.append( go.Scatter( x=df_four.index, y=df_four["pub_date"], mode="lines", name="Articles" ) ) layout_four = dict(title = 'Number of articles published by days') # fourth chart plots section distribution # as a pie chart graph_five = [] # Calculate average number of words for this months articles avg_word_count = round(df.word_count.mean(),0) graph_five.append( go.Table( header=dict(values=['Average Word Count']), cells=dict(values=[avg_word_count]) ) ) layout_five = dict(title = '') # append all charts figures = [] figures.append(dict(data=graph_one, layout=layout_one)) figures.append(dict(data=graph_two, layout=layout_two)) figures.append(dict(data=graph_three, layout=layout_three)) figures.append(dict(data=graph_four, layout=layout_four)) figures.append(dict(data=graph_five, layout=layout_five)) return figures
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def return_figures():\n\n graph_one = []\n df = cleanparrisdf('data/Salem-Village-Data-Set.csv')\n sources = [0,0,0,1,1,1]\n targets = [2,3,4,2,3,4]\n values = df[\"petition_count\"].tolist()\n\n data_one = dict(\n type = 'sankey',\n node = dict(\n pad = 10,\n ...
[ "0.64221996", "0.6253128", "0.61704546", "0.61357796", "0.59865403", "0.5960612", "0.5946181", "0.59333766", "0.585216", "0.58064705", "0.5785414", "0.576179", "0.5730726", "0.57133067", "0.57080656", "0.5644872", "0.56262666", "0.5618747", "0.559435", "0.559427", "0.5580006"...
0.715962
0
Since virtual steppers are virtual, we don't need pins or step sequences. We're still using delay and n_steps to resemble physical steppers.
def __init__(self, name = None, n_steps = 256, delay = 1e-3): self.fig, self.ax = plt.subplots(figsize=(3, 3)) self.n_steps = n_steps self.delay = delay self.step_size = 2 * pi / self.n_steps if name is None: self.name = 'Stepper {}'.format(VirtualStepper.count + 1) self.angle = 0.0 self.check() self.inv = False VirtualStepper.count += 1 plt.ion() plt.show() self.draw()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simulation_step(self):\n if not self.np_trajectory.size:\n #No trajectory to go to.....\n return\n closest_ind = self.find_closest_trajectory_pose()\n ref_ind = (closest_ind + 30) # closest_ind + numpy.round(self.v / 4)\n traj_len = len(self.np_trajectory[0])\n...
[ "0.61042655", "0.58270293", "0.57595366", "0.5680195", "0.566837", "0.56112635", "0.5599372", "0.55904734", "0.5564814", "0.55495346", "0.55369097", "0.5532785", "0.552632", "0.55011874", "0.5483982", "0.5469438", "0.5464263", "0.54594064", "0.54537576", "0.5439023", "0.54092...
0.5976446
1
Rotates to the angle specified (chooses the direction of minimum rotation)
def rotate_to(self, angle, degrees = False): target = angle * pi / 180 if degrees else angle curr = self.angle diff = (target - curr) % (2*pi) if abs(diff - (2*pi)) < diff: diff = diff - (2*pi) self.rotate_by(diff)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate(self, angle):\n old_angle, tilt = self.rotation\n new_angle = old_angle + angle\n while new_angle > 90:\n new_angle = new_angle - 90\n while angle < -90:\n new_angle = new_angle + 90\n self.rotation = (new_angle, tilt)", "def rotate(self, angle)...
[ "0.76305664", "0.74709356", "0.7153996", "0.71267575", "0.71231437", "0.7109132", "0.71029294", "0.70860887", "0.7050864", "0.6986599", "0.69027513", "0.6902106", "0.6818123", "0.68133605", "0.67248964", "0.6705006", "0.6692359", "0.66833335", "0.6678019", "0.66422665", "0.66...
0.7034826
9
Rotate the stepper by this angle (radians unless specified) Positive angles rotate clockwise, negative angles rotate counterclockwise
def rotate_by(self, angle, degrees = False): target = angle * pi / 180 if degrees else angle if self.inv: target = -target if target > 0: n = int(target // self.step_size) + 1 for _ in range(n): self.step_c() else: n = int(-target // self.step_size) + 1 for _ in range(n): self.step_cc() if self.inv: diff = -diff
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate_rad(self, angle):\n self.beam_angle += angle\n self.xy = rotate(self.xy, angle)\n self.angle += angle", "def rotate(self, direction):\n electro = pygame.mixer.Sound('resources/Electro_Motor.wav')\n electro.set_volume(0.2)\n self.rotation += min(max(direction, ...
[ "0.7070411", "0.7032392", "0.6987201", "0.6970376", "0.69328016", "0.6915016", "0.6913845", "0.68389475", "0.68369746", "0.682694", "0.6704316", "0.6675216", "0.6641125", "0.66407424", "0.66319656", "0.66140467", "0.65792656", "0.65759706", "0.6568908", "0.6567404", "0.651743...
0.730979
0
Resets the position of the stepper to 0
def zero(self):
    """Reset the stepper's position back to an angle of 0."""
    self.angle = 0.0
    self.draw()  # refresh the on-screen representation
    time.sleep(self.delay)  # pause so the reset is visible
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_position(self):\n self.goto(STARTING_POSITION)", "def reset(self):\n self.steps = 0\n self.state = 0\n self.trajectory = []", "def reset(self):\n self._position = TwoDV(0.0, 0.0)\n self._orient = TNavigator.START_ORIENTATION[self._mode]", "def reset(self):...
[ "0.74105155", "0.72891736", "0.72350556", "0.70255756", "0.70246994", "0.6999789", "0.6948243", "0.6939481", "0.6906952", "0.68637085", "0.68452317", "0.6839638", "0.6813699", "0.68041223", "0.68041223", "0.6704756", "0.6704756", "0.6704756", "0.6691812", "0.66758686", "0.665...
0.6214618
68
Add radio buttons to an `~.axes.Axes`.
def __init__(self, ax, labels, active=0, activecolor='blue', size=49, orientation="vertical", **kwargs):
    """Add radio buttons to an `~.axes.Axes`.

    Each entry of *labels* becomes a circular scatter marker rendered
    through a legend placed in *ax*. The entry at index *active* starts
    selected and is filled with *activecolor*; clicking a marker fires
    the registered observers via the ``pick_event`` handler.
    """
    AxesWidget.__init__(self, ax)
    self.activecolor = activecolor
    inactive_color = ax.get_facecolor()
    self.value_selected = None

    # The axes only hosts the legend: hide ticks and disable navigation.
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_navigate(False)

    handles = []
    for idx, text in enumerate(labels):
        selected = idx == active
        if selected:
            self.value_selected = text
        fill = activecolor if selected else inactive_color
        handles.append(ax.scatter([], [], s=size, marker="o", edgecolor='black', facecolor=fill))

    if orientation == "horizontal":
        # Lay the buttons out in a single expanded row.
        kwargs.update(ncol=len(labels), mode="expand")
    kwargs.setdefault("frameon", False)
    self.box = ax.legend(handles, labels, loc="center", **kwargs)
    self.labels = self.box.texts
    self.circles = self.box.legendHandles
    for handle in self.circles:
        handle.set_picker(5)  # make each circle respond to mouse picks
    self.cnt = 0
    self.observers = {}
    self.connect_event('pick_event', self._clicked)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def radioButton(*args, align: Union[AnyStr, bool]=\"\", annotation: Union[AnyStr, bool]=\"\",\n backgroundColor: Union[List[float, float, float], bool]=None, changeCommand:\n Script=None, collection: AnyStr=\"\", data: Union[int, bool]=0, defineTemplate:\n AnyStr=\"\", ...
[ "0.61411875", "0.5947201", "0.58568066", "0.58213407", "0.5773773", "0.56557375", "0.56341064", "0.56140417", "0.55877143", "0.55544215", "0.55341345", "0.55151653", "0.5453786", "0.5414262", "0.53953147", "0.5390913", "0.538077", "0.5366018", "0.53010714", "0.5281877", "0.52...
0.47138745
80
Initiate the temporal GIS and set the region
def setUpClass(cls):
    """Initiate the temporal GIS and set the computational region."""
    # Work in a throwaway region so the user's own region is untouched.
    cls.use_temp_region()
    # Pin the region to the raster used by the tests below.
    cls.runModule("g.region", raster="elev_state_500m")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_region(self):\n self.new_region_name = \"\"\n self.map.regions.create_new_region()", "def update_temperature_region(self):\n self.linear_region.setRegion(\n self.temperature_plot_graph.getViewBox().viewRange()[0])", "def __init__(self, region):\r\n self.reg...
[ "0.6685489", "0.64153266", "0.6042711", "0.60086167", "0.60021317", "0.59598774", "0.58936393", "0.58936393", "0.5817339", "0.5815601", "0.57746816", "0.5762331", "0.57130456", "0.5645798", "0.5611731", "0.5609597", "0.5601189", "0.55973095", "0.55544496", "0.55544496", "0.55...
0.61658186
2
Remove the temporary region
def tearDownClass(cls):
    """Remove the temporary region and the generated test vector."""
    # Clean up the vector map produced during the tests.
    cls.runModule("g.remove", flags="rf", type="vector", name="gbif_poa3")
    # Restore the user's original computational region.
    cls.del_temp_region()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_this_region(self):", "def delete_region(self, region):\n\n self.contour_plot.vb.removeItem(region)\n del self.regions[id(region)]", "def remove():", "def removePick(self):\n self.pnt = None\n vtkRenWin.delMarker(self.renWin)", "def stop_region(self):\n self.reg...
[ "0.7985368", "0.6744757", "0.65855074", "0.6343354", "0.62646586", "0.62146246", "0.6214121", "0.6068292", "0.6062553", "0.6047077", "0.6027426", "0.6016135", "0.5975185", "0.5951818", "0.59466493", "0.58890456", "0.58813125", "0.58796966", "0.5872771", "0.58329594", "0.58293...
0.6391738
3
Show something if there isn't anything happening in the inventory
def test_nothing(self):
    """An empty inventory should still render the device list page."""
    resp = self.client.get(reverse('device-list'))
    # The page loads fine...
    self.assertEqual(resp.status_code, 200)
    # ...and shows no devices at all.
    self.assertQuerysetEqual(resp.context['devices'], [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inventory(self):\n\n #when the item list is 0 , print out having no items \n if len(self.items) == 0:\n \n print('The player has no items')\n\n #if not, print out the item list \n else:\n print(self.items)", "def display_inventory(self):\n h...
[ "0.7831163", "0.77966297", "0.7582839", "0.70266676", "0.6962343", "0.6900891", "0.68886095", "0.686806", "0.6751272", "0.6545909", "0.65149176", "0.64721984", "0.6435862", "0.6420513", "0.6411051", "0.6353434", "0.6313033", "0.63007027", "0.6266621", "0.6233488", "0.6225111"...
0.0
-1
convert a CSV file into a NumPy array
def csv_2_numpy(file, path=INPUT_PATH, sep=',', type='int8'):
    """Load a CSV file into a NumPy array.

    Parameters
    ----------
    file : str
        File name, appended verbatim to *path*.
    path : str
        Directory prefix (assumed to end with a path separator —
        TODO confirm against INPUT_PATH's definition).
    sep : str
        Field delimiter of the CSV file.
    type : str
        NumPy dtype name the parsed values are cast to.

    Returns
    -------
    numpy.ndarray
        2-D array of the file's values, cast to *type*.
    """
    file_path = path + file
    # `with` guarantees the handle is closed; the original opened the
    # file without ever closing it.
    with open(file_path, "r") as handle:
        rows = list(csv.reader(handle, delimiter=sep))
    return numpy.array(rows).astype(type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(csvfilename):\r\n with open(csvfilename, 'r') as f:\r\n reader = csv.reader(f, delimiter=';')\r\n #reader = csv.reader(f, delimiter=';', quotechar=\"'\")\r\n data = list(reader)\r\n # transform data into numpy array\r\n data = np.array(data).astype(float)\r\n retu...
[ "0.7606892", "0.7327829", "0.727883", "0.7161398", "0.71550566", "0.69989276", "0.69635636", "0.68933666", "0.6836764", "0.6802852", "0.6801808", "0.67944103", "0.6787268", "0.67241", "0.6684425", "0.66639805", "0.6646328", "0.6636542", "0.6630825", "0.658941", "0.6581193", ...
0.81163687
0
convert a NumPy array into a CSV file, for the ID (LibRA) algorithm
def numpy_2_file(narray, file, path=OUTPUT_PATH, sep=','):
    """Write a NumPy array to a CSV file for the ID (LibRA) algorithm.

    Values of -1 and -2 are treated as missing and written as '*';
    every other value is written unchanged. The input array is not
    modified.

    Parameters
    ----------
    narray : numpy.ndarray
        Array to export.
    file : str
        File name, appended verbatim to *path*.
    path : str
        Directory prefix; defaults to OUTPUT_PATH.
    sep : str
        Field delimiter for the output file.
    """
    file_path = path + file
    # Work on a copy so the caller's array is left untouched.
    coded = numpy.copy(narray)
    # Collapse both missing-value codes onto the sentinel 2 ...
    numpy.place(coded, numpy.logical_or(coded == -1, coded == -2), 2)
    # ... then render as text and turn the sentinel into '*'.
    # astype already returns a fresh array, so the original's extra
    # numpy.copy before it was redundant and has been dropped.
    # NOTE(review): a literal 2 in the input data would also become '*';
    # this preserves the original behavior — confirm the data is binary.
    dataset = coded.astype(str)
    numpy.place(dataset, dataset == '2', '*')
    numpy.savetxt(file_path, numpy.atleast_2d(dataset), delimiter=sep, fmt='%s')
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def produce_solution(y):\n\n with open('out.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', lineterminator=\"\\n\")\n writer.writerow(['id', 'y'])\n for i in range(y.shape[0]):\n writer.writerow([i, y[i]])", "def write_csv_file(array, filename):...
[ "0.66480744", "0.66200095", "0.63522464", "0.6301102", "0.6266639", "0.6256515", "0.62520635", "0.6203554", "0.6200669", "0.6179599", "0.6171765", "0.61589", "0.61375374", "0.61303294", "0.61182237", "0.60997707", "0.6094575", "0.6092054", "0.6079023", "0.6075535", "0.6071643...
0.5817157
61