query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Used to hide the actual prefix dictionary.
def _getPrefixDict(self): if not hasattr(self, '_prefixDict'): self.__prefixDict = {} return self.__prefixDict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _resetPrefixDict(self):\r\n self._getPrefixDict().clear()", "def remove_prefix(self, state_dict, prefix):\n print('remove prefix \\'{}\\''.format(prefix))\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x # 去除带有prefix的名字\n return {f(key): value for key, value ...
[ "0.6534732", "0.6454535", "0.6143842", "0.60910183", "0.5947643", "0.5947643", "0.58735013", "0.58210343", "0.57964146", "0.57501936", "0.57409424", "0.57313424", "0.5716517", "0.57021815", "0.5605354", "0.5605354", "0.5517104", "0.54289126", "0.54218256", "0.54174894", "0.54...
0.62041974
2
Clears the prefix dictionary, this needs to be done before creating a new typecode for a message (ie. before, and after creating a new message typecode)
def _resetPrefixDict(self): self._getPrefixDict().clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self.footnotes = OrderedDict()\n self.unique_prefix += 1", "def remove_prefix(self, state_dict, prefix):\n print('remove prefix \\'{}\\''.format(prefix))\n f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x # 去除带有prefix的名字\n return {f(key): ...
[ "0.661214", "0.6500577", "0.6333235", "0.60439557", "0.6028406", "0.60041296", "0.5998321", "0.59930265", "0.59643847", "0.59600914", "0.59294903", "0.5922057", "0.5883665", "0.5853302", "0.58453923", "0.5836675", "0.5826535", "0.5804847", "0.5762894", "0.5750483", "0.5747566...
0.8281239
0
Returns a typecode instance representing the passed in element. element XMLSchema.ElementDeclaration instance literal literal encoding? local is locally defined? namespaceURI namespace
def _getElement(self, element, literal=False, local=False, namespaceURI=None): if not element.isElement(): raise TypeError, 'Expecting an ElementDeclaration' tc = None elementName = element.getAttribute('name') tp = element.getTypeDefinition('type') typeObj = None if not (tp or element.content): nsuriType,localName = element.getAttribute('type') typeClass = self._getTypeClass(nsuriType,localName) typeObj = typeClass(elementName) elif not tp: tp = element.content if not typeObj: typeObj = self._getType(tp, elementName, literal, local, namespaceURI) minOccurs = int(element.getAttribute('minOccurs')) typeObj.optional = not minOccurs maxOccurs = element.getAttribute('maxOccurs') typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1) return typeObj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def element_type(self) -> global___Type:", "def create_class_instance(element, element_id, doc_id):\n xsi_type = get_xsi_type(element)\n element_class = XSI_TYPE_CLASSES[xsi_type]\n return element_class.from_etree(element)", "def element_type(self):\r\n result = conf.lib.clang_getElementType(se...
[ "0.6112495", "0.5760717", "0.54377365", "0.5435102", "0.54039854", "0.5333338", "0.5304633", "0.5193657", "0.51740164", "0.51256275", "0.51102036", "0.50124407", "0.5008194", "0.4974934", "0.49698183", "0.49652553", "0.49299234", "0.4924845", "0.49117178", "0.48976466", "0.48...
0.6733242
0
Returns a typecode instance representing the passed in type and name. tp XMLSchema.TypeDefinition instance name element name literal literal encoding? local is locally defined? namespaceURI namespace
def _getType(self, tp, name, literal, local, namespaceURI): ofwhat = [] if not (tp.isDefinition() and tp.isComplex()): raise EvaluateException, 'only supporting complexType definition' elif tp.content.isComplex(): if hasattr(tp.content, 'derivation') and tp.content.derivation.isRestriction(): derived = tp.content.derivation typeClass = self._getTypeClass(*derived.getAttribute('base')) if typeClass == TC.Array: attrs = derived.attr_content[0].attributes[WSDL.BASE] prefix, localName = SplitQName(attrs['arrayType']) nsuri = derived.attr_content[0].getXMLNS(prefix=prefix) localName = localName.split('[')[0] simpleTypeClass = self._getTypeClass(namespaceURI=nsuri, localName=localName) if simpleTypeClass: ofwhat = simpleTypeClass() else: tp = self._wsdl.types[nsuri].types[localName] ofwhat = self._getType(tp=tp, name=None, literal=literal, local=True, namespaceURI=nsuri) else: raise EvaluateException, 'only support soapenc:Array restrictions' return typeClass(atype=name, ofwhat=ofwhat, pname=name, childNames='item') else: raise EvaluateException, 'complexContent only supported for soapenc:Array derivations' elif tp.content.isModelGroup(): modelGroup = tp.content for item in modelGroup.content: ofwhat.append(self._getElement(item, literal=literal, local=True)) tc = TC.Struct(pyclass=None, ofwhat=ofwhat, pname=name) if not local: self._globalElement(tc, namespaceURI=namespaceURI, literal=literal) return tc raise EvaluateException, 'only supporting complexType w/ model group, or soapenc:Array restriction'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def type(name):", "def get_typecode(self, name):\n return self.codes['type_codes'][name]", "def name_to_type(self, name):\n return self.CUSTOM_PREFIX + name", "def _PyType_Lookup(space, type, w_name):\n w_type = from_ref(space, rffi.cast(PyObject, type))\n assert isinstance(w_type, W_TypeObje...
[ "0.68378854", "0.6094486", "0.60735965", "0.5959993", "0.58422214", "0.57260895", "0.57252353", "0.5668879", "0.564833", "0.564122", "0.5636486", "0.56226254", "0.56203324", "0.5611236", "0.5606567", "0.55945", "0.5588911", "0.5588712", "0.55883664", "0.5574209", "0.55277264"...
0.5198417
48
Returns a typecode class representing the type we are looking for. localName name of the type we are looking for. namespaceURI defining XMLSchema targetNamespace.
def _getTypeClass(self, namespaceURI, localName): bti = BaseTypeInterpreter() simpleTypeClass = bti.get_typeclass(localName, namespaceURI) return simpleTypeClass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def find_class(self, class_name: str) -> Type:\n pass", "def get_class(self, class_name, output_type=\"PythonClass\"):\n uris = self.cls_converter.get_uri(class_name)\n if type(uri...
[ "0.6334454", "0.63214386", "0.621163", "0.60691124", "0.5908769", "0.58564675", "0.5796145", "0.5774467", "0.5729281", "0.5662747", "0.5652824", "0.55890405", "0.5530385", "0.5529409", "0.5522239", "0.5496328", "0.54915804", "0.54823256", "0.54342365", "0.5419367", "0.5410473...
0.7862565
0
extracts the features used to calculate neural style cost gram_style_features a list of gram matrices calculated from the style layer outputs of the style image content_feature the content layer output of the content image
def generate_features(self): content_input = self.content_image * 255 style_input = self.style_image * 255 preprocessed_content = tf.keras.applications.vgg19.preprocess_input( content_input) preprocessed_style = tf.keras.applications.vgg19.preprocess_input( style_input) outputs_content = self.model(preprocessed_content) outputs_style = self.model(preprocessed_style) num_style_layers = tf.size(self.style_layers) style_outputs, content_outputs = ( outputs_style[:num_style_layers], outputs_content[num_style_layers:]) style_outputs = [self.gram_matrix( style_output)for style_output in style_outputs] self.gram_style_features = style_outputs self.content_feature = content_outputs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_style_image_features(image):\n ### START CODE HERE ###\n # preprocess the image using the given preprocessing function\n preprocessed_style_image = preprocess_image(image)\n\n # get the outputs from the inception model that you created using inception_model()\n outputs = inception(preprocessed_style_i...
[ "0.78582704", "0.7087954", "0.6903325", "0.6433953", "0.64332956", "0.64121157", "0.6316672", "0.62753826", "0.61490476", "0.6146242", "0.61101943", "0.60842645", "0.6055493", "0.60267216", "0.60207623", "0.6020109", "0.6018783", "0.6010386", "0.5994345", "0.5985522", "0.5985...
0.78912055
0
Estimates the strongest drivers of each neuron using GTE.
def estimate_parents(D, verbose=1, **params): # Parameters CL = params.setdefault('CL', 0.25) k = params.setdefault('k', 2) IFT = params.setdefault('IFT', True) estimate_CL = params.setdefault('estimate_CL', False) num_parents = params.setdefault('num_parents', 3) if verbose > 0: print('Estimating parents using GTE') # Cast D to only two bins for activity level D = np.greater(D, 0) parents = dict() scores = calc_GTE( D.T, CL=CL, k=k, IFT=IFT, estimate_CL=estimate_CL, verbose=verbose) for i in range(scores.shape[0]): p = (-scores[:,i]).argsort()[:num_parents] parents[i] = p return parents, scores
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SGD2_5all(self, training_data, test_data=None):\r\n final = []\r\n final.append(self.SGD2_5_1(training_data, 10, 10, 3.0,test_data))\r\n\r\n print(\"first done\")\r\n\r\n self.sizes=[784,30,10]\r\n self.num_layers = len(self.sizes)\r\n self.biases = [np.random.randn(y,...
[ "0.5493443", "0.54361165", "0.5433714", "0.54278624", "0.5415707", "0.5382359", "0.53555316", "0.5353432", "0.53351855", "0.5288633", "0.5276035", "0.5257488", "0.52392656", "0.5214441", "0.51326734", "0.5129553", "0.5117215", "0.51140994", "0.5112784", "0.5106431", "0.509864...
0.0
-1
Downsamples spike data to include only the top 1% of frames
def downsample_spikes(S, thres=150, verbose=1): sum_S = np.sum(S, axis=0) if verbose > 0: print( 'Downsampling spike data to {} frames using threshold {}' .format(np.sum(np.greater(sum_S, thres)), thres)) return S[:, np.greater(sum_S, thres)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _down_sample(self):\n self._subsamples = self._raw_data.samples[::self._down_sample_factor]\n # Neglects the redundant subsamples in the tails.\n if len(self._subsamples) >= self._number_of_subsamples:\n self._subsamples = self._subsamples[:self._number_of_subsamples]\n if not len(self._subsam...
[ "0.6455042", "0.61035407", "0.59346664", "0.58270806", "0.58128667", "0.57843024", "0.5715292", "0.57149154", "0.57001853", "0.5676207", "0.5668947", "0.559317", "0.5588073", "0.5547957", "0.5523909", "0.5510277", "0.5501954", "0.54938704", "0.54915655", "0.5463476", "0.54344...
0.66121924
0
Downsamples fluorescence data to include approximately the top 1% of frames based on total increase in activity. Currently the threshold is set for 1000 neurons. Original code from
def downsample_fluorescence(F, thres=20, verbose=1): diff_F = np.diff(F, axis=1) sum_F = np.sum(diff_F, axis=0) F = F[:,:-1] if verbose > 0: print( 'Downsampling fluorescence data to {} frames using threshold {}' .format(np.sum(np.greater(sum_F, thres)))) return F[:, np.greater(sum_F, thres)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n ...
[ "0.61294377", "0.58991325", "0.5723101", "0.56286037", "0.557679", "0.5432801", "0.5261728", "0.52388954", "0.5201363", "0.5196606", "0.51883674", "0.5182593", "0.51802176", "0.51758677", "0.5150192", "0.5148113", "0.5143918", "0.51152796", "0.51152796", "0.51152796", "0.5087...
0.66418946
0
Generates a balanced set of training examples from a single dataset.
def get_examples(ds_data, network, parents, verbose=1, **params): # Parameters classes = params.setdefault('classes', [-1,0,1]) target = params.setdefault('target', int(1.2e6)) slice_len = params.setdefault('slice_len', 330) assert not target % len(classes) G = np.mean(ds_data, axis=0) examples = np.zeros((target, 5, slice_len, 1)) labels = np.zeros((target, len(classes))) count = 0 if verbose > 0: print('Generating {} training examples'.format(target)) bar = pb.ProgressBar(max_value=target, widgets=[pb.Percentage(), ' - ', pb.Bar(), ' - ', pb.ETA()]) for c in classes: pairs = np.argwhere(network == c) reps = int(target/len(classes)/pairs.shape[0]) + 1 pair_idx = np.repeat(np.arange(pairs.shape[0]), reps) pair_idx = np.random.permutation(pair_idx)[:target//len(classes)] start_idx = np.random.randint( 0, ds_data.shape[1]-slice_len, size=target//len(classes)) for i in range(pair_idx.size): n1 = pairs[pair_idx[i]][0] n2 = pairs[pair_idx[i]][1] assert(network[n1,n2] == c) start = start_idx[i] end = start + slice_len p1 = np.mean(ds_data[parents[n1], start:end], axis=0) p2 = np.mean(ds_data[parents[n2], start:end], axis=0) examples[count,:,:,0] = np.vstack(( p1, ds_data[n1][start:end], G[start:end], ds_data[n2][start:end], p2 )) labels[count,:] = np.equal(classes, c, dtype=np.int32) if verbose > 0: bar.update(count) count +=1 if verbose > 0: bar.finish() print( 'Generated examples of shape:', examples.shape, '\nGenerated labels of shape:', labels.shape, '\nThere are {} classes: {}'.format(len(classes), classes) ) assert not np.isnan(examples).any() return examples, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['tr...
[ "0.6719611", "0.66851825", "0.668324", "0.65884167", "0.6578879", "0.6477982", "0.6476447", "0.6440723", "0.6435114", "0.6385229", "0.6352234", "0.634321", "0.634212", "0.63130134", "0.63006437", "0.62959516", "0.6281407", "0.6270701", "0.6244588", "0.62174755", "0.6190903", ...
0.6441262
7
Generates a balanced set of training examples from one or more datasets.
def generate_dataset( datasets, networks, parents, mode='train', mean=None, verbose=1, **params): # Parameters classes = params.setdefault('classes', [-1,0,1]) data_type = params.setdefault('data_type', 'spikes') thres = params.setdefault('thres', 150.0) target = params.setdefault('target', int(1.2e6)) valid_split = params.setdefault('valid_split', 0.1) slice_len = params.setdefault('slice_len', 330) assert len(datasets) == len(networks) == len(parents) examples = np.zeros((target, 5, slice_len, 1)) labels = np.zeros((target, len(classes))) ex_per_netw = target//len(datasets) params['target'] = ex_per_netw for i in range(len(datasets)): if verbose > 0: print('Network {} of {}'.format(i+1, len(datasets))) data = datasets[i] network = networks[i] parents_ = parents[i] if data_type == 'spikes': ds_data = downsample_spikes(data, thres=thres, verbose=verbose) elif data_type == 'fluorescence': ds_data = downsample_fluorescence( data, thres=thres, verbose=verbose) else: raise ValueError('Invalid data type') start = i*ex_per_netw end = (i+1)*ex_per_netw examples[start:end], labels[start:end] = get_examples( ds_data, network, parents_, verbose=verbose, **params) shuffle_idx = np.random.permutation(np.arange(examples.shape[0])) examples = examples[shuffle_idx] labels = labels[shuffle_idx] if mode == 'train': idx = int(examples.shape[0]*valid_split) ex_valid, ex_train = np.split(examples, [idx], axis=0) lbl_valid, lbl_train = np.split(labels, [idx], axis=0) mean = np.mean(ex_train, axis=0) ex_train -= mean ex_valid -= mean return ex_train, ex_valid, lbl_train, lbl_valid, mean elif mode == 'test': assert mean != None examples -= mean return examples, labels else: raise ValueError('Invalid mode')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['tr...
[ "0.6996355", "0.66383696", "0.6572915", "0.6517153", "0.64793134", "0.64199245", "0.64149666", "0.63592505", "0.63368434", "0.63012886", "0.6289509", "0.62852", "0.6267794", "0.62432927", "0.622932", "0.62188154", "0.62153995", "0.62117213", "0.6202799", "0.6186463", "0.61816...
0.68757796
1
Add error messages with Code for easy debugging
def add_codes(cls): class ErrorsWithCodes: # pylint: disable=too-few-public-methods """Add error messages with Code for easy debugging """ def __getattribute__(self, code): msg = getattr(cls, code) return f'[{code}] {msg}' return ErrorsWithCodes()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(message):\n print str(message)", "def error(self, message=None, show_help=True):", "def error(self, message):\n print message", "def error_mess():\n print(\"Sorry, I didn't understand that.\")", "def error(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['error']:\n ...
[ "0.72324693", "0.71228033", "0.71033734", "0.6947497", "0.69397295", "0.6862639", "0.68276596", "0.6808824", "0.6751318", "0.67416126", "0.67329", "0.67084694", "0.6701527", "0.6690459", "0.66822183", "0.66812146", "0.66701764", "0.66615677", "0.6634007", "0.6626952", "0.6608...
0.6224254
81
Takes an URL, a filename, and the expected bytes, download the contents and returns the filename num_bytes=None disables the file size check.
def maybe_download(url, filename, prefix, num_bytes=None): local_filename = None if not os.path.exists(os.path.join(prefix, filename)): try: logger.info("Downloading file {}...".format(url + filename)) with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t: local_filename, _ = urlretrieve(url + filename, os.path.join(prefix, filename), reporthook=_reporthook(t)) except AttributeError as e: logger.error("An error occurred when downloading the file! Please get the dataset using a browser.") raise e # We have a downloaded file # Check the stats and make sure they are ok file_stats = os.stat(os.path.join(prefix, filename)) if num_bytes is None or file_stats.st_size == num_bytes: logger.info("File {} successfully loaded".format(filename)) else: raise Exception("Unexpected dataset size. Please get the dataset using a browser.") return local_filename
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maybe_download(filename, url, expected_bytes):\n if not os.path.exists(filename):\n filename, _ = urllib.request.urlretrieve(url + filename, filename)\n statinfo = os.stat(filename)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filename)\n else:\n print...
[ "0.81564283", "0.81564283", "0.81564283", "0.79952234", "0.7990289", "0.79747564", "0.7969037", "0.7969037", "0.7902681", "0.7736243", "0.761421", "0.7601226", "0.7358599", "0.7322421", "0.7298379", "0.7141095", "0.71260726", "0.6950416", "0.6945195", "0.6897762", "0.6878477"...
0.7610241
11
Submit a metric as a rate, additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def submit_metric(self, metric_suffix, metric, scraper_config, gauge=True, monotonic_count=True):\n metric_name = scraper_config['namespace'] + metric_suffix\n for sample in metric.samples:\n # Explicit shallow copy of the instance tags\n _tags = list(scraper_config['custom_tags...
[ "0.65285105", "0.58314323", "0.57720864", "0.55562365", "0.5346391", "0.5288677", "0.5270663", "0.5265564", "0.51896477", "0.51511014", "0.5126361", "0.51259017", "0.51075906", "0.5091107", "0.5091107", "0.5021949", "0.5016198", "0.4961651", "0.49544987", "0.49520537", "0.494...
0.7829274
0
Submit a metric as a gauge, additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gauge(self, gauge, value):\n try:\n self._thread_pool_executor.submit(self._delegate.gauge, gauge, value)\n except:\n self._logger.exception('Exception caught submitting gauge metric')", "def add_gauge(self, data, metric_id=None):\n self._post_data(prefix_id='gauges...
[ "0.7285011", "0.6867734", "0.68111044", "0.6699969", "0.6642851", "0.6484831", "0.64751744", "0.6383549", "0.6362057", "0.6242089", "0.61857253", "0.60445803", "0.5981221", "0.5947248", "0.58967316", "0.5812371", "0.57682973", "0.5750602", "0.5690605", "0.5674989", "0.5456014...
0.7893012
0
Submit a metric as a monotonic count, additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None): _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname) self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)...
[ "0.7103015", "0.62737775", "0.60384905", "0.60204303", "0.59881437", "0.58371973", "0.5602605", "0.55809146", "0.5569244", "0.5481773", "0.5438321", "0.5368393", "0.53387356", "0.53331786", "0.5259395", "0.5257974", "0.52503383", "0.52373564", "0.52249354", "0.5209548", "0.52...
0.75249547
0
Override so human players are blue by default
def __init__(self, playerIndex, colour="blue"): super().__init__(playerIndex, colour)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_if_bottom_color_player_well_set(self):\n ui = UIRender(TestUI.image_path)\n ui.set_bottom_player_color(CELTIC_GREEN)\n self.assertEqual(ui.bottom_player_color, CELTIC_GREEN)\n self.assertEqual(ui.top_player_color, SPQR_RED)\n ui.set_bottom_player_color(SPQR_RED)\n ...
[ "0.6794499", "0.6771435", "0.6575915", "0.65424424", "0.6528288", "0.640866", "0.62868875", "0.6146448", "0.6046117", "0.6043046", "0.60069966", "0.59718126", "0.5966473", "0.5936794", "0.5885561", "0.58830494", "0.58271277", "0.57827985", "0.57747686", "0.57669955", "0.57326...
0.65751874
3
Very important. Lets the UI know to give human player control.
def isHuman(self): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __handle_view_player(self, gamestate_component):", "def game_play(self):", "def auto_play(self):\n raise NotImplementedError(self)", "def newPlayer():\r\n pass", "def take_control(self):\n pass", "def show_playing(self):\n\n print(\"show_playing needs implementation\")", "de...
[ "0.69776434", "0.67034006", "0.6657744", "0.655654", "0.6553702", "0.6436399", "0.64160234", "0.6278436", "0.6235379", "0.6231291", "0.6205271", "0.6144004", "0.61371064", "0.6132646", "0.6123176", "0.60980403", "0.60681164", "0.6064519", "0.60623634", "0.60614514", "0.603418...
0.0
-1
String representation for a random player. Used for writing results filenames.
def __str__(self): return "{}_human".format(self.index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n \n is_random_print = \"\"\n if self.is_random == True:\n is_random_print = \"randomly\"\n else:\n is_random_print = \"deterministically\"\n\n return \"Player for \" + self.side + \", ply = \" + str(self.ply) + \", breaks ties \" + is_ran...
[ "0.7373667", "0.71827644", "0.70852", "0.69778293", "0.69040483", "0.68909234", "0.6811406", "0.6797799", "0.6739397", "0.673668", "0.6681964", "0.66097915", "0.6596855", "0.658164", "0.6565075", "0.64829636", "0.6482711", "0.64633006", "0.64110553", "0.6410214", "0.6378095",...
0.0
-1
Chooses a move for the player by calling random move.
def chooseMove(self, game): return self.randomMove(game)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_random(self, board):\n self.get_moves(board.board)\n return random.choice(self.available_moves)", "def move(self):\r\n his_move = random.randint(0, 2)\r\n return the_moves[his_move]", "def computer_move():\n\tmove = random.choice(moves)\n\tprint \"Computer's move is %s\" % ...
[ "0.8052953", "0.7979933", "0.78945196", "0.7858158", "0.7849089", "0.78423244", "0.7794394", "0.77650255", "0.7646826", "0.7567179", "0.7565184", "0.755895", "0.754698", "0.75232786", "0.74944186", "0.74268633", "0.7360817", "0.7327671", "0.73010206", "0.7293377", "0.7293366"...
0.7899985
2
Get list of legal moves and return any random one.
def randomMove(self, game): #time.sleep(0.25) return random.choice(game.get_all_legal_moves())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random_legal_move():\n return random.choice(legal_moves())", "def get_random_move(self, valid_moves):\n return random.choice(valid_moves)", "def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves", "def move_random(self, board):\n self.get_moves(board.board)\n ...
[ "0.82790005", "0.820212", "0.764488", "0.75532055", "0.7404592", "0.73601913", "0.7347978", "0.7325695", "0.73243225", "0.73121375", "0.7256089", "0.7213734", "0.71593404", "0.71414167", "0.7133645", "0.70529675", "0.7036325", "0.69154537", "0.68977284", "0.6889334", "0.68773...
0.80969596
2
Very important. Lets the UI know NOT to give AI player UI control.
def isHuman(self): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_control(self):\n pass", "def isUIused():\n return False", "def __handle_view_player(self, gamestate_component):", "def __disableControls(self):\n self.ignoreAll()", "def noyable(self):\n return False", "def control_plugin(self):\n pass", "def _control_skip(self):...
[ "0.629158", "0.62601733", "0.62392414", "0.6203768", "0.6019735", "0.60026574", "0.59875256", "0.5963308", "0.595724", "0.59545267", "0.59274596", "0.59116316", "0.5896117", "0.5860238", "0.5832094", "0.5797094", "0.57889205", "0.57613266", "0.571739", "0.5679264", "0.5675429...
0.0
-1
String representation for a random player. Used for writing results filenames.
def __str__(self): return "{}_random".format(self.index)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n \n is_random_print = \"\"\n if self.is_random == True:\n is_random_print = \"randomly\"\n else:\n is_random_print = \"deterministically\"\n\n return \"Player for \" + self.side + \", ply = \" + str(self.ply) + \", breaks ties \" + is_ran...
[ "0.7373667", "0.71827644", "0.70852", "0.69778293", "0.69040483", "0.68909234", "0.6811406", "0.6797799", "0.6739397", "0.673668", "0.6681964", "0.66097915", "0.6596855", "0.658164", "0.6565075", "0.64829636", "0.6482711", "0.64633006", "0.64110553", "0.6410214", "0.6378095",...
0.59313565
54
Replace multiple assignment with single assignments.
def visit_Assign(self, node): self.generic_visit(node) is_multiple = len(node.targets) > 1 is_compound = any(map(is_sequence_node, node.targets)) is_simple = not is_compound if is_simple and is_multiple: return self.visit_simple_assign(node) elif is_compound and (is_multiple or is_sequence_node(node.value)): return self.visit_compound_assign(node) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_compound_assign(self, node):\n # Determine number of values (arity) of compound assignment.\n nvalues = { len(target.elts) for target in node.targets \n if is_sequence_node(target) }\n if len(nvalues) > 1:\n # A multiple, compound assignment with differe...
[ "0.6289772", "0.6041632", "0.5921341", "0.5901611", "0.57016975", "0.5555649", "0.55510634", "0.5515211", "0.5515172", "0.55127996", "0.54904634", "0.5386734", "0.5356579", "0.5338033", "0.53110933", "0.528611", "0.52802086", "0.5280196", "0.5250439", "0.52424175", "0.5139302...
0.55901885
5
Visit assignment node whose targets are all simple.
def visit_simple_assign(self, node): temp = gensym() temp_target = to_name(temp, ast.Store()) stmts = [ ast.Assign([temp_target], node.value) ] stmts += [ ast.Assign([target], to_name(temp)) for target in node.targets ] return stmts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif i...
[ "0.69212425", "0.64886594", "0.6375846", "0.6128011", "0.5978111", "0.5925981", "0.59075874", "0.58308166", "0.57873815", "0.572006", "0.5688921", "0.5654621", "0.5651131", "0.5596672", "0.5524254", "0.5473678", "0.5473099", "0.53852975", "0.53435594", "0.529892", "0.52969265...
0.70178634
0
Visit assignment node with at least one compound target.
def visit_compound_assign(self, node): # Determine number of values (arity) of compound assignment. nvalues = { len(target.elts) for target in node.targets if is_sequence_node(target) } if len(nvalues) > 1: # A multiple, compound assignment with different arities, e.g., # `x,y = a,b,c = ...` is not a syntax error in Python, though it # probably should be because it's guaranteed to cause a runtime # error. Raise the error here, since we cannot proceed. raise SyntaxError("Multiple assignment with different arities") nvalues = nvalues.pop() # Assign temporary variables. temps = [ gensym() for i in range(nvalues) ] stmts = [] if is_sequence_node(node.value) and len(node.value.elts) == nvalues: # Special case: RHS is sequence literal of correct length. for i in range(nvalues): temp_target = to_name(temps[i], ast.Store()) stmts.append(ast.Assign([temp_target], node.value.elts[i])) else: # General case. temp_target = to_tuple( (to_name(temp, ast.Store()) for temp in temps), ast.Store()) stmts.append(ast.Assign([temp_target], node.value)) # Rewrite assignments as sequence of assignments. for target in reversed(node.targets): if is_sequence_node(target): stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i])) for i in range(nvalues)) else: temp_tuple = to_tuple(to_name(temp) for temp in temps) stmts.append(ast.Assign([target], temp_tuple)) return stmts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif i...
[ "0.7261902", "0.6757224", "0.64843816", "0.63659924", "0.624103", "0.62127984", "0.6177989", "0.61282015", "0.60786444", "0.604624", "0.60448", "0.5942896", "0.59183824", "0.5750465", "0.57303625", "0.5648947", "0.55960566", "0.5571266", "0.5543112", "0.5511169", "0.5493238",...
0.7176904
1
Replace multiple deletion with single deletions.
def visit_Delete(self, node): self.generic_visit(node) if len(node.targets) > 1: return [ ast.Delete([node.target]) for target in node.targets ] return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_deletions(murim_mutations):\n\n pass", "def del_primers(primers,\n deletions):\n \n # Sort primers in reverse order so indices remain correct during deletion\n deletions.sort(reverse=True)\n for n in deletions:\n del primers[n]\n return primers", "def delete_m...
[ "0.6490101", "0.5986913", "0.5909", "0.59087205", "0.58310133", "0.57015437", "0.56676894", "0.5641197", "0.55778617", "0.5471409", "0.54256725", "0.53889436", "0.53551024", "0.5288458", "0.52593184", "0.52453256", "0.52418804", "0.5230966", "0.51611537", "0.5143816", "0.5125...
0.5214702
18
Convert attribute access to `getattr` call.
def visit_Attribute(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): args = [ node.value, ast.Str(node.attr) ] return to_call(to_name('getattr'), args) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getattr__(self, attr):\n return getattr(self.get_function(), attr)", "def __getattr__(self, attr):\n\n\tcommand = attr.replace('_', '-')\n\n\tif command in self.lambda_cache:\n\t return self.lambda_cache[command]\n\n\tif command in self.command_table: # is valid\n\t if command in ('read-data',...
[ "0.7326412", "0.6922202", "0.6918359", "0.6700088", "0.66784173", "0.66713166", "0.66044515", "0.6591623", "0.65870625", "0.6569076", "0.6569076", "0.65116334", "0.6508529", "0.64372194", "0.6435451", "0.64023787", "0.6391228", "0.63764155", "0.6372691", "0.63678974", "0.6345...
0.61914927
29
Convert assignment to attributes to `setattr` call.
def visit_Assign(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Attribute): args = [ target.value, ast.Str(target.attr), node.value ] return ast.Expr(to_call(to_name('setattr'), args)) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):",...
[ "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.65005827", "0.64025", ...
0.708953
0
Convert `del` on attributes to `delattr` call.
def visit_Delete(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Attribute): args = [ target.value, ast.Str(target.attr) ] return ast.Expr(to_call(to_name('delattr'), args)) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __delattr__(cls, name):\n raise TypeError('May not delete attributes on definition class')", "def __delattr__(self, item):\n raise AttributeError(f'Attribute {key} can not be deleted')", "def __delattr__(self, name):\n del self[name]", "def __delattr__(self, name):\n self.unse...
[ "0.70554286", "0.6918985", "0.6561412", "0.6517371", "0.6463424", "0.63563657", "0.6319125", "0.6317646", "0.6296523", "0.62805814", "0.6253668", "0.62185895", "0.62172204", "0.6147379", "0.6073365", "0.6065975", "0.6024653", "0.6012962", "0.59854305", "0.5978835", "0.5920412...
0.6552601
3
Convert index (slice) to functional expression.
def index_to_expr(self, index): if isinstance(index, ast.Index): return index.value elif isinstance(index, ast.Slice): if index.lower is None and index.step is None: args = [ index.upper ] elif index.step is None: args = [ index.lower, index.upper ] else: args = [ index.lower, index.upper, index.step ] args = [ to_name_constant(None) if arg is None else arg for arg in args ] return to_call(to_name('slice'), args) elif isinstance(index, ast.ExtSlice): indexes = list(map(self.index_to_expr, index.dims)) return ast.Tuple(elts=indexes, ctx=ast.Load()) elif isinstance(index, ast.Tuple): elts = list(map(self.index_to_expr, index.elts)) return ast.Tuple(elts=elts, ctx=ast.Load()) else: return index
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, index: Any) -> ColumnOperators:\n return self.operate(getitem, index)", "def special_slice(self, form):\n obj = self.reallyCompile(form[1])\n rest = form[2:]\n if len(rest) == 1:\n return ast.Subscript(obj, 'OP_APPLY', [self.reallyCompile(rest[0])])\n ...
[ "0.6217184", "0.5981041", "0.5913932", "0.5875691", "0.57255584", "0.56947947", "0.55419147", "0.5474115", "0.5463078", "0.5445567", "0.5418668", "0.53971356", "0.5378227", "0.53756636", "0.5320697", "0.53108877", "0.5268785", "0.52610666", "0.52600414", "0.5241331", "0.52413...
0.7314768
0
Convert indexing to `getitem` call.
def visit_Subscript(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): args = [ node.value, self.index_to_expr(node.slice) ] return to_call(to_attribute(self.operator, 'getitem'), args) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, idx):\n if not isinstance(idx, slice):\n return self._fetch()[idx]\n return self._fetch()[idx.start:idx.stop]", "def __getitem__(self, idx):\n return self.getitem(idx)", "def __getitem__(self, index):\n # type: (int) -> Any\n items = list.__ge...
[ "0.7552691", "0.7426195", "0.7328249", "0.7320789", "0.7315738", "0.7315738", "0.72576785", "0.7215699", "0.7099744", "0.7099744", "0.7095745", "0.7095745", "0.7084862", "0.7081519", "0.70618874", "0.70596737", "0.7020587", "0.69942844", "0.69942844", "0.69866604", "0.6984443...
0.0
-1
Convert indexed assignment to `setitem` call.
def visit_Assign(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Subscript): fun = to_attribute(self.operator, 'setitem') args = [target.value, self.index_to_expr(target.slice), node.value] return ast.Expr(to_call(fun, args)) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setitem__(self, idx, value):\n if not isinstance(value, nodes.Node):\n raise NotImplementedError(\"setitem with non-blaze rhs\")\n result = self.getitem(idx, context='set')\n result = Assign('assign', [result, value])\n result.eval()", "def __setitem__(self, index: in...
[ "0.7797051", "0.7401053", "0.73361707", "0.7178626", "0.7156668", "0.71429896", "0.7103829", "0.70937896", "0.6975121", "0.6972701", "0.69715893", "0.6964855", "0.69308025", "0.6930015", "0.69153154", "0.6901336", "0.6901336", "0.6901336", "0.6901336", "0.69008344", "0.685737...
0.6483923
62
Convert indexed `del` operation to `delitem` call.
def visit_Delete(self, node): self.generic_visit(node) target = get_single_target(node) if isinstance(target, ast.Subscript): fun = to_attribute(self.operator, 'delitem') args = [ target.value, self.index_to_expr(target.slice) ] return ast.Expr(to_call(fun, args)) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __delitem__(self, key):\n if self.__pepth__ != 0:\n return plist.__getattr__(self, '__delitem__')(key)\n try:\n if (isinstance(key, list)\n and plist(key).all(isinstance, int)):\n for k in sorted(key, reverse=True):\n operator.__delitem__(self, k)\n else:\n ...
[ "0.66921794", "0.6634915", "0.6633796", "0.6570889", "0.6552683", "0.65220124", "0.6386673", "0.637424", "0.633695", "0.6330479", "0.63189936", "0.629664", "0.62494636", "0.6222551", "0.6222551", "0.6154767", "0.6154767", "0.6136742", "0.6106075", "0.6077464", "0.60384274", ...
0.67726755
0
Convert indexed augmented assignment to `getitem`/`setitem` calls.
def visit_AugAssign(self, node): self.generic_visit(node) stmts = [] target = node.target if not isinstance(target, ast.Subscript): return node # AST node for target value, gensym-ed if necessary. if self.can_reevaluate(target.value): target_node = target.value else: target_node = to_name(gensym()) stmts.append(ast.Assign( [set_ctx(target_node, ast.Store())], target.value)) # AST node for index, gensym-ed if necessary. index_expr = self.index_to_expr(target.slice) if self.can_reevaluate(index_expr): index_node = index_expr else: index_node = to_name(gensym()) stmts.append(ast.Assign( [set_ctx(index_node, ast.Store())], index_expr)) # Main AST node for the indexed augemented assignment. stmts.append(ast.Expr( to_call(to_attribute(self.operator, 'setitem'), [ target_node, index_node, to_call(self.op_to_function(node.op), [ to_call(to_attribute(self.operator, 'getitem'), [ target_node, index_node, ]), node.value ]) ]) )) return stmts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def special_setitem(self, form):\n obj = self.reallyCompile(form[1])\n key = self.reallyCompile(form[2])\n value = self.reallyCompile(form[3])\n return ast.Assign([ast.Subscript(obj,\n 'OP_ASSIGN',\n [key])]...
[ "0.628359", "0.6214197", "0.5985369", "0.5843468", "0.56949717", "0.5647313", "0.56271034", "0.5615518", "0.55904377", "0.5575391", "0.5575391", "0.5575391", "0.5575391", "0.55560446", "0.5552748", "0.5485441", "0.5474524", "0.54711777", "0.54630387", "0.54482377", "0.5441601...
0.6089785
2
Whether the AST node can be safely evaluated twice.
def can_reevaluate(self, node): return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \ (six.PY3 and isinstance(node, ast.Bytes)) or \ (ast_has_name_constant and isinstance(node, ast.NameConstant))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evil_hack(self, other):\n if isinstance(other, FExpr):\n return other == self\n return isinstance(other, self.__class__) and self.id == other.id", "def can_rewrite(self, lhs):\n return len(self[lhs]) > 0", "def is_used_as_expression(item):\n # note: this is not accurate because of th...
[ "0.5983335", "0.5781899", "0.5668821", "0.5606019", "0.5595086", "0.556334", "0.5537646", "0.55246955", "0.542583", "0.5423907", "0.5420764", "0.54188806", "0.5378163", "0.5366631", "0.53475344", "0.53304714", "0.5311328", "0.5301695", "0.5297375", "0.52919585", "0.5287243", ...
0.6013396
0
Convert AST operator to function in operator module.
def op_to_function(self, op): name = op.__class__.__name__.lower() return to_attribute(self.operator, inplace_operator_table[name])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_l...
[ "0.7403406", "0.68868965", "0.670449", "0.6681713", "0.65929717", "0.65538996", "0.6334976", "0.6330674", "0.63050497", "0.62751913", "0.59945136", "0.59220994", "0.58847594", "0.582918", "0.58211267", "0.5793635", "0.579274", "0.57560617", "0.56892043", "0.5672466", "0.56405...
0.7269648
1
Convert augmented assignment to assignment plus function call.
def visit_AugAssign(self, node): # FIXME: Gensym the LHS to avoid two evaluations. self.generic_visit(node) rhs = to_call(self.op_to_function(node.op), [set_ctx(node.target), node.value]) return ast.Assign([node.target], rhs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment no...
[ "0.6730379", "0.60008764", "0.59139097", "0.5711847", "0.56486744", "0.56235904", "0.558098", "0.5525872", "0.5452114", "0.5416577", "0.5416467", "0.53805333", "0.53741395", "0.53737843", "0.53596747", "0.5325352", "0.5319974", "0.5307721", "0.52776676", "0.5269577", "0.52540...
0.61607265
1
Convert AST operator to function in operator module.
def op_to_function(self, op): name = op.__class__.__name__.lower() name = operator_table.get(name, name) return to_attribute(self.operator, name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if ...
[ "0.7269387", "0.68856734", "0.67033535", "0.6680751", "0.6593878", "0.65538615", "0.6333541", "0.6330422", "0.63046694", "0.6274489", "0.5995828", "0.5922164", "0.5883966", "0.5829021", "0.5820589", "0.5794242", "0.5792682", "0.57558066", "0.5689504", "0.5672094", "0.56409925...
0.740287
0
Convert unary operator to function call.
def visit_UnaryOp(self, node): self.generic_visit(node) if isinstance(node.operand, ast.Num): # Don't transform negations of numeric literals. Just treat them # as literals. return node return to_call(self.op_to_function(node.op), [node.operand])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types retur...
[ "0.72016424", "0.694165", "0.6555362", "0.6547756", "0.64053464", "0.63873965", "0.63089716", "0.6237462", "0.6215301", "0.6152752", "0.61284393", "0.6099685", "0.5980428", "0.5957063", "0.5941364", "0.59279454", "0.59157944", "0.590933", "0.5902454", "0.5870478", "0.58691585...
0.68839115
2
Convert binary operator to function call.
def visit_BinOp(self, node): self.generic_visit(node) return to_call(self.op_to_function(node.op), [node.left, node.right])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_...
[ "0.69218695", "0.67679876", "0.66050005", "0.6560225", "0.6488998", "0.63463247", "0.6271635", "0.62432164", "0.62399954", "0.61941004", "0.61931217", "0.6169983", "0.6155207", "0.6142081", "0.6140598", "0.6131111", "0.61122173", "0.61021495", "0.60998577", "0.6044439", "0.60...
0.6633928
2
Convert comparison operator to function call.
def visit_Compare(self, node): self.generic_visit(node) if len(node.ops) > 1: raise NotImplementedError("Multiple comparisons not supported") op, comparator = node.ops[0], node.comparators[0] if isinstance(op, ast.In): # Special case: `contains` reverses the operands. return to_call(to_attribute(self.operator, 'contains'), [comparator, node.left]) elif isinstance(op, ast.NotIn): # Special case: there is no `not_contains`. return to_call(to_attribute(self.operator, 'not_'), [ to_call(to_attribute(self.operator, 'contains'), [comparator, node.left]) ]) else: # General case return to_call(self.op_to_function(op), [node.left, comparator])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compare(self, operator, value, **kw):\n\n return operator(self.comparator, value)", "def comparison(op):\n def comp(*args):\n if args:\n item = args[0]\n for o in args[1:]:\n if op(item, o):\n item = o\n else:\n ...
[ "0.73119915", "0.6936276", "0.67142797", "0.6697332", "0.6670698", "0.63985837", "0.6394015", "0.6347988", "0.6329906", "0.62401164", "0.6220372", "0.6147607", "0.6115169", "0.6115023", "0.6092435", "0.6043819", "0.6031347", "0.60203665", "0.5976809", "0.5946226", "0.5938605"...
0.6528138
5
Convert list literal to function call.
def visit_List(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): return to_call(to_attribute(self.operator, '__list__'), node.elts) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def eval_f(f, xs):\n l = []\n for x in xs:\n l.append(f(x))\n return l", "...
[ "0.6245148", "0.61611956", "0.60648584", "0.6038298", "0.59792304", "0.59553707", "0.59013915", "0.5873387", "0.5854381", "0.5801732", "0.57718545", "0.5754236", "0.5722947", "0.5686056", "0.5682627", "0.56783223", "0.5655683", "0.5624097", "0.5621373", "0.5564674", "0.555357...
0.6453626
0
Convert tuple literal to function call.
def visit_Tuple(self, node): self.generic_visit(node) if isinstance(node.ctx, ast.Load): return to_call(to_attribute(self.operator, '__tuple__'), node.elts) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_func_tuple(f_args):\n return f_args[0](*f_args[1:])", "def tuple(x):\n pass", "def parse_tuple(value):\n match = re.match(r'(\\w+)=(\\w+)\\((.*?)\\)', value)\n assert match, \"could not parse '%s'\" % value\n return match.group(1), eval(match.group(2))(match.group(3))", "def func_call(s...
[ "0.7458055", "0.63818896", "0.6287098", "0.62121403", "0.6174", "0.61714363", "0.61672425", "0.61028904", "0.6084416", "0.606182", "0.6054684", "0.6038045", "0.5894287", "0.58655924", "0.5855375", "0.5848195", "0.58276147", "0.58071357", "0.57829064", "0.57389504", "0.5675090...
0.65799105
1
Convert set literal to function call.
def visit_Set(self, node): self.generic_visit(node) return to_call(to_attribute(self.operator, '__set__'), node.elts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set(self, arg: SeField[Any]) -> str:\n if is_bare_set(arg.type):\n return f\"list({arg.varname}) if convert_sets else {arg.varname}\"\n else:\n earg = arg[0]\n earg.name = \"v\"\n return (\n f\"[{self.render(earg)} for v in {arg.varname}]...
[ "0.6105887", "0.60869235", "0.6042422", "0.594492", "0.58734345", "0.5806317", "0.5636314", "0.5612864", "0.5483707", "0.546873", "0.54369307", "0.5421345", "0.5389135", "0.5195353", "0.5110589", "0.5105132", "0.5087218", "0.5083517", "0.5051965", "0.50517464", "0.5040997", ...
0.6132109
0
Convert dictionary literal to function call, if possible.
def visit_Dict(self, node): self.generic_visit(node) if all(isinstance(key, ast.Str) for key in node.keys): keywords = [ ast.keyword(arg=key.s, value=value) for key, value in zip(node.keys, node.values) ] return to_call(to_name('dict'), keywords=keywords) return node
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def callableize(f_or_d):\n return f_or_d.get if isinstance(f_or_d,dict) else f_or_d", "def callFuncBasedOnDict(func, argdict, **kwargs):\n if argdict is None:\n argdict = {}\n seldict = selectArgsFromDict(func, argdict)\n if kwargs is not None:\n seldict.update(kwargs)\n return func(...
[ "0.64434266", "0.63088524", "0.61875296", "0.6126158", "0.5683456", "0.5670447", "0.56030446", "0.5584195", "0.55224985", "0.5522239", "0.5520842", "0.54833007", "0.54591936", "0.53802735", "0.53729284", "0.5354577", "0.529072", "0.52826643", "0.524588", "0.5245523", "0.52242...
0.0
-1
User inputs the of and names of all players
def addPlayers(): # stackoverflow.com/questions/12169258/should-i-use-entrys-get-or-its-textvariables-for-tkinter-in-python print("\nInitial # of players (Line #26) = " + str(self.number_of_players)) # Collect user input from the entry widget & turn it into an int while (self.number_of_players < 2): try: user_input_number_of_players = int(entry_player_number.get()) print("Inside try block, user_input = " + str(user_input_number_of_players)) if(user_input_number_of_players < 2): tkinter.messagebox.showerror('Non-Integer Input', 'User MUST enter a player # > 1.', icon='error') tkinter.messagebox.quit() tkinter.messagebox.destroy() user_input_number_of_players = int(entry_player_number.get()) else: self.number_of_players = user_input_number_of_players except ValueError: tkinter.messagebox.showerror('Non-Integer Input', 'User MUST enter a player # greater than 1.', icon='error') tkinter.messagebox.quit() tkinter.messagebox.destroy() # Add a label myLabel1b = tkinter.Label(self.root, text="Please Enter Player Names: ", width=25) myLabel1b.config(font="Courier 14 bold") myLabel1b.grid(row=2, column=1) # GET PLAYER NAMES FROM USER....USE A SCROLLING CANVAS FRAME #Make a scrollable frame appear # Scroll appears, but doesn't function # Code for scrollable frame came from: myframe = tkinter.Frame(root, relief=tkinter.GROOVE, width=100, height=100) myframe.grid(row=3, column=3, columnspan=2, pady=30, padx=30) myframe.config(width=5) # https://stackoverflow.com/questions/16188420/tkinter-scrollbar-for-frame self.tree = ttk.Treeview(myframe, selectmode="extended") scbVDirSel = ttk.Scrollbar(myframe, orient=tkinter.VERTICAL, command=self.tree.yview) self.tree.configure(yscrollcommand=scbVDirSel.set) # self.tree["columns"] = (self.columnListOutput) self.tree.column("#0", width=40) self.tree.heading("#0", text='SrNo', anchor='w') self.tree.grid(row=2, column=0, sticky=tkinter.NSEW, in_=myframe, columnspan=10, rowspan=10) scbVDirSel.grid(row=2, column=10, rowspan=10, 
sticky=tkinter.NS, in_=myframe) myframe.rowconfigure(0, weight=1) myframe.columnconfigure(0, weight=1) # put entry boxes for player names inside the scrollable frame for x in range(self.number_of_players): print(x+1) # Add a label myLabel1b = tkinter.Label(myframe, text="Player #" + str(x+1)+ ": ") myLabel1b.config(font="Courier 14 bold") myLabel1b.grid(row=4+x, column=3) # Fix this textVariable parameter - unecessary? # https://stackoverflow.com/questions/32640219/creating-stringvar-variables-in-a-loop-for-tkinter-entry-widgets # user_input_player_names = tkinter.StringVar() name_entries = [] for i in range(self.number_of_players): entry_player_name = tkinter.Entry(myframe, width=10, borderwidth=2) # entry_player_name.set(str(x+1)) entry_player_name.grid(row=4+x, column=4) name_entries.append(entry_player_name) # specify a default value inside the entry box # entry_player_number.insert(0,int("2")) # Add a button for adding players names into the game addPlayerNamesButton = tkinter.ttk.Button(self.root, text="Enter Names", command=get_player_names_from_user) addPlayerNamesButton.grid(row=self.number_of_players+2, column=4) # Make old label, entry, and button dissapear myLabel1.grid_forget() entry_player_number.grid_forget() addPlayerButton.grid_forget() print("# of players after button click = " + str(self.number_of_players)) # Set class instance value to this input from the user return self.number_of_players
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_players(self):\n for i in range(self.number_of_players):\n self.players_names.append(pyip.inputStr(\n prompt=f'\\nEnter name of player {i + 1}:\\n'))", "def set_name(self):\n player1 = input('Enter a name for player 1: ')\n self._players.append(player1)\n...
[ "0.79158205", "0.785065", "0.7320951", "0.7222905", "0.71919775", "0.71211344", "0.7065533", "0.7033948", "0.69090784", "0.67799306", "0.6779573", "0.6625977", "0.6590808", "0.6588982", "0.65594393", "0.65389705", "0.6485963", "0.648116", "0.64683044", "0.6448013", "0.6418692...
0.0
-1
Reads a file and returns statistics about the contents.
def analyzeFile(filename): fileData = open(filename, encoding="utf-8") # open the file counts = {} for line in fileData: # iterates over every line of the file words = line.split() # turns each line into a list for word in words: #iterates over the words in each line list word = word.lower().strip(string.whitespace+string.punctuation) if len(word) > 0: #make sure word is longer than 0 before adding it to the dictionary counts[word] = counts.get(word, 0) + 1 #look up if the dictionary has that word and if not then it'll add that word with the value 0 associated with it and then add one to that, if it has seen it it'll add 1 to the value stored in the counts dictionary #when it gets here for the first line it goes back up to the top and repeats for the 2nd line mostCommonWord = [word] leastCommonWord = [word] shortestWord = [word] longestWord = [word] for item in counts: if counts[mostCommonWord[0]] < counts[item]: mostCommonWord = [item] elif counts[mostCommonWord[0]] == counts[item]: mostCommonWord.append(item) if counts[leastCommonWord[0]] > counts[item]: leastCommonWord = [item] elif counts[leastCommonWord[0]] == counts[item]: leastCommonWord.append(item) if len(shortestWord[0]) > len(item): shortestWord = [item] elif len((shortestWord[0])) == len(item): shortestWord.append(item) if len(longestWord[0]) < len(item): longestWord = [item] elif len(longestWord[0]) == len(item): longestWord.append(item) return (mostCommonWord, leastCommonWord, shortestWord, longestWord)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readStatFile(filePath):\n f = open(filePath, 'r')\n\n allStats = {}\n overviewStats = {}\n category = ''\n folder = ''\n method = ''\n\n for line in f:\n # Check if the line contains the 'cm' character and thus provides information of the specific folder\n if 'cm' in line:\n ...
[ "0.6311686", "0.6240214", "0.6233331", "0.6196064", "0.6166038", "0.6145933", "0.6106974", "0.6092859", "0.6058845", "0.6050925", "0.60243064", "0.60074306", "0.60046774", "0.5990698", "0.59887534", "0.5984321", "0.59824026", "0.5976109", "0.5953878", "0.5952983", "0.59175605...
0.0
-1
This takes a string as an input parameter and treats it as a zip code, looks up the weather for that zipcode, and returns the current temperature at that zipcode in Fahrenheit.
def weather(zipcode): URL = 'http://api.openweathermap.org/data/2.5/weather?zip=' + zipcode + ',us&appid=' + '7d7a3cf9902ef14f54f49f160fc8a550' + '&units=imperial' webpage = urllib.request.urlopen(URL) contents = webpage.read() contents = contents.decode('ascii') weather = eval(contents) #this line turns it from a string into dictionaries and lists temperature = weather['main']['temp'] return temperature
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, zipcode):\n response = hereService.getWeatherByZipcode(zipcode)\n return response", "def get_current_temperature(zipcode, country):\n owm = pyowm.OWM(os.environ.get('OWM_KEY'))\n observation = owm.weather_at_zip_code(zipcode, country)\n weather = observation.get_weather()\n ...
[ "0.6631745", "0.6549503", "0.6489142", "0.6372373", "0.63502383", "0.62462974", "0.6226757", "0.6073102", "0.6010607", "0.5896707", "0.58758765", "0.58282274", "0.57842165", "0.57408905", "0.57213604", "0.57094175", "0.56968915", "0.56946063", "0.5681781", "0.5656904", "0.561...
0.787071
0
Simple check to see if this cog (plugin) is enabled.
async def cog_check(self, ctx): guild_doc = await db.PLUGINS.find_one({"_id": ctx.guild.id}) if guild_doc.get("Verification"): return True else: await ctx.send( embed=discord.Embed( description=( f"{var.E_DISABLE} The Verification plugin" " is disabled in this server" ), color=var.C_ORANGE ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_enabled(self):\n return self.sdk.is_enabled", "def is_enabled(self):", "def __enabled__(component):\n registry = context.app.component_registry\n return (component.__module__ in registry.modules)", "def is_on(self):\n return self._program.get(\"enabled\") is True", "def e...
[ "0.69357497", "0.6897424", "0.685277", "0.6834709", "0.68308216", "0.68143165", "0.6804309", "0.66417366", "0.6623949", "0.6623949", "0.6623949", "0.6623949", "0.6623949", "0.6623949", "0.66176057", "0.66100556", "0.6593205", "0.65567094", "0.65470624", "0.653174", "0.6516565...
0.6632713
8
Finds the token where the value is stored.
def _value_token_index(self): # TODO: memoize this value for i, token in enumerate(self.tokens): if not token.type.is_metadata: return i raise RuntimeError('could not find a value token')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_value(self, token_type, token_value):\n if isinstance(self.cursor(), token_type) and self.cursor().token == token_value:\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_value))\n return token", "def...
[ "0.6786406", "0.6442385", "0.631681", "0.61292017", "0.60891455", "0.60803986", "0.5987252", "0.5974573", "0.5974573", "0.59730595", "0.5952903", "0.5946502", "0.59407806", "0.5928285", "0.5908328", "0.5895346", "0.5840744", "0.5825658", "0.58217704", "0.58217704", "0.5821770...
0.6961675
0
Returns a Python value contained in this atomic element.
def value(self):
    """Deserialize and return the Python value stored in this element."""
    value_token = self._tokens[self._value_token_index()]
    return toml2py.deserialize(value_token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getvalue(self):\n return BytesIO.getvalue(self)", "def value(self):\n\n\t\treturn self.__value", "def value(self):\n return self._read()", "def value(self) -> Any:\n return self._value", "def value(self):\n return self.__value", "def value(self):\n return self.__val...
[ "0.75496817", "0.7389974", "0.7359954", "0.7347043", "0.733962", "0.733962", "0.7327104", "0.7327104", "0.7303945", "0.7303945", "0.7303945", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0.7290579", "0...
0.68537635
87
Sets the contained value to the given one.
def set(self, value):
    """Replace the stored value with *value*.

    The value must be an atomic primitive (not a sequence or mapping);
    it is converted to a TOML token and written over the old value token.
    """
    is_atomic = (not is_sequence_like(value)) and (not is_dict_like(value))
    assert is_atomic, 'the value must be an atomic primitive'
    value_slot = self._value_token_index()
    self._tokens[value_slot] = py2toml.create_primitive_token(value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_value(self, field, value):\n field = self.find_first(field)\n if field is not None:\n field.value = value", "def assignValue(self,value):\n self.itemset(value)", "def assignValue(self,value):\n self.itemset(value)", "def set_value(self,x):\n self._value =...
[ "0.7084912", "0.7043695", "0.7043695", "0.702026", "0.702026", "0.70160013", "0.70115966", "0.69155234", "0.69025266", "0.68340504", "0.6826874", "0.6826183", "0.67819715", "0.6768549", "0.67635816", "0.6759227", "0.67537683", "0.6748124", "0.6729947", "0.67173505", "0.671735...
0.0
-1
>>> import shutil >>> import core.docprocessor >>> basepath = 'core/test_output' >>> f = open('core/test/cv_1.doc', 'r') >>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath) >>> cv1.result True >>> ori = cv1.name >>> des = cv1.copy() >>> cv1.name == ori False >>> f.close() >>> shutil.rmtree(basepath)
def copy(self, des=None, name=None):
    """Write the in-memory stream to ``des``/``name`` and return the path.

    Defaults to ``self.source_path`` and ``self.name``.  If the target
    file already exists, fresh random names are drawn until an unused
    one is found; ``self.name`` is updated to the name actually used.
    """
    if des is None:
        des = self.source_path
    if name is None:
        name = self.name
    location = os.path.join(des, name)
    # Re-roll the random name until we land on a path nobody uses yet.
    while os.path.isfile(location):
        self.base.reset_random()
        name = self.base.random
        self.name = name
        location = os.path.join(des, name)
    with open(location, 'wb') as out:
        out.write(self.stream)
    return location
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def conditional_copy(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n\n filename = save_cmake_filename(doc_file)\n\n filename1 = os.path.join(asciitest_out_dir, filename + \".temp\").replace(\"\\\\\",\"/\")\n filename2 = os.path.join(asciitest_out_dir, fi...
[ "0.58008", "0.5709822", "0.5659337", "0.5595037", "0.5574128", "0.54875076", "0.5483825", "0.5405747", "0.53936034", "0.53486127", "0.53021306", "0.52604765", "0.52571845", "0.52143526", "0.52143526", "0.5193308", "0.5167983", "0.5155168", "0.51270175", "0.5124503", "0.509708...
0.47389197
49
>>> import os >>> import shutil >>> import core.docprocessor >>> import xml.etree.ElementTree >>> basepath = 'core/test_output' >>> f = open('core/test/cv_1.doc', 'r') >>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath) >>> cv1.result True >>> e = xml.etree.ElementTree.parse(os.path.join( ... cv1.docbook_path, cv1.name.xml)).getroot() >>> e.findall('para')[0].text
def convert(self):
    """Convert a Word document (.doc/.docx) to docbook XML and markdown.

    MHT-packaged documents are unpacked to docx first; plain documents
    are converted directly, falling back to a doc -> docx -> docbook
    chain when the direct conversion fails.  ``self.resultcode`` is set
    to: 0 success, 1 unsupported mimetype, 2 docbook output missing,
    3 conversion returned failure.

    Returns:
        bool: True on success, False otherwise.
    """
    logger.info('Convert: %s' % self.base)
    if self.mimetype in ['application/msword',
                         "application/vnd.openxmlformats-officedocument"
                         ".wordprocessingml.document"]:
        if 'multipart/related' in self.stream:
            # MHT container: unpack into a real docx before converting
            self.process_mht()
            returncode = self.convert_docfile(self.docx_path,
                                              self.name.docx,
                                              self.docbook_path,
                                              self.name.xml)
        else:
            # try the direct source -> docbook conversion first
            returncode = self.convert_docfile(self.source_path,
                                              self.name,
                                              self.docbook_path,
                                              self.name.xml)
            if returncode is False:
                # direct conversion failed: retry through docx as an
                # intermediate format
                returncode = self.convert_docfile(self.source_path,
                                                  self.name,
                                                  self.docx_path,
                                                  self.name.docx)
                returncode = self.convert_docfile(self.docx_path,
                                                  self.name.docx,
                                                  self.docbook_path,
                                                  self.name.xml)
        # the docbook file is the ground truth for success of the chain
        if not os.path.exists(os.path.join(
                self.docbook_path, self.name.xml)):
            logger.info('Not exists')
            self.resultcode = 2
            return False
        if returncode is False:
            self.resultcode = 3
            return False
        # post-process: strip notes, then produce markdown output
        self.remove_note()
        self.file_docbook_to_markdown()
        logger.info(' '.join([self.base.base, self.name.base, 'Success']))
        self.resultcode = 0
        return True
    else:
        # not a Word mimetype: nothing to do
        logger.info('Skip')
        self.resultcode = 1
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_docx_text(path):\n document = zipfile.ZipFile(path)\n xml_content = document.read('word/document.xml')\n document.close()\n tree = XML(xml_content)\n \n paragraphs = []\n for paragraph in tree.getiterator(PARA):\n texts = [node.text\n for node in paragraph.getiterat...
[ "0.58561695", "0.57505774", "0.57142305", "0.56927013", "0.5655105", "0.55894864", "0.5549124", "0.54334044", "0.5345397", "0.5345397", "0.5336243", "0.5295321", "0.52721536", "0.52331185", "0.52325106", "0.5207037", "0.5178504", "0.51673114", "0.5162166", "0.51004875", "0.50...
0.0
-1
>>> import shutil >>> import os.path >>> import core.docprocessor >>> basepath = 'core/test_output' >>> f = open('core/test/cv_1.doc', 'r') >>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath) >>> cv1.result True >>> os.path.isfile(os.path.join(cv1.markdown_path, ... cv1.name.md)) True >>> cv1.deleteconvert() >>> os.path.isfile(os.path.join(cv1.markdown_path, ... cv1.name.md)) False >>> f.close() >>> shutil.rmtree(basepath)
def deleteconvert(self):
    """Remove every generated conversion artifact for this document.

    Deletes the docx, html, docbook XML and markdown files if they
    exist; missing files are skipped silently.  (Refactors four
    copy-pasted delete stanzas into one data-driven loop.)
    """
    generated = [
        (self.docx_path, self.name.docx),
        (self.html_path, self.name.html),
        (self.docbook_path, self.name.xml),
        (self.markdown_path, self.name.md),
    ]
    for directory, artifact in generated:
        path = os.path.join(directory, artifact)
        if os.path.isfile(path):
            os.remove(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanup(asciitest_out_dir, doc_file):\n # path join uses backslash win32 which is not cmake compatible\n filename = os.path.join(asciitest_out_dir, save_cmake_filename(doc_file)).replace(\"\\\\\",\"/\")\n \n #print(\"cleanup %s %s\" % (doc_file, filename))\n try:\n os.remove(filename)\n ...
[ "0.65258646", "0.63766783", "0.62430525", "0.6167361", "0.5848033", "0.57737476", "0.5623585", "0.5583894", "0.5582149", "0.5558321", "0.5547012", "0.55358094", "0.553127", "0.5530632", "0.5524219", "0.5511022", "0.54836005", "0.5472407", "0.5460167", "0.53966826", "0.5396682...
0.73595536
0
Read ascii file to get weather info
def read_weather(self):
    """Read the ASCII weather file at ``self.datafile`` into ``self.table``.

    Trace-precipitation markers ('T') are replaced with a small numeric
    value, and precipitation is re-stored as floats in a new 'PrecipIn'
    column; the original string column is removed.

    NOTE(review): uses the Python 2 print statement, so this module
    appears to target Python 2 -- confirm before porting.
    """
    print "Reading weather data from file",self.datafile
    tab = ascii.read(self.datafile)

    # Fix 'T' values in precipitation column, which represent tiny
    # amounts of rain (not measurable)
    TINY_VALUE = '.005'  # 0.005 is half the smallest measurable value
    rain = tab['PrecipitationIn']
    wbad = (rain == 'T')
    rain[wbad] = TINY_VALUE
    # convert the (now fully numeric) strings to floats
    rain = numpy.array(rain).astype("float")

    # Replace string version of precip with float version
    tab['PrecipIn'] = rain
    tab.remove_column('PrecipitationIn')
    self.table = tab
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_raw_temperature():\n with open(device_file, 'r') as f:\n content = f.readlines()\n return content", "def read_ascii(file):\n wvlen, band, mag, emag, fmag, unit, beam, odate, ref = [],[],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\...
[ "0.6585251", "0.65576595", "0.6403347", "0.60498667", "0.59925395", "0.59534574", "0.59316444", "0.58384484", "0.5829025", "0.5764885", "0.57411146", "0.571867", "0.57075197", "0.56858313", "0.5685079", "0.56707263", "0.56707263", "0.56704044", "0.5669247", "0.5663918", "0.56...
0.67925274
0
Get features (for regression) based on this bikedata's weather data
def get_weather_features(self):
    """Return the previously built weather feature matrix.

    Raises:
        Exception: if ``make_weather_features`` has not been run yet.
    """
    features = self.weather_features
    if features is None:
        raise Exception("Weather features not made yet.")
###        self.make_weather_features()
    return features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature_extraction(self) -> None:\n # Add the hour, minute, and x column to the data\n self.df_poly[\"hour\"] = self.df_poly[\"time\"].apply(lambda y: y.hour)\n self.df_poly[\"minute\"] = self.df_poly[\"time\"].apply(lambda y: y.minute)\n self.df_poly[\"x\"] = self.df_poly[\"hour\"]...
[ "0.65693516", "0.6504419", "0.63095343", "0.6184907", "0.6181109", "0.61156017", "0.6097132", "0.60952747", "0.60912675", "0.6084227", "0.6035357", "0.59760165", "0.5962229", "0.5936362", "0.5921777", "0.59124935", "0.59112424", "0.58922076", "0.58688956", "0.58619624", "0.58...
0.7378489
0
Get features (for regression) based on the weather data
def make_weather_features(self, timeline_dt_list):
    """Build a regression feature matrix from the weather table.

    For each datetime in *timeline_dt_list*, scans forward through
    ``self.datetimes`` to find the weather row whose date matches, then
    takes precipitation and mean temperature as the two features.

    Args:
        timeline_dt_list: sequence of datetimes; assumed sorted
            ascending, since the date search only ever scans forward
            from the previous match -- TODO confirm.

    Returns:
        numpy array of shape (len(timeline_dt_list), 2): column 0 is
        'PrecipIn', column 1 is 'Mean TemperatureF'.  Also stored on
        ``self.weather_features``.
    """
    print "Making weather features..."
    N_FEATURES = 2
    n_examples = len(timeline_dt_list)
    XX = numpy.zeros((n_examples, N_FEATURES))
    indices = numpy.zeros(n_examples,dtype='int')
    ind_weatherday = 0

    # Loop over all times in the timeline
    for ii, time in enumerate(timeline_dt_list):
        # Find where this time in the timeline matches the date
        # of some weather data.
        jj = ind_weatherday
        while time.date() != self.datetimes[jj].date():
            # Make sure jj does not get too large to be an index to
            # the list.
            # Note this is probably a bad idea to do it this way.
            # NOTE(review): if no date matches, this silently uses the
            # last weather row rather than raising.
            if jj == len(self.datetimes)-1:
                break
            jj += 1
##            print jj
        ind_weatherday = jj
        indices[ii] = ind_weatherday

#        XX[ii, 0] = self.table['PrecipIn'][ind_weatherday]
#        XX[ii, 1] = self.table['Mean TemperatureF'][ind_weatherday]
##        XX[ii, 2] = self.table['MeanDew PointF'][ind_weatherday]

    # gather both feature columns in one vectorized indexing pass
    XX[:,0] = self.table['PrecipIn'][indices]
    XX[:,1] = self.table['Mean TemperatureF'][indices]

    self.weather_features = XX
    return XX
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_weather_features(self):\n if self.weather_features is None:\n raise Exception(\"Weather features not made yet.\")\n### self.make_weather_features()\n else:\n return self.weather_features", "def feature_extraction(self) -> None:\n # Add the hour, minut...
[ "0.7582345", "0.6779436", "0.6377024", "0.6304283", "0.6291205", "0.62426645", "0.62333375", "0.61894417", "0.6091039", "0.6048604", "0.6045246", "0.6044608", "0.60266703", "0.5981941", "0.59668094", "0.5964047", "0.5908457", "0.5897442", "0.5888104", "0.5868746", "0.5860874"...
0.6208372
7
Loads pretrained pytorch model
def load_model(model_path: str) -> object:
    """Load a serialized PyTorch model from disk and put it in eval mode.

    Args:
        model_path: filesystem path of the saved model.

    Returns:
        The loaded model, switched to evaluation mode.
    """
    loaded = torch.load(model_path)
    loaded.eval()
    return loaded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % sel...
[ "0.82864565", "0.8224309", "0.81519234", "0.7998859", "0.7852287", "0.7793966", "0.7782619", "0.7755613", "0.7727618", "0.7702701", "0.767298", "0.7656771", "0.7558395", "0.7558245", "0.75549746", "0.75289506", "0.7481736", "0.747583", "0.743038", "0.741285", "0.74045056", ...
0.7447873
18
Given image file predict and return class label
def inference_on_data(image) -> str:
    """Run the pretrained model on *image* and return the class label.

    The label is also printed, which stands in for persisting the
    prediction to an actual database.
    """
    outputs = inference_model(image)
    predicted = torch.argmax(outputs[0])
    # Print to log acts as a proxy of saving to an actual DB
    print(f'Image Class : {predicted}')
    return str(predicted)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def model_predict(img_path):\n img = open_image(img_path)\n pred_class, pred_idx, outputs = learn.predict(img)\n print(pred_class)\n return pred_class", "def predict_car():\n img = open_image(request.files['image'])\n pred_class, pred_idx, outputs = learn.predict(img)\n return str(pred_class...
[ "0.8147907", "0.8066166", "0.7634904", "0.7613867", "0.7565054", "0.75600857", "0.7542438", "0.74744403", "0.7456327", "0.74438494", "0.7441574", "0.73974526", "0.73938596", "0.73485583", "0.73224735", "0.7307081", "0.7293566", "0.72353786", "0.72337973", "0.7217787", "0.7184...
0.0
-1
Publish response to kafka topic
def publish_response(class_label):
    """Publish the predicted class label to the Kafka response topic."""
    producer = KProducer(config=publisher_config)
    producer.produce(class_label, PUBLISHER_TOPIC)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, node, topic, data={}, on_publish=None, on_response=None):\n pass", "def produce(self, response, regex, ts):\n self.logger.info(\"Producing message...\")\n\n payload = {\n \"url\": response.url,\n \"latency\": response.elapsed,\n \"status\": ...
[ "0.6920841", "0.6638568", "0.6578799", "0.6561684", "0.6525909", "0.65240884", "0.6514028", "0.641655", "0.62882924", "0.627717", "0.62517506", "0.61733186", "0.61410475", "0.6117747", "0.61167103", "0.61138386", "0.61064917", "0.608827", "0.6083523", "0.6068794", "0.60644025...
0.7494711
0
Start Consuming data coming in as images from requesters
def enable_subscription():
    """Consume images from Kafka forever, classify each, publish the label.

    Blocks indefinitely.  ``counter`` tracks how many non-empty messages
    have been processed (bug fix: it was printed but never incremented,
    so every log line read "Received Data 0").
    """
    client = KConsumer(config=subscriber_config)
    counter = 0
    while 1:
        data = client.consume()
        if data:
            counter += 1
            print("Received Data", counter)
            class_label = inference_on_data(data.value)
            publish_response(class_label)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __image_request_handler(self):\n self.__logger.info(\"Image Request Handling Thread started\")\n ticker = Ticker(2)\n while self._running:\n timeout = ticker.end_tick(False)\n try:\n task = self.__image_queue.get(True, timeout)\n except Queue...
[ "0.63510275", "0.6212329", "0.60436887", "0.597931", "0.593665", "0.58673537", "0.57487386", "0.57463884", "0.5703741", "0.56677437", "0.5652428", "0.5626479", "0.56006426", "0.55784166", "0.55780387", "0.5559267", "0.554615", "0.5536241", "0.5526545", "0.55169946", "0.549977...
0.0
-1
Serializer used by the Producer Service to send class_label to subscribers. class_label is generated by inferring on image using pretrained lenet
def kafka_serializer(data):
    """Encode *data* as UTF-8 JSON bytes for publishing to Kafka."""
    as_json = json.dumps(data)
    return as_json.encode('utf-8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_voc_label(is_training):\n voc_dir = config.voc_dir\n cls_map = {name: i for i, name in enumerate(config.coco_classes)}\n sub_dir = 'train' if is_training else 'eval'\n voc_dir = os.path.join(voc_dir, sub_dir)\n if not os.path.isdir(voc_dir):\n raise ValueError(f'Cannot find {sub_di...
[ "0.57493436", "0.56853515", "0.5624522", "0.55395037", "0.55254596", "0.550018", "0.54977113", "0.5459444", "0.54317695", "0.5422993", "0.54141396", "0.5414098", "0.54095036", "0.5399831", "0.53884953", "0.53860307", "0.5378658", "0.5374597", "0.5360391", "0.5358538", "0.5353...
0.0
-1
Deserializer used by the Consumer Service to parse images sent to the model
def kafka_deserializer(data):
    """Unpickle a raw Kafka message payload back into a Python object.

    NOTE(review): pickle on untrusted input is unsafe -- confirm that
    messages only come from trusted producers.
    """
    payload = pickle.loads(data)
    return payload
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deserialize(self, data):", "def _post_deserialize (self):\n pass", "def getDeserializer():", "def deserialize(self, blob):\n pass", "def deserialise_image(data):\n if \"data:image\" in data:\n data = data[data.find(\",\") + 1:]\n\n return Image.open(io.BytesIO(base64.urlsafe_...
[ "0.64817536", "0.6376675", "0.63246363", "0.6318897", "0.62756526", "0.62049454", "0.61352426", "0.6036559", "0.59855014", "0.59849745", "0.5978087", "0.59708434", "0.58874786", "0.58653855", "0.57746744", "0.5726982", "0.5700346", "0.56928164", "0.565674", "0.5638816", "0.56...
0.0
-1
return img from disk
def api_get_icon():
    """Serve a package icon image from disk.

    With ``?pkg=<name>``: look for the package's 32x32 hicolor icon
    among its installed files.  Otherwise ``?i=<path>`` serves that file
    directly.  A transparent placeholder is returned when nothing
    suitable exists.
    """
    pkg_name = request.args.get('pkg')
    if not pkg_name:
        src = request.args.get('i')
        if not os.path.isfile(src):
            #abort(404)
            return send_file("static/images/null.gif")
        return send_file(src, as_attachment=False)
    for src in Database().db.get_pkg_files(pkg_name):
        if src.startswith("/usr/share/icons/hicolor/32x32/apps/"):
            return send_file(src, as_attachment=False)
    return send_file("static/images/null.gif")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_image_from_file(path):\n try:\n img = Image.open(path)\n return img\n except IOError as e:\n print e\n return None", "def read_image(path):\n img = misc.imread(path)\n return img", "def get_image(image_path):\r\n\r\n return Image.open(image_path)", "def getI...
[ "0.71957695", "0.70855176", "0.70432746", "0.7038061", "0.68815845", "0.6864167", "0.6857217", "0.68482894", "0.6821152", "0.6813624", "0.68091524", "0.67426455", "0.6739715", "0.6640925", "0.66380155", "0.66144145", "0.6573041", "0.65571743", "0.6553333", "0.6547051", "0.654...
0.0
-1
page list one repo
def get_repo(repo_id):
    """Render the package listing page for one repository.

    The special id "orphans" lists packages no repository owns.
    """
    if repo_id == "orphans":
        pkgs = Database().db.get_orphans()
    else:
        pkgs = Database().db.get_repo_pkgs(repo_id)
    return render_template(
        "repo.html",
        title=" - " + repo_id,
        repos=Database().db.get_repos_names(),
        pkgs=pkgs,
        repo=repo_id,
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_repos_cli(api_client, path_prefix, next_page_token):\n content = ReposApi(api_client).list(path_prefix, next_page_token)\n click.echo(pretty_format(content))", "def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)", "def repos(request):\n # Clean...
[ "0.7262444", "0.7217461", "0.7216748", "0.71565646", "0.6850128", "0.6780763", "0.67550576", "0.6684879", "0.6644136", "0.64276356", "0.6412211", "0.6325303", "0.6321167", "0.6286405", "0.62793666", "0.6230717", "0.61821365", "0.6155869", "0.6142341", "0.6085627", "0.60169744...
0.63984853
11
page detail for one package
def get_pkg(pkg_name):
    """Render the detail page for a single package.

    (Removed leftover debug ``print`` calls and dead commented-out
    code; corrected the copy-pasted docstring.)
    """
    pkg = Database().db.get_pkg_details(pkg_name, "", False)
    pkg = PackageDetail.surClass(pkg)
    return render_template("pkg.html",
                           title=" - " + pkg_name,
                           repos=Database().db.get_repos_names(),
                           pkg=pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_repos_cli(api_client, path_prefix, next_page_token):\n content = ReposApi(api_client).list(path_prefix, next_page_token)\n click.echo(pretty_format(content))", "def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)", "def repos(request):\n # Clean...
[ "0.7262444", "0.7217461", "0.7216748", "0.71565646", "0.6850128", "0.6780763", "0.67550576", "0.6684879", "0.6644136", "0.64276356", "0.6412211", "0.63984853", "0.6325303", "0.6321167", "0.6286405", "0.62793666", "0.6230717", "0.61821365", "0.6155869", "0.6142341", "0.6085627...
0.0
-1
Returns an iterator of documents being read from disk.
def get_documents(self, batch=None):
    """Yield documents parsed from the on-disk batch files.

    ``batch=None`` (or any falsy value) streams every batch,
    ``batch="random"`` picks one batch at random, and any other value
    streams that specific batch.
    """
    if not batch:
        # no batch = all the batches
        batch_files = self._get_batch_files()
    elif batch == "random":
        # pick one of the existing batches at random
        all_batches = self._get_batches()
        batch_files = [
            self._get_batch_file(batch=random.randint(1, len(all_batches)))
        ]
    else:
        # a single, explicitly requested batch
        batch_files = [self._get_batch_file(batch=batch)]
    for batch_file in batch_files:
        with gzip.open(batch_file, "rb") as infile:
            # one JSON document per line
            for line in infile:
                yield json.loads(line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_documents(self):\n raise NotImplementedError", "def __iter__(self):\n return self.iter_documents()", "def iter_local_docs(docs_path, skip=0, stop=sys.maxsize):\n for i, line in enumerate(open(docs_path)):\n if i < skip:\n continue\n elif i < stop:\n ...
[ "0.7139281", "0.66851294", "0.6661767", "0.6630697", "0.661265", "0.65598124", "0.655498", "0.6535111", "0.64075583", "0.6239595", "0.6149933", "0.61477643", "0.61379325", "0.6104141", "0.6089389", "0.6088322", "0.60240936", "0.60088474", "0.5915154", "0.5884814", "0.58526784...
0.61733896
10
Returns an iterator of paragraphs being read from disk.
def get_paragraphs(self, batch=None):
    """Yield every paragraph from the document stream, one at a time."""
    for doc in self.get_documents(batch):
        # unpack each document's paragraph list
        for para in doc["paragraphs"]:
            yield para
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readFile(self):\n with pdfplumber.open(self.path) as pdf:\n first_page = pdf.pages[0]\n text = first_page.extract_text()\n text = text.split('\\n')\n return processText(text)", "def generate_paragraphs(self, count=3):\n\n with self.open_text_data() as...
[ "0.6528536", "0.64675736", "0.6323226", "0.6145701", "0.61151904", "0.6101688", "0.6096735", "0.60733026", "0.5850335", "0.5826958", "0.57282406", "0.57275575", "0.5669418", "0.5661601", "0.5554305", "0.5551181", "0.55444384", "0.547488", "0.5394992", "0.53804976", "0.5364042...
0.5878271
8
Returns an iterator of sentences being read from disk.
def get_sentences(self, batch=None):
    """Yield the token list of every sentence in the paragraph stream."""
    for para in self.get_paragraphs(batch):
        for sent in para["sentences"]:
            # each sentence contributes its token list
            yield sent["tokens"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_sentences(path):\n sentences = []\n sentence = []\n num = 0\n with codecs.open(path, 'r', 'utf8') as fread:\n # n_lines = len(fread)\n print(\"Read from {:s}\".format(path))\n # pbar = progressbar.ProgressBar(max_value=n_lines)\n for line_idx, line in enumerate(frea...
[ "0.691375", "0.6559297", "0.6529974", "0.647818", "0.64719594", "0.6423339", "0.6421859", "0.640695", "0.63902426", "0.63782775", "0.6366519", "0.630013", "0.62926716", "0.61867374", "0.6146142", "0.60801035", "0.60796994", "0.6057292", "0.60563534", "0.605594", "0.605538", ...
0.5305037
78
Returns the ``BatchStats`` for a specific batch.
def get_batch_stats(self, batch):
    """Look up the ``BatchStats`` recorded for *batch*."""
    stats_by_batch = self.batch_stats
    return stats_by_batch[batch]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_batch_stats():\n\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n # Copy for better stability.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.suffici...
[ "0.65609884", "0.65114206", "0.65014577", "0.6211535", "0.59668744", "0.5954246", "0.58340657", "0.5821755", "0.57448745", "0.57334435", "0.57184416", "0.5670613", "0.56242794", "0.560072", "0.55778915", "0.5565209", "0.55386823", "0.5498818", "0.54651314", "0.5450578", "0.54...
0.89985496
0
Returns the number of batches in the current document inventory.
def num_batches(self):
    """Return how many batches the inventory currently holds."""
    batch_count = len(self.batch_stats)
    return batch_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items", "def count_records(batches: List[Batch]) -> int:\n return sum(b.current_size for b in batches)", "def get_num_batches(self, instances: Iterable[Instance]...
[ "0.687607", "0.6610767", "0.657245", "0.64142066", "0.63633513", "0.6283254", "0.6276345", "0.6246339", "0.62053406", "0.6200216", "0.61746264", "0.61746264", "0.61746264", "0.61746264", "0.6167673", "0.61566156", "0.6134254", "0.6134254", "0.61117995", "0.6078967", "0.606971...
0.65176564
3
Convenience method that sums up all the sentences across all batches.
def get_total_sentences(self):
    """Sum the sentence counts of every batch in the inventory.

    Returns:
        int: total number of sentences across all batches.
    """
    # sum() over the values replaces the manual accumulation loop
    return sum(stats.total_sentences for stats in self.batch_stats.values())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > sel...
[ "0.62009716", "0.6035176", "0.60308325", "0.6027758", "0.58148813", "0.5691236", "0.567909", "0.5636329", "0.562765", "0.56133324", "0.5553161", "0.55471104", "0.55366445", "0.5535218", "0.5534292", "0.5531749", "0.5505629", "0.54945356", "0.5477114", "0.54692113", "0.5455944...
0.6880938
0
Adds ``documents`` to the document inventory, writing to disk in batches of 500,000.
def add_documents(self, documents):
    """Drain *documents* (an iterator) into the inventory.

    Documents are written in batches of ``DOCUMENT_BATCH_SIZE`` as
    gzipped JSON-lines: each batch goes to a temp file first, its
    sentence count is recorded in ``self.batch_stats`` and a stats
    file, and the temp file is then moved into the versioned batch
    directory.  An incomplete final batch is still flushed.

    NOTE(review): ``documents.next()`` is the Python-2 iterator
    protocol, and writing str to a gzip file opened "wb" only works on
    Python 2 -- this method appears to target Python 2; confirm before
    porting.
    """
    # flag for StopIteration exceptions
    more_documents = True
    # loop while there are still documents in the iterator
    while more_documents:
        # increment batch number
        batch = len(self.batch_stats) + 1
        # count sentences
        sentences_count = 0
        # create temporary batch data file in the version directory
        batch_file = os.path.join(self.file_base.get_version_path(self.version),
                                  "data.jl.gz.temp")
        # try to read the next batch of files, catch exception and stop if there are no more
        try:
            # get next document before opening the file just to make sure it's there
            document = documents.next()
            # open the data file
            with gzip.open(batch_file, "wb") as outfile:
                # loop through DOCUMENT_BATCH_SIZE documents
                for i in range(DocumentDatabase.DOCUMENT_BATCH_SIZE):
                    # count sentences in document
                    for paragraph in document["paragraphs"]:
                        sentences_count += len(paragraph["sentences"])
                    # write JSON to file one line at a time
                    outfile.write("%s\n" % json.dumps(document))
                    # if we are not done with this batch, retrieve the next document
                    if i < DocumentDatabase.DOCUMENT_BATCH_SIZE - 1:
                        document = documents.next()
        except StopIteration:
            # the end of the documents stream, set the flag to False
            more_documents = False
        # make sure the batch isn't empty
        if sentences_count > 0:
            # create the new batch in the file system
            self.version_batches.create_latest_version()
            # add the stats to the statistics hash
            self.batch_stats[batch] = BatchStats(sentences_count)
            # write the batch statistics to file
            with codecs.open(self._get_batch_stat_file(batch), "wb", "utf-8") as outfile:
                # write the JSON representation for the stats
                outfile.write(json.dumps(self.batch_stats[batch].to_json()))
            # move the temp data file to the correct location inside the version folder
            os.rename(batch_file, self._get_batch_file(batch))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_documents(self, documents: list):\n requests = [\n {'PutRequest': {'Item': Item}} \n for Item in documents\n ]\n ticks = [d['symbol'] for d in documents]\n size = getsizeof(requests)\n exceptions = self.dynamo_client.exceptions\n errors = (e...
[ "0.74635196", "0.68780833", "0.6735508", "0.66293705", "0.65308714", "0.64703435", "0.6461322", "0.6320846", "0.6297765", "0.6272216", "0.61952156", "0.618961", "0.6179138", "0.6159434", "0.61464745", "0.6131274", "0.6127692", "0.60197544", "0.60097003", "0.6002732", "0.59492...
0.73529315
1
Loads a document database with the specified version from the directory.
def load(db_path="data/documents/trigrams", version=None):
    """Open the document database at *db_path* with the given version.

    Restores per-batch statistics by reading each batch's JSON stats
    file.  (Bug fix: the stats file handle was opened via
    ``codecs.open(...).read()`` and never closed; it is now closed via
    a ``with`` block.)

    Args:
        db_path: directory holding the versioned document inventory.
        version: specific version to open, or None for the default.

    Returns:
        The loaded database object.
    """
    # create database at the desired path and with the desired version
    db = DocumentDatabase(db_path, version)
    for batch in db._get_batches():
        # each batch carries a JSON stats file alongside its data file
        stats_path = db._get_batch_stat_file(batch)
        with codecs.open(stats_path, "rb", "utf-8") as infile:
            stats_json = json.loads(infile.read())
        db.batch_stats[batch] = BatchStats(stats_json["total_sentences"])
    return db
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_or_create_db(self):\n try:\n with open(self._filename, 'rb') as f:\n self.db = pickle.load(f)\n except FileNotFoundError:\n pass", "def load_DB(self):\n\t\tprint 'Loadind Data Base...'\n\t\tstream = open(self.DB_file)\n\t\tself.DB = cPickle.load(stream)...
[ "0.6015772", "0.59988827", "0.59276336", "0.5849796", "0.5840075", "0.5790545", "0.5701773", "0.56872916", "0.5661583", "0.56539536", "0.56524754", "0.5596583", "0.5581138", "0.5557227", "0.5549356", "0.5532614", "0.55302", "0.5490099", "0.54712546", "0.5464255", "0.5438158",...
0.7193331
0
Returns the latest version of the documents inventory at the specified path.
def get_latest_version(db_path):
    """Return the newest version of the document inventory at *db_path*."""
    versioned = VersionedFile(db_path)
    return versioned.get_latest_version()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None", "async def get_local_version(self, path):\n return_value = ''\n if os.path.isfile(path):\n with open(path, 'r') as local:\n ...
[ "0.64565796", "0.59338427", "0.59115607", "0.5859803", "0.58483046", "0.5716326", "0.5692346", "0.5668634", "0.56481016", "0.5610619", "0.56099266", "0.556473", "0.5553882", "0.55534357", "0.5535717", "0.55330545", "0.55328286", "0.5523551", "0.547328", "0.54688823", "0.54657...
0.67488974
0
Returns a JSON representation of ``BatchStats``.
def to_json(self):
    """Return a JSON-serializable dict representation of these stats."""
    return dict(total_sentences=self.total_sentences)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_batch_stats(self, batch):\n\t\t\n\t\treturn self.batch_stats[batch]", "def stats_to_json(self):\n return json.dumps({'name': self.name,\n 'points': self.points,\n 'field_researchers': self.field_researchers,\n 'control_t...
[ "0.69310164", "0.6797208", "0.66209364", "0.63575345", "0.6177473", "0.6155753", "0.6147767", "0.6138568", "0.604386", "0.60371757", "0.6009655", "0.59683806", "0.5925637", "0.5886954", "0.588449", "0.5846016", "0.58390856", "0.5832954", "0.5819733", "0.58175325", "0.5811385"...
0.5677763
27
A simple wrapper for scipy.optimize.minimize using JAX.
def minimize(fun, x0, method=None, args=(), bounds=None, constraints=(), tol=None,
             callback=None, options=None):
    """A scipy.optimize.minimize wrapper for JAX.

    *fun* may take an arbitrary pytree of parameters; its gradient is
    computed with ``jax.grad`` instead of being supplied by hand.  All
    other arguments are forwarded to ``scipy.optimize.minimize``.

    Args:
        fun: scalar-valued objective taking a pytree (plus *args).
        x0: initial parameters as a pytree.

    Returns:
        scipy ``OptimizeResult`` whose ``"x"`` entry has been unraveled
        back into the same pytree structure as ``x0``.
    """
    # Use tree flatten and unflatten to convert params x0 from PyTrees to flat arrays
    x0_flat, unravel = ravel_pytree(x0)

    # Wrap the objective function to consume flat _original_
    # numpy arrays and produce scalar outputs.
    def fun_wrapper(x_flat, *args):
        x = unravel(x_flat)
        loss_val = float(fun(x, *args))
        return loss_val

    # Wrap the gradient in a similar manner
    jac = jit(grad(fun))

    def jac_wrapper(x_flat, *args):
        x = unravel(x_flat)
        g_flat, _ = ravel_pytree(jac(x, *args))
        # scipy expects a plain (non-JAX) numpy array for the gradient
        og = onp.array(g_flat)
        return og

    # Wrap the callback to consume a pytree
    def callback_wrapper(x_flat, *args):
        if callback is not None:
            x = unravel(x_flat)
            return callback(x, *args)

    # Minimize with scipy
    results = scipy.optimize.minimize(fun_wrapper,
                                      x0_flat,
                                      args=args,
                                      method=method,
                                      jac=jac_wrapper,
                                      callback=callback_wrapper,
                                      bounds=bounds,
                                      constraints=constraints,
                                      tol=tol,
                                      options=options)

    # pack the output back into a PyTree
    results["x"] = unravel(results["x"])
    return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def minimize(fun: Callable[..., float],\n x0: np.ndarray,\n args: Tuple = (),\n method: Optional[str] = None,\n **kwargs) -> scipy.optimize.OptimizeResult:\n if method.lower() in OPTIMIZERS:\n optimizer = OPTIMIZERS[method.lower()]\n return optimizer...
[ "0.63569784", "0.6333814", "0.6261257", "0.6101453", "0.60971725", "0.60695785", "0.5995225", "0.5894081", "0.589276", "0.5768566", "0.5727531", "0.57158124", "0.57106614", "0.57106614", "0.56989694", "0.56768656", "0.56757146", "0.5599489", "0.5598426", "0.55934316", "0.5571...
0.57981586
9
Ask bot to decide for you, Eg .decide hue, hue2, ...
async def decide(self, ctx, *, args: str): await ctx.send(f":point_right: **{random.choice(args.split(','))}**")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decide():", "def handDecision(handIn):", "def whats_my_color_handler(handler_input):\n # type: (HandlerInput) -> Response\n if color_slot_key in handler_input.attributes_manager.session_attributes:\n fav_color = handler_input.attributes_manager.session_attributes[\n color_slot_key]\...
[ "0.6226377", "0.5935546", "0.5760656", "0.574801", "0.5716039", "0.56322324", "0.561385", "0.5553106", "0.55161047", "0.5512562", "0.5495656", "0.54472333", "0.5444497", "0.5443797", "0.5440587", "0.53947", "0.5392457", "0.5389444", "0.53876394", "0.5375197", "0.5374281", "...
0.51799744
46
Leave L and b unchanged
def _buildMatrix(self, SparseMatrix, Ncells, MaxFaces, coeff): return (0, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l_degenerate(self):\n self.tmp = self.right\n self.right = self.left", "def le_inplace(a,b):", "def l_un_degenerate(self):\n self.right = self.tmp", "def lt_inplace(a,b):", "def ge_inplace(a,b):", "def restore(self):\n self.u = self.ub.copy()\n self.w = self.wb.copy...
[ "0.6592938", "0.6592725", "0.6477138", "0.6335981", "0.5866531", "0.58485544", "0.58470535", "0.58193916", "0.5793898", "0.5778269", "0.57440275", "0.5707928", "0.56628865", "0.56374824", "0.5598262", "0.55937904", "0.5581618", "0.5562398", "0.553753", "0.5497139", "0.5493476...
0.0
-1
Returns OAuth2 credentials if we have valid credentials in the session. This is a 'truthy' value. Return None if we don't have credentials, or if they have expired or are otherwise invalid. This is a 'falsy' value.
def valid_credentials(): if 'credentials' not in flask.session: return None credentials = client.OAuth2Credentials.from_json( flask.session['credentials']) if (credentials.invalid or credentials.access_token_expired): return None return credentials
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or credentials.access_token_expired):\n return None\n return credentials", "def get_cr...
[ "0.8314805", "0.67917407", "0.66121364", "0.6458647", "0.644396", "0.6412883", "0.6292584", "0.6262902", "0.6216184", "0.6159604", "0.60996187", "0.60749036", "0.60712475", "0.5996059", "0.5910496", "0.59005594", "0.5890843", "0.5890843", "0.5890843", "0.5890843", "0.5890843"...
0.83706456
1
We need a Google calendar 'service' object to obtain list of calendars, busy times, etc. This requires authorization. If authorization is already in effect, we'll just return with the authorization. Otherwise, control flow will be interrupted by authorization, and we'll end up redirected back to /choose without a service object. Then the second call will succeed without additional authorization.
def get_gcal_service(credentials): app.logger.debug("Entering get_gcal_service") http_auth = credentials.authorize(httplib2.Http()) service = discovery.build('calendar', 'v3', http=http_auth) app.logger.debug("Returning service") return service
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticate_google():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n ...
[ "0.6547268", "0.64876544", "0.62233627", "0.61843467", "0.614813", "0.6039895", "0.5898657", "0.5767838", "0.5764027", "0.5762848", "0.5696502", "0.5657998", "0.5613315", "0.5523032", "0.54875094", "0.5481039", "0.5432056", "0.5416046", "0.540904", "0.5369274", "0.5323026", ...
0.631776
3
The 'flow' has this one place to call back to. We'll enter here more than once as steps in the flow are completed, and need to keep track of how far we've gotten. The first time we'll do the first step, the second time we'll skip the first step and do the second, and so on.
def oauth2callback(): app.logger.debug("Entering oauth2callback") flow = client.flow_from_clientsecrets( CLIENT_SECRET_FILE, scope= SCOPES, redirect_uri=flask.url_for('oauth2callback', _external=True)) ## Note we are *not* redirecting above. We are noting *where* ## we will redirect to, which is this function. ## The *second* time we enter here, it's a callback ## with 'code' set in the URL parameter. If we don't ## see that, it must be the first time through, so we ## need to do step 1. app.logger.debug("Got flow") if 'code' not in flask.request.args: app.logger.debug("Code not in flask.request.args") auth_uri = flow.step1_get_authorize_url() return flask.redirect(auth_uri) ## This will redirect back here, but the second time through ## we'll have the 'code' parameter set else: ## It's the second time through ... we can tell because ## we got the 'code' argument in the URL. app.logger.debug("Code was in flask.request.args") auth_code = flask.request.args.get('code') credentials = flow.step2_exchange(auth_code) flask.session['credentials'] = credentials.to_json() ## Now I can build the service and execute the query, ## but for the moment I'll just log it and go back to ## the main screen app.logger.debug("Got credentials") return flask.redirect(flask.url_for('choose'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_step(self) -> None:", "def next_step(self):\n self.proceed()\n self.execute_current()", "def _step(self) -> None:", "def step_forward(self):", "def _step(self):\n pass", "def step(self):\n\n pass", "def _step(self, whence):\n pass", "def getCurrentStep():", ...
[ "0.74529886", "0.74457574", "0.7440825", "0.73785466", "0.7311282", "0.70527315", "0.7038946", "0.68671244", "0.6764548", "0.67290527", "0.66969025", "0.6665891", "0.6658122", "0.6636712", "0.6608603", "0.66074103", "0.6606269", "0.6533289", "0.6533289", "0.6533289", "0.64716...
0.0
-1
User chose a date range with the bootstrap daterange widget.
def setrange(): app.logger.debug("Entering setrange") flask.flash("Setrange gave us '{}'".format( request.form.get('daterange'))) daterange = request.form.get('daterange') flask.session['daterange'] = daterange daterange_parts = daterange.split() flask.session['begin_date'] = interpret_date(daterange_parts[0]) flask.session['end_date'] = interpret_date(daterange_parts[2]) app.logger.debug("Setrange parsed {} - {} dates as {} - {}".format( daterange_parts[0], daterange_parts[1], flask.session['begin_date'], flask.session['end_date'])) return flask.redirect(flask.url_for("choose"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_date_range(self, range):\n\n date_range_string = \"{0} to {1}\".format(\n self.fmt_date(range.start_date), self.fmt_date(range.end_date)\n )\n log.info(\"Specifying a date range of: {0}\".format(date_range_string))\n\n # Enter the specified date range\n selecto...
[ "0.7240307", "0.708021", "0.6923517", "0.66862506", "0.64068097", "0.6244564", "0.6082898", "0.6051793", "0.5973784", "0.5954892", "0.5869089", "0.57650083", "0.5739868", "0.57391983", "0.5736123", "0.57263255", "0.571437", "0.57027483", "0.56858015", "0.5682606", "0.5677092"...
0.67631936
3
Start with some reasonable defaults for date and time ranges. Note this must be run in app context ... can't call from main.
def init_session_values(): # Default date span = tomorrow to 1 week from now now = arrow.now('local') # We really should be using tz from browser tomorrow = now.replace(days=+1) nextweek = now.replace(days=+7) flask.session["begin_date"] = tomorrow.floor('day').isoformat() flask.session["end_date"] = nextweek.ceil('day').isoformat() flask.session["daterange"] = "{} - {}".format( tomorrow.format("MM/DD/YYYY"), nextweek.format("MM/DD/YYYY")) # Default time span each day, 8 to 5 flask.session["begin_time"] = interpret_time("9am") flask.session["end_time"] = interpret_time("5pm")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ConfigureDefaults(area_bounds=None, \n area_bounds_format=['x_min','y_min','x_max','y_max'], \n area_bounds_range=None, years_are_bounds=False,\n dates_are_bounds=False, init_date_str_format='%y%m%d',\n member_name='rea...
[ "0.6126687", "0.6049868", "0.5839494", "0.57524276", "0.5737345", "0.57099867", "0.5672743", "0.5655215", "0.560356", "0.5569601", "0.55678964", "0.55200195", "0.55126786", "0.549733", "0.54575974", "0.540262", "0.5393907", "0.5381829", "0.5373057", "0.53664577", "0.5354388",...
0.5581906
9
Read time in a humancompatible format and interpret as ISO format with local timezone. May throw exception if time can't be interpreted. In that case it will also flash a message explaining accepted formats.
def interpret_time( text ): app.logger.debug("Decoding time '{}'".format(text)) time_formats = ["ha", "h:mma", "h:mm a", "H:mm"] try: as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal()) as_arrow = as_arrow.replace(year=2016) #HACK see below app.logger.debug("Succeeded interpreting time") except: app.logger.debug("Failed to interpret time") flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm" .format(text)) raise return as_arrow.isoformat() #HACK #Workaround # isoformat() on raspberry Pi does not work for some dates # far from now. It will fail with an overflow from time stamp out # of range while checking for daylight savings time. Workaround is # to force the date-time combination into the year 2016, which seems to # get the timestamp into a reasonable range. This workaround should be # removed when Arrow or Dateutil.tz is fixed. # FIXME: Remove the workaround when arrow is fixed (but only after testing # on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpret_time(text):\n app.logger.debug(\"Decoding time '{}'\".format(text))\n time_formats = [\"ha\", \"h:mma\", \"h:mm a\", \"H:mm\"]\n try:\n as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())\n as_arrow = as_arrow.replace(year=2016) # HACK see below\n app...
[ "0.67903686", "0.66916007", "0.6613034", "0.6586266", "0.6439515", "0.64221704", "0.6381868", "0.6360631", "0.63137174", "0.6284302", "0.62411284", "0.62141776", "0.61804426", "0.61793673", "0.6178851", "0.6156706", "0.61117786", "0.6111385", "0.6104648", "0.6101974", "0.6100...
0.6865028
0
Convert text of date to ISO format used internally, with the local time zone.
def interpret_date( text ): try: as_arrow = arrow.get(text, "MM/DD/YYYY").replace( tzinfo=tz.tzlocal()) except: flask.flash("Date '{}' didn't fit expected format 12/31/2001") raise return as_arrow.isoformat()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_to_iso(string):\r\n\r\n # disregard tokenisation, if it's there, to make this an easier conversion for GUTime\r\n string = re.sub(r'<([^~]*)~.+?>', r'\\1 ', string)\r\n\r\n # Defaults\r\n d = None\r\n m = None\r\n y = None\r\n h = None\r\n min = None\r\n s = None\r\n fs = Non...
[ "0.6905238", "0.65572876", "0.6553589", "0.64506024", "0.64370036", "0.6369056", "0.6347456", "0.62363726", "0.6200606", "0.6184233", "0.6160375", "0.6148745", "0.61423504", "0.6141841", "0.6125747", "0.60974556", "0.606667", "0.60619223", "0.6043991", "0.60352457", "0.603524...
0.6582443
1
ISO date + 1 day (used in query to Google calendar)
def next_day(isotext): as_arrow = arrow.get(isotext) return as_arrow.replace(days=+1).isoformat()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iso_date(self):\n return self.strftime(self.FORMAT_PRECISION_DAY)", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def convertSODate(datenum):\n #Date numbers seem to start with 0 = 2001-01-01\n base_date = datetime.date(2001, 1, 1)\n #add key from the spot on obje...
[ "0.6720962", "0.6390619", "0.6365043", "0.62321824", "0.62134486", "0.6139947", "0.61357707", "0.6097243", "0.60584015", "0.60497934", "0.6018173", "0.60177577", "0.6013842", "0.6012742", "0.5981008", "0.5978285", "0.59668523", "0.59668523", "0.59668523", "0.59668523", "0.595...
0.5598111
68
Given a google 'service' object, return a list of calendars. Each calendar is represented by a dict. The returned list is sorted to have the primary calendar first, and selected (that is, displayed in Google Calendars web app) calendars before unselected calendars.
def list_calendars(service): app.logger.debug("Entering list_calendars") calendar_list = service.calendarList().list().execute()["items"] result = [ ] for cal in calendar_list: kind = cal["kind"] id = cal["id"] if "description" in cal: desc = cal["description"] else: desc = "(no description)" summary = cal["summary"] # Optional binary attributes with False as default selected = ("selected" in cal) and cal["selected"] primary = ("primary" in cal) and cal["primary"] result.append( { "kind": kind, "id": id, "summary": summary, "selected": selected, "primary": primary }) return sorted(result, key=cal_sort_key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_calendars(service):\n app.logger.debug(\"Entering list_calendars with service\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n app.logger.debug(\"Got calendar list\")\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n...
[ "0.82477534", "0.6850524", "0.6471131", "0.63463163", "0.63463163", "0.63192886", "0.62622386", "0.6232313", "0.61395335", "0.61257684", "0.6111868", "0.61005104", "0.603974", "0.59506303", "0.5812229", "0.5798276", "0.56924987", "0.5668386", "0.5658008", "0.55682445", "0.553...
0.8079838
1
A helper method that generates a dictionary of arguments needed to instantiate a BaseBoto object. The purpose of this method is to abstract out the code to handle optional CLI arguments and not duplicate the None handling code.
def __get_arguments(args=None, logger=None, stats=None): if not args: parser = get_parser() add_boto_cli_arguments(parser) # Parse only the known arguments added by add_boto_cli_arguments(). # We only need those arguments to create Boto object, nothing else. # parse_known_args() return (Namespace, list of unknown arguments), # we only care about the Namespace object here. args = parser.parse_known_args()[0] if not logger: logger = get_logger(name=NAME) if not stats: stats = get_stats(prefix=NAME) return { 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()), 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()), 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()), 'region': getattr(args, 'boto_region', DEFAULT['region']()), 'logger': logger, 'stats': stats, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def GetArgs():\n \n UserArgs = {}\n UserArgs['help'] = False\n UserArgs['RsodFileName'] = \"\"\n UserArgs['BiosPathX64'] = \"\"\n\n for i in range(1,len(sys.argv)):\n if sys.argv[i].lo...
[ "0.64982295", "0.6270005", "0.60605526", "0.6001975", "0.5956914", "0.5867535", "0.586068", "0.5848655", "0.58337194", "0.57882416", "0.5759382", "0.57521", "0.5736829", "0.57335955", "0.572861", "0.571756", "0.5702735", "0.5665511", "0.5653056", "0.5628118", "0.5623889", "...
0.7327621
0
Return a usable Boto object without creating a class around it. In the context of a krux.cli (or similar) interface the 'args', 'logger' and 'stats' objects should already be present. If you don't have them, however, we'll attempt to provide usable ones for the boto setup. (If you omit the add_boto_cli_arguments() call during other cli setup, the Boto object will still work, but its cli options won't show up in help output)
def get_boto(args=None, logger=None, stats=None): return Boto(**__get_arguments(args, logger, stats))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_boto3(args=None, logger=None, stats=None):\n return Boto3(**__get_arguments(args, logger, stats))", "def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by ad...
[ "0.7120333", "0.6446755", "0.53901947", "0.53732514", "0.5315737", "0.5265874", "0.5252197", "0.522644", "0.5211605", "0.521066", "0.5205573", "0.51422757", "0.5054793", "0.5037921", "0.5032993", "0.50301266", "0.49558958", "0.49513885", "0.49364442", "0.491438", "0.49119216"...
0.81121886
0
Return a usable Boto3 object without creating a class around it. In the context of a krux.cli (or similar) interface the 'args', 'logger' and 'stats' objects should already be present. If you don't have them, however, we'll attempt to provide usable ones for the boto setup. (If you omit the add_boto_cli_arguments() call during other cli setup, the Boto object will still work, but its cli options won't show up in help output)
def get_boto3(args=None, logger=None, stats=None): return Boto3(**__get_arguments(args, logger, stats))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_boto(args=None, logger=None, stats=None):\n return Boto(**__get_arguments(args, logger, stats))", "def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by add_...
[ "0.767378", "0.62587637", "0.5730653", "0.5656308", "0.5650132", "0.5583414", "0.5518301", "0.5486912", "0.5486354", "0.5425678", "0.54201967", "0.541126", "0.5337484", "0.5296419", "0.5290054", "0.52891475", "0.52808595", "0.5232185", "0.52223915", "0.5181535", "0.5169401", ...
0.81879747
0
Proxies calls to ``boto.`` methods.
def __getattr__(self, attr): # This way, we don't have to write: rv = Boto().boto.some_call # But can just write: rv = Boto().some_call # This also gives us hooks for future logging/timers/etc and # extended wrapping of things the attributes return if we so # choose. self._logger.debug('Calling wrapped boto attribute: %s on %s', attr, self) attr = getattr(self._boto, attr) if callable(attr): self._logger.debug("Boto attribute '%s' is callable", attr) @wraps(attr) def wrapper(*args, **kwargs): return attr(*args, **kwargs) return wrapper return attr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monkey_patch_botocore_for_xray():\n wrapt.wrap_function_wrapper(\n \"botocore.client\", \"BaseClient._make_api_call\", xray_botocore_api_call,\n )", "def xray_botocore_api_call(wrapped, instance, args, kwargs):\n return generic_xray_wrapper(\n wrapped,\n instance,\n args,...
[ "0.6867628", "0.6424001", "0.55805993", "0.5508542", "0.5488377", "0.5417456", "0.53769153", "0.534277", "0.5236493", "0.5206002", "0.5194725", "0.51764464", "0.5175369", "0.51672405", "0.5091889", "0.5048222", "0.50451034", "0.5020509", "0.5010022", "0.49961105", "0.49813974...
0.6325436
2
Gets all AWS regions that Krux can access
def get_valid_regions(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = ec2_client.describe_regions()\n return [region['RegionName'] fo...
[ "0.8312565", "0.779635", "0.77942985", "0.77942985", "0.7289586", "0.728351", "0.7272279", "0.71950173", "0.7140144", "0.70876795", "0.69920516", "0.6973136", "0.692615", "0.6878461", "0.6822473", "0.6779286", "0.6745307", "0.67410904", "0.6683353", "0.664014", "0.6598619", ...
0.65673536
24
Gets all AWS regions that Krux can access
def get_valid_regions(self): conn = self._boto.ec2.connect_to_region(self.cli_region) regions = [] for region in conn.get_all_regions(): if getattr(RegionCode.Region, region.name, None) is not None: regions.append(RegionCode.Region[region.name]) else: regions.append(region.name) return regions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = ec2_client.describe_regions()\n return [region['RegionName'] fo...
[ "0.8312565", "0.779635", "0.77942985", "0.77942985", "0.728351", "0.7272279", "0.71950173", "0.7140144", "0.70876795", "0.69920516", "0.6973136", "0.692615", "0.6878461", "0.6822473", "0.6779286", "0.6745307", "0.67410904", "0.6683353", "0.664014", "0.6598619", "0.65845656", ...
0.7289586
4
Gets all AWS regions that Krux can access
def get_valid_regions(self): client = self._boto.client('ec2') regions = [] for region in client.describe_regions().get('Regions', []): if getattr(RegionCode.Region, region.get('RegionName'), None) is not None: regions.append(RegionCode.Region[region.get('RegionName')]) else: regions.append(region.get('RegionName')) return regions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = ec2_client.describe_regions()\n return [region['RegionName'] fo...
[ "0.8312565", "0.779635", "0.77942985", "0.77942985", "0.7289586", "0.7272279", "0.71950173", "0.7140144", "0.70876795", "0.69920516", "0.6973136", "0.692615", "0.6878461", "0.6822473", "0.6779286", "0.6745307", "0.67410904", "0.6683353", "0.664014", "0.6598619", "0.65845656",...
0.728351
5
Initializes a read operation from Debezium.
def __init__( self, connector_class, username, password, host, port, max_number_of_records=None, connection_properties=None, expansion_service=None): self.params = ReadFromDebeziumSchema( connector_class=connector_class.value, username=username, password=password, host=host, port=port, max_number_of_records=max_number_of_records, connection_properties=connection_properties) self.expansion_service = expansion_service or default_io_expansion_service()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n self.reader = reader.Reader()", "def read(self):\n pass", "def read(self):", "def read(self):\n raise NotImplementedError", "def read():\n # TODO", "def read(self):\n pass", "def read(self, **kwargs):\n pass", "def _read_data(self):", "def r...
[ "0.6265758", "0.6161929", "0.6149388", "0.60283214", "0.5988332", "0.5961205", "0.5949067", "0.59235144", "0.5830563", "0.575123", "0.5719261", "0.5699496", "0.5671507", "0.5631414", "0.56239974", "0.5621243", "0.5612608", "0.56093657", "0.5547174", "0.5521234", "0.55149037",...
0.0
-1
Extract plastic class label from Image Name and return it
def ExtractLabel(ImgName): # Each img has name notation "*****a0X*" where X is PlasticType PlasticType = ImgName[7] return { '1': 0, # PET '2': 1, # HDPE '4': 2, # LDPE '5': 3, # PP '6': 4, # PS '7': 5, # Other }[PlasticType]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name_to_label(self, name):\n\t\t\treturn self.classes[name]", "def name_to_label(self, name):\n\t\treturn self.classes[name]", "def get_imagenet_label(index):\n global _CLASS_INDEX\n if _CLASS_INDEX is None:\n with open(os.path.join(os.path.dirname(__file__), '../resources/imagenet_class_index...
[ "0.6750595", "0.67307", "0.6638844", "0.6555292", "0.6546279", "0.65367347", "0.64170223", "0.6413202", "0.6403757", "0.6369213", "0.63066596", "0.6250891", "0.6241231", "0.6123709", "0.6118442", "0.61034334", "0.60980034", "0.6039952", "0.6016841", "0.60058093", "0.5924414",...
0.7817808
0
Specify where files in our default root are uploaded.
def upload_location(instance, filename): new_id = randint(0, 1000) return "%s/%s" % (new_id, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,self.upload_dir_rel())", "def public_upload_dir(self):\n return os.path.join(settings.MEDIA_ROOT,\n self.public_upload_dir_rel())", "def upload_dir_rel(self):\n return os.path.join(self.short_name,\"...
[ "0.6755884", "0.67076856", "0.6568125", "0.6368969", "0.62236637", "0.61067545", "0.6066317", "0.6020004", "0.5959959", "0.59437656", "0.5911991", "0.58240676", "0.5714736", "0.57077825", "0.5693331", "0.5683937", "0.5659573", "0.5636132", "0.56256604", "0.56158495", "0.56020...
0.0
-1