Dataset schema (one row per record):

column           | type   | lengths / values
-----------------|--------|--------------------
query            | string | 9 to 9.05k chars
document         | string | 10 to 222k chars
metadata         | dict   | -
negatives        | list   | exactly 30 items
negative_scores  | list   | exactly 30 items
document_score   | string | 4 to 10 chars
document_rank    | string | 2 distinct values
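Each row pairs a natural-language query (usually a docstring) with the code that implements it ("document"), plus 30 hard-negative snippets and their retrieval scores. A minimal sketch of loading and inspecting one row with the Hugging Face datasets library; the dataset ID "user/code-retrieval-triplets" is a placeholder, not the real repository path:

from datasets import load_dataset

# Placeholder dataset ID; substitute the actual repository path.
ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])             # natural-language description
print(row["document"][:200])    # positive code snippet
print(len(row["negatives"]))    # 30 hard-negative snippets
print(row["document_score"], row["document_rank"])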
Calculates the log of the given TF value
def logTF(self, tf): return math.log(tf)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_tf_log(doc):\r\n tf = calc_tf(doc)\r\n max_tf = tf[max(tf, key=tf.get)]\r\n tf_log = {}\r\n for key, val in tf.items():\r\n tf_log[key] = (1 + math.log(val)) / (1 + math.log(max_tf))\r\n return tf_log", "def logp(self, value: TensorType, **kwargs) -> TensorType:", "def logp(self,...
[ "0.7719589", "0.77123725", "0.77123725", "0.76386464", "0.7574258", "0.7372678", "0.72950846", "0.7153347", "0.71502346", "0.71078914", "0.70195645", "0.6958456", "0.6947913", "0.68994933", "0.6897144", "0.68653464", "0.68525743", "0.68525743", "0.6830809", "0.6821966", "0.68...
0.85959244
0
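Across the rows below, document_rank appears to equal the number of negatives whose score exceeds document_score (0 when the positive document ranks first, 1 when exactly one negative beats it). This is an inference from the visible rows, not a documented guarantee; a sketch of recomputing it:

def recompute_document_rank(document_score, negative_scores):
    # Scores are stored as strings in this dump, so cast before comparing.
    doc = float(document_score)
    return sum(1 for s in negative_scores if float(s) > doc)

# The row above: 0.85959244 beats the top negative 0.7719589, so the rank is 0.
assert recompute_document_rank("0.85959244", ["0.7719589", "0.77123725"]) == 0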
Calculates the okapiTF value from the given parameters.
def okapiTF(self, tf, dlen, avgdlen): return tf / (tf + 0.5 + 1.5 * (dlen/avgdlen))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate(self) -> float:", "def calculate_tf(self, book_dict, term):\n term_frequency = 0\n try:\n term_frequency = (\n book_dict[\"SanitizedText\"][term] / book_dict[\"TotalNoOfTerms\"]\n )\n except KeyError:\n print(\"Key Error, Term doe...
[ "0.5624942", "0.5566399", "0.55482346", "0.5489444", "0.5478475", "0.53710085", "0.5316732", "0.52735984", "0.52552027", "0.5215894", "0.5206082", "0.5185447", "0.5171882", "0.5163169", "0.51528233", "0.5145025", "0.513191", "0.5121332", "0.5108843", "0.5080807", "0.5073275",...
0.58334565
0
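For a concrete feel of the okapiTF formula in the row above, a standalone restatement with illustrative values: tf=3 in a document of length 120 with average length 100 gives 3 / (3 + 0.5 + 1.5 * 1.2) = 3 / 5.3 ≈ 0.566.

# Standalone restatement of the okapiTF formula above (values illustrative).
def okapi_tf(tf, dlen, avgdlen):
    return tf / (tf + 0.5 + 1.5 * (dlen / avgdlen))

print(okapi_tf(3, 120, 100))  # ≈ 0.566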
Call MyClass.class_mock_two(), but mock MyClass.class_mock_one.
def test_mock_a_class_func():
    print()
    myclass = mymodule.MyClass()
    myclass.class_mock_one = Mock()
    myclass.class_mock_one.return_value = 2
    xx = myclass.class_mock_two()
    print(xx)
    myclass.class_mock_one.assert_called_with()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provider_and_mock_two(monkeypatch, provider_two, mock_inner_provider):\n mock_make_provider = mock.Mock(return_value=mock_inner_provider)\n monkeypatch.setattr(provider_two, 'make_provider', mock_make_provider)\n return provider_two, mock_inner_provider", "def provider_and_mock_one(monkeypatch, prov...
[ "0.6849225", "0.6707006", "0.62774473", "0.5856203", "0.5846057", "0.5818575", "0.5705756", "0.57026553", "0.5469705", "0.54603136", "0.53983986", "0.53659284", "0.5362481", "0.53552854", "0.53475523", "0.52593946", "0.52591026", "0.52479494", "0.5157788", "0.5140922", "0.509...
0.7694529
0
Loads default and custom services for use within CORE.
def _load_services(self) -> None:
    # load default services
    self.service_errors = ServiceManager.load_locals()
    # load custom services
    service_paths = self.config.get("custom_services_dir")
    logger.debug("custom service paths: %s", service_paths)
    if service_paths is not None:
        ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_services(service_store):\n service_store.register_service(GetDrugStoreService)\n service_store.register_service(FuelLevelService)\n service_store.register_service(SetFuelLevelService)\n service_store.register_service(GetRobotPosition)\n service_store.register_service(SetRobotPosition)", "...
[ "0.7005679", "0.6346517", "0.63041884", "0.6099403", "0.60553855", "0.59759414", "0.5793857", "0.5764348", "0.5748033", "0.57183653", "0.57032514", "0.5648803", "0.5618671", "0.5606773", "0.5566092", "0.5551873", "0.5527294", "0.5522216", "0.5508008", "0.549366", "0.5394617",...
0.76303107
0
Check if emane is installed and load models.
def _load_emane(self) -> None:
    # check for emane
    path = utils.which("emane", required=False)
    self.has_emane = path is not None
    if not self.has_emane:
        logger.info("emane is not installed, emane functionality disabled")
        return
    # get version
    emane_ver...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadmodels(self):\n for emane_model in EMANE_MODELS:\n logger.info(\"loading emane model: (%s) %s - %s\",\n emane_model, emane_model.name, RegisterTlvs(emane_model.config_type))\n self._modelclsmap[emane_model.name] = emane_model\n self.session.add...
[ "0.7130563", "0.69357973", "0.6056981", "0.60229075", "0.59975207", "0.59605366", "0.5903529", "0.5884921", "0.57850355", "0.5753735", "0.5745536", "0.5728512", "0.5696617", "0.56890875", "0.568479", "0.5596342", "0.5584993", "0.5554426", "0.55042964", "0.5497591", "0.5494007...
0.8328525
0
Shutdown all CORE sessions.
def shutdown(self) -> None:
    logger.info("shutting down all sessions")
    while self.sessions:
        _, session = self.sessions.popitem()
        session.shutdown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown(self):\n # shutdown all known sessions\n for session in self.sessions.values():\n session.shutdown()\n\n # if we are a daemon remove pid file\n if self.config[\"daemonize\"]:\n pid_file = self.config[\"pidfile\"]\n try:\n os.u...
[ "0.7054555", "0.7012447", "0.6973338", "0.69510627", "0.6917507", "0.6871635", "0.6789869", "0.67295724", "0.67286533", "0.6678548", "0.66759485", "0.6605788", "0.6588758", "0.6574293", "0.654621", "0.6505152", "0.6505152", "0.6505152", "0.6468864", "0.6468864", "0.64258343",...
0.77668494
0
Sets the snapshot_date of this ZacksAnalystRatingSnapshot.
def snapshot_date(self, snapshot_date): self._snapshot_date = snapshot_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rating_date(self, rating_date):\n\n self._rating_date = rating_date", "def set_rdate(self, rdate):\n self.__rdate = rdate", "def revision_date(self, revision_date):\n\n self._revision_date = revision_date", "def rating_start_date(self, rating_start_date):\n\n self._rating_star...
[ "0.6246575", "0.5952784", "0.5949085", "0.58533394", "0.5798545", "0.57474214", "0.57247525", "0.5682116", "0.5682116", "0.5682116", "0.5682116", "0.5682116", "0.5641901", "0.55963737", "0.55963737", "0.55558777", "0.55346274", "0.54616004", "0.54373753", "0.5357031", "0.5351...
0.82132596
0
Sets the rating_date of this ZacksAnalystRatingSnapshot.
def rating_date(self, rating_date): self._rating_date = rating_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rating_start_date(self, rating_start_date):\n\n self._rating_start_date = rating_start_date", "def rating_end_date(self, rating_end_date):\n\n self._rating_end_date = rating_end_date", "def _update_rating_history(self, rating: float, date: Union[str, float]):\n self.rating_history.appe...
[ "0.7667483", "0.6481708", "0.6421852", "0.6388244", "0.62596005", "0.6228435", "0.59492034", "0.59492034", "0.59492034", "0.59492034", "0.59492034", "0.59451556", "0.5936428", "0.5929696", "0.5898084", "0.5878672", "0.5782881", "0.5771359", "0.5771359", "0.5727382", "0.572738...
0.835676
0
Sets the mean of this ZacksAnalystRatingSnapshot.
def mean(self, mean): self._mean = mean
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_average(self, avg, num_samples):\n self._average = avg * num_samples\n self.num_samples = num_samples", "def mean(self, avg=True):\n if not self.fp_init:\n if not avg:\n return self._calc_mean(self.f, self.a, self.b, self.Z)\n else:\n ...
[ "0.6930181", "0.67363703", "0.6655305", "0.66153383", "0.6466718", "0.646478", "0.6442922", "0.6403945", "0.6403945", "0.63922215", "0.6262132", "0.6262132", "0.6237978", "0.62258685", "0.6158235", "0.6155664", "0.6135507", "0.60126746", "0.598887", "0.5976859", "0.5922096", ...
0.7557623
0
Sets the percentile of this ZacksAnalystRatingSnapshot.
def percentile(self, percentile): self._percentile = percentile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_rating(self, value):\n try:\n self._rating = float(value)\n except ValueError:\n pass", "def percentile(self, pct):\n return percentile(self.results, pct, interpolation='nearest')", "def surprise_percent(self, surprise_percent):\n\n self._surprise_perce...
[ "0.55393034", "0.551872", "0.54966956", "0.54480946", "0.5393786", "0.5393786", "0.5380648", "0.53383744", "0.5170568", "0.51268977", "0.510302", "0.50660586", "0.50660586", "0.5058735", "0.5058735", "0.5058735", "0.5058735", "0.5056522", "0.50451845", "0.5021903", "0.5003763...
0.68931764
0
Sets the strong_buys of this ZacksAnalystRatingSnapshot.
def strong_buys(self, strong_buys): self._strong_buys = strong_buys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_sells(self, strong_sells):\n\n self._strong_sells = strong_sells", "def buys(self, buys):\n\n self._buys = buys", "def player_b_rating(self, player_b_rating):\n\n self._player_b_rating = player_b_rating", "def weightedBoldness(self):\n\n\t\treturn sum([blend.varietal.boldness ...
[ "0.7033873", "0.5824333", "0.51823777", "0.49280098", "0.48780355", "0.48700973", "0.48350552", "0.4807561", "0.4807561", "0.47917977", "0.47565135", "0.46617305", "0.46569103", "0.46393418", "0.46238536", "0.46224138", "0.46224138", "0.46091217", "0.46026292", "0.4574411", "...
0.8174262
0
Sets the buys of this ZacksAnalystRatingSnapshot.
def buys(self, buys): self._buys = buys
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_buys(self, strong_buys):\n\n self._strong_buys = strong_buys", "def sells(self, sells):\n\n self._sells = sells", "def buying_rate(self, buying_rate):\n\n self._buying_rate = buying_rate", "def strong_sells(self, strong_sells):\n\n self._strong_sells = strong_sells", ...
[ "0.5943578", "0.58260715", "0.5590503", "0.5314247", "0.5207249", "0.51555926", "0.51555926", "0.50987566", "0.49954265", "0.4991696", "0.49129245", "0.4755713", "0.47396746", "0.47301865", "0.47301865", "0.4660928", "0.4651183", "0.46375775", "0.4636185", "0.45578387", "0.45...
0.74221236
0
Sets the holds of this ZacksAnalystRatingSnapshot.
def holds(self, holds): self._holds = holds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ratings(self, ratings):\n\n self._ratings = ratings", "def ratings(self, ratings):\n\n self._ratings = ratings", "def binarize(self):\n total = 0\n count = 0\n avg_rating = 0\n for movie_id, movie in enumerate(self.ratings):\n for user_id, rating in enumerate(movi...
[ "0.5835982", "0.5835982", "0.4976238", "0.48544487", "0.48544487", "0.48132378", "0.4691935", "0.46543285", "0.46492696", "0.4644989", "0.46391696", "0.4630273", "0.46297568", "0.46284485", "0.4615285", "0.45675987", "0.45078152", "0.44719487", "0.44543308", "0.44518128", "0....
0.6009718
0
Sets the sells of this ZacksAnalystRatingSnapshot.
def sells(self, sells): self._sells = sells
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strong_sells(self, strong_sells):\n\n self._strong_sells = strong_sells", "def sales(self, sales):\n\n self._sales = sales", "def selling_rate(self, selling_rate):\n\n self._selling_rate = selling_rate", "def set_sell_amount_from_buy_amount(self, *args, **kwargs):\n self._sell...
[ "0.66252005", "0.6244003", "0.60547477", "0.59269816", "0.5827568", "0.5623113", "0.55961716", "0.5472035", "0.5459563", "0.54290825", "0.5401822", "0.53335994", "0.53216684", "0.5297495", "0.52785707", "0.5262857", "0.523921", "0.5225706", "0.517438", "0.51605576", "0.513034...
0.79995453
0
Sets the strong_sells of this ZacksAnalystRatingSnapshot.
def strong_sells(self, strong_sells): self._strong_sells = strong_sells
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sells(self, sells):\n\n self._sells = sells", "def strong_buys(self, strong_buys):\n\n self._strong_buys = strong_buys", "def set_sell_amount_from_buy_amount(self, *args, **kwargs):\n self._sell_amount = self.get_sell_amount_from_buy_amount(*args, **kwargs)", "async def soft_sell(sel...
[ "0.6637629", "0.65615624", "0.54384863", "0.5224136", "0.5186626", "0.51016766", "0.5085686", "0.50319314", "0.49949828", "0.49781185", "0.4922862", "0.49039868", "0.4898402", "0.48969296", "0.4878801", "0.4862272", "0.48586756", "0.48254427", "0.47724262", "0.47236255", "0.4...
0.86615497
0
Recursive function to return records from CMR using scroll. The first time the function is called, just send a dictionary of what you're looking for. This function will call itself as many times as needed to collect all the records and return them in a list. When calling recursively, send the scroll id
def get_block_of_records(search, scroll_id=None):
    url = "https://cmr.uat.earthdata.nasa.gov/search/collections.umm_json"
    accept = "application/vnd.nasa.cmr.umm_results+json"
    body = search.copy()
    if scroll_id is None:
        # first time here, request a scroll id and clear out headers
        body.updat...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scroll_for_sessions(self, callback):\n \n print('%s Scrolling for sessions in %r...' % (datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), self.index))\n \n response = ElasticConnection._es.search(\n index = self.index,\n scroll = '5m',\n ...
[ "0.5582365", "0.55574197", "0.54616094", "0.52296954", "0.5199142", "0.51651984", "0.51044744", "0.50029343", "0.49887702", "0.49234888", "0.4922378", "0.48991418", "0.4898491", "0.48976377", "0.48586583", "0.48360264", "0.48242566", "0.48100665", "0.48091716", "0.4801426", "...
0.7180109
0
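The truncated function above follows the standard CMR scroll pattern: the first request opens a scroll session and every later request replays the same search with the returned CMR-Scroll-Id header. A hedged sketch of that pattern with the requests library; the use of GET, the "items" key, and the loop shape are assumptions based on the documented CMR search API, not on the hidden part of the function:

import requests

URL = "https://cmr.uat.earthdata.nasa.gov/search/collections.umm_json"

def fetch_all(params):
    records, headers = [], {}
    while True:
        resp = requests.get(URL, params={**params, "scroll": "true"}, headers=headers)
        items = resp.json().get("items", [])
        if not items:
            break
        records.extend(items)
        # Replay the same scroll session on the next request.
        headers["CMR-Scroll-Id"] = resp.headers["CMR-Scroll-Id"]
    return records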
Keep references to classes that are about to be instrumented. Used to search for unpatched classes after the instrumentation has run so that they can be patched manually.
def _record_unpatched_classes():
    # type: () -> Dict[str, type]
    installed_packages = _get_installed_modules()

    original_classes = {}

    for package, orig_path in CLASSES_TO_INSTRUMENT.items():
        if package in installed_packages:
            try:
                original_cls = _import_by_path(orig_pat...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _patch_remaining_classes(original_classes):\n # type: (Dict[str, type]) -> None\n # check which classes have actually been instrumented\n instrumented_classes = {}\n\n for package in list(original_classes.keys()):\n original_path = CLASSES_TO_INSTRUMENT[package]\n\n try:\n ...
[ "0.76276684", "0.57317924", "0.5657213", "0.56442374", "0.5530063", "0.5519416", "0.55182266", "0.54908854", "0.54908854", "0.54908854", "0.54908854", "0.54908854", "0.54908854", "0.54071677", "0.540273", "0.5328092", "0.532508", "0.5324896", "0.53190255", "0.5307411", "0.529...
0.7603913
1
Best-effort attempt to patch any uninstrumented classes in sys.modules. This enables us to not care about the order of imports and sentry_sdk.init() in user code. If e.g. the Flask class had been imported before sentry_sdk was init()ed (and therefore before the OTel instrumentation ran), it would not be instrumented. Th...
def _patch_remaining_classes(original_classes):
    # type: (Dict[str, type]) -> None
    # check which classes have actually been instrumented
    instrumented_classes = {}

    for package in list(original_classes.keys()):
        original_path = CLASSES_TO_INSTRUMENT[package]

        try:
            cls = _import_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _record_unpatched_classes():\n # type: () -> Dict[str, type]\n installed_packages = _get_installed_modules()\n\n original_classes = {}\n\n for package, orig_path in CLASSES_TO_INSTRUMENT.items():\n if package in installed_packages:\n try:\n original_cls = _import_by...
[ "0.7268333", "0.6414055", "0.6173626", "0.5864623", "0.5856952", "0.579441", "0.574507", "0.5737741", "0.5699048", "0.5694578", "0.5617746", "0.553665", "0.54914683", "0.5490592", "0.5467694", "0.5413526", "0.5408118", "0.5367441", "0.53309304", "0.53181887", "0.5274783", "...
0.79764235
0
Applies batch normalization to this layer. Batch normalization must be deleted from the dnn afterwards and layers which were connected to the batch norm must be connected to this layer.
def apply_batch_normalization(self, layer):
    if type(layer) is not BatchNormalization:
        raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')
    self._internal.apply_batch_normalization(layer._internal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def batch_norm(inputs, training, data_format):\n # We set fused=True for a significant performance boost. See\n # https://www.tensorflow.org/performance/performance_guide#common_fused_ops\n return tf.compat.v1.layers.batch_normalization(\n inputs=inputs, axis=1 if data_format == 'channels_first' else 3,\n ...
[ "0.71378446", "0.7028209", "0.699852", "0.6959648", "0.6896941", "0.68904907", "0.68822706", "0.66368127", "0.66204464", "0.65987813", "0.659512", "0.65843093", "0.6543911", "0.65430003", "0.65364105", "0.65344566", "0.65333307", "0.65062654", "0.6484016", "0.6480447", "0.645...
0.7985232
0
Gets the free term vector, of element_count length.
def free_term(self): return Blob.Blob(self._internal.get_free_term())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zero_free_term(self):\n return self._internal.get_zero_free_term()", "def f_vector(self):\n try:\n return self._f_vector\n except AttributeError:\n self._f_vector = vector(ZZ,[len(x) for x in self.face_lattice().level_sets()])\n return self._f_vector", ...
[ "0.6391553", "0.6213692", "0.58426434", "0.5717408", "0.5716961", "0.5586635", "0.55293155", "0.5515443", "0.54636407", "0.54122466", "0.5402079", "0.5396786", "0.5375249", "0.53743243", "0.536684", "0.536628", "0.5319798", "0.52847767", "0.52813905", "0.52696437", "0.5250845...
0.6329039
1
Get model last response
def get_model_api_last_response(self): return self._last_response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model_api_last_response_id(self):\n return self._last_response_id", "def last(self):\n data = self._http_get(\"last\")\n return data.json()", "def get_latest_model():\n return get_models()[-1]", "def get_last_result(self):\n return self.last_result", "def test_get_las...
[ "0.73073435", "0.6988277", "0.67950827", "0.67702544", "0.65754414", "0.6431394", "0.6355076", "0.6326154", "0.63088065", "0.62795436", "0.624241", "0.62157893", "0.61976624", "0.61938447", "0.6164053", "0.61567557", "0.6153006", "0.6150398", "0.61497515", "0.61475986", "0.61...
0.8733788
0
Get last model response ID
def get_model_api_last_response_id(self): return self._last_response_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model_api_last_response(self):\n return self._last_response", "def getLastObjectId(self):\n return self.objId", "def latest_id(self):\n return self.checkpoints[-1]", "def _get_id(self):\n return self.id", "def get_id(self):\n pass", "def get_id(self):\n p...
[ "0.75364536", "0.69300216", "0.6603426", "0.6570031", "0.6540917", "0.6540917", "0.6540917", "0.6540917", "0.6533437", "0.6508722", "0.65032995", "0.64846146", "0.64710206", "0.64190453", "0.6388252", "0.6376812", "0.6371169", "0.63536155", "0.6344545", "0.6344545", "0.634454...
0.8848147
0
Pad an image up to the target size.
def pad_image(img, target_size):
    rows_missing = target_size[0] - img.shape[2]
    cols_missing = target_size[1] - img.shape[3]
    padded_img = np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant')
    return padded_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resize_and_pad(img, target_size): \n max_shape = np.max(img.shape)\n max_index = np.argmax(img.shape)\n if img.shape[1] < target_size and img.shape[2] < target_size: \n #interpolate\n factor = float(target_size)/max_shape\n scaled_img = misc.imresize(img, (max(1,int(img.shap...
[ "0.75301945", "0.7414062", "0.73439056", "0.72765875", "0.7182036", "0.7022596", "0.6945628", "0.6880894", "0.6790839", "0.6768397", "0.6740291", "0.6721193", "0.66918796", "0.6651381", "0.65719867", "0.6508936", "0.6486176", "0.64239997", "0.64179003", "0.637561", "0.6374147...
0.83163095
0
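A quick usage note on pad_image above: it expects a batched NCHW array and pads only on the bottom and right. A runnable example (shapes illustrative):

import numpy as np

def pad_image(img, target_size):
    rows_missing = target_size[0] - img.shape[2]
    cols_missing = target_size[1] - img.shape[3]
    return np.pad(img, ((0, 0), (0, 0), (0, rows_missing), (0, cols_missing)), 'constant')

img = np.ones((1, 3, 480, 600))
print(pad_image(img, (512, 640)).shape)  # (1, 3, 512, 640)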
Calculate the confusion matrix from the given labels and predictions.
def get_confusion_matrix(gt_label, pred_label, class_num):
    index = (gt_label * class_num + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((class_num, class_num))
    for i_label in range(class_num):
        for i_pred_label in range(class_num):
            c...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_confusion_matrix(label, pred, num_class, ignore=255):\n\toutput = pred.cpu().numpy().transpose(0, 2, 3, 1)\n\t#mask = label.cpu().numpy().transpose(0, 2, 3, 1)\n\tseg_pred = np.asarray(np.argmax(output, axis=3), dtype=np.uint8)\n\t#seg_gt = np.asarray(np.argmax(mask, axis=3), dtype=np.int)\n\tseg_gt = labe...
[ "0.7771614", "0.75971806", "0.7573982", "0.7315963", "0.730737", "0.7280311", "0.71592414", "0.71516883", "0.71338004", "0.7126695", "0.7106781", "0.6955897", "0.6951744", "0.69315773", "0.69232666", "0.6904852", "0.6879925", "0.6878973", "0.6876468", "0.6837224", "0.6835314"...
0.7601832
1
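The truncated get_confusion_matrix above relies on the classic bincount trick: each (gt, pred) pair is flattened to the single index gt * class_num + pred, and the counts of those indices are the matrix cells. A self-contained sketch of the same technique; it uses reshape where the original fills the matrix with nested loops, which is equivalent here:

import numpy as np

def confusion_matrix(gt_label, pred_label, class_num):
    # Flatten each (gt, pred) pair into one index, then count occurrences.
    index = (gt_label * class_num + pred_label).astype('int32')
    counts = np.bincount(index, minlength=class_num * class_num)
    return counts.reshape(class_num, class_num)

gt = np.array([0, 1, 1, 2])
pred = np.array([0, 1, 2, 2])
print(confusion_matrix(gt, pred, 3))  # rows = ground truth, cols = prediction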
Try to create a track with a bad source_id. Returns a 404 response with a detail message.
def test_create_with_bad_id(self):
    # Count the number of records before the save
    existing_records_count = Track.objects.all().count()
    post_data = {'source_type': 'spotify', 'source_id': 00}
    resp = self.api_client.post('/api/metadata/tracks/', data=post_data)
    data = json.loads(res...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_with_bad_backend(self):\n # Count the number of records before the save\n post_data = {\n 'source_type': 'test',\n 'source_id': '4bCOAuhvjsxbVBM5MM8oik',\n }\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n data = json...
[ "0.7134957", "0.65041846", "0.6113626", "0.5729739", "0.55509627", "0.5464614", "0.5420374", "0.5404015", "0.54009336", "0.5392408", "0.5372617", "0.53524536", "0.53095245", "0.53046906", "0.53046906", "0.53046906", "0.53046906", "0.5304292", "0.5304292", "0.5304292", "0.5272...
0.7263801
0
Remove a track from the database. Returns a successful response with a detail message.
def test_delete(self):
    # Count the number of records before the save
    existing_records_count = Track.objects.all().count()
    resp = self.api_client.delete('/api/metadata/tracks/2/')
    data = json.loads(resp.content)
    new_records_count = Track.objects.all().count()
    # Ensure req...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_music():\n track_id = request.vars.track_id\n if track_id is None:\n raise HTTP(500)\n db(db.track_data.track_id == track_id).delete()\n return \"ok\"", "def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n ...
[ "0.6953594", "0.6746213", "0.65705466", "0.63468224", "0.6179682", "0.6166458", "0.6075665", "0.60428214", "0.59959084", "0.5920831", "0.5845369", "0.58197093", "0.5800224", "0.5790172", "0.5773988", "0.5727949", "0.57239854", "0.5679066", "0.56446403", "0.5642042", "0.563766...
0.6781372
1
Load (selected) image(s) from zip archive file
def load_zip_archive(archive_file, file_list=None, suffix_list=['jpeg', 'jpg', 'png'], as_float=False):
    if type(file_list) in [str]:
        file_list = [file_list]
    if type(suffix_list) in [str]:
        suffix_list = [suffix_list]
    if suffix_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_zipped_image(zipfilename):\n\n # Read each image and append in a list\n img = []\n filenames = []\n with ZipFile(zipfilename) as archive:\n for entry in archive.infolist():\n with archive.open(entry) as file:\n tmp = Image.open(file)\n img.append...
[ "0.76349974", "0.6311234", "0.6153032", "0.59051687", "0.5845452", "0.5826613", "0.5734629", "0.5730039", "0.5701617", "0.5680505", "0.5641235", "0.56268257", "0.5621598", "0.56099635", "0.55637586", "0.5561061", "0.55251", "0.5472375", "0.54687905", "0.5451084", "0.5445658",...
0.67017627
1
Create a Meter from a dict object.
def from_dict(cls, d: Dict, method: SerializationType = SerializationType.Status) -> "Meter":
    name = d["name"]
    min_value = d["min_value"]
    max_value = d["max_value"]
    meter = Meter(
        name=name,
        min_value=min_value,
        max_value=max_value,
    )
    i...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_dict(cls, d):\n return loadd(d, cls)", "def from_dict(cls, d):\n return cls(**d)", "def from_dict(cls, data):\n return cls(**data)", "def fromdict(cls,datadict):\n return cls(fmetric=datadict.get('fmetric'),\n fhost=datadict.get('fhost'),\n ...
[ "0.6572882", "0.6530952", "0.6431576", "0.63849896", "0.6335211", "0.630731", "0.6291888", "0.6274567", "0.62618166", "0.62144405", "0.6184855", "0.6163284", "0.61541444", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.61479396", "0.614...
0.80224687
0
Check that all valence terms have been assigned.
def _check_all_valence_terms_assigned(
    handler,
    assigned_terms,
    topology,
    valence_terms,
):
    if len(assigned_terms) == len(valence_terms):
        return
    # Convert the valence term to a valence dictionary to make sure
    # the order of atom indices doesn't matter for comparison.
    valence_ter...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def _var_check(self):\n missing = set()\n for v in self.variables:\n if getattr(self, v) is None:\n missing.add(v)\n self.missing = missing", "def _check_assigned(self):\n\n ...
[ "0.6390863", "0.6386517", "0.6137061", "0.61295885", "0.61179125", "0.60907435", "0.60848904", "0.5932524", "0.5888458", "0.58481896", "0.5843493", "0.57511234", "0.5699283", "0.569033", "0.56810856", "0.56619775", "0.5629971", "0.561073", "0.5604263", "0.55888325", "0.557981...
0.7500235
0
Return a subset of `supported_parameters` that are meant to be included in potentials.
def potential_parameters(cls): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def supported_parameters(cls):\n raise NotImplementedError()", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def potential_parameters(cls):\n return [\"k\", \"periodicity\", \"phase\", \"idivf\"]", "def supported_parameters(cls):\n retu...
[ "0.61632377", "0.60749733", "0.60749733", "0.6045904", "0.5958558", "0.59476894", "0.5931817", "0.5903901", "0.5861894", "0.58335835", "0.5776701", "0.5754737", "0.56521624", "0.5636197", "0.56098294", "0.5578262", "0.55694485", "0.55178815", "0.5493202", "0.5493202", "0.5449...
0.63488674
0
Creates a RFXtrxDSMR asyncio protocol.
def create_rfxtrx_dsmr_protocol(dsmr_version, telegram_callback, loop=None, **kwargs):
    protocol = _create_dsmr_protocol(dsmr_version, telegram_callback, RFXtrxDSMRProtocol, loop, **kwargs)
    return protocol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfxtrx_tcp_dsmr_reader(host, port, dsmr_version,\n telegram_callback, loop=None,\n keep_alive_interval=None):\n if not loop:\n loop = asyncio.get_event_loop()\n protocol, _ = create_rfxtrx_dsmr_protocol(\n dsmr_version...
[ "0.59781486", "0.5941775", "0.5809846", "0.57470286", "0.5497206", "0.5259741", "0.5149718", "0.51136464", "0.50592905", "0.49969345", "0.49969345", "0.49862215", "0.49718344", "0.49435478", "0.49141228", "0.48773542", "0.48727253", "0.4815847", "0.4808103", "0.4757261", "0.4...
0.6974144
0
Creates a DSMR asyncio protocol coroutine using a RFXtrx serial port.
def create_rfxtrx_dsmr_reader(port, dsmr_version, telegram_callback, loop=None):
    protocol, serial_settings = create_rfxtrx_dsmr_protocol(
        dsmr_version, telegram_callback, loop=None)
    serial_settings['url'] = port
    conn = create_serial_connection(loop, protocol, **serial_settings)
    return conn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfxtrx_tcp_dsmr_reader(host, port, dsmr_version,\n telegram_callback, loop=None,\n keep_alive_interval=None):\n if not loop:\n loop = asyncio.get_event_loop()\n protocol, _ = create_rfxtrx_dsmr_protocol(\n dsmr_version...
[ "0.65169317", "0.61707854", "0.6049925", "0.59440464", "0.5872075", "0.55788165", "0.55407685", "0.55030054", "0.54878443", "0.54046285", "0.535371", "0.53511745", "0.5315994", "0.53064007", "0.52906173", "0.52837414", "0.5231402", "0.52189577", "0.5208599", "0.5160057", "0.5...
0.70299643
0
Creates a DSMR asyncio protocol coroutine using a RFXtrx TCP connection.
def create_rfxtrx_tcp_dsmr_reader(host, port, dsmr_version,
                                  telegram_callback, loop=None,
                                  keep_alive_interval=None):
    if not loop:
        loop = asyncio.get_event_loop()
    protocol, _ = create_rfxtrx_dsmr_protocol(
        dsmr_version, telegram_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rfxtrx_dsmr_reader(port, dsmr_version, telegram_callback, loop=None):\n protocol, serial_settings = create_rfxtrx_dsmr_protocol(\n dsmr_version, telegram_callback, loop=None)\n serial_settings['url'] = port\n\n conn = create_serial_connection(loop, protocol, **serial_settings)\n retur...
[ "0.65162337", "0.62807643", "0.6247426", "0.5982679", "0.5889128", "0.5806483", "0.57464033", "0.5730266", "0.566644", "0.5604687", "0.5599714", "0.5582977", "0.5576975", "0.55476266", "0.5544629", "0.5533422", "0.5531221", "0.54850537", "0.54660755", "0.5438965", "0.5341547"...
0.72272205
0
Takes two images tilted with respect to one another and tries to find overlap.
  img1 (as numpy array)
  img2 (as numpy array)
  tiltdiff (in degrees): negative, img1 is more compressed (tilted); positive, img2 is more compressed (tilted)
  picks1: list of particle picks for image 1
def getTiltedCoordinates(img1, img2, tiltdiff, picks1=[], angsearch=True, inittiltaxis=-7.2, msg=True):
    t0 = time.time()
    #shrink images
    bin = 2
    binned1 = apImage.binImg(img1, bin)
    binned2 = apImage.binImg(img2, bin)
    #apImage.arrayToJpeg(binned1, "binned1.jpg")
    #apImage.arrayToJpeg(binned2, "binned2.jpg")
    filt1 ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTiltedRotateShift(img1, img2, tiltdiff, angle=0, bin=1, msg=True):\n\n\t### untilt images by stretching and compressing\n\t# choose angle s/t compressFactor = 1/stretchFactor\n\t# this only works if one image is untilted (RCT) of both images are opposite tilt (OTR)\n\t#halftilt = abs(tiltdiff)/2.0\n\thalfti...
[ "0.65603113", "0.620282", "0.6117331", "0.6108788", "0.59726435", "0.5869135", "0.57968843", "0.5794731", "0.5768454", "0.57521975", "0.5680387", "0.5629242", "0.5622987", "0.5622519", "0.5594791", "0.55890703", "0.55723226", "0.55641454", "0.5550029", "0.5542601", "0.5538139...
0.7586962
0
Takes two images tilted with respect to one another and tries to find overlap.
  img1 (as numpy array)
  img2 (as numpy array)
  tiltdiff (in degrees): negative, img1 is more compressed (tilted); positive, img2 is more compressed (tilted)
def getTiltedRotateShift(img1, img2, tiltdiff, angle=0, bin=1, msg=True):
    ### untilt images by stretching and compressing
    # choose angle s/t compressFactor = 1/stretchFactor
    # this only works if one image is untilted (RCT) of both images are opposite tilt (OTR)
    #halftilt = abs(tiltdiff)/2.0
    halftiltrad = math.aco...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTiltedCoordinates(img1, img2, tiltdiff, picks1=[], angsearch=True, inittiltaxis=-7.2, msg=True):\n\tt0 = time.time()\n\t#shrink images\n\tbin = 2\n\tbinned1 = apImage.binImg(img1, bin)\n\tbinned2 = apImage.binImg(img2, bin)\n\t#apImage.arrayToJpeg(binned1, \"binned1.jpg\")\n\t#apImage.arrayToJpeg(binned2, \...
[ "0.7282096", "0.634349", "0.6294025", "0.6291031", "0.62548995", "0.62249696", "0.62239516", "0.6202705", "0.61410165", "0.6133832", "0.6100082", "0.608749", "0.6062438", "0.60573244", "0.6006357", "0.5999533", "0.59512717", "0.5941091", "0.59402233", "0.5899524", "0.58825576...
0.70631665
1
Rotates, then stretches or compresses an image only along the x-axis.
def transformImage(img, xfactor, angle=0, msg=False):
    if xfactor > 1.0:
        mystr = "_S"
    else:
        mystr = "_C"
    if msg is True:
        if xfactor > 1:
            apDisplay.printMsg("stretching image by "+str(round(xfactor,3)))
        else:
            apDisplay.printMsg("compressing image by "+str(round(xfactor,3)))
    ### image has swapped coordi...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform(self, previousimage):", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def _image_transfor...
[ "0.65015787", "0.64518785", "0.6217382", "0.6127605", "0.60567856", "0.60441583", "0.6004752", "0.59420073", "0.59081256", "0.5899423", "0.58687973", "0.584643", "0.5801105", "0.57971156", "0.57939214", "0.5779098", "0.57559", "0.57457566", "0.5745267", "0.5711234", "0.570488...
0.6457804
1
Checks if name either starts with '/' or it is the last component of a stream.
def check_name(self, name: str):
    if name[0] == "/" or self.check_end_streaming(name):
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_streaming(self, arg: str):\n if not arg:\n return False\n elif arg.startswith(\"sdo:\"):\n print(\"[check_streaming] File is for streaming\")\n tmp_list = arg.splitlines()\n tmp_list.pop(0)\n for x in tmp_list:\n if self....
[ "0.6286656", "0.61531717", "0.6124441", "0.6101799", "0.6014477", "0.598726", "0.58650726", "0.5787407", "0.57717556", "0.57680005", "0.5682801", "0.5670426", "0.5670426", "0.56299", "0.5595348", "0.55865085", "0.5574163", "0.55718523", "0.55560887", "0.5553138", "0.553181", ...
0.81663287
0
Checks if the file is in the streaming format.
def check_streaming(self, arg: str):
    if not arg:
        return False
    elif arg.startswith("sdo:"):
        print("[check_streaming] File is for streaming")
        tmp_list = arg.splitlines()
        tmp_list.pop(0)
        for x in tmp_list:
            if self.check_name(x) is ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_streaming(self) -> bool:\n return self.stream_status == StreamStatus.STREAMING", "def is_stream(self):\r\n return self.stream", "def is_streaming(self):\n if self.dev:\n e = ctypes.POINTER(rs_error)()\n is_streaming = lrs.rs_is_device_streaming(self.dev, ctypes...
[ "0.7130998", "0.710802", "0.6719451", "0.6635441", "0.6616881", "0.6497147", "0.6312404", "0.6073387", "0.60378087", "0.5994497", "0.5983883", "0.5969729", "0.5962305", "0.5950484", "0.5886839", "0.5875284", "0.58541244", "0.58405656", "0.58238244", "0.5818902", "0.57951295",...
0.76708
0
Checks if interest is in the buffer and returns the content object if it is.
def check_buffer(self, interest_name: str):
    if str(interest_name) in self.get_next_buffer:
        return self.get_next_buffer[str(interest_name)]
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n ...
[ "0.611804", "0.5382398", "0.53668123", "0.53386456", "0.5272251", "0.51945955", "0.5134532", "0.51309985", "0.5036701", "0.501785", "0.49557382", "0.4912878", "0.4908423", "0.48984125", "0.48739424", "0.4873825", "0.48655587", "0.48433942", "0.4822918", "0.4822918", "0.482010...
0.5786912
1
Checks if the content from content_object corresponds to the interest requested with content_name. If the content is not relevant for this computation, it is put back into self.queue_from_lower.
def check_for_correct_content(self, content_object: Content, content_name: str):
    if isinstance(content_name, Name):
        content_name = content_name.to_string()  # inner comp is a name instead of a string
    # outter comp starts with sdo:\n
    elif content_name.startswith("sdo:\n"):
        ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n ...
[ "0.5501636", "0.516063", "0.4942079", "0.48911357", "0.48543528", "0.48305845", "0.48142743", "0.47831565", "0.47767526", "0.47362074", "0.47147942", "0.464042", "0.4610563", "0.45970887", "0.45742327", "0.4517563", "0.4517236", "0.45113727", "0.44908625", "0.44677654", "0.44...
0.8077127
0
Checks if a metatitle is present (the name ends with '/streaming/p*').
def check_for_metatitle(self, interest_name: str):
    if interest_name.endswith("/streaming/p*"):
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_title(self):\n ph = _child(self.__nvXxPr.nvPr, 'p:ph')\n if ph is None:\n return False\n # idx defaults to 0 when idx attr is absent\n ph_idx = ph.get('idx', '0')\n # title placeholder is identified by idx of 0\n return ph_idx == '0'", "def valid_title...
[ "0.6491945", "0.63269085", "0.63049346", "0.60652703", "0.5991051", "0.5728946", "0.57284164", "0.56637734", "0.56167585", "0.5610711", "0.55762154", "0.55655116", "0.5517774", "0.55089664", "0.5489789", "0.54672796", "0.546499", "0.546499", "0.5458705", "0.5458184", "0.54503...
0.8413392
0
Gets the negative amount of digits after the '/streaming/p' on the single name.
def get_amount_of_digits(self, name: str):
    x = -1
    while name[x - 1].isdigit():
        x -= 1
    if name[:x].endswith("/streaming/p"):
        return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def return_episode_num(name):\n return int(name.split(\".\")[0].split(\"ep_\")[1]) # Use split to return only the episode number needed to sort the files in increasing order", "def extract_journal(name):\n match = re.search(\"\\d+\", name)\n if match != None: \n return name[:match.start()], int(...
[ "0.6315995", "0.6267976", "0.5928007", "0.58780754", "0.5774592", "0.5626339", "0.5619285", "0.559705", "0.5593243", "0.55860585", "0.5539551", "0.55384886", "0.5524113", "0.5524029", "0.55239666", "0.55148023", "0.5513144", "0.5452538", "0.5429969", "0.5423451", "0.54014844"...
0.8547603
0
Gets the name for the next part with the help of the negative amount of digits (get_amount_of_digits).
def get_following_name(self, name: Name):
    name = str(name)
    amount_of_digits = self.get_amount_of_digits(name)
    number = int(name[amount_of_digits:])
    number += 1
    following_name = name[:amount_of_digits]
    following_name += str(number)
    return following_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_partname(self):\n return '{0:03.0f}{1}'.format(self.lon1, self.part)", "def get_suffix(self):\n return '%s%d' % (self.disk.devletters(), self.get_index() + 1)", "def find_next_name(self, etfName):\n etfName = etfName.split('-')[0]\n max_n = max(list(map(lambda x: int(x.s...
[ "0.6544332", "0.63715714", "0.61771786", "0.61768734", "0.6149106", "0.60808223", "0.6070014", "0.6048259", "0.6003679", "0.59391767", "0.58499116", "0.5833618", "0.58285874", "0.5824863", "0.58153534", "0.58108354", "0.57850534", "0.577331", "0.57347023", "0.57323456", "0.57...
0.6799442
0
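Taken together, get_amount_of_digits and get_following_name above increment the trailing part number of a streaming name. A standalone sketch of the same logic on a plain string (the name value is illustrative, not taken from the dataset):

def get_amount_of_digits(name: str):
    # Walk backwards over trailing digits; return their (negative) start index.
    x = -1
    while name[x - 1].isdigit():
        x -= 1
    if name[:x].endswith("/streaming/p"):
        return x

def get_following_name(name: str):
    x = get_amount_of_digits(name)
    return name[:x] + str(int(name[x:]) + 1)

print(get_following_name("/repo/func/streaming/p9"))  # /repo/func/streaming/p10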
Gets content from the queue from lower and checks if the result is a list with the packetid as the first entry and the content object as the second entry.
def get_content_from_queue_from_lower(self):
    queue_from_lower_entry = self.queue_from_lower.get()
    if isinstance(queue_from_lower_entry, list):
        if isinstance(queue_from_lower_entry[1], Nack):
            print("NACK:", queue_from_lower_entry[1].interest, queue_from_lower_entry[1].reason)
            ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n ...
[ "0.555139", "0.5506232", "0.54767245", "0.5419503", "0.53994626", "0.5372635", "0.52298725", "0.5192108", "0.518906", "0.51844054", "0.51400787", "0.51395917", "0.51212037", "0.5103552", "0.5094152", "0.50883275", "0.5030852", "0.4973149", "0.4923798", "0.4916206", "0.4909990...
0.7497363
0
Handles getting the content. Checks if the name is present in the buffer; otherwise gets it from the queue_from_lower. If the result is a metatitle, the following part is retrieved.
def get_content(self, next_name: str):
    buffer_output = self.check_buffer(next_name)
    if buffer_output:
        print("[get_next_content] Resulting content object out of the buffer:", buffer_output.name, buffer_output.content)
        resulting_content_object = buffer_output
        result = b...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content_from_queue_from_lower(self):\n queue_from_lower_entry = self.queue_from_lower.get()\n if isinstance(queue_from_lower_entry, list):\n if isinstance(queue_from_lower_entry[1], Nack):\n print(\"NACK:\", queue_from_lower_entry[1].interest, queue_from_lower_entry[...
[ "0.6432524", "0.59715605", "0.57843566", "0.575296", "0.57495564", "0.55204153", "0.5494455", "0.5369599", "0.53649986", "0.52783656", "0.5223427", "0.5204933", "0.5191491", "0.5146155", "0.51180285", "0.5115465", "0.51074815", "0.50316614", "0.5002304", "0.50022376", "0.4946...
0.69451576
0
get next for the single name case. Before returning the result, the next name is already put into the queue_to_lower. The first name is the only one which is put into the queue immediately before requesting.
def get_next_single_name(self, arg: str):
    current_name = arg
    if self.get_next_part_counter == 0:
        self.sent_interests[str(current_name)] = True
        self.queue_to_lower.put((self.packetid, Interest(current_name)))
    result = self.get_content(current_name)
    if self.check_en...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_multiple_names(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n # Only first call puts two names (current_name and nex...
[ "0.6534286", "0.6476175", "0.60643685", "0.59021914", "0.5780743", "0.5695691", "0.5597527", "0.5528981", "0.5254184", "0.5216752", "0.5201601", "0.5185522", "0.5162252", "0.51276624", "0.51116544", "0.5095773", "0.5073627", "0.50501657", "0.50407803", "0.5026237", "0.5020459...
0.66704434
0
get next for the multiple name case. Before returning the result, the next name is already put into the queue_to_lower. The first name is the only one which is put into the queue immediately before requesting.
def get_next_multiple_names(self, arg: str):
    self.initialize_get_next_multiple(arg)
    if self.pos_name_list_multiple < len(self.name_list_multiple)-1:
        current_name = self.name_list_multiple[self.pos_name_list_multiple]
        # Only first call puts two names (current_name and next_name) i...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_single_name(self, arg: str):\n current_name = arg\n if self.get_next_part_counter == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n if se...
[ "0.62946945", "0.6215512", "0.6121404", "0.6006907", "0.56078154", "0.553223", "0.5502037", "0.53517884", "0.53446215", "0.5037705", "0.502239", "0.50016195", "0.4936732", "0.49000195", "0.48903698", "0.48809487", "0.4872933", "0.48728496", "0.48441896", "0.4841367", "0.48091...
0.7067897
0
get_next for the classic multiple name case. The name only gets put in the queue_to_lower before requesting it.
def get_next_multiple_names_classic(self, arg: str):
    self.initialize_get_next_multiple(arg)
    if self.pos_name_list_multiple < len(self.name_list_multiple)-1:
        current_name = self.name_list_multiple[self.pos_name_list_multiple]
        self.sent_interests[str(current_name)] = True
        ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_multiple_names(self, arg: str):\n self.initialize_get_next_multiple(arg)\n if self.pos_name_list_multiple < len(self.name_list_multiple)-1:\n current_name = self.name_list_multiple[self.pos_name_list_multiple]\n # Only first call puts two names (current_name and nex...
[ "0.73124295", "0.6605842", "0.64387214", "0.63707966", "0.6359002", "0.633987", "0.59783775", "0.5580597", "0.5580597", "0.5559593", "0.5515206", "0.5506333", "0.54304075", "0.53769016", "0.5359713", "0.5350913", "0.53147024", "0.5285153", "0.5285153", "0.5285153", "0.521028"...
0.6632271
1
Transform the inner name to correct syntax so it can be parsed. Replaces the first and last '=' with a '"' and the '#' with an '_'.
def transform_inner(self, arg: str):
    first = arg.find("=")
    last = len(arg) - arg[::-1].find("=") - 1
    hash = arg.find("#")
    arg = list(arg)
    arg[first] = '"'
    arg[last] = '"'
    arg[hash] = "_"
    arg = "".join(arg)
    return arg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_name():\n def _extract_name(quoted_name):\n return e.String(quoted_name.subexpression.name)\n yield (\"(λ &[name] . str)\", _extract_name)", "def make_python_name(self, name):\n # FIXME see cindex.SpellingCache\n for k, v in [('<', '_'), ('>', '_'), ('::', '__')...
[ "0.6225861", "0.6090764", "0.60897714", "0.59808266", "0.59418184", "0.5897039", "0.5874254", "0.5845735", "0.582517", "0.5824621", "0.58186024", "0.5805057", "0.57880336", "0.57758003", "0.57509226", "0.57499963", "0.57029814", "0.57001", "0.5684109", "0.56642014", "0.566280...
0.7548356
0
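To see transform_inner above in action: for an inner-computation name such as '/func/f(=/data/obj#part=)' (an illustrative string, not taken from the dataset), the first and last '=' become quotes and the '#' becomes an underscore. A standalone sketch:

def transform_inner(arg: str):
    first = arg.find("=")
    last = len(arg) - arg[::-1].find("=") - 1
    hash_pos = arg.find("#")
    chars = list(arg)
    chars[first] = '"'
    chars[last] = '"'
    chars[hash_pos] = "_"
    return "".join(chars)

print(transform_inner('/func/f(=/data/obj#part=)'))  # /func/f("/data/obj_part")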
Encodes the name components so it can be handled from the lower layers.
def encode_name_components(self, name: Name):
    first_quot = False
    new_component = ""
    for component in name.components:
        if '"' in str(component):
            if first_quot is True:
                new_component += str(component)
                first_quot = False
            ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode_name(param):\n sname = param\n # replace all kind of unwanted chars in a python dictname.\n sname = sname.strip()\n for ch in ['/', ' + ', ' ', '#', '&', '-', ',', '+', ]:\n if ch in sname:\n sname = sname.replace(ch, \"_\")\n\n # replace brackets\n for ch in ['(', ')...
[ "0.6692533", "0.6645358", "0.6490253", "0.64649105", "0.63855964", "0.62322897", "0.6221056", "0.6217053", "0.61058635", "0.6068496", "0.6068496", "0.6068496", "0.6068496", "0.6068496", "0.6014892", "0.60105383", "0.6001321", "0.6001321", "0.6001321", "0.59878004", "0.5968081...
0.75535154
0
Handles the inner computation part from get_next. Transforms and encodes the name and puts it into the queue_to_lower and calls get_content() to retrieve the result.
def get_next_inner_computation(self, arg: str):
    print("[get_next - inner computation] starts here.")
    # Start of transformation and component encoding
    name_str = self.transform_inner(arg)
    # print("[get_next - inner computation] after transform:", arg)
    name_after_transform = Name(n...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_content(self, next_name: str):\n buffer_output = self.check_buffer(next_name)\n if buffer_output:\n print(\"[get_next_content] Resulting content object out of the buffer:\", buffer_output.name, buffer_output.content)\n resulting_content_object = buffer_output\n ...
[ "0.6603064", "0.61165065", "0.5809198", "0.5797553", "0.55958676", "0.51660335", "0.50432974", "0.50409937", "0.49529454", "0.48004133", "0.47770488", "0.47690475", "0.47548085", "0.47340557", "0.47298717", "0.470084", "0.46876696", "0.46718737", "0.4669386", "0.46650812", "0...
0.7423148
0
The get_next function which is used for the named functions. This function handles getting the desired content according to its case. Two cases are possible: the multi name case for getting the next part if the length of the stream is given, and the handling of an inner computation where the name has to be changed to the c...
def get_next(self, arg: str):
    if self.check_for_singlename(arg):
        if self.classic is False:
            return self.get_next_single_name(arg)
        else:
            return self.get_next_single_name_classic(arg)
    if self.check_streaming(arg):
        if self.classic is False:
            ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_single_name(self, arg: str):\n current_name = arg\n if self.get_next_part_counter == 0:\n self.sent_interests[str(current_name)] = True\n self.queue_to_lower.put((self.packetid, Interest(current_name)))\n result = self.get_content(current_name)\n if se...
[ "0.68529546", "0.6562418", "0.6089761", "0.6070447", "0.6034299", "0.5983131", "0.5961039", "0.5956156", "0.5956156", "0.59460646", "0.58795226", "0.58600503", "0.5786577", "0.5783287", "0.57088214", "0.57027763", "0.57027763", "0.5626713", "0.5601194", "0.5573359", "0.549744...
0.73879164
0
The write_out function which is used for the named functions. Stores the content object as parts into the content store. Before the first element is stored, a meta title is stored into the content store so the node that receives this content object can detect and start the stream.
def write_out(self, content_content: str):
    print("[write_out] Computation name: ", self.comp_name)
    # meta_title_content object creation to return as a first part
    if self.write_out_part_counter < 0:
        metatitle_content = Content(self.comp_name, "sdo:\n" + str(self.comp_name) + "/streami...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, content):\n ...", "def write(self, content):\n pass", "def write(self, out):", "def last_write_out(self):\n end_name = self.comp_name\n self.write_out_part_counter += 1\n end_name += \"/streaming/p\" + str(self.write_out_part_counter)\n end_stream...
[ "0.6725428", "0.63619083", "0.6285846", "0.6174999", "0.59593", "0.58944917", "0.58546984", "0.584093", "0.5826792", "0.58217865", "0.58128077", "0.58128077", "0.5803597", "0.5695644", "0.5638194", "0.5635738", "0.5605294", "0.55881625", "0.55847704", "0.5579301", "0.5565279"...
0.78408104
0
The last_write_out function which is used for the named functions.
def last_write_out(self):
    end_name = self.comp_name
    self.write_out_part_counter += 1
    end_name += "/streaming/p" + str(self.write_out_part_counter)
    end_streaming_content_object = Content(end_name, "sdo:endstreaming")
    self.cs.add_content_object(end_streaming_content_object)
    ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLastFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def lastsave(self):\r\n return self.format_inline('LASTSAVE')", "def hook_output(self, name: str, func: Callable[[\"self.value_type\"], None]):\n self.write_hooks[name] = func", "def get_output(self, last = 1):\...
[ "0.6481691", "0.58430004", "0.5758233", "0.5632203", "0.5593506", "0.54959834", "0.5467705", "0.54547566", "0.5439232", "0.5418257", "0.53904366", "0.5386019", "0.5384885", "0.5378884", "0.53705084", "0.5370292", "0.53638005", "0.5259693", "0.5248828", "0.52414453", "0.522801...
0.62631506
1
Streaming function for inner nodes. Runs get_next and writes out the result until end of stream is reached.
def write_out_on_get_next(self, arg: Name):
    res = self.get_next(arg)
    while res and self.check_end_streaming(res) is False:
        self.write_out(res)
        res = self.get_next(arg)
    self.last_write_out()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _next(self):\n node = self.head\n while node != None:\n yield node.data\n node = node.right", "def next():", "def next():", "def __next__(self):\n return next(self.stream_chunker)", "def __next__(self):\n return next(self.buffered_streamer)", "def __i...
[ "0.6188734", "0.61485165", "0.61485165", "0.6088194", "0.6049867", "0.5947307", "0.5947307", "0.5915881", "0.5863198", "0.5844089", "0.5831229", "0.58085644", "0.57978195", "0.57714903", "0.5739277", "0.5733169", "0.5733169", "0.5733169", "0.5733169", "0.5733169", "0.5733169"...
0.66700655
0
Perform timeout. Resets the counter of API calls made since the last timeout, as well as the session.
def doTimeout(self):
    log.info('Executed {} calls until timeout'.format(self.calls_to_timeout))
    self.calls_to_timeout = 0
    self.resetSession()
    time.sleep(self.timeout)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_timeout(self):\n self.logger.debug('id=%d, Session timed out!', self.id)\n self.close(SessionCloseErrorCode.SESSION_DIED)", "def reset_time_out(self):\n self.reconnect()\n self.reconnect_params()", "def timeout(self):\n self._status_update(\"Pyloton: Timeout\")\n ...
[ "0.644774", "0.6438548", "0.6436007", "0.631921", "0.6305379", "0.62621534", "0.6204743", "0.61857486", "0.6181763", "0.61060214", "0.6093841", "0.6060867", "0.60562134", "0.5959294", "0.59495", "0.59370124", "0.59354204", "0.59328425", "0.59189075", "0.5898101", "0.58769494"...
0.76772344
0
Gets a new API key, configured with its corresponding access rights. If all keys are already used, start from the first one.
def getNextApiKey(self):
    self.resetSession(get_new_api_key=False)
    if self.key_idx == len(self.api_keys):
        self.key_idx = 0
    self.session.auth = (self.api_keys[self.key_idx][0], '')
    self.number_of_max_req = self.api_keys[self.key_idx][1]
    self.key_idx += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def regenerate_API_key(self) -> None:\n session = create_session()\n new_key = generate_random_string(24)\n # Check if there is any user with exact same API key as just generated\n if new_key not in session.query(User.API_KEY).all():\n self.API_KEY = new_key\n sess...
[ "0.6455453", "0.63159895", "0.6125672", "0.6044153", "0.60338324", "0.5977004", "0.5949956", "0.5945266", "0.5931769", "0.5898856", "0.5832408", "0.5827095", "0.58270764", "0.5792817", "0.57863086", "0.5750034", "0.5749857", "0.57467425", "0.5730262", "0.5726184", "0.57043725...
0.6538258
0
Number of additions to favorites.
def count_favorite(self, obj): return obj.recipe_fav.count()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_favourites_page_count(self, page, page_size):\n total_roms = Roms(self._connection).page_size(page_size).page_offset(page).get_count()\n return int(float(total_roms) / page_size)", "def get_number_watched(self):\n movies_watched = 0\n for movie in self.movies:\n if ...
[ "0.69606006", "0.6493855", "0.6325194", "0.6325194", "0.6303647", "0.62787765", "0.62787765", "0.62787765", "0.62787765", "0.62150556", "0.6180549", "0.6174093", "0.61588585", "0.6152416", "0.6150293", "0.61248976", "0.6096311", "0.6087514", "0.60759777", "0.60732627", "0.606...
0.7623228
0
Move files/folders from the source path to the show's directory. The directory is specified by the user or, if possible, loaded from data.json.
def move_files():
    if "defaultdirectory" in data:
        print("\n*** Default video source directory:", data["defaultdirectory"])
        srcdir = input("Enter '1' to use default video source directory\n"
                       "Otherwise, please enter the full path where your videos are located.\n"
                       ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n ...
[ "0.6254314", "0.60398024", "0.59097177", "0.58858806", "0.58225375", "0.5794603", "0.5771358", "0.5735009", "0.5731622", "0.5720588", "0.569069", "0.5670436", "0.56292623", "0.5626693", "0.56261504", "0.5596827", "0.5595239", "0.5590468", "0.5582738", "0.5580043", "0.55735177...
0.6373189
0
Add directories to the data dictionary. The user provides keywords for each show and the directory for that show. Keywords are used so 'X.Files', 'XFiles', and 'X Files' will all be matched by the keywords 'X' and 'Files'
def add_directory():
    showKeywords = input("\nInput mandatory keywords for the show title separated by a space.\n"
                         "Example: X files\n").lower().strip()
    while re.search('[^A-Za-z0-9 ]+', showKeywords) or showKeywords.startswith('defaultdirectory'):
        showKeywords = input("Inval...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_directory_entries(self, key, config):\n # Initialize key variables\n updated = False\n dir_dict = {\n 'log_directory': 'log',\n 'ingest_cache_directory': 'cache',\n }\n directory = general.root_directory()\n\n # Setup the key value to a kn...
[ "0.55344474", "0.55103064", "0.5479557", "0.54403985", "0.54251444", "0.5390415", "0.53592753", "0.53480476", "0.53404677", "0.5294248", "0.5284923", "0.5277684", "0.5273796", "0.5273391", "0.52395517", "0.52274686", "0.52246755", "0.5173277", "0.51578385", "0.5152383", "0.51...
0.74111634
0
Remove a directory from the data dictionary. Removes the show/directory from the list and updates data.json
def remove_directory():
    count = 1
    # Creates a dict to map user selection numbers to keys of the data dict
    deleteDict = {}
    print('\n')
    for key in sorted(data):
        if not key.startswith('defaultdirectory'):
            print("{}. {} --> {}".format(count, key, data[key]))
            delet...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def anonymize(data_dir):\n\n for root, dirs, files in os.walk(dst_data_dir, topdown=False):\n for dir_name in sorted(dirs):\n dir_path = os.path.join(root, dir_name)\n json_path = os.path.join(root, dir_name, 'data.json')\n print(json_path)\n\n with open(json_p...
[ "0.6780833", "0.62113416", "0.6203642", "0.5911219", "0.58356", "0.57853866", "0.57088006", "0.5708134", "0.56424284", "0.5641765", "0.56255066", "0.5620272", "0.56011605", "0.5587325", "0.55452627", "0.5542413", "0.5533419", "0.55274105", "0.5480318", "0.54572684", "0.544358...
0.679928
0
The snake eats the fruit; if the fruit list is empty, the function to create fruit is called
def snez(seznam_ovoce, seznam_tahu, souradnice, radky, sloupce):
    seznam_tahu.append(souradnice)  # eat the fruit
    seznam_ovoce.pop(seznam_ovoce.index(souradnice))  # remove it from the fruit list
    if (len(seznam_tahu)) == radky * sloupce:  # not a single free square left on the board, game over
        ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cliquer_sur_unité(self):", "def mezclar_bolsa(self):", "def actualizar_velocidad(self):\r\n pass", "def uvozi(self, encoding=\"UTF-8\"):\n insert = self.stroskovno_mesto.dodajanje(stevilo=1)\n super().uvozi(encoding=encoding, insert=insert)", "def inscricao(self):\n\n return...
[ "0.62168837", "0.6099113", "0.60562164", "0.5946035", "0.5821264", "0.5761619", "0.5727161", "0.5719703", "0.5699508", "0.5662515", "0.56390893", "0.5636412", "0.5547472", "0.55309504", "0.55096954", "0.5478416", "0.5466994", "0.54400057", "0.54392844", "0.54176474", "0.53980...
0.632577
0
Ensure client_authentication_required() is properly called
def test_authentication_required(self):
    self.auth.validate_token_request(self.request)
    self.mock_validator.client_authentication_required.assert_called_once_with(self.request)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_replace_o_auth_client_authorization(self):\n pass", "def test_patch_o_auth_client_authorization(self):\n pass", "def requires_auth(self):\n return True", "def _require_login(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + str(self.token))", "def test_read...
[ "0.70707756", "0.7026512", "0.6958877", "0.6917434", "0.69145656", "0.68942106", "0.68654615", "0.6813348", "0.6727116", "0.6723939", "0.6688306", "0.6673083", "0.6637916", "0.6620205", "0.66194654", "0.6589787", "0.6528338", "0.6500637", "0.64950204", "0.64694965", "0.644229...
0.7124051
0
Ping (GET /ping). A simple ping to check the health of the image server.
def ping():
    return json_response({
        'ping': 'pong',
        'version': __version__,
        'imgapi': False,
    })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ping():\n\treturn HTTPResponse(status=200)", "async def ping(self):\n uri = \"/fapi/v1/ping\"\n success, error = await self.request(\"GET\", uri)\n return success, error", "def ping():\n return ping_response()", "def ping():\n return ping_response()", "def ping():\n return...
[ "0.7904067", "0.78574514", "0.7829969", "0.7829969", "0.7829969", "0.7829969", "0.77984995", "0.7741611", "0.7618637", "0.74545103", "0.7417597", "0.72806305", "0.71756196", "0.7174422", "0.71716267", "0.7128376", "0.7119203", "0.71128374", "0.70566857", "0.7026292", "0.70162...
0.7998161
0
Select a move to make from the given board position (game_state). The algorithm uses a combination of a neural network and a Monte Carlo tree search to search the decision tree stemming from the given board position. It returns the move associated with the most visited branch stemming from the root. This method creates...
def select_move(self, game_state, return_visit_counts=False):
    # Start with a tree consisting of a root node only. The root node
    # is associated with the given board position.
    root = self.create_node(game_state)

    # If no legal moves can be made from the given board positi...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mcts_search(self, state):\n assert state.current_player() == self.player\n root = SearchNode(None, 1)\n for _ in range(self.max_simulations):\n visit_path, working_state = self._apply_tree_policy(root, state)\n if working_state.is_terminal():\n node_value = working_state.player_return...
[ "0.6655577", "0.6577943", "0.6247116", "0.62271196", "0.6220835", "0.6209169", "0.6201025", "0.61390984", "0.6119295", "0.6110233", "0.60752344", "0.6050629", "0.6048138", "0.6048097", "0.60161257", "0.60156405", "0.5996535", "0.59938693", "0.5979693", "0.59731513", "0.593674...
0.8414711
0
This method creates a tree node for the given board position and adds it to the tree structure. It will be linked to the given parent node and the given move is stored as the last move taken to produce the given game state. This is useful for traversing and updating the tree structure when other nodes are added to it.
def create_node(self, game_state, move=None, parent=None):
    # Pass the game state to the neural network to both evaluate
    # how good the board position is and get the prior probability
    # distribution over possible next moves (ie the predicted distribution
    # of visit counts).
    ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AddChild(self, move, state, isGameOver):\n node = Node(Move = move, state = state, isGameOver = isGameOver, parent = self)\n self.untried_moves.remove(move) # this move is now not available\n self.child.append(node)\n return node", "def create_tree(self, tree):\n # print(s...
[ "0.7166232", "0.7162426", "0.6847326", "0.6512595", "0.6462386", "0.6367318", "0.6355977", "0.6332234", "0.6067168", "0.6048204", "0.6035471", "0.60014105", "0.59406286", "0.5914846", "0.5905294", "0.5870249", "0.5810087", "0.57710886", "0.57569855", "0.57233304", "0.5707177"...
0.7931713
0
This method evaluates the current bot against a given opponent bot by letting them play a number of games against each other. The number of games played is specified by 'num_games'. A random starting position for the games is generated if a maximum number of white and black pieces is given by the parameters 'num_white_...
def evaluate_against_bot(self, opponent_bot, num_games,
                         num_white_pieces=None,
                         num_black_pieces=None,
                         max_num_of_turns=1000):
    zero_bot_player = 1
    score = 0
    num_games_won_as_black = 0
    num_games_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_against_rand_bot(self, num_games, \n num_white_pieces = None, \n num_black_pieces = None):\n print('Evaluating against random bot')\n results = self.evaluate_against_bot(self.rand_bot, num_games,\n ...
[ "0.74361", "0.6622078", "0.65925574", "0.65638083", "0.6523164", "0.6452009", "0.64427716", "0.6395758", "0.6380628", "0.6225637", "0.6210784", "0.6170021", "0.61530846", "0.614373", "0.61072975", "0.60683894", "0.6060449", "0.6058013", "0.6040808", "0.60289985", "0.602816", ...
0.85560715
0
Method to save the attributes of the current bot and the weights of its neural network under the directory given by the parameter 'prefix'
def save_bot(self, prefix="model_data/"):
    network_load_command = self.network.save_network(prefix)
    attributes = {"num_rounds": self.num_rounds,
                  "c": self.c,
                  "alpha": self.alpha,
                  "loss_history": self.loss_history,
                  ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, prefix):\n model_file = prefix + \".json\"\n weight_file = prefix + \".h5\"\n json.dump(self.model.to_json(), open(model_file, \"w\"))\n self.model.save_weights(weight_file)\n return self", "def saveWeights(self, basename, generation):\n for i,wt in enumer...
[ "0.695643", "0.67557275", "0.6676642", "0.65116364", "0.64193225", "0.64169306", "0.639074", "0.6336839", "0.6331544", "0.6298259", "0.6251713", "0.618245", "0.618245", "0.61786866", "0.61513954", "0.614478", "0.61215967", "0.6093279", "0.60930675", "0.6048004", "0.6039565", ...
0.7971842
0
Method to save the current bot as the 'old_bot' used in bot evaluation.
def save_as_old_bot(self, prefix="model_data/old_bot/"):
    self.save_bot(prefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_old_bot(self, prefix=\"model_data/old_bot/\"):\n self.load_bot(prefix)", "def evaluate_against_old_bot(self, num_games,\n num_white_pieces = None, \n num_black_pieces = None,\n prefix=\"model_data/old_...
[ "0.62592715", "0.60499334", "0.5963151", "0.57505435", "0.5727542", "0.5663036", "0.55086035", "0.5487756", "0.54278994", "0.53914005", "0.5381366", "0.53616077", "0.5353202", "0.5349034", "0.5345035", "0.53073835", "0.5297173", "0.52896994", "0.5284209", "0.52690077", "0.524...
0.7734785
0
Method to load the old_bot for evaluating the current bot.
def load_old_bot(self, prefix="model_data/old_bot/"):
    self.load_bot(prefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_against_old_bot(self, num_games,\n num_white_pieces = None, \n num_black_pieces = None,\n prefix=\"model_data/old_bot/\"):\n print('Evaluating against old bot')\n old_bot = ZeroBot(1)\n ...
[ "0.67853993", "0.63987565", "0.5985643", "0.54981875", "0.5478145", "0.53499985", "0.53470945", "0.5338737", "0.5289171", "0.5090192", "0.5078427", "0.5064622", "0.50458825", "0.50444275", "0.50434035", "0.49430028", "0.4917829", "0.49122745", "0.49122745", "0.4912221", "0.49...
0.79424274
0
Initialize a BigBiGAN from the given TF Hub module.
def __init__(self, module_path='https://tfhub.dev/deepmind/bigbigan-resnet50/1', allow_growth=True):
    self._module = hub.Module(module_path)

    # encode graph
    self.enc_ph = self.make_encoder_ph()
    self.z_sample = self.encode_graph(self.enc_ph)
    self.z...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, BurnExpFireP, StartNNodes, ForwBurnProb, BackBurnProb, DecayProb, Take2AmbasPrb, OrphanPrb):\n _snap.TFfGGen_swiginit(self, _snap.new_TFfGGen(BurnExpFireP, StartNNodes, ForwBurnProb, BackBurnProb, DecayProb, Take2AmbasPrb, OrphanPrb))", "def create_tokenizer_from_hub_module(self):\n ...
[ "0.5314377", "0.52984065", "0.52972853", "0.5297207", "0.5283063", "0.52782154", "0.5270465", "0.5240721", "0.51944226", "0.51933235", "0.51820195", "0.51775867", "0.516507", "0.51644117", "0.51597863", "0.51499027", "0.5143408", "0.5113054", "0.51060724", "0.50795203", "0.50...
0.6927771
0
Creates a tf.placeholder with the dtype & shape of generator inputs.
def make_generator_ph(self):
    info = self._module.get_input_info_dict('generate')['z']
    return tf.placeholder(dtype=info.dtype, shape=info.get_shape())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_placeholders(self):\n # \"None\" means the batches may have a variable batch size and length.\n self.x = tf.placeholder(tf.int64, shape=[None, None])", "def placeholder_input():\r\n source_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, 1, S_ENGLISH, T_ENGLISH), name='source')\r\...
[ "0.73510855", "0.6947808", "0.68811524", "0.6843197", "0.6786487", "0.6739757", "0.67395514", "0.6737801", "0.6683461", "0.65602267", "0.6556576", "0.65447986", "0.65442115", "0.654158", "0.6479429", "0.64442724", "0.6391434", "0.63816166", "0.63666093", "0.6313228", "0.62930...
0.7611539
0
Creates a tf.placeholder with the dtype & shape of encoder inputs.
def make_encoder_ph(self):
    info = self._module.get_input_info_dict('encode')['x']
    return tf.placeholder(dtype=info.dtype, shape=info.get_shape())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_placeholders(self):\n\n\n # encoder part\n self._enc_batch = tf.placeholder(tf.int32, [config.batch_size, None], name='enc_batch')\n self._enc_lens = tf.placeholder(tf.int32, [config.batch_size], name='enc_lens')\n self._enc_padding_mask = tf.placeholder(tf.float32, [config.batch_size, None], ...
[ "0.67486584", "0.6601982", "0.6598257", "0.6536034", "0.6417243", "0.6407907", "0.6378354", "0.6350807", "0.63072497", "0.6267904", "0.62510544", "0.6248996", "0.62423813", "0.62370676", "0.62248755", "0.62113446", "0.6210726", "0.6207823", "0.62027955", "0.61931306", "0.6176...
0.767189
0
Interface to numba-jitted Stokes kernels
def Stokes_Kernel_Apply_numba(source, target, forces=None, dipstr=None, dipvec=None, weights=None):
    weights = 1.0 if weights is None else weights
    weighted_weights1 = 0.25*weights/np.pi
    weighted_weights2 = weights/np.pi
    sx = source[0]
    sy = source[1]...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tuto_kernel_overview(optimize=True, plot=True):\r\n ker1 = GPy.kern.rbf(1) # Equivalent to ker1 = GPy.kern.rbf(input_dim=1, variance=1., lengthscale=1.)\r\n ker2 = GPy.kern.rbf(input_dim=1, variance = .75, lengthscale=2.)\r\n ker3 = GPy.kern.rbf(1, .5, .5)\r\n\r\n print ker2\r\n\r\n if plot:\r\...
[ "0.61699444", "0.5723231", "0.5606339", "0.55607325", "0.5518511", "0.55096704", "0.55096334", "0.5484832", "0.5463777", "0.5462731", "0.54323053", "0.54275984", "0.5419684", "0.5415917", "0.54151106", "0.54038984", "0.53997", "0.5388906", "0.53794676", "0.5318565", "0.530233...
0.63298035
0
Returns an op to increase the eval step for TPU evaluation.
def _increase_eval_step_op(iterations_per_loop):
    eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
    # Estimator evaluate increases 1 by default. So, we increase the difference.
    return state_ops.assign_add(
        eval_step,
        math_ops.cast(iterations_per_loop - 1, dtype=eval_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def TpuEvalStep(self, *args):\n with tf.name_scope('tpu_eval'):\n self._model.ConstructFPropGraph()\n per_step_eval_metrics = self._eval_metrics.PackStepMetricsForAccumulation(\n self._task.eval_metrics, args)\n return [x + y for x, y in zip(per_step_eval_metrics, args)]", "def eval_st...
[ "0.64432496", "0.63263434", "0.6229572", "0.60930693", "0.6043687", "0.5865047", "0.58446443", "0.5816877", "0.55298465", "0.5529144", "0.54990685", "0.54590845", "0.5441091", "0.54245347", "0.5421626", "0.5421626", "0.5403084", "0.5372202", "0.5353022", "0.5350679", "0.53279...
0.7544818
0
Creates a validated `TPUEstimatorSpec` instance.
def __new__(cls, mode,
            predictions=None,
            loss=None,
            train_op=None,
            eval_metrics=None,
            export_outputs=None,
            scaffold_fn=None,
            host_call=None):
    host_calls = {}
    if eval_metrics is not None:
        host_ca...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_estimator_spec(self):\n host_calls = {}\n if self.eval_metrics is not None:\n host_calls['eval_metrics'] = self.eval_metrics\n if self.host_call is not None:\n host_calls['host_call'] = self.host_call\n host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)\n eval_metric_o...
[ "0.58872473", "0.56756306", "0.5586498", "0.5519904", "0.54025954", "0.5400498", "0.53641117", "0.53511024", "0.5270698", "0.52680284", "0.52115655", "0.51035535", "0.5102771", "0.50725776", "0.50706977", "0.5068895", "0.5065051", "0.50586456", "0.5047727", "0.5010918", "0.50...
0.6168227
0
Creates an equivalent `EstimatorSpec` used by CPU train/eval.
def as_estimator_spec(self):
    host_calls = {}
    if self.eval_metrics is not None:
        host_calls['eval_metrics'] = self.eval_metrics
    if self.host_call is not None:
        host_calls['host_call'] = self.host_call
    host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
    eval_metric_ops = None
    ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_estimator_spec(features, labels, mode):\n if mode not in {\"train\", \"infer\", \"eval\"}:\n raise ValueError('mode should be in {\"train\", \"infer\", \"eval\"}')\n\n logits = get_logits(features)\n preds = tf.argmax(logits, axis=-1)\n probs = tf.nn.softmax(logits, axis=-1)\n predict...
[ "0.653255", "0.6415198", "0.62002885", "0.61464673", "0.60211784", "0.59734035", "0.5882333", "0.58066183", "0.57011044", "0.5623417", "0.55480283", "0.55394053", "0.5519858", "0.5499919", "0.54941463", "0.54804045", "0.5472486", "0.5453627", "0.5434046", "0.5416209", "0.5372...
0.7923026
0
Log an infeed or outfeed error. This logs a short error message immediately, and schedules a timer to emit the full stack trace and error message after a short period of time. If the main session has terminated by the time the timer triggers, we assume the real source of the error was from the main session and avoid em...
def _log_error(self, session, error):
    logging.warning(
        '\n\n'
        'Error occurred during infeed/outfeed. This may be due to a compile '
        'error in the main session. Waiting for a short time for the main '
        'session to come back.\n\n%s', error)
    self._feed_error = traceback.format_exc...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logError(self, text):\n time = datetime.now().strftime(\"%H:%M:%S \")\n self.log(time + \"(ERR):\\t\", text)", "def error(self, *args):\n\n self.log(\"ERROR:\", args)\n if not self.transport.connected:\n Timer(5, connect(self.host, self.port)).register(self)", "def er...
[ "0.6038217", "0.5916494", "0.5900259", "0.55589175", "0.5555639", "0.5525812", "0.55219215", "0.5496659", "0.54817575", "0.5462014", "0.5454981", "0.54069924", "0.5402037", "0.53926015", "0.5391754", "0.5385907", "0.5369441", "0.53470963", "0.5342299", "0.5338065", "0.5317107...
0.736192
0
A fn that returns enqueue_ops.
def enqueue_ops_fn():
    num_cores_per_host = ctx.num_of_cores_per_host
    per_host_sharded_inputs = []
    for core_ordinal in range(num_cores_per_host):
        with ops.name_scope('ordinal_%d' % (core_ordinal)):
            inputs = _Inputs.from_input_fn(input_fn())
            if inputs.is_dataset:
                raise TypeErro...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_pe...
[ "0.6918618", "0.67096776", "0.66286814", "0.6531821", "0.64277637", "0.61345917", "0.6119504", "0.6111642", "0.6108845", "0.61017036", "0.6042078", "0.6023918", "0.60035884", "0.59705424", "0.58645797", "0.58212256", "0.58058554", "0.57611626", "0.56956524", "0.56872046", "0....
0.6903641
1
Generates the per_host enqueue ops.
def enqueue_ops_fn():
    control_deps = []
    per_host_sharded_inputs = []
    num_replicas_per_host = ctx.num_of_replicas_per_host
    with ops.device(device):
        if not inputs.is_dataset:
            raise TypeError('`input_fn` must return a `Dataset` for this mode.')
        for _ in range(num_replicas_per_host):
            ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,\n inputs_structure_recorder):\n captured_infeed_queue = _CapturedObject()\n\n def enqueue_ops_fn():\n \"\"\"A fn returns enqueue_ops.\"\"\"\n num_cores_per_host = ctx.num_of_cores_per_host\n per_hos...
[ "0.65835243", "0.6533586", "0.6291134", "0.62328285", "0.6117832", "0.57363117", "0.5343912", "0.53310704", "0.53279525", "0.5282919", "0.5259834", "0.5183685", "0.5105475", "0.50972563", "0.50761807", "0.5038841", "0.5036618", "0.5018514", "0.5013788", "0.49980277", "0.49547...
0.6555443
1
Validates and records the structure of `features` and `labels`.
def validate_and_record_structure(self, features, labels, signals=None):
    def _extract_key_names(tensor_or_dict):
        if tensor_or_dict is None:
            return []
        return sorted(tensor_or_dict.keys()) if isinstance(
            tensor_or_dict, dict) else []

    # Extract structure.
    has_label...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _loadValid(self, features, labels):\n\t\tself.validX_, self.validY_, self.validLabel_ = self.__load(features, labels)", "def _check_labels_features_exist(\n labels_example: List[\"Message\"], attribute: Text\n ) -> bool:\n\n for label_example in labels_example:\n if (\n ...
[ "0.6781636", "0.62255114", "0.6186637", "0.61535", "0.6147909", "0.6134168", "0.61082655", "0.610818", "0.61062473", "0.6101338", "0.60780364", "0.5975347", "0.5925683", "0.5920382", "0.5888969", "0.587738", "0.5873737", "0.5835565", "0.57987905", "0.5770602", "0.5758468", ...
0.72594035
0
Flattens the `features` and `labels` to a single tensor list.
def flatten_features_and_labels(self, features, labels, signals=None):
    flattened_inputs = []
    if self._feature_names:
        # We need a fixed ordering for enqueueing and dequeueing.
        flattened_inputs.extend(
            [features[name] for name in self._feature_names])
    else:
        flattened_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pack_features_vector(features, labels):\n features = tf.stack(list(features), axis=1)\n return features, labels", "def pack_features_vector(features, labels):\n features = tf.stack(list(features.values()), axis=1)\n return features, labels", "def pack_features_vector(features, labels):\n f...
[ "0.74422634", "0.73549277", "0.73549277", "0.6901035", "0.65079063", "0.648174", "0.629152", "0.6247123", "0.6247123", "0.6247123", "0.6202904", "0.6201719", "0.6159463", "0.6156918", "0.61519694", "0.6068398", "0.5983221", "0.5978041", "0.5978041", "0.5969851", "0.5969851", ...
0.7374709
1
Restores the flattened inputs to original features and labels form.
def unflatten_features_and_labels(self, flattened_inputs):
    expected_num_features = (
        len(self._feature_names) if self._feature_names else 1)
    if self._has_labels:
        expected_num_labels = (
            len(self._label_names) if self._label_names else 1)
    else:
        expected_num_labels ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _restore_data_inputs(self):\n super()._restore_data_inputs()\n self.training_data = (\n self._data.training_data.data if self._data.training_data and self._data.training_data.data else None\n )\n self.validation_data = (\n self._data.validation_data.data if sel...
[ "0.6197647", "0.6104975", "0.6052526", "0.6033052", "0.6020405", "0.5886211", "0.5778616", "0.57276195", "0.5723627", "0.5686435", "0.5685843", "0.56825626", "0.56755847", "0.56697255", "0.5663335", "0.5650272", "0.5640793", "0.5625429", "0.5615306", "0.5614015", "0.5612984",...
0.69521254
0
Generates infeed enqueue ops and dequeue_fn.
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
    # While tf.while_loop is called, the body function, which invokes
    # `enqueue_fn` passed in, is called to construct the graph. So, input_fn
    # structure is recorded.
    enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
        self._invoke_input_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_pe...
[ "0.67614406", "0.66683185", "0.5815572", "0.58151203", "0.5797748", "0.54712415", "0.538349", "0.5365341", "0.5356222", "0.53494734", "0.53477305", "0.53301996", "0.524572", "0.52256685", "0.52183527", "0.51829576", "0.51430917", "0.50617474", "0.50612164", "0.50002146", "0.4...
0.83329666
0
dequeue_fn is used by TPU to retrieve the tensors.
def dequeue_fn():
    # In the model-parallel case, both the host-side and device-side
    # computations must agree on the core on which infeed takes place. We
    # choose to perform infeed on logical core 0 of each replica.
    values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
    # The unflatt...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _OutfeedDequeue(self, decode_nm):\n num_decode_tensors = len(decode_nm.Flatten())\n outfeed_ops = [[]] * num_decode_tensors\n device_assignment = py_utils.GetTpuDeviceAssignment()\n assert device_assignment\n num_cores_per_replica = (1 if self.spmd else\n (device_assi...
[ "0.6000878", "0.5903571", "0.58939743", "0.58114827", "0.5764046", "0.5732872", "0.5658689", "0.5658689", "0.5578449", "0.5576985", "0.5566643", "0.5562891", "0.5546443", "0.55329204", "0.5524676", "0.5519212", "0.5513018", "0.55024093", "0.5486132", "0.54662687", "0.54569936...
0.72722965
0
Deploys the input pipeline and records the input structure.
def _invoke_input_fn_and_record_structure(self):
    enqueue_ops = []
    infeed_queues = []
    all_hooks = []
    num_hosts = self._ctx.num_hosts
    tpu_host_placement_fn = self._ctx.tpu_host_placement_function
    run_infeed_loop_on_coordinator = True
    if self._sharded_per_core:
        # Per-Core input pipeline...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform(self, inputs: list, stage: str) -> datapack.DataPack:", "def dataflow():\n print 'Building',TRAINER_NAME,'package.'\n subprocess.check_call(['python', 'setup.py', 'sdist', '--format=gztar'])\n subprocess.check_call(['gsutil', '-q', 'cp',\n os.path.join('dist', TRAINER_NAM...
[ "0.62789917", "0.5918441", "0.5827906", "0.5817481", "0.56151706", "0.5610592", "0.55843145", "0.5573068", "0.5568397", "0.5530364", "0.5478247", "0.5455659", "0.5426491", "0.54174775", "0.53775316", "0.53011864", "0.52758986", "0.52615386", "0.5242524", "0.52380687", "0.5235...
0.59553194
1
Converts the user-provided `model_fn` into a single train step on TPU. The user-provided `model_fn` takes an input tuple (features, labels) and produces the EstimatorSpec with train_op and loss for train `mode`. This usually represents a single train computation on CPU. For TPU training, a train (computation) step is first wrapp...
def convert_to_single_tpu_train_step(self, dequeue_fn):
    host_call = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def train_step(loss):
        """Training step function for use inside a while loop."""
        del loss  # unused; required in function signature.
        inputs = dequeue_fn...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _model_fn(features, labels, mode, config, params):\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode != model_fn_lib.ModeKeys.PREDICT:\n is_export_mode = False\n else:\n # For export_savedmodel, i...
[ "0.72253805", "0.68108726", "0.6774271", "0.6719611", "0.65392244", "0.649606", "0.64581513", "0.64381677", "0.6420198", "0.6401497", "0.6346824", "0.6328738", "0.6324738", "0.6315999", "0.6287701", "0.6285973", "0.62623984", "0.6247418", "0.61984706", "0.61869395", "0.617947...
0.6929875
1
Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`.
def validate(host_calls):
    for name, host_call in host_calls.items():
        if not isinstance(host_call, (tuple, list)):
            raise ValueError('{} should be tuple or list'.format(name))
        if len(host_call) != 2:
            raise ValueError('{} should have two elements.'.format(name))
        if not callable(host_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_parameters(self):\n self.target_metric = get_formatted_target_metric(\n self.target_metric, G.Env.metrics, default_dataset=\"oof\"\n )", "def validate(self):\n AcceleratorType.validate(self.accelerator_type)\n gcp.validate_machine_configuration(self.cpu_cores,...
[ "0.646635", "0.6258575", "0.62169003", "0.6163073", "0.6159874", "0.6156967", "0.6031898", "0.60262036", "0.60199577", "0.6002835", "0.59988576", "0.59860957", "0.5957013", "0.5948449", "0.59424144", "0.59343827", "0.59333295", "0.59328943", "0.5898939", "0.5875248", "0.58541...
0.72171295
0
Records the host_call structure.
def record(self, host_calls):
    for name, host_call in host_calls.items():
        host_fn, tensor_list_or_dict = host_call
        self._names.append(name)
        self._host_fns[name] = host_fn
        if isinstance(tensor_list_or_dict, dict):
            for (key, tensor) in six.iteritems(tensor_list_or_dict):
                sel...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_callbacks(host, typ):\n \n ip = host.replace(\"-\", \".\")\n src = typ\n call_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\n if os.path.isfile(\"/tmp/cc/calls.log\"): # write callback to calls log\n with open(\"/tmp/cc/calls.log\", 'a') as f:\n s = \"{0:<25} {...
[ "0.5483742", "0.5457527", "0.5410701", "0.52416027", "0.5203142", "0.517999", "0.50630707", "0.5054913", "0.5040014", "0.5017073", "0.5015763", "0.5003606", "0.499811", "0.4976404", "0.4963154", "0.49531364", "0.49517584", "0.4951451", "0.49355006", "0.4930302", "0.49170408",...
0.7327148
0
Create the op to enqueue the recorded host_calls.
def create_enqueue_op(self):
    if not self._names:
        return []

    tensors = []
    # TODO(jhseu): Consider deduping tensors.
    for name in self._names:
        tensors.extend(self._tensors[name])

    with ops.device(tpu.core(0)):
        return [tpu_ops.outfeed_enqueue_tuple(tensors)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def record(self, host_calls):\n\n for name, host_call in host_calls.items():\n host_fn, tensor_list_or_dict = host_call\n self._names.append(name)\n self._host_fns[name] = host_fn\n\n if isinstance(tensor_list_or_dict, dict):\n for (key, tensor) in six.iteritems(tensor_list_or_dict):\...
[ "0.6292755", "0.5712997", "0.56591284", "0.5607669", "0.5587878", "0.5528072", "0.54914623", "0.54818785", "0.53225166", "0.5263789", "0.51789", "0.50644535", "0.50429875", "0.5037634", "0.49689317", "0.49309674", "0.49283302", "0.4920256", "0.4901082", "0.48963195", "0.48628...
0.5739713
1
Sends the tensors through outfeed and runs the host_fn on CPU. The tensors are concatenated along dimension 0 to form a global tensor across all shards. The concatenated tensors are passed to the host_fn, which is executed on the first host.
def create_tpu_hostcall(self):
    if not self._names:
        return []

    ret = {}
    # For each i, dequeue_ops[i] is a list containing the tensors from all
    # shards. This list is concatenated later.
    dequeue_ops = []
    tensor_dtypes = []
    tensor_shapes = []
    for name in self._names:
        for _ in se...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_pe...
[ "0.63360953", "0.62956405", "0.6291536", "0.6241733", "0.58995396", "0.5864348", "0.5839464", "0.58250535", "0.56826615", "0.56309867", "0.5552339", "0.548088", "0.5467413", "0.5426072", "0.53615206", "0.53566736", "0.5268573", "0.5245989", "0.5235768", "0.5235768", "0.523125...
0.6630872
0
Returns a new model_fn, which wraps the TPU support.
def _augment_model_fn(self, model_fn, batch_axis):

    def _model_fn(features, labels, mode, config, params):
        """An Estimator `model_fn` for TPUEstimator."""
        with self._ctx.with_mode(mode) as ctx:
            model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
            if mode != model_fn_lib.Mode...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model_fn(feature_columns):\n def _model_fn(features, mode, params):\n \"\"\"Model Function.\"\"\"\n logits = logits_fn(features, feature_columns, params)\n labels = tf.squeeze(features[\"label\"])\n\n if mode == tf_estimator.ModeKeys.EVAL:\n loss = tf.reduce_mean(tf.nn.sparse_softmax_c...
[ "0.65870714", "0.65342593", "0.6415281", "0.640519", "0.62561655", "0.62051016", "0.6142609", "0.61087984", "0.60520357", "0.6005414", "0.60032", "0.59343463", "0.5921695", "0.591066", "0.5891964", "0.5812297", "0.58100814", "0.5809237", "0.578478", "0.5782048", "0.5770619", ...
0.67717654
0
An Estimator `model_fn` for TPUEstimator.
def _model_fn(features, labels, mode, config, params):
    with self._ctx.with_mode(mode) as ctx:
        model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
        if mode != model_fn_lib.ModeKeys.PREDICT:
            is_export_mode = False
        else:
            # For export_savedmodel, input_fn is n...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _augment_model_fn(self, model_fn, batch_axis):\n\n def _model_fn(features, labels, mode, config, params):\n \"\"\"A Estimator `model_fn` for TPUEstimator.\"\"\"\n with self._ctx.with_mode(mode) as ctx:\n model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)\n\n if mode !=...
[ "0.7530073", "0.71472275", "0.71315056", "0.7064388", "0.6985274", "0.69217014", "0.69103295", "0.6909028", "0.68506837", "0.67912245", "0.67624724", "0.67304444", "0.6727382", "0.6604433", "0.6592421", "0.65798515", "0.6450492", "0.6436315", "0.64316684", "0.63801944", "0.63...
0.73766655
1
Executes `model_fn_wrapper` multiple times on all TPU shards.
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
    iterations_per_loop_var = _create_or_get_iterations_per_loop()

    single_tpu_eval_step, host_calls, captured_scaffold_fn = (
        model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))

    def multi_tpu_eval_steps_on_single_shard():
        return trainin...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_train_step, host_call, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))\n\n def multi_tpu_train_steps_on_single_shard():\n ...
[ "0.66594964", "0.61444473", "0.5915304", "0.5768129", "0.57181996", "0.56924707", "0.55896854", "0.55495274", "0.5511398", "0.5488238", "0.5486051", "0.5469015", "0.544404", "0.54410255", "0.54396427", "0.5391762", "0.53675765", "0.53656566", "0.5349069", "0.53173107", "0.528...
0.63976693
1
Executes `model_fn_wrapper` multiple times on all TPU shards.
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
    iterations_per_loop_var = _create_or_get_iterations_per_loop()

    single_tpu_train_step, host_call, captured_scaffold_fn = (
        model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

    def multi_tpu_train_steps_on_single_shard():
        return trai...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n\n single_tpu_eval_step, host_calls, captured_scaffold_fn = (\n model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))\n\n def multi_tpu_eval_steps_on_single_shard():\n re...
[ "0.6398013", "0.61430126", "0.59155613", "0.5768424", "0.57171917", "0.56894976", "0.5591647", "0.55525845", "0.55103254", "0.54879135", "0.5486981", "0.5467224", "0.5443748", "0.5441698", "0.5436969", "0.5393069", "0.5367924", "0.53674203", "0.53497", "0.531816", "0.52897155...
0.6660681
0
Wraps the ops generated by `op_fn` in tf.while_loop.
def _wrap_computation_in_while_loop(device, op_fn):

    def computation(i):
        with ops.control_dependencies(op_fn()):
            return i + 1

    iterations_per_loop_var = _create_or_get_iterations_per_loop()
    # By setting parallel_iterations=1, the parallel execution in while_loop is
    # basically turned off.
    with ops.d...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def while_loop_op(op):\n return (control_flow_util.IsLoopSwitch(op) or\n control_flow_util.IsLoopMerge(op) or\n control_flow_util.IsLoopEnter(op) or\n control_flow_util.IsLoopExit(op) or\n TensorTracer.loop_cond_op(op) or\n op.type in ('RefNextIterati...
[ "0.69818914", "0.68200386", "0.6126418", "0.6078447", "0.57636625", "0.5754965", "0.57449204", "0.57445085", "0.57208353", "0.57029027", "0.56025404", "0.54687613", "0.54140025", "0.53947186", "0.52648926", "0.5232417", "0.52252024", "0.520506", "0.5199421", "0.5190266", "0.5...
0.7957619
0
Retrieves the Scaffold from `captured_scaffold_fn`.
def _get_scaffold(captured_scaffold_fn):
    with _CapturingContext(message='Inside scaffold_fn'):
        scaffold_fn = captured_scaffold_fn.get()
        if scaffold_fn:
            scaffold = scaffold_fn()
            if scaffold is None:
                raise ValueError(
                    'TPUEstimatorSpec.scaffold_fn returns None, which is not allow...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_scaffold(self, mol: Chem.rdchem.Mol) -> str:\n return MurckoScaffold.MurckoScaffoldSmiles(mol=mol, includeChirality=self.include_chirality)", "def generate_scaffold(smiles, include_chirality=False):\n mol = Chem.MolFromSmiles(smiles)\n engine = ScaffoldGenerator(include_chirality=include_chi...
[ "0.5962946", "0.54265267", "0.5422105", "0.5317502", "0.49453947", "0.49192426", "0.47882766", "0.47736388", "0.46875826", "0.45953506", "0.4508714", "0.4507913", "0.4507913", "0.4490961", "0.44098067", "0.4358617", "0.42400393", "0.41495925", "0.41405055", "0.41361576", "0.4...
0.8463581
0
Returns an `_Inputs` instance according to `input_fn` return value.
def from_input_fn(return_values):
    if isinstance(return_values, dataset_ops.Dataset):
        dataset = return_values
        return _Inputs(dataset=dataset)

    features, labels = _Inputs._parse_inputs(return_values)
    return _Inputs(features, labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input(inputs):\n return input(inputs)", "def _input_fn(input_pipeline_context=None):\n return _create_dataset(options, is_training, input_pipeline_context)", "def get_function_input(inputs, input_name, optional=False):\n this_input = inputs.get(input_name)\n\n if this_input ...
[ "0.6401963", "0.63848746", "0.6295835", "0.62745816", "0.62434405", "0.6147081", "0.61179113", "0.61011976", "0.6071407", "0.6055143", "0.5980461", "0.5961409", "0.59082115", "0.5904447", "0.5903279", "0.5902866", "0.5896739", "0.587621", "0.5863113", "0.58621866", "0.5805875...
0.74578035
0
Returns True if the return value from input_fn is a Dataset.
def is_dataset(self):
    return self._dataset is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_dataset(obj):\n return isinstance(obj, (DictDataset, ImageDataset, LabeledImageDataset,\n TupleDataset, DatasetMixin))", "def is_pyvista_dataset(obj):\n return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock))", "def is_dataset(X, require_attrs=None):\n\n if requ...
[ "0.7126082", "0.65838486", "0.63373053", "0.63276345", "0.60850066", "0.6026266", "0.5969108", "0.5859921", "0.5840824", "0.5836024", "0.583491", "0.5834641", "0.579408", "0.579408", "0.57419455", "0.57343686", "0.5691356", "0.5672013", "0.56190723", "0.55721736", "0.5570917"...
0.72974783
0
Returns a `SessionRunHook` to initialize this dataset. This must be called before `features_and_labels`.
def dataset_initializer_hook(self):
    iterator = self._dataset.make_initializable_iterator()
    # pylint: disable=protected-access
    hook = estimator_lib._DatasetInitializerHook(iterator)
    self._iterator = iterator
    return hook
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n \n #initialize the variables\n init = tf.global_variables_initializer()\n self.session.run(init)\n \n #initialize the data iterators\n self.session.run(self.data_iterator.initializer)", "def _initialize(self) -> None:\n p = self.params\n...
[ "0.5872473", "0.5574521", "0.551776", "0.5452945", "0.539178", "0.5315563", "0.5254858", "0.5231456", "0.5206683", "0.520266", "0.51905555", "0.51767313", "0.51684254", "0.51541096", "0.51093185", "0.5108235", "0.5104647", "0.5104162", "0.5078713", "0.5040331", "0.5021158", ...
0.67949516
0