Dataset columns:
query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
metadata: dict
negatives: list (30 items)
negative_scores: list (30 items)
document_score: string (length 4 to 10)
document_rank: string (2 classes)
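For orientation, the sketch below shows how one row of this dataset might be represented in Python. It is a minimal, hedged illustration based only on the column schema above: the field names come from the schema, while the TypedDict name, the helper function, and the conversion choices are hypothetical and not part of the dataset itself.

```python
from typing import TypedDict, List, Dict

class RetrievalRow(TypedDict):
    # Field names follow the schema above; types are inferred from the listed column stats.
    query: str                  # natural-language description (a docstring), 9 to ~9.05k chars
    document: str               # positive code snippet, 10 to ~222k chars
    metadata: Dict              # e.g. {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}}
    negatives: List[str]        # 30 hard-negative code snippets
    negative_scores: List[str]  # 30 similarity scores, stored as strings
    document_score: str         # similarity score of the positive document, stored as a string
    document_rank: str          # one of two classes ("0" or "1" in the rows shown)

def parse_row(row: RetrievalRow) -> Dict:
    """Hypothetical helper: convert the string-typed score fields to numbers."""
    return {
        "query": row["query"],
        "document": row["document"],
        "negatives": row["negatives"],
        "negative_scores": [float(s) for s in row["negative_scores"]],
        "document_score": float(row["document_score"]),
        "document_rank": int(row["document_rank"]),
    }
```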
Force a computation of the current portfolio state.
def update_portfolio(self): if not self._dirty_portfolio: return portfolio = self._portfolio pt = self.position_tracker portfolio.positions = pt.get_positions() position_stats = pt.stats portfolio.positions_value = position_value = ( position_st...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_val...
[ "0.60446954", "0.5951977", "0.5695806", "0.5687665", "0.56560236", "0.5653465", "0.56520754", "0.5633276", "0.559409", "0.5562694", "0.54696906", "0.54538536", "0.5440695", "0.5416885", "0.5380987", "0.5366457", "0.5364525", "0.5331849", "0.5323494", "0.53195906", "0.53195906...
0.66357535
0
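The metadata cell of each row marks it for a triplet objective over (query, document, negatives). Purely as an illustrative sketch, and not the dataset authors' training code, such rows could be expanded into (anchor, positive, negative) triplets like this:

```python
def build_triplets(row):
    """Illustrative only: pair the query with its positive document and each hard negative."""
    anchor = row["query"]
    positive = row["document"]
    return [(anchor, positive, negative) for negative in row["negatives"]]
```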
Compute the current portfolio. Notes This is cached, repeated access will not recompute the portfolio until the portfolio may have changed.
def portfolio(self): self.update_portfolio() return self._immutable_portfolio
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_portfolio(self):\n if not self._dirty_portfolio:\n return\n\n portfolio = self._portfolio\n pt = self.position_tracker\n\n portfolio.positions = pt.get_positions()\n position_stats = pt.stats\n\n portfolio.positions_value = position_value = (\n ...
[ "0.7362575", "0.6996456", "0.6729907", "0.6721263", "0.6675463", "0.66379255", "0.6499", "0.63661253", "0.63091534", "0.62968546", "0.6240645", "0.6196709", "0.6144556", "0.60243994", "0.5963545", "0.5962163", "0.59054154", "0.5858664", "0.5812279", "0.5781019", "0.5715677", ...
0.81566226
0
Override fields on ``self.account``.
def override_account_fields(self, settled_cash=not_overridden, accrued_interest=not_overridden, buying_power=not_overridden, equity_with_loan=not_overridden, to...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def patch(self, account=None, user=None, ac...
[ "0.6413226", "0.6413226", "0.6413226", "0.6413226", "0.6343206", "0.6329128", "0.6322581", "0.63135093", "0.61441493", "0.60770935", "0.60361296", "0.6022793", "0.60135746", "0.60068023", "0.59981", "0.59467715", "0.58997744", "0.5875439", "0.5858766", "0.5787134", "0.5765871...
0.67399603
0
Called when the partition's reference count reaches zero. If the partition contains a temporary file which is not referenced by any other partition then the temporary file is removed from disk. If the partition contains a nontemporary file which is not referenced by any other partition then the file is closed.
def __del__(self): # subarray = getattr(self, '_subarray', None) subarray = self._subarray # If the subarray is unique it will have 2 references to # it plus 1 within this method, making 3. If it has more # than 3 references to it then it is not unique. if getrefc...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "def _Close(self):\n self._fsfat_volume = None\n self._file_object = None", "...
[ "0.637165", "0.6141179", "0.6079966", "0.60165036", "0.5893113", "0.5771018", "0.5769767", "0.5752214", "0.5731687", "0.5730452", "0.57243013", "0.5715975", "0.5715163", "0.5711628", "0.5696097", "0.56944114", "0.56878215", "0.56815344", "0.5681272", "0.5633728", "0.5630855",...
0.61570215
1
Add i to the count of subarrays referencing the file of this partition's subarray. Only do this if self._subarray is an instance of FileArray, but not a temporary FileArray.
def _add_to_file_counter(self, i): # subarray = getattr(self, '_subarray', None) subarray = self._subarray if subarray is None: return try: if isinstance(subarray, FileArray) and not isinstance( subarray, CachedArray ): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i...
[ "0.5323221", "0.5309046", "0.5237938", "0.52028364", "0.51664454", "0.5117871", "0.51150346", "0.5107957", "0.50491905", "0.50423014", "0.5001557", "0.499577", "0.49840355", "0.4932267", "0.4909338", "0.48659304", "0.48641643", "0.48607743", "0.48591715", "0.4854085", "0.4824...
0.8270168
0
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _increment_file_counter(self): self._add_to_file_counter(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
[ "0.83802605", "0.6079216", "0.60073787", "0.5972287", "0.5871502", "0.58649766", "0.58356875", "0.5813897", "0.5656929", "0.5650024", "0.5628978", "0.5588129", "0.5575472", "0.55536973", "0.55466735", "0.55383646", "0.5529106", "0.5502226", "0.54975855", "0.5481056", "0.54359...
0.71174276
1
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _decrement_file_counter(self): self._add_to_file_counter(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ...
[ "0.7747842", "0.63648546", "0.6072333", "0.60351396", "0.5918244", "0.57059807", "0.56674904", "0.5664642", "0.56031275", "0.5573989", "0.5520654", "0.5453089", "0.5448838", "0.54281026", "0.5422772", "0.5378375", "0.5307265", "0.5244931", "0.52217174", "0.5220032", "0.521629...
0.6648334
1
Add the auxiliary mask to the config dictionary. Assumes that ``self.config`` already exists.
def _configure_auxiliary_mask(self, auxiliary_mask): indices = self.indices new = [ mask[ tuple( [ (slice(None) if n == 1 else index) for n, index in zip(mask.shape, indices) ] ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_masking(self, masks):\n self.masks = masks", "def add_config(self):\n\n config = {\n 'invert_byte': InvertByte,\n 'invert_word': InvertWord,\n 'invert_double_word': InvertDoubleWord,\n 'and_byte': AndByte,\n 'and_word': AndWord,\n...
[ "0.5756575", "0.5562516", "0.54862016", "0.5367435", "0.5347834", "0.53455114", "0.5306387", "0.52775955", "0.522134", "0.5194576", "0.51778084", "0.5152401", "0.51056355", "0.51011837", "0.50790006", "0.5067728", "0.5065551", "0.4962702", "0.4946801", "0.4946075", "0.4918300...
0.71955097
0
True if and only if the partition's subarray is in memory as opposed to on disk.
def in_memory(self): return hasattr(self._subarray, "__array_interface__")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n ...
[ "0.7677807", "0.75874203", "0.73676527", "0.6567502", "0.6373806", "0.62520576", "0.6168975", "0.6139325", "0.61339194", "0.6126576", "0.611511", "0.608777", "0.6025861", "0.6025105", "0.60162103", "0.5953649", "0.59421575", "0.59196216", "0.5899218", "0.5882104", "0.5880964"...
0.7635218
1
True if and only if the partition's subarray is on disk as opposed to in memory.
def on_disk(self): return isinstance(self._subarray, FileArray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # ------------------------------------------...
[ "0.69458973", "0.67629313", "0.6696292", "0.656141", "0.63517463", "0.6290223", "0.61257756", "0.59961635", "0.59486187", "0.59276515", "0.59175736", "0.5881189", "0.5803704", "0.577553", "0.5774722", "0.5755425", "0.5737828", "0.57311875", "0.5723483", "0.5720749", "0.568287...
0.80375713
0
The partition's subarray of data.
def subarray(self): return self._subarray
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subarray(self) -> Subarray:\n return Subarray.from_pybind11(self._ctx, self._subarray)", "def partition(self, sep):\n return asarray(partition(self, sep))", "def array(self) -> ndarray:\n if self._slices: # so this is a sub-parray object\n # index into origin array by saved...
[ "0.68206084", "0.6535615", "0.6457086", "0.6347438", "0.62131214", "0.61877143", "0.61062545", "0.6101903", "0.60762966", "0.6068932", "0.6029079", "0.6012183", "0.59961045", "0.59653914", "0.5928066", "0.58841175", "0.58761495", "0.58514863", "0.5848823", "0.5791028", "0.577...
0.7297865
0
Change the axis names. The axis names are arbitrary, so mapping them to another arbitrary collection does not change the data array values, units, nor axis order.
def change_axis_names(self, axis_map): axes = self.axes # Partition axes self.axes = [axis_map[axis] for axis in axes] # Flipped axes flip = self.flip if flip: self.flip = [axis_map[axis] for axis in flip]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)", "def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n ...
[ "0.7356323", "0.7326029", "0.6893155", "0.6677015", "0.64332557", "0.6372005", "0.63517636", "0.62905", "0.628417", "0.62775946", "0.6271645", "0.6077482", "0.6047013", "0.6024936", "0.5993588", "0.59712166", "0.5967535", "0.596201", "0.5952742", "0.58923167", "0.589169", "...
0.75589085
0
Close the partition after it has been conformed. The partition should usually be closed after its `array` method has been called to prevent memory leaks. Closing the partition does one of the following, depending on the values of the partition's `!_original` attribute and on the
def close(self, **kwargs): config = getattr(self, "config", None) if config is None: return if kwargs: config.update(kwargs) original = getattr(self, "_original", None) logger.partitioning("Partition.close: original = {}".format(original)) if n...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n return self.close_array", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def close(self):\n self.ix.close()", "def close (self):\n pass\n #TODO: implement more realistic closing semantics", "def close(self):\n self.data....
[ "0.6764165", "0.6518303", "0.60208786", "0.5811839", "0.5795013", "0.579095", "0.5784808", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.5713722", "0.5699953", "0.5699953", "0.5665161", "0.5664806", "0.56545...
0.70555735
0
True if the subarray contains datetime objects.
def isdt(self): return self.Units.isreftime and self._subarray.dtype == _dtype_object
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.datetime64", "def _uses_datetimeblock(dtype: Union[np.dtype, ExtensionDtype]) -> bool:\n vtype = dtype.type\n return issubclass(vtype, np.datetime64)", "def is_datetime(self) -> bool:\n return False", "def are...
[ "0.72152364", "0.68011504", "0.67946845", "0.6540459", "0.6300369", "0.62526584", "0.6159265", "0.612599", "0.612436", "0.60484034", "0.60277694", "0.6001856", "0.5956571", "0.5938945", "0.58849597", "0.58664906", "0.58624506", "0.5751764", "0.56482595", "0.56456316", "0.5640...
0.71551335
1
Close the file containing the subarray, if there is one.
def file_close(self): if self.on_disk: self._subarray.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_file(self):\n self.root_group.close()", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n ...
[ "0.61085176", "0.6024418", "0.59545076", "0.5824396", "0.58042437", "0.5736725", "0.57022864", "0.57022864", "0.56994104", "0.56970567", "0.56860274", "0.564539", "0.56289333", "0.56183493", "0.5591367", "0.5591367", "0.55517685", "0.5509945", "0.5487693", "0.54802656", "0.54...
0.7828047
0
Return an iterator over indices of the master array which are spanned by the data array.
def master_ndindex(self): # itermaster_indices(self): return itertools_product( *[range(*r) for r in self.location] ) # TODO check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subset_inds(self, adata_parent):\r\n subset_inds = np.ones(len(adata_parent), dtype=bool)\r\n for condition, values in self.subset_cond.items():\r\n subset_inds *= adata_parent.obs[condition].isin(values)\r\n return subset_inds", "def enumerate(self):\n # go through...
[ "0.61020994", "0.6072502", "0.60690475", "0.60687184", "0.60543483", "0.60275173", "0.5924552", "0.5918545", "0.59088135", "0.58821535", "0.58567846", "0.58429295", "0.5828838", "0.58083194", "0.5794418", "0.5793355", "0.57828283", "0.57748425", "0.5756645", "0.57550335", "0....
0.6835397
0
Update the `!part` attribute inplace for new indices of the master array.
def new_part(self, indices, master_axis_to_position, master_flip): shape = self.shape if indices == [slice(0, stop, 1) for stop in shape]: return # ------------------------------------------------------------ # If a dimension runs in the wrong direction then change its ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, patch):\n internalSlices = self._get_internal_slices(patch.slices)\n self.array[internalSlices] = patch.array", "def _idx_changed(self, idx):\n self.refresh_memory()", "def _loadpart(self, part):\n new_partidx = util.Partname(part.partname).idx\n for idx, seq_p...
[ "0.5455975", "0.5450583", "0.5443926", "0.53721666", "0.5359862", "0.5275372", "0.52543634", "0.52315736", "0.5228334", "0.51149035", "0.50902224", "0.50782204", "0.5062653", "0.50265366", "0.50074023", "0.49909484", "0.49905938", "0.49756426", "0.4970326", "0.496184", "0.496...
0.5784396
0
The extra memory required to access the array.
def extra_memory(self): if not self.in_memory: # -------------------------------------------------------- # The subarray is on disk so getting the partition's data # array will require extra memory # -------------------------------------------------------- ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allocated_memory(self):\n return self._allocated_memory", "def memory(self):\r\n return self._memory", "def __len__(self):\n\t\treturn len(self.memory)", "def __len__(self):\r\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n...
[ "0.6821264", "0.6811845", "0.66922146", "0.66735834", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", ...
0.73394907
0
Move the partition's subarray to a temporary file on disk.
def to_disk(self, reopen=True): # try: tfa = CachedArray(self.array) # except Exception: # return False fd, _lock_file = mkstemp( prefix=tfa._partition_file + "_", dir=tfa._partition_dir ) close(fd) self.subarray = tf...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(ori...
[ "0.58671135", "0.56434155", "0.5580373", "0.5417111", "0.5385665", "0.5372569", "0.53123814", "0.52139175", "0.5193834", "0.51137465", "0.5101339", "0.5099379", "0.50423723", "0.5027432", "0.50095487", "0.5004623", "0.49991313", "0.4995337", "0.49318588", "0.49247873", "0.489...
0.6427523
0
Register a temporary file on this rank that has been created on another rank.
def _register_temporary_file(self): _partition_file = self._subarray._partition_file _partition_dir = self._subarray._partition_dir if _partition_file not in _temporary_files: fd, _lock_file = mkstemp( prefix=_partition_file + "_", dir=_partition_dir ) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_tmp_file(self, tmp_file: str):\n self.temp_files.add(pathlib.Path(tmp_file))", "def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]", "def add_tempfile(self, filename, exists=True):\n...
[ "0.7100669", "0.6645065", "0.6373811", "0.60678446", "0.59707797", "0.5943573", "0.5882913", "0.5867154", "0.5833076", "0.57806534", "0.5745454", "0.5718152", "0.5706169", "0.5704879", "0.56752044", "0.56738895", "0.56596625", "0.56512666", "0.56443375", "0.56338364", "0.5625...
0.66906744
1
Add the lock files listed in lock_files to the list of lock files managed by other ranks.
def _update_lock_files(self, lock_files): _, _lock_file, _other_lock_files = _temporary_files[ self._subarray._partition_file ] _other_lock_files.update(set(lock_files)) if _lock_file in _other_lock_files: # If the lock file managed by this rank is in the list of ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LockFiles(self, entries):\n self._model.lock(entries)", "def add_mock_files(self, file_list):\n self._mock_file_list.extend(file_list)", "def thread_file_list(self):\n # Establish connection for this thread\n connection = self.connect()\n\n # Set working directory on serv...
[ "0.5858453", "0.5857905", "0.5442941", "0.5427439", "0.5369485", "0.534254", "0.5242692", "0.5233011", "0.52133423", "0.51889026", "0.51700175", "0.51625514", "0.5118121", "0.5110031", "0.50916535", "0.5077471", "0.5056525", "0.5047517", "0.50415224", "0.50407803", "0.5031041...
0.785469
0
r""" Samples a 2d function f over specified intervals and returns two arrays (X, Y) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot2d.py. f is a function of one variable, such as x2. x_args is an interval given in the form (var, min, max, n)
def sample2d(f, x_args): try: f = sympify(f) except SympifyError: raise ValueError("f could not be interpreted as a SymPy function") try: x, x_min, x_max, x_n = x_args except (TypeError, IndexError): raise ValueError("x_args must be a tuple of the form (var, min, max, n)"...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample3d(f, x_args, y_args):\n x, x_min, x_max, x_n = None, None, None, None\n y, y_min, y_max, y_n = None, None, None, None\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = ...
[ "0.6939351", "0.6302334", "0.6178659", "0.58382726", "0.57627946", "0.5739403", "0.56020606", "0.5559102", "0.5487004", "0.5467543", "0.5426483", "0.5417944", "0.53844726", "0.53342545", "0.53172773", "0.53114015", "0.5287415", "0.5252938", "0.5250112", "0.52382034", "0.52241...
0.7971121
0
r""" Samples a 3d function f over specified intervals and returns three 2d arrays (X, Y, Z) suitable for plotting with matlab (matplotlib) syntax. See examples\mplot3d.py. f is a function of two variables, such as x2 + y2. x_args and y_args are intervals given in the form (var, min, max, n)
def sample3d(f, x_args, y_args): x, x_min, x_max, x_n = None, None, None, None y, y_min, y_max, y_n = None, None, None, None try: f = sympify(f) except SympifyError: raise ValueError("f could not be interpreted as a SymPy function") try: x, x_min, x_max, x_n = x_args ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n ...
[ "0.6324257", "0.61952674", "0.60081536", "0.6003968", "0.59578633", "0.5772959", "0.5612362", "0.55938494", "0.5529308", "0.552768", "0.5526545", "0.55223507", "0.55000556", "0.5411602", "0.54069936", "0.53909606", "0.5389578", "0.53585124", "0.5356868", "0.53406954", "0.5334...
0.8300914
0
Samples a 2d or 3d function over specified intervals and returns a dataset suitable for plotting with matlab (matplotlib) syntax. Wrapper for sample2d and sample3d. f is a function of one or two variables, such as x**2. var_args are intervals for each variable given in the form (var, min, max, n)
def sample(f, *var_args): if len(var_args) == 1: return sample2d(f, var_args[0]) elif len(var_args) == 2: return sample3d(f, var_args[0], var_args[1]) else: raise ValueError("Only 2d and 3d sampling are supported at this time.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample3d(f, x_args, y_args):\n x, x_min, x_max, x_n = None, None, None, None\n y, y_min, y_max, y_n = None, None, None, None\n try:\n f = sympify(f)\n except SympifyError:\n raise ValueError(\"f could not be interpreted as a SymPy function\")\n try:\n x, x_min, x_max, x_n = ...
[ "0.7615295", "0.70778996", "0.6081689", "0.57048494", "0.56257445", "0.55822027", "0.55651504", "0.55339074", "0.54328907", "0.53981596", "0.5369118", "0.53515136", "0.53430045", "0.5319749", "0.5275369", "0.5249938", "0.52404726", "0.52143115", "0.52082497", "0.51295596", "0...
0.7539873
1
Default reducer for distinctions. Expects all distinctions to follow
def __reduce__(self): return instanceReducer(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __reduce__(self):\n\t\treturn self.__class__, (self.dist, self.frozen)", "def evaluation_reducer(self) -> Union[Reducer, Dict[str, Reducer]]:\n return Reducer.AVG", "def __reduce__(self): # real signature unknown; restored from __doc__\r\n pass", "def _reduce(self, action):\n assert ...
[ "0.52857256", "0.5162497", "0.5107433", "0.5057547", "0.5027394", "0.5027394", "0.5027394", "0.5027394", "0.5027394", "0.49870852", "0.49124625", "0.48178238", "0.47693735", "0.47630015", "0.47151983", "0.4696542", "0.46933955", "0.4672102", "0.46205962", "0.46201527", "0.461...
0.5502056
0
For conjugate distinctions this should be overridden and return the base distinctions used. For non-conjugate distinctions it will automatically return an empty list.
def getBaseDistinctions(self): return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_conjugate_bases_of(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == \"is conjugate base of\")]\n else:\n return []", "def conjugate(self):\n pass", "def conjugate(self, ???):", ...
[ "0.6720197", "0.64957684", "0.63367724", "0.6140327", "0.6048907", "0.6023874", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", "0.5890595", ...
0.6723077
0
Generates a random distinction of this type that is valid for the schema config.schema and for the given graphs. This function must take graphs as its first argument, and if it is a conjugate distinction it must then take, as separate args, not a tuple,
def getRandomDistinction(config, graphs, *base_distinctions): raise AbstractMethodException(Distinction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_regular_graph(variable_names, dist_func, num_neigh=10, **kwargs):\n shuffle(variable_names)\n num_vars = len(variable_names)\n num_neigh = min(num_neigh, num_vars-1)\n graphs = nx.random_graphs.random_regular_graph(num_neigh, num_vars)\n edges = np.array(graphs.edges())\n edges.sort(...
[ "0.6162866", "0.60737395", "0.56084675", "0.5604049", "0.5572062", "0.55707884", "0.5547834", "0.5473386", "0.54535896", "0.54377645", "0.5403645", "0.5399105", "0.53836566", "0.5325518", "0.5313336", "0.5309223", "0.5285266", "0.52835023", "0.5281857", "0.52793276", "0.52750...
0.7116983
0
Get an estimate of the number of different subtypes for this distinction. This is used to estimate a PDF for randomly sampling the distinction space. Examine the code of other distinctions to get a feel for how things are estimated.
def getNumberOfSubtypes(config, low_estimate=True): raise AbstractMethodException(Distinction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNumberOfBaseDistinctionsNeeded():\n\n raise AbstractMethodException(Distinction)", "def findAtypicalTerms(self):\n self.atypicalTermsDict = collections.OrderedDict()\n distanceList = list()\n distance = 0\n for key in self.summaryFilteredDict:\n partitionName ...
[ "0.5700454", "0.5624713", "0.5599203", "0.5592559", "0.5505562", "0.54669946", "0.5466142", "0.5449652", "0.5400594", "0.5398503", "0.5343119", "0.53286606", "0.5312603", "0.53101563", "0.5294233", "0.5283371", "0.5280055", "0.5267188", "0.5262132", "0.52526665", "0.52496", ...
0.6394095
0
Given a schema return True if this type of distinction is valid for the schema. Default is True. Should be overridden if there are any schemas a distinction is not valid for.
def isValidForSchema(schema): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_a_dde_schema(self, schema):\n return schema in self.registered_dde_schemas", "def compatibleSchema(self,\n schema: schemaconverter.TDXSchema,\n raise_error: bool = True\n ) -> bool:\n db_tdx_schema = self.tdx_schema\n # see https://stackoverflow.com/a/41579450/1014916...
[ "0.738567", "0.6994796", "0.69861853", "0.67255765", "0.66746897", "0.6557599", "0.64317065", "0.6430458", "0.6274206", "0.62662953", "0.61832917", "0.61832917", "0.61646706", "0.6147451", "0.61017865", "0.6056857", "0.6008853", "0.60003716", "0.59765136", "0.5948833", "0.594...
0.81319565
0
Matrix multiplication of chains of square matrices
def chain_matmul_square(As): As_matmul = As while As_matmul.shape[0] > 1: if As_matmul.shape[0] % 2: A_last = As_matmul[-1:] else: A_last = None As_matmul = torch.matmul(As_matmul[0:-1:2], As_matmul[1::2]) if A_last is not None: As_ma...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recursive_multiply(a, b):\n if len(a) == 2:\n return naive_multiply(a, b)\n\n a11 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a11):\n a11[index] = row[0:int(len(row) / 2)]\n\n a12 = a[0:int(len(a) / 2)]\n for index, row in enumerate(a12):\n a12[index] = row[int(len(...
[ "0.7083097", "0.6831898", "0.6822807", "0.68104607", "0.6706873", "0.66786253", "0.66484094", "0.6619107", "0.659644", "0.6510616", "0.6500125", "0.6498091", "0.646012", "0.6443269", "0.64374197", "0.6424422", "0.6419277", "0.6410198", "0.6350263", "0.63494116", "0.6345248", ...
0.70788336
1
Print Bento details by providing the bento_tag. \b
def get(bento_tag: str, output: str) -> None: # type: ignore (not accessed) bento = bento_store.get(bento_tag) if output == "path": console.print(bento.path) elif output == "json": info = json.dumps(bento.info.to_dict(), indent=2, default=str) console.print_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_tags():\n for tag in Tag.query.all():\n print tag.__repr__()", "def __gitDescribeTag(self):\n self.vcs.gitDescribe(self.project.getProjectPath(), [])", "def show_target(self, target):\n print \" \" + repr(target.subject) \\\n + \" \" + target.meaning...
[ "0.59889174", "0.5728905", "0.56706613", "0.56307644", "0.5569425", "0.5474408", "0.5425949", "0.54244584", "0.5415978", "0.5385348", "0.53358155", "0.5326319", "0.5322443", "0.5317626", "0.5277394", "0.52764267", "0.5227435", "0.5224774", "0.5220987", "0.5199572", "0.5164107...
0.637782
0
List Bentos in local store \b show all bentos saved $ bentoml list \b show all versions of bento with the name FraudDetector $ bentoml list FraudDetector
def list_bentos(bento_name: str, output: str) -> None: # type: ignore (not accessed) bentos = bento_store.list(bento_name) res = [ { "tag": str(bento.tag), "path": display_path_under_home(bento.path), "size": human_readable_size(calc_dir_size(...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command_list(arguments):\n global current_mode\n current_mode = Mode.list\n #current_entity.addlink(arguments[0], arguments[1])\n return 'Now listing all entities'", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names ...
[ "0.6460789", "0.609852", "0.605642", "0.60312444", "0.5958324", "0.58794826", "0.58415896", "0.5798007", "0.57732534", "0.5748135", "0.5748135", "0.5723663", "0.57183725", "0.57072484", "0.568819", "0.56753975", "0.5644786", "0.56367767", "0.5634464", "0.5600329", "0.55807924...
0.66813993
0
Export a Bento to an external file archive \b
def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed) bento = bento_store.get(bento_tag) out_path = bento.export(out_path) logger.info("%s exported to %s.", bento, out_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def archive(po_filename, bl_filename):\n\n # Store archive in same dir as this script\n root = os.path.abspath(os.path.dirname(sys.argv[0]))\n\n po_archive = root + '/po.csv.%s' % datetime.date.today()\n bl_archive = root + '/bl.csv.%s' % datetime.date.today()\n\n shutil.move(po_filename, po_archive...
[ "0.60800993", "0.60314316", "0.599764", "0.5991197", "0.5971901", "0.59628", "0.59533924", "0.5905716", "0.5669903", "0.5646166", "0.5619919", "0.5599178", "0.55828655", "0.5566471", "0.55654246", "0.5564186", "0.5563321", "0.55614084", "0.5539148", "0.55066764", "0.5496924",...
0.73495036
0
Import a previously exported Bento archive file \b
def import_bento_(bento_path: str) -> None: # type: ignore (not accessed) bento = import_bento(bento_path) logger.info("%s imported.", bento)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_idb(self, idb_file):\n self.__run_import_script(file=idb_file, is_bin=False)", "def zoo_import(name, head=''):\n net = gz.get_model(name, pretrained=True)\n export_block(head + name, net, preprocess=True)", "def _import_bh_(self):", "def import_bin(self, bin_file):\n self.__run_import_...
[ "0.6098828", "0.6013871", "0.5977933", "0.58846664", "0.5881364", "0.5841038", "0.57228506", "0.56769913", "0.5621053", "0.56173635", "0.5589252", "0.55258936", "0.5518586", "0.5502786", "0.54741645", "0.5466958", "0.545687", "0.5419559", "0.5372282", "0.5365402", "0.53363955...
0.6817035
0
Pull Bento from a yatai server.
def pull(bento_tag: str, force: bool) -> None: # type: ignore (not accessed) yatai_client.pull_bento(bento_tag, force=force)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _pull(self) -> None:\n raise NotImplementedError()", "def pull(self):", "def _pull(self) -> None:\n raise NotImplementedError() # pragma: no cover", "def pull_from_postmaster(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.sess...
[ "0.6253494", "0.61232203", "0.59411526", "0.5814772", "0.5765839", "0.5646557", "0.55963373", "0.55957717", "0.5572075", "0.55645216", "0.5557959", "0.5454153", "0.5417521", "0.54132676", "0.5413063", "0.53649527", "0.5361535", "0.5349794", "0.53326946", "0.53325665", "0.5319...
0.72192776
0
Push Bento to a yatai server.
def push(bento_tag: str, force: bool, threads: int) -> None: # type: ignore (not accessed) bento_obj = bento_store.get(bento_tag) if not bento_obj: raise click.ClickException(f"Bento {bento_tag} not found in local store") yatai_client.push_bento(bento_obj, force=force, threads=threa...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _push_to_server(self) -> None:\n pass", "def push(self, *args, **kwargs):\n pass", "def push(self, obj):\n pass", "def remote_push(self, pNamespace):", "def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()", "def _push(self):\n push_cmds = ...
[ "0.71998096", "0.6480765", "0.5861547", "0.5851244", "0.5771112", "0.575511", "0.5752704", "0.57498515", "0.568986", "0.564071", "0.56019354", "0.5566268", "0.5532686", "0.5483675", "0.54428613", "0.542945", "0.5403216", "0.5375344", "0.5329458", "0.53241456", "0.53143793", ...
0.70428616
1
Build a new Bento from current directory.
def build(build_ctx: str, bentofile: str, version: str) -> None: # type: ignore (not accessed) if sys.path[0] != build_ctx: sys.path.insert(0, build_ctx) build_bentofile(bentofile, build_ctx=build_ctx, version=version)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build(root):", "def build():", "def makeProject(self, version, baseDirectory=None):\n if baseDirectory is None:\n baseDirectory = FilePath(self.mktemp())\n baseDirectory.createDirectory()\n segments = version.package.split('.')\n directory = baseDirectory\n ...
[ "0.59951997", "0.5941467", "0.58681095", "0.5856485", "0.57614243", "0.5617354", "0.5611247", "0.5596623", "0.558593", "0.5584332", "0.5543917", "0.5542992", "0.54703754", "0.544997", "0.544899", "0.54458034", "0.5438347", "0.54342854", "0.54192024", "0.5414548", "0.5387722",...
0.7293197
0
Set the value. (And calls the base class) This will also check for Options to set the bools. FAULTS_ACTIVE FAULTS_CURRENT >>> BIT_FAULT_PROBE = 0 >>> BIT_FAULT_OVERTEMP = 1 >>> BIT_FAULT_PANEL_OPEN = 2 >>> BIT_FAULT_HIGH_VOLTAGE = 3 >>> BIT_FAULT_RAM_CRC = 4 >>> BIT_FAULT_EEPROM_CRC = 5 >>> BIT_FAULT_GPIO_ERROR = 6 >>>...
def set_value(self, item, value): super(t_16_Bit_Options, self).set_value(item, value) if(item == t_16_Bit_Options.FAULT_ACTIVE): self.set_bools(value, self.faults_current, t_16_Bit_Options.BIT_FAULT_MAX ) if(item == t_16_Bit_Options.FAULT_LATCHED): self.set_bools(value...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set(self, value): # interface for BlueSky plans\n if str(value).lower() not in (\"fly\", \"taxi\", \"return\"):\n msg = \"value should be either Taxi, Fly, or Return.\"\n msg + \" received \" + str(value)\n raise ValueError(msg)\n\n if self.busy.value:\n ...
[ "0.6006613", "0.59550226", "0.5876505", "0.5848868", "0.58279836", "0.57625586", "0.57241833", "0.57025355", "0.5644544", "0.56344664", "0.5631761", "0.55913144", "0.55671185", "0.551112", "0.5497896", "0.5429743", "0.54183626", "0.53717023", "0.5368946", "0.53565514", "0.534...
0.7903865
0
Set the value. (And calls the base class) This will also check for Options to set the bools. BOOLEAN_CONFIG_1 >>> BIT_PROBE_TERMINATION = 0 >>> BIT_TMODE = 1 >>> BIT_EMODE = 2 >>> BIT_MUTE = 3 >>> BIT_PATTERN_TRIGGER = 4 >>> BIT_DEBUG_REALTIME = 5 >>> BIT_DEBUGPRINT = 6 >>> BIT_DEBUG_HW_OVERRIDE = 7
def set_value(self, item, value): super(t_8_Bit_Options, self).set_value(item, value) if(item == t_8_Bit_Options.BOOLEAN_CONFIG_1): self.set_bools(value, self.bools, t_8_Bit_Options.BIT_MAX)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setbool(self, strcommand, value):\n command = ct.c_wchar_p(strcommand)\n value = ct.c_bool(value)\n self.lib.AT_SetBool(self.AT_H, command, value)", "def setBoolean(self, key, value):\n self.__config.setValue(key, QtCore.QVariant(value))\n self.__saved = False", "def Set(...
[ "0.69975346", "0.66105807", "0.6431443", "0.641337", "0.63824916", "0.63277537", "0.6307116", "0.6288704", "0.6275827", "0.6273386", "0.62682843", "0.6259129", "0.6229773", "0.6222815", "0.6199952", "0.615967", "0.6145829", "0.61051655", "0.6090899", "0.6050174", "0.6049057",...
0.7702449
0
Builds a command packet
def build_command_packet(self, command): packet = bytearray() # All option fields are 0 packet.append(0) packet.append(0) packet.append(0) packet.append(command) return packet
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_command(self, command_name, hardware_address = '', comp_var_dict = None):\n # Start command adn set name\n command = \"<Command><Name>{command_name}</Name>\".format(command_name=command_name)\n\n if hardware_address:\n command += \"<DeviceDetails><HardwareAddress>{hardwar...
[ "0.7055916", "0.6958671", "0.6834779", "0.6826811", "0.66905415", "0.6657784", "0.66384405", "0.65551907", "0.64848095", "0.6423199", "0.641272", "0.6404379", "0.63837487", "0.6381479", "0.6360029", "0.6312353", "0.62964827", "0.62884474", "0.6237116", "0.61567235", "0.614946...
0.8249946
0
This will get the current faults on the system.
def get_faults_current(self): request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16) return self.__get_faults_list(self.config_16.faults_current)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def faults(self):\n debug(\"Getting faults...\")\n code = int(\"01001000\",2)\n command = pack('B',code)\n reply = self.query(command,count=2)\n faults = \" \"\n # The reply is 0xC8 followed by a faults status byte.\n if len(reply) != 2:\n if len(reply)>0...
[ "0.7297142", "0.6973599", "0.62839913", "0.6233432", "0.6117404", "0.5914989", "0.585911", "0.5828832", "0.56888837", "0.5640751", "0.5596669", "0.55570495", "0.5398989", "0.5372518", "0.5344608", "0.5326614", "0.52823734", "0.525599", "0.5215235", "0.5206905", "0.51792276", ...
0.82058364
0
This will get the latched faults on the system.
def get_faults_latched(self): request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_LATCHED], BP_TOOL.REQUEST_16) return self.__get_faults_list(self.config_16.faults_latched)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_faults_current(self):\n request = self.get_option_from_shouter([t_16_Bit_Options.FAULT_ACTIVE], BP_TOOL.REQUEST_16)\n return self.__get_faults_list(self.config_16.faults_current)", "def faults(self):\n debug(\"Getting faults...\")\n code = int(\"01001000\",2)\n command ...
[ "0.6717997", "0.61814696", "0.5997935", "0.5937447", "0.5786711", "0.5544165", "0.5489587", "0.54597056", "0.53924805", "0.53853124", "0.53460693", "0.53460693", "0.5332667", "0.5330461", "0.5298619", "0.5260537", "0.5236995", "0.5182294", "0.5093936", "0.50649893", "0.504098...
0.807573
0
Gets the pattern wave pat_wave 101011110011 .... >>> Request >>> 0> >>> Pattern Wave [More to follow] >>> >> Request Next block >>> 0> >>> Pattern Wave [More to follow] >>> >> >>> ..... >>> >>> Request Next block >>> 0> >>> Pattern Wave [No More to follow] >>> <)
def __request_pat_wave(self, r_number): packet = bytearray() packet.append(0) # 16 bit options packet.append(0) # 8 bit options packet.append(1) # Request the 1 option # --------------------------------------------------------------------- # Request the variable length ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wave(self):\n return self._wave", "def waveband(self):\n return self.get(\"waveband\")", "def waveband(self):\n return self.get(\"waveband\", default=\"\", decode=True).split(\"#\")", "def _wave(self):\n try:\n return wave.open(StringIO(self.contents))\n excep...
[ "0.5981788", "0.5724615", "0.56906223", "0.5673661", "0.56648856", "0.544376", "0.54234815", "0.5388818", "0.5373861", "0.53209776", "0.53193724", "0.5285998", "0.52687967", "0.5267398", "0.51937246", "0.5191461", "0.5150314", "0.5148032", "0.51321924", "0.5121972", "0.512197...
0.7454566
0
The resource ID of the Network Fabric l3IsolationDomain.
def l3_isolation_domain_id(self) -> pulumi.Input[str]: return pulumi.get(self, "l3_isolation_domain_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l3_isolation_domain_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -...
[ "0.82438326", "0.639748", "0.639748", "0.639748", "0.6284943", "0.6265004", "0.62123185", "0.617114", "0.617114", "0.617114", "0.617114", "0.617114", "0.6158674", "0.60937095", "0.60937095", "0.6091811", "0.6091811", "0.6091811", "0.60563904", "0.60457486", "0.5990473", "0....
0.8125011
1
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine.
def interface_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "interface_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interface_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"interface_name\")", "def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3_network_name\")", "def interface_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \...
[ "0.67701626", "0.6730182", "0.6579234", "0.65378875", "0.6346248", "0.6293956", "0.6253151", "0.61782867", "0.61560553", "0.61084664", "0.61054385", "0.605106", "0.59554195", "0.59140545", "0.5869591", "0.58683455", "0.58659315", "0.5836901", "0.5836809", "0.5832016", "0.5827...
0.684074
0
The name of the L3 network.
def l3_network_name(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "l3_network_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network(self) -> str:\n return pulumi.get(self, \"network\")", "def computer_network_name(self) -> str:\n return self._computer_network_name", "def network_name(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"network_name\"), kwargs)", "def name(self) -> str:...
[ "0.6955331", "0.6824133", "0.6799091", "0.6677395", "0.66031", "0.6491807", "0.6462104", "0.64592564", "0.64398986", "0.63908404", "0.63872564", "0.638713", "0.63722634", "0.6359511", "0.6331267", "0.63089246", "0.62042356", "0.6186131", "0.61773044", "0.61605525", "0.6082077...
0.88098806
0
Get an existing L3Network resource's state with the given name, id, and optional extra properties used to qualify the lookup.
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'L3Network': opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = L3NetworkArgs.__new__(L3NetworkArgs) __props__.__dict__["associated_res...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)", "def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200", "def get_state_...
[ "0.5696972", "0.5616301", "0.559337", "0.55891603", "0.55436486", "0.5497109", "0.54890585", "0.5482085", "0.5455767", "0.5444336", "0.54149556", "0.53990793", "0.53760177", "0.53489095", "0.534057", "0.5307886", "0.5290895", "0.529065", "0.5247792", "0.5244457", "0.5226916",...
0.6454363
0
The list of resource IDs for the other Microsoft.NetworkCloud resources that have attached this network.
def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]: return pulumi.get(self, "associated_resource_ids")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def network_ids(self):\n return self._network_ids", "def otherResources(self):\n return self._get_list_field(\"otherResources\")", "def resource_names(self):\n return self._resource_names", "def get_ids(self):\n all_networks = []\n network_dict = {}\n for network, st...
[ "0.69101155", "0.6573186", "0.6401023", "0.6343839", "0.6258195", "0.5978847", "0.5978847", "0.5978847", "0.5819962", "0.5801385", "0.5795712", "0.5795712", "0.57860464", "0.57594633", "0.5741592", "0.5688209", "0.5666085", "0.5643691", "0.56345814", "0.5630704", "0.5611117",...
0.70806557
1
The extended location of the cluster associated with the resource.
def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']: return pulumi.get(self, "extended_location")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extended_location(self) -> pulumi.Output[Optional['outputs.ExtendedLocationResponse']]:\n return pulumi.get(self, \"extended_location\")", "def extended_location(self) -> pulumi.Input['ExtendedLocationArgs']:\n return pulumi.get(self, \"extended_location\")", "def extended_location(self) -> p...
[ "0.6552111", "0.6507004", "0.6507004", "0.6427688", "0.64267975", "0.64260936", "0.64260936", "0.63151413", "0.6296761", "0.6123441", "0.6123441", "0.595561", "0.58933264", "0.5883953", "0.5883953", "0.5883953", "0.5883953", "0.5883953", "0.5883953", "0.5883953", "0.5883953",...
0.67308724
1
The default interface name for this L3 network in the virtual machine. This name can be overridden by the name supplied in the network attachment configuration of that virtual machine.
def interface_name(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "interface_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interface_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"interface_name\")", "def l3_network_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"l3_network_name\")", "def interface_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"...
[ "0.6839024", "0.67300063", "0.6576439", "0.6536506", "0.63453054", "0.62927467", "0.6252902", "0.6176393", "0.6155957", "0.6108241", "0.61039215", "0.60497546", "0.5955052", "0.5913382", "0.5868023", "0.5867791", "0.586413", "0.58365524", "0.583649", "0.58314383", "0.58273387...
0.67675763
1
The type of the IP address allocation, defaulted to "DualStack".
def ip_allocation_type(self) -> pulumi.Output[Optional[str]]: return pulumi.get(self, "ip_allocation_type")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_address_type(self):\n return self.__address_type", "def get_ip_type1(self) -> str:\n hex_ip = hexlify(self.message)[152:160]\n ip_addr = int(hex_ip[6:8] + hex_ip[4:6] + hex_ip[2:4] + hex_ip[0:2], 16)\n return inet_ntoa(pack(\"<L\", ip_addr))", "def address_type(self) -> str:\n ...
[ "0.6558105", "0.6343783", "0.62579095", "0.6204035", "0.61733663", "0.60764885", "0.60489833", "0.6027968", "0.590761", "0.5870783", "0.57298845", "0.5710156", "0.569875", "0.5694414", "0.5689083", "0.5678903", "0.5581792", "0.5554217", "0.5541468", "0.55387825", "0.5525745",...
0.66921926
0
The resource ID of the Network Fabric l3IsolationDomain.
def l3_isolation_domain_id(self) -> pulumi.Output[str]: return pulumi.get(self, "l3_isolation_domain_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l3_isolation_domain_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"l3_isolation_domain_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) ->...
[ "0.8125011", "0.639748", "0.639748", "0.639748", "0.6284943", "0.6265004", "0.62123185", "0.617114", "0.617114", "0.617114", "0.617114", "0.617114", "0.6158674", "0.60937095", "0.60937095", "0.6091811", "0.6091811", "0.6091811", "0.60563904", "0.60457486", "0.5990473", "0.5...
0.82438326
0
join the input string
def my_join(iters, string): out = "" for i in range(iters): out += "," + string return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def join(self, iterable) -> String:\n pass", "def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += \", \" + string\n return out", "def join_strings(words):\n joined_string = ''\n for word in words:\n joined_string += word\n\n return joined_string", "def ...
[ "0.73991024", "0.732438", "0.7231096", "0.72088933", "0.7069224", "0.6920054", "0.68781954", "0.67399055", "0.66996497", "0.6629545", "0.6618605", "0.65867525", "0.658431", "0.6571246", "0.653331", "0.65244675", "0.6523184", "0.6489013", "0.6483073", "0.6398085", "0.6364346",...
0.73344773
1
Adds the keys 'logits' and 'probs' to the end points dictionary of ResNet50v2.
def _get_updated_endpoints(original_end_points, name): end_points = dict(original_end_points) end_points['logits'] = tf.squeeze(end_points[name], [1, 2]) end_points['probs'] = tf.nn.softmax(end_points['logits']) return end_points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2", "def init_output_dict(se...
[ "0.52572966", "0.52147967", "0.5190277", "0.5172461", "0.5160112", "0.5107622", "0.5044875", "0.502481", "0.49618483", "0.49577186", "0.49346328", "0.4871828", "0.47935718", "0.47819144", "0.47578776", "0.4739234", "0.47227845", "0.46987733", "0.46696952", "0.46637616", "0.46...
0.56161755
0
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n ...
[ "0.76077384", "0.74392045", "0.74392045", "0.7414661", "0.7232018", "0.71317524", "0.7071087", "0.70339084", "0.700918", "0.6977498", "0.6935631", "0.691333", "0.68927085", "0.68749905", "0.68339276", "0.6816416", "0.6816042", "0.67910594", "0.6769799", "0.6769098", "0.672785...
0.77680707
1
Load weights from a checkpoint file into the tensorflow graph.
def load_weights(self, checkpoint_path, sess=None): if sess is None: sess = tf.get_default_session() assert sess is not None saver = tf.train.Saver(self.variables_to_restore) saver.restore(sess, checkpoint_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model_weights(sess, checkpoint_dir):\n\n init_fn = slim.assign_from_checkpoint_fn(\n checkpoint_dir, slim.get_model_variables(), ignore_missing_vars=True)\n init_fn(sess)", "def load_weights_from_checkpoint(self, path: str, key: str):\n ckpt = torch.load(path, map_location='cpu')\n ...
[ "0.76077384", "0.74392045", "0.74392045", "0.7414661", "0.7232018", "0.71317524", "0.7071087", "0.70339084", "0.700918", "0.6977498", "0.6935631", "0.691333", "0.68927085", "0.68749905", "0.68339276", "0.6816416", "0.6816042", "0.67910594", "0.6769799", "0.6769098", "0.672785...
0.77680707
0
Warn about unused static variables.
def _find_unused_static_warnings(filename, lines, ast_list): static_declarations = { node.name: node for node in ast_list if (isinstance(node, ast.VariableDeclaration) and 'static' in node.type.modifiers) } def find_variables_use(body): for child in body: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unusedVars(self):\n fullcode = self.code_cfg\n variables = set([x[1:] for x in codeconfig_getvars(fullcode)])\n exceptions = set(['complexity', 'code_cfg'])\n clsvars = set(vars(self).keys())\n nones = set(filter(lambda x: self.__dict__[x] is None, clsvars))\n nones = ...
[ "0.6999553", "0.684138", "0.61850595", "0.6036887", "0.58252496", "0.57561684", "0.5704306", "0.56855494", "0.56610996", "0.56531423", "0.5616008", "0.55835503", "0.5551007", "0.55266124", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.5507778", "0.550777...
0.7391628
0
Return the parsed contents of the config file.
def get_config(): return json.loads(CONFIG_FILE.read_text())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_config(self):\n config = configparser.ConfigParser()\n config.read(self.configfile)\n return config", "def get(self):\n config = self.user_file.parseString(self.content)\n return config", "def get(self):\n if self.file:\n self._read()\n confi...
[ "0.77496666", "0.7607564", "0.75940424", "0.75590175", "0.7350562", "0.7329519", "0.7322378", "0.73064345", "0.7271919", "0.72459453", "0.71848226", "0.7173341", "0.71679926", "0.7166633", "0.71516", "0.7046862", "0.7030489", "0.70298284", "0.6951747", "0.693856", "0.6933751"...
0.76075804
1
Seed users. By default set to 5 users.
def seed_User(number=5, overwrite=False): if overwrite: print('Overwriting all users') User.objects.all().delete() count = 0 for i in range(number): username = fake.first_name() User.objects.create_user( email=username + "@blogmail.com", password="vns...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_users(count=10):\n for i in range(count):\n user = generate_random_user()\n db.session.add(user)\n db.session.commit()", "def setUp(self):\n self.users = [UserFactory.create() for i in range(5)]", "def create_users(N):\n for _ in range(N):\n name = fake.name()\...
[ "0.76051784", "0.73858875", "0.7105484", "0.7039456", "0.7026748", "0.70025635", "0.69474685", "0.69027597", "0.68793637", "0.6835339", "0.6813174", "0.6786388", "0.6739348", "0.6732933", "0.6717712", "0.66460043", "0.66320664", "0.6586459", "0.6585731", "0.65754217", "0.6552...
0.7705716
0
set_score increments the score by change; change can be negative.
def set_score(self, change): self._score = self._score + change
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_score(self,score):\n self._score = score", "def set_score(self, score):\n self._score = score", "def update_score():\n pass", "def set_score(self, a, b, score):\n ### FILL IN ###", "def set_score(self, score):\n # Update the score display\n self.score = sco...
[ "0.82878745", "0.8127291", "0.81113905", "0.8077499", "0.7966757", "0.796316", "0.796316", "0.796316", "0.79581094", "0.78421885", "0.7841102", "0.7821862", "0.78135276", "0.77125627", "0.7695137", "0.76055825", "0.75447255", "0.75010866", "0.7482682", "0.7436947", "0.7427076...
0.830666
0
move_ray is the primary function responsible for recursively moving a ray. Although it primarily looks after the action of the Ray.Ray class, it lives in the Game instance itself. THIS IS HOW WE DETERMINE THE EXIT POINT OF ALL RAYS: HORIZONTAL, VERTICAL, OR WITH DETOURS
def move_ray(self, ray): # look to the next spot in the ray's trajectory next_coordinates = ray.get_next_location() next_location = self._board.get_board_square(next_coordinates) # check for a collisition - return if it occurs if ray.check_for_collision(next_location): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entra...
[ "0.6431586", "0.6042099", "0.5938072", "0.5936508", "0.592946", "0.58522797", "0.56683385", "0.5636461", "0.5598364", "0.55645293", "0.55484056", "0.5496541", "0.54507935", "0.543731", "0.5407489", "0.5379115", "0.53595096", "0.53525144", "0.534523", "0.5339988", "0.5306413",...
0.71545416
0
shoot_ray shoots a ray from a given row and column if possible
def shoot_ray(self, origin_row, origin_column): # get the the square object at row x column origin = self._board.get_board_square((origin_row, origin_column)) # check that it is a valid "edge" to send a ray from origin_check = origin.is_edge() # if it's not then return false ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shoot_ray(self, row, col):\n # Uses validate method to check if row,col are legal for ray entrance location\n if not self.valid_ray(row, col):\n return False\n # creates ray object from row, col integers\n ray = Ray(row, col)\n # checks if atom is in front of entra...
[ "0.78674567", "0.6780156", "0.67677057", "0.61004895", "0.5816195", "0.5722863", "0.5721122", "0.56699514", "0.566448", "0.5559462", "0.5555234", "0.55531144", "0.55380315", "0.55212104", "0.55208117", "0.5480533", "0.54755425", "0.5391073", "0.53854626", "0.5371789", "0.5366...
0.748143
1
atoms_left returns the number of unguessed atoms still left
def atoms_left(self): return len(self._atoms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def atoms_left(self):\n return self._atoms", "def atoms_left(self):\r\n return self._board.get_atoms()", "def num_pieces_left(self):\n return self.num_white_pieces + self.num_black_pieces", "def count_mass_left(self):\n self.mass_left_count = int(np.sum(self.array))", "def numbe...
[ "0.7521378", "0.74469197", "0.6469958", "0.6355561", "0.6071514", "0.6057757", "0.6004638", "0.59833574", "0.59616107", "0.5921324", "0.5888561", "0.5864343", "0.5864343", "0.5818085", "0.57945406", "0.57681686", "0.5768063", "0.5694106", "0.56850934", "0.56428653", "0.559667...
0.84319353
0
Test GRU gnmt encoder. time_major=True
def runGRUEncoder(self, encoder, num_layers): inputs_ph = tf.placeholder( dtype=tf.float32, shape=(None, common_utils.TIME_STEPS, common_utils.DEPTH)) inputs_length_ph = tf.placeholder(dtype=tf.int32, shape=(None)) outputs, states = encoder.encode( mode=tf.estimator.ModeKeys.TRAIN, ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_golay_module1(self):\r\n sent = golay.encode([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0])\r\n rec = sent[:-1] + 'C' # possible error here\r\n decoded, errors = golay.decode(rec)\r\n self.assertEqual(decoded, sent)\r\n self.assertLess(errors, 1.5)\r\n rec = sent[:-1] + '...
[ "0.53550947", "0.5301906", "0.5206506", "0.49940667", "0.49887952", "0.49855092", "0.49552405", "0.48806253", "0.4856835", "0.48504332", "0.48328564", "0.48092747", "0.48080942", "0.477451", "0.475015", "0.47448063", "0.47433698", "0.47394577", "0.47322595", "0.47050416", "0....
0.5863373
0
Create a CourseGraph, fetching unitary weights and edge weights from database, creating CourseNodes for each course, and
def __init__(self, database, session, max_suggestions=5, max_courses=30, cache_mult=4): self._nodes = dict() # dict with courseid keys, CourseNode vals self._max_suggestions = max_suggestions self._max_courses = max_courses self._cache_mult = cache_mult db = database ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _CreateGraph(self):\n self.nodes = []\n self.edges = []\n for i, r in self.airports.set_index('airport_id').iterrows():\n self.nodes.append((i,r.to_dict()))\n for i, r in self.routes.set_index(['src_id','dst_id']).iterrows():\n self.edges.append((i[0],i[1],r.to...
[ "0.610946", "0.6068693", "0.5970327", "0.59139115", "0.5910554", "0.59065074", "0.5842171", "0.58402723", "0.5831642", "0.5821705", "0.58180076", "0.5813589", "0.5796841", "0.57905227", "0.5715409", "0.5708784", "0.56940794", "0.5655929", "0.5635845", "0.5629975", "0.56149757...
0.7083467
0
Gets the crosslistings of the top edges from a course
def getTopEdgesFrom(self, session, courseid): node = self.getNode(courseid) # get CourseNode if not node: return [] edges = node.getEdges() # get its Edge dict return sorted(edges.keys(), key=lambda k: edges[k], reverse=True)[:5]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assembly_courses(wall):\n courses = []\n vertices = set(wall.nodes())\n base = set(wall.nodes_where({'is_support': True}))\n\n if base:\n courses.append(list(base))\n\n seen = set()\n seen.update(base)\n\n vertices -= base\n\n while vertices:\n nbrs = s...
[ "0.59826374", "0.55603427", "0.5523582", "0.5495732", "0.5461872", "0.54479295", "0.53818995", "0.5318768", "0.52677137", "0.5246417", "0.520737", "0.5162896", "0.5158995", "0.51440036", "0.51215637", "0.5089466", "0.50795156", "0.50770104", "0.5055389", "0.5054957", "0.50231...
0.72403175
0
Initializes turtle instance for turtle game.
def initialize(turtle_shape, bg_color, turtle_color, turtle_speed): turtle_instance = turtle.Turtle() turtle_instance.shape(turtle_shape) turtle.bgcolor(bg_color) turtle_instance.color(turtle_color) turtle_instance.speed(turtle_speed) return turtle_instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_turtle():\n turtle.up()\n turtle.home()", "def __init__(self, commands=[], turtle_name=\"Terry\", speed=6, shape=\"classic\"):\n super().__init__()\n turtle.colormode(255)\n self._name = turtle_name\n super().speed(speed)\n super().shape(shape)\n self.comm...
[ "0.81658715", "0.7127116", "0.704404", "0.6922751", "0.6751538", "0.66928184", "0.6515024", "0.64465225", "0.6263604", "0.62292695", "0.6227585", "0.61615515", "0.58694047", "0.58318806", "0.580501", "0.575872", "0.57573175", "0.57200843", "0.5657598", "0.55897486", "0.555192...
0.8317453
0
Defines the turtle movement for the initialized turtle instance and executes that movement.
def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed): turtle_name = initialize(turtle_shape, bg_color, turtle_color, turtle_speed) for i in range(36): for i in range(4): turtle_name.forward(200) turtle_name.right(90) turtle...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_turtle(self):\n self.forward(self.move_speed)", "def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())", "def init_turtle():\n turtle.up()\n turtle.home()", "def move():\n Robot.move()", "def movement(self):", "def move(self):\r\n segments = len(self.all_turt...
[ "0.7389288", "0.6786604", "0.6750962", "0.6614437", "0.6599587", "0.64865434", "0.6407805", "0.64050645", "0.6318544", "0.6280543", "0.6203746", "0.61156297", "0.6057648", "0.6023432", "0.60230684", "0.600326", "0.59755194", "0.5961362", "0.59582186", "0.59399325", "0.5905899...
0.69048256
1
Saves summary statistics as a csv file in the current directory and returns the output filename.
def save_summary_statistics_csv( experiment_name, roi_summary_data, save_directory_path: str = "" ): # Create directories on the path if they don't already exist Path(save_directory_path).mkdir(parents=True, exist_ok=True) csv_filename = f"{experiment_name} - summary statistics (generated {iso_datetime...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkpoint_stats(self, stats):\n stats.to_csv(\n self.params.stat.dir + self.params.model.name + \"_\" + self.params.data.name + \".stat\",\n sep='\\t',index=False,header=True)", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base...
[ "0.67156446", "0.6640688", "0.65309376", "0.65059", "0.6463663", "0.6374038", "0.6361525", "0.632994", "0.63163084", "0.62892014", "0.6272734", "0.62425643", "0.62299234", "0.62213546", "0.6216482", "0.6173342", "0.6157177", "0.6122196", "0.61156815", "0.60866743", "0.6001306...
0.78815484
0
stack pandas DataFrames logically into a bigger DataFrame, resets the index of the resulting DataFrame to avoid duplicates in the index
def _stack_dataframes(dataframes: List[pd.DataFrame]) -> pd.DataFrame: return pd.concat(dataframes).reset_index(drop=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reset_index(self):\n\n # reminder on multi index in columns\n df1 = pd.DataFrame([[1, 3], [2, 4], [11, 33], [22, 44]]).T\n df1.index = pd.Series([1, 2], name=\"idx1\")\n df1.columns = pd.MultiIndex.from_product([['a', 'b'], ['aa', 'bb']], names=['idx_c', 'idx2'])\n\n # s...
[ "0.5980725", "0.5887287", "0.5851793", "0.5812862", "0.5757257", "0.57415146", "0.57319605", "0.5689936", "0.56405115", "0.56170523", "0.56154037", "0.5591578", "0.5586357", "0.5564776", "0.55478466", "0.554424", "0.5535979", "0.54988015", "0.5480869", "0.54719794", "0.545409...
0.66242844
0
stack pandas Series logically into a DataFrame
def _stack_serieses(serieses: List[pd.Series]) -> pd.DataFrame: return pd.concat(serieses, axis="columns").T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0...
[ "0.6693489", "0.64312357", "0.6289627", "0.61618704", "0.59844136", "0.59790695", "0.5950793", "0.5896647", "0.58869344", "0.58781815", "0.577991", "0.5733921", "0.56654763", "0.566396", "0.56531364", "0.56504464", "0.5612756", "0.56065685", "0.5595002", "0.5558159", "0.55369...
0.76580703
0
Load instruments from configpath
def _load(self) -> list[Instrument]: logger.info("Loading config...") self._config = yml.load(self.configpath) instruments, modespec = self._config["instruments"], self._config["modes"] logger.success(f"Found {len(instruments)} instruments, {len(modespec)} modes")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_load_configs_simulation(self):\n global locator, config_paths\n locator.load_config(config_paths[1])\n\n self.assertEqual(locator.config['routines'], ['simulate'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'SimDriver',\n 'kwargs': {...
[ "0.63088006", "0.62734544", "0.60392916", "0.59869903", "0.58425516", "0.5792564", "0.57058764", "0.56768847", "0.56603354", "0.5657189", "0.56550276", "0.56537586", "0.5621684", "0.5609595", "0.5587608", "0.5586826", "0.55499035", "0.5522914", "0.5517707", "0.5516953", "0.54...
0.7140976
0
Expose unique instrument classes found in config
def _expose(self) -> None: classes = {instrument.__class__ for instrument in self._config["instruments"]} for class_ in classes: pyro.expose(class_) logger.success(f"Exposed {len(classes)} instrument class(es): {classes}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _config_classes(self):\n pass", "def config(self) -> InstrumentConfig:\n ...", "def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)", "def _instrument_class(self, cls):\n newcls = type('Instr...
[ "0.6167917", "0.6165286", "0.6108895", "0.5712228", "0.56472594", "0.5590002", "0.54058063", "0.5276742", "0.5238212", "0.517786", "0.5136025", "0.5066814", "0.5047145", "0.5043953", "0.5038126", "0.50306547", "0.502609", "0.5012202", "0.5012202", "0.50089884", "0.49899292", ...
0.66089696
0
Register instrument instances and self with the daemon, and store their URIs
def _serve(self) -> None: for instrument in self._config["instruments"]: uri = self._daemon.register(instrument, objectId=str(instrument)) self._services[instrument.id] = str(uri) logger.success(f"Registered {instrument} at {uri}") self.uri = self._daemon.register(sel...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_instrument(instrument):\n config.INSTRUMENTS.append(instrument.id)\n for m in instrument.modules:\n register_module(m)", "def register():\n signals.initialized.connect(initialize)\n signals.article_generator_context.connect(add_libravatar)", "def on_register(cls):", "def _regi...
[ "0.61770165", "0.586058", "0.5787275", "0.57755834", "0.5748999", "0.5597987", "0.5547358", "0.5466158", "0.5462284", "0.5456908", "0.5437423", "0.54095894", "0.5405339", "0.5405339", "0.53873736", "0.53591174", "0.5346121", "0.5344868", "0.5339623", "0.53348446", "0.53146374...
0.78891295
0
Disconnect instruments and shutdown daemon
def shutdown(self) -> None: logger.info("Disconnecting instruments...") for instrument in self._config["instruments"]: instrument.disconnect() logger.info(f"Shutting down {self}...") self._daemon.shutdown()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown(self):\n if self.alive:\n libplasma.disconnect(self.conn)\n self.alive = False", "def shutdown(self):", "def stopAndDisconnectWalabot():\n wlbt.Stop()\n wlbt.Disconnect()\n print ('Termination successful')", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "d...
[ "0.67708665", "0.6769665", "0.6741867", "0.66888916", "0.66888916", "0.6571312", "0.65665215", "0.6500033", "0.64636064", "0.64133114", "0.6387295", "0.6361316", "0.63584465", "0.6352385", "0.6346766", "0.6346196", "0.6339963", "0.63314235", "0.63314235", "0.63314235", "0.631...
0.7881212
0
python ~/code/xdoctest/testing/test_linenos.py test_lineno_failcase_called_code python ~/code/xdoctest/testing/test_linenos.py
def test_lineno_failcase_called_code(): text = _run_case(utils.codeblock( r''' def func(a): """ Example: >>> func(0) >>> # this doesnt do anything >>> print('this passes') this passes >>> # call t...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_error(doctest):", "def test_expt(doctest):", "def test_exp(doctest):", "def testit(did_pass):\n\n # This function works correctly--it is verbatim from the text, chapter 6\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Te...
[ "0.704092", "0.70044655", "0.6639948", "0.6638953", "0.65686834", "0.65359443", "0.6508109", "0.6495619", "0.643054", "0.6405408", "0.63980496", "0.63929", "0.6392692", "0.63884795", "0.63859123", "0.63859123", "0.6350053", "0.6331821", "0.6329094", "0.6329094", "0.63262904",...
0.7327849
0
Add to the list of describing adjectives.
def add_adjectives(self, adjective): self.adjectives += [adjective]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_adjectives(self, *sAdjs):\n self.adjectives += list(sAdjs)", "def add(self):\n pass", "def add_many_descriptors(self, descriptors):", "def add(self, PlugLead):\n\n self.check_conflicts(PlugLead)\n self.plugleads.append(PlugLead)", "def add_disease(self, disease):\n ...
[ "0.7263275", "0.62598264", "0.5947216", "0.57094675", "0.5695133", "0.5606961", "0.56066847", "0.5586834", "0.5575344", "0.5548141", "0.5520845", "0.5520845", "0.55087703", "0.5452977", "0.5440875", "0.53886217", "0.5388016", "0.5352896", "0.5326072", "0.5325428", "0.5324635"...
0.7504388
0
Returns the list of describing adjectives. The list is shuffled first because generally this is used to get a random adjective.
def get_adjectives(self): random.shuffle(self.adjectives) return self.adjectives
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_adjectives(lyrics):\n doc = nlp(lyrics.lower())\n all_adjectives = [token.lemma_ for token in doc if token.pos_ == \"ADJ\"]\n return all_adjectives", "def succ(self):\n return [ self.simple_reflection(i) for i in self.descents(positive=True) ]", "def getAdjectives(self, word):\n\t\t...
[ "0.605831", "0.5822134", "0.57419574", "0.573242", "0.55850464", "0.5502515", "0.5492401", "0.5489824", "0.5483387", "0.5447286", "0.54260534", "0.5316058", "0.52798134", "0.5279397", "0.5267892", "0.5265623", "0.525828", "0.52477735", "0.5244685", "0.5200043", "0.51937425", ...
0.81158966
0
Returns the noun, including all its describing adjectives, as a string.
def full_string(self): return "{}: {}".format(str(self.word), " ".join([str(adj) for adj in self.adjectives]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def en_noun(t, label):\r\n s = label\r\n p = []\r\n is_uncountable = False\r\n \r\n # http://en.wiktionary.org/wiki/Template:en-noun\r\n head = t.arg(\"head\", label)\r\n p1 = t.arg(0)\r\n p2 = t.arg(1)\r\n \r\n if p1 == \"-\":\r\n # uncountable\r\n is_uncountable = True...
[ "0.6249946", "0.6021164", "0.600624", "0.5979195", "0.59327227", "0.58711636", "0.57597136", "0.5741161", "0.57387596", "0.57079136", "0.56950766", "0.5683717", "0.56733876", "0.5652428", "0.5563646", "0.55163616", "0.5513116", "0.5472722", "0.5459045", "0.5425532", "0.542323...
0.69946307
0
Parse a noun object from a data file containing nouns and their describing adjectives.
def parse(text): parts = text.split(' ') noun = Noun(parts[0], int(parts[1])) parts = parts[2:] while len(parts) > 0: noun.add_adjectives(Word(parts[0], int(parts[1]))) parts = parts[2:] return noun
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n ...
[ "0.5926569", "0.56615496", "0.5575726", "0.5492108", "0.54724497", "0.54638004", "0.5449956", "0.5432941", "0.5421955", "0.53711075", "0.53692234", "0.535761", "0.53158194", "0.5268704", "0.52373093", "0.5237189", "0.5234773", "0.5220252", "0.52158135", "0.5205877", "0.520256...
0.69166636
0
Returns the self.guessed_by and self.metaphors_used data as a readable string.
def get_str_metadata(self): return "\n".join(["Guessed by {}".format(self.guessed_by), "{} metaphors used".format(self.metaphors_used)])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_strings(self):\n return self._guessed_strings", "def __str__(self):\n d = {}\n d[\"tuner_number\"] = self.tuner_number\n d[\"output_format\"] = self.output_format\n d[\"output_source\"] = self.output_source\n return str(d)"...
[ "0.66056585", "0.6369337", "0.63330597", "0.63225114", "0.62778735", "0.62415534", "0.6170479", "0.6140569", "0.60968494", "0.60799277", "0.60438675", "0.60398", "0.60284495", "0.6009198", "0.59779775", "0.59726894", "0.597072", "0.59602815", "0.594238", "0.5917474", "0.58926...
0.80824745
0
Quick plot of a `tick.base.TimeFunction`
def plot_timefunction(time_function, labels=None, n_points=300, show=True, ax=None): if ax is None: fig, ax = plt.subplots(1, 1, figsize=(4, 4)) else: show = False if time_function.is_constant: if labels is None: labels = ['value = %.3g' % time_func...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(...
[ "0.6426607", "0.62802076", "0.62569886", "0.62506014", "0.6236783", "0.6092056", "0.6068509", "0.60091037", "0.60037977", "0.60024494", "0.6001193", "0.59628236", "0.59590256", "0.5951325", "0.59470856", "0.59453624", "0.5932996", "0.5908867", "0.5891108", "0.5865043", "0.586...
0.68075925
0
Generates mapping from water measurements column names to indices of the given header.
def get_water_index_map(archive, header): column_re = { 'surface': { 'flow': 'pretok', 'level': 'vodostaj' }, 'ground': { 'altitude': 'nivo', 'level': 'vodostaj' } } column_map = {key: -1 for key in column_re[archive].keys()} ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def colu...
[ "0.60388774", "0.5985021", "0.5983487", "0.5812272", "0.573689", "0.57180965", "0.5631869", "0.55975854", "0.55681896", "0.5565681", "0.554784", "0.5505632", "0.5441756", "0.5394642", "0.53900725", "0.53899986", "0.5387639", "0.537637", "0.5363915", "0.53568316", "0.53414994"...
0.6957255
0
Generates mapping from water measurements column names to values of the given CSV row.
def get_water_value_map(row, column_names_map): column_values_map = column_names_map.copy() row_length = len(row) empty = True for key, index in column_names_map.items(): # Check if non-empty value exist for given index. if -1 < index < row_length: value = row[index].strip()...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __get_column_mapping(self):\n\n s1 = list(Sample(\"FL\", \"M1.0@265_Primary_ar115_s2010-08-06T06_36_00_e2010-08-06T18_24_00.csv\").get_data().columns)[:25]\n column_mapping = {}\n for i in range(len(s1)):\n column_mapping[i] = s1[i]\n\n return column_mapping", "def crea...
[ "0.62421864", "0.57918304", "0.5663072", "0.5657489", "0.56149256", "0.5612935", "0.5567542", "0.5544542", "0.5500382", "0.5480754", "0.545259", "0.544672", "0.5385878", "0.53811884", "0.5380167", "0.5332743", "0.53060913", "0.5292364", "0.527309", "0.52203315", "0.5217865", ...
0.62722737
0
Populate water measurements table for selected `archive`, `directory` and `stations`.
def populate_water_measurements(cursor, archive, directory, station): csv_path = get_data_path( 'water', 'raw', archive, directory, f'{station}.csv' ) with open(csv_path, 'r', encoding='utf-8') as file: reader = csv.reader(file, delimiter=';') header ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')...
[ "0.69976664", "0.58139586", "0.5748849", "0.5552527", "0.5550863", "0.5530793", "0.55279726", "0.5518004", "0.5421447", "0.54047465", "0.5358192", "0.53122264", "0.52683693", "0.5263279", "0.52280056", "0.5153076", "0.51038533", "0.51007193", "0.507282", "0.50530833", "0.5048...
0.7368926
0
Populate watercourse and aquifer related data tables.
def populate_water_tables(connection): metadata = load_metadata('water') cursor = connection.cursor() # Check if tables are already populated. cursor.execute('SELECT count(*) FROM watercourses') watercourse_count = cursor.fetchone()[0] cursor.execute('SELECT count(*) FROM aquifers') aquifer...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "def populate_tables(connection: sqlite3.Connection) -> None:\n fake = Faker()\n Faker.seed(0)\n\n c = conn.cursor()\n\n number_of_courses = fake.pyint(min_valu...
[ "0.69008756", "0.66442764", "0.6618634", "0.65720314", "0.6367745", "0.63610566", "0.61599475", "0.61476094", "0.60915166", "0.60889375", "0.60289264", "0.59944206", "0.59331673", "0.5864972", "0.582192", "0.5799883", "0.5796334", "0.5792097", "0.5777224", "0.5760808", "0.574...
0.71547
0
Populate locations data table.
def populate_locations(connection): print('Populating locations...') cursor = connection.cursor() with open(get_data_path('locations', 'locations.json'), 'r', encoding='utf-8') as json_file: locations = json.load(json_file) for station_id, location in locations.items(): cursor.execute(f...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_position_data(self):\n # populate 'Location' field randomly\n self.output['Location'] = np.random.choice(self.locations, self.obs)\n\n # clean up geodata data frame and create 'Position' attribute\n nc = self.geodata[['Lat', 'Lng', 'Elevation']].round(2)\n nc['Elevat...
[ "0.6048028", "0.6013519", "0.59421986", "0.5901815", "0.58811194", "0.5729807", "0.5710549", "0.56738675", "0.56535035", "0.554302", "0.5542974", "0.55208075", "0.5469795", "0.54697716", "0.5454492", "0.54353935", "0.5430382", "0.542676", "0.5424422", "0.5422587", "0.54031056...
0.6565873
0
Check if given forecast dictionary contains a numeric value with provided key.
def is_forecast_number(key, forecast): return key in forecast and type(forecast[key]) in [float, int]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def moreThanOne(dict, key):\n\treturn key in dict and dict[key] > 0", "def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())", "def contains_200(dictnr):\n contains = False\n for i in dictnr:\n if dictnr[i] == 200:\n contains = True\n print(conta...
[ "0.6130827", "0.6060219", "0.5976841", "0.5809231", "0.5789039", "0.5752602", "0.56971747", "0.5649147", "0.56462014", "0.5644481", "0.56440645", "0.5638638", "0.56340736", "0.560666", "0.5580076", "0.55792755", "0.55783355", "0.55679023", "0.55529094", "0.5530956", "0.552016...
0.842359
0
Populate weather data tables.
def populate_weather(connection): metadata = load_metadata('weather') cursor = connection.cursor() water_defs = get_water_definitions() # Check if tables are already populated. cursor.execute('SELECT count(*) FROM weather') weather_count = cursor.fetchone()[0] if weather_count: pri...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_water_tables(connection):\n metadata = load_metadata('water')\n cursor = connection.cursor()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM watercourses')\n watercourse_count = cursor.fetchone()[0]\n cursor.execute('SELECT count(*) FROM aquifers')...
[ "0.6654514", "0.66226166", "0.6348557", "0.632052", "0.62991303", "0.62082505", "0.61876976", "0.6142009", "0.60627335", "0.6002963", "0.5983051", "0.59750384", "0.5964339", "0.5926051", "0.59136045", "0.59097177", "0.58774203", "0.5861508", "0.5857061", "0.5843847", "0.58242...
0.78376013
0
Helper function to construct multidimensional dictionaries e.g myhash = _makehash() myhash[1][2] = 4 myhash[2][5][8] = 17
def _makehash(): return defaultdict(_makehash)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hashMap(self,arr):\r\n n = len(arr)\r\n dict1 = {}\r\n i = 1\r\n for i in range(n): \r\n if(i > 0): \r\n key=arr[i]\r\n value=arr[0]\r\n dict1[key] = value\r\n return dict1", "def __init__(self):\n ...
[ "0.67796296", "0.5938041", "0.5885213", "0.5752976", "0.5734703", "0.5709291", "0.5704826", "0.5652041", "0.5644233", "0.56247675", "0.56185186", "0.5611323", "0.56072676", "0.5539448", "0.5488976", "0.5445115", "0.54388916", "0.54211164", "0.54074925", "0.53994673", "0.53773...
0.6773854
1
Convert headers of fetched tickers to the same format for convenient data storage in the Database. This method assumes that the parser's headers are configured properly (headers_dict); if one of the headers is missing in the config file, an exception is raised.
def convert_headers(self, tickers): result = _makehash() for pair_name, fetched_values_dict in list(tickers.items()): for header, value in list(fetched_values_dict.items()): result[pair_name][self.config['headers'][header]] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_headers(headers):\n\n headers_new = []\n # reformat column headers if needed\n for j, hd in enumerate(headers):\n # rename so always have T1/2 (s)\n if hd == \"T1/2 (num)\" or hd == \"T1/2 (seconds)\":\n hd = \"T1/2 (s)\"\n # for uncertainties, add previous colum...
[ "0.6760923", "0.6707738", "0.66021603", "0.6435935", "0.62480223", "0.62002486", "0.6085462", "0.60607", "0.605833", "0.6038865", "0.5981734", "0.5961042", "0.5954765", "0.59422773", "0.5939061", "0.5936525", "0.5867014", "0.5862857", "0.5862484", "0.586054", "0.5848212", "...
0.7734185
0
Calculate the similarity based on Cosine Similarity between two CTRDMs
def cosinesimilarity_cal(CTRDM1, CTRDM2): # get number of conditions n_cons = np.shape(CTRDM1)[0] # calculate the number of value above the diagonal in RDM n = n_cons * (n_cons - 1) # initialize two vectors to store the values above the diagnal of two RDMs v1 = np.zeros([n], dtype=np.float64)...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get the words that both have in common\n\n v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n ...
[ "0.7754487", "0.7676584", "0.7633492", "0.76006675", "0.7589376", "0.7576636", "0.75592244", "0.75373095", "0.7522885", "0.746666", "0.74491453", "0.7418762", "0.7398554", "0.73664916", "0.7334395", "0.7312988", "0.7309643", "0.7292841", "0.72754246", "0.7240708", "0.72226435...
0.80601525
0
Adds basic_vector to the basic vectors. If there are at least 3 arrays in _basic_vectors, then add a new array to _featureVector. This added array is composed of the basic vectors and their first 2 central derivatives. basic_vector must be the array returned by the mfcc.
def build_feature_vector(self, basic_vector): basic_vector = basic_vector - np.mean(basic_vector) self._basic_vectors.append(basic_vector) if len(self._basic_vectors) > 2: #if there are at least 3 basic vectors we can calculate the central derivative for the vector before this one ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_vector(self, doc_name, add_cnt, new_docvec):\n \n # determine the weight of the merging pieces\n old_weight = float(self.vector_cnt) / (self.vector_cnt + add_cnt)\n new_weight = float(add_cnt) / (self.vector_cnt + add_cnt)\n \n if len(self.name) == 0:\n self.nam...
[ "0.5917165", "0.5735873", "0.57002974", "0.5500693", "0.54043525", "0.5337707", "0.5288758", "0.5286068", "0.5255515", "0.5167239", "0.51532346", "0.5146452", "0.5016166", "0.5007243", "0.49999866", "0.4954787", "0.49387354", "0.49348387", "0.4906069", "0.48973984", "0.489477...
0.8391323
0
If there is at least one feature vector then returns it, else returns None
def get_last_feature_vectors(self): if len(self._feature_vectors): return self._feature_vectors[-1] return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_training(feature_path): \n features = np.loadtxt(feature_path)\n feature_size = features.shape[1] -1 \n features_in = features[:,0:feature_size]\n features_out = features[:,-1]\n #features_out = np.array(map(lambda x: x if x else 0, features_out_unnorm))\n return features_in, features_out...
[ "0.6076066", "0.6039801", "0.60045683", "0.5997051", "0.59677297", "0.5927279", "0.5924938", "0.592038", "0.58969414", "0.58928514", "0.5883369", "0.5868851", "0.5810611", "0.57908976", "0.57573485", "0.5748689", "0.57457215", "0.5690704", "0.5680142", "0.5675147", "0.5661629...
0.6274125
0
function used for marking deducted Late checkin request.
def action_payslip_done(self): for recd in self.late_check_in_ids: recd.state = 'deducted' return super(PayslipLateCheckIn, self).action_payslip_done()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loan(self):", "def checkin(self):\n folio = self.folio_id\n if folio.payment_deposits <= 0:\n raise UserError(_(\"\"\"No record of security deposit found on folio {}\n \"\"\".format(folio.name)))\n if folio.state != 'on_queue':\n raise UserError(_(\n ...
[ "0.56058526", "0.557811", "0.5529027", "0.5488796", "0.52010155", "0.5192642", "0.516891", "0.51390547", "0.5094012", "0.50648564", "0.5062245", "0.5061174", "0.49786085", "0.49332282", "0.4931753", "0.49173915", "0.4892089", "0.4874537", "0.48676074", "0.4837274", "0.4802804...
0.5966537
0
Decode next layer protocol.
def _decode_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute '_decode_next_layer'")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decode_next_layer(self, dict_, length=None):\n # make next layer protocol name\n proto = str(self._prot or 'Raw').lower()\n\n # make BytesIO from frame package data\n bytes_ = io.BytesIO(self._file.read(dict_['len']))\n info, protochain = self._import_next_layer(bytes_, leng...
[ "0.75996035", "0.6549479", "0.65207684", "0.6393973", "0.6335503", "0.6324604", "0.6315522", "0.62680393", "0.62302494", "0.6203099", "0.61799216", "0.6160706", "0.6160706", "0.6118852", "0.6044695", "0.6001132", "0.5993506", "0.5988984", "0.59845906", "0.59568155", "0.590623...
0.67941993
1
Import next layer extractor.
def _import_next_layer(self, *args, **kwargs): # pylint: disable=signature-differs raise UnsupportedCall(f"'{self.__class__.__name__}' object has no attribute '_import_next_layer'")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _import_next_layer(self, file_, length):\n if self._prot == 'Ethernet':\n from .link import Ethernet as Protocol\n elif self._prot == 'IPv4':\n from .internet import IPv4 as Protocol\n elif self._prot == 'IPv6':\n from .internet import IPv6 as Protocol\n ...
[ "0.6111195", "0.54837346", "0.54835594", "0.5450349", "0.5450116", "0.54399455", "0.5409996", "0.5303996", "0.52164537", "0.52136886", "0.516096", "0.51069194", "0.5101633", "0.5099417", "0.5078807", "0.5042868", "0.4991108", "0.49878004", "0.4973067", "0.49313796", "0.491468...
0.66877997
0
Build QA data dict from the nights
def build_data(self): from desiutil.io import combine_dicts # Loop on exposures odict = {} for qanight in self.qa_nights: for qaexp in qanight.qa_exps: # Get the exposure dict idict = write_qa_exposure('foo', qaexp, ret_dict=True) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self, inroot=None):\n self.data = {}\n # Load\n for night in self.mexp_dict.keys():\n qaNight = QA_Night(night, specprod_dir=self.specprod_dir, qaprod_dir=self.qaprod_dir)\n qaNight.load_data()\n #\n self.data[night] = qaNight.data[nigh...
[ "0.5826197", "0.55371296", "0.5417445", "0.5337191", "0.52416694", "0.51694846", "0.5151711", "0.5091198", "0.5085503", "0.5074048", "0.5069187", "0.5063415", "0.50576264", "0.5021445", "0.50159806", "0.5006675", "0.4991931", "0.49876162", "0.49839446", "0.49812287", "0.49771...
0.6585092
0
Test case for add_or_update_case
def test_add_or_update_case(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_case(self):\n pass", "def test_update_one(self):\n pass", "def test_update_record(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update_scenario(self):\n pass",...
[ "0.79237324", "0.7314126", "0.7272837", "0.72685695", "0.72685695", "0.72685695", "0.712033", "0.69949913", "0.6982817", "0.69368035", "0.69113", "0.68479264", "0.68319446", "0.6824832", "0.6721132", "0.6690393", "0.6686279", "0.66511667", "0.6622248", "0.6599744", "0.6579878...
0.90939504
0
Test case for delete_case
def test_delete_case(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_run(self):\n pass", "def test_delete(self):\n pass", "def test_delete1(self):\n pass", "def test_delete_record(self):\n pass", "def test_CovidCase_delete(self):\n # setting up by creating and saving the the database\n del_Covid = self.create_CovidCa...
[ "0.85045195", "0.83174175", "0.8187092", "0.81214935", "0.7901589", "0.78933346", "0.7868206", "0.7762365", "0.7726762", "0.77229226", "0.7537628", "0.74579", "0.73829216", "0.735922", "0.7357545", "0.7353939", "0.73434424", "0.7334559", "0.7333317", "0.7319243", "0.72404516"...
0.94501704
0