query
stringlengths
9
9.05k
document
stringlengths
10
222k
metadata
dict
negatives
listlengths
30
30
negative_scores
listlengths
30
30
document_score
stringlengths
4
10
document_rank
stringclasses
2 values
Gets `features` and `labels`.
def features_and_labels(self): if self.is_dataset: if self._iterator is None: raise RuntimeError('Internal error: Must call dataset_initializer_hook ' 'before calling features_and_labels(). Please file ' 'a bug!') return _Inputs._parse_inputs...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_feature_labels(self):\n return self.feature_labels", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_features_and_labels(self, dataframe):\n features = dataframe.drop(columns=self._label, ...
[ "0.79877245", "0.734456", "0.734456", "0.7326892", "0.7230395", "0.71708345", "0.71536416", "0.7029519", "0.70181954", "0.7005192", "0.7003384", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69250166", "0.69...
0.7678803
1
Returns the `Signals` from `_Inputs`.
def signals(self): if self._current_inputs is None: raise RuntimeError( 'Internal Error: The current inputs have not been properly ' 'generated. First call features_and_labels, then call signals.') signals = self._current_inputs['signals'] self._current_inputs = None return sig...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetSignals(cls):\n return []", "def get_signals(self):\n return QFDataFrame(data=self._signals, index=self._signals_dates)", "def read_all_signals(self):\n return [pio.sample(signal_idx)\n for signal_idx in self._signals_idx]", "def inputs(self):\n return self._...
[ "0.7630647", "0.674216", "0.66904575", "0.62987953", "0.62987953", "0.62987953", "0.62970096", "0.62616074", "0.62526375", "0.61903083", "0.6189783", "0.6183568", "0.61779135", "0.609748", "0.6075958", "0.60570484", "0.60570484", "0.60570484", "0.60139763", "0.6005559", "0.60...
0.7737252
0
Inserts stopping_signal into dataset via _map_fn. Here we change the data structure in the dataset, such that the return value is a dictionary now and `features`, `labels`, and `signals` are three distinguished keys in that dict. This provides a better structure, which eases the process to decompose the inputs (see `fe...
def insert_stopping_signal(stop, batch_size, add_padding=False): def _map_fn(*args): """The map fn to insert signals.""" if len(args) == 1: # Unpack the single Tensor/dict argument as features. This is required # for the input_fn returns no labels. args = args[0] features,...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _map_fn(*args):\n if len(args) == 1:\n # Unpack the single Tensor/dict argument as features. This is required\n # for the input_fn returns no labels.\n args = args[0]\n features, labels = _Inputs._parse_inputs(args)\n new_input_dict = {}\n\n if add_padding:\n pad...
[ "0.6490509", "0.5323185", "0.50812894", "0.50604886", "0.50385696", "0.49665913", "0.4957273", "0.48859507", "0.48683363", "0.47766396", "0.47728422", "0.4761893", "0.47422934", "0.47389808", "0.47379407", "0.47214484", "0.46899843", "0.46709707", "0.4644301", "0.46401665", "...
0.64638627
1
Pads out the batch dimension of features and labels.
def pad_features_and_labels(features, labels, batch_size): real_batch_size = array_ops.shape( _PaddingSignals._find_any_tensor(features))[0] batch_size_tensor = constant_op.constant(batch_size, dtypes.int32) check_greater = check_ops.assert_greater_equal( batch_size_tensor, real_batch_size...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pad_feature_sequences(sequences, pad=PAD, feature_dims=768):\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]], [0, 0]],\n mode='CONSTANT',\n c...
[ "0.65667975", "0.6381753", "0.6247113", "0.62424535", "0.6191029", "0.6129018", "0.5991808", "0.5989984", "0.596495", "0.59362715", "0.59317976", "0.5920891", "0.5863416", "0.5861643", "0.58507115", "0.5837591", "0.58372873", "0.5837181", "0.5827929", "0.5821655", "0.57975", ...
0.7201086
0
Slice the real Tensors according to padding mask in signals.
def slice_tensor_or_dict(tensor_or_dict, signals): padding_mask = signals['padding_mask'] batch_size = array_ops.shape(padding_mask)[0] def verify_batch_size(tensor): check_batch_size = math_ops.equal(batch_size, tensor.shape[0]) with ops.control_dependencies([check_batch_size]): retur...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slice(tensor):\n out = tensor[:, 444:524, :]\n return out", "def slice_signal(file, window_size, stride, sample_rate):\n wav, sr = librosa.load(file, sr=sample_rate)\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_...
[ "0.6374091", "0.56844383", "0.55673355", "0.5520634", "0.5495231", "0.538921", "0.53742844", "0.5365133", "0.5315971", "0.52649224", "0.52371657", "0.5231514", "0.52025896", "0.51906943", "0.51897913", "0.5185912", "0.51694137", "0.51603997", "0.5145077", "0.50990677", "0.509...
0.65732765
0
Plots the train accuracy, test accuracy, test precision and test recall, and saves them in the data folder. Also saves the scores in the data folder in 'scores.csv'
def plot_scores(self): results_path = DATA_PATH.joinpath("results") if not results_path.is_dir(): os.mkdir(results_path) scores = DataFrame(columns=["Import", "Months", "Train Accuracy", "Test Accuracy", "Precision", "Recall"]) for import...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_accuracy(model_fit, save_folder): \n train_acc = model_fit.history['binary_accuracy']\n val_acc = model_fit.history['val_binary_accuracy']\n epoch_axis = np.arange(1, len(train_acc) + 1)\n plt.title('Train vs Validation Accuracy')\n plt.plot(epoch_axis, train_acc, 'b', label='Train Acc')\n ...
[ "0.7224669", "0.71921223", "0.71734667", "0.71697706", "0.7110309", "0.7072749", "0.70446795", "0.7005062", "0.6988307", "0.6936626", "0.693535", "0.6925998", "0.69201785", "0.6885906", "0.6869337", "0.6860697", "0.6812449", "0.6787487", "0.67683053", "0.6765857", "0.6717797"...
0.7269786
0
Test remove missing feature in base marshmallow Schema Also test opting out by setting a pure marshmallow Schema for base
def test_marshmallow_base_schema_remove_missing(self, base_schema): # Typically, we'll use it in all our schemas, so let's define base # Document and EmbeddedDocument classes using this base schema class @self.instance.register class MyDocument(Document): MA_BASE_SCHEMA_CLS =...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_base_schema_ignores_unknown_fields():\n assert BaseSchema().load({\"unknown\": \"field\"}) == {}", "def test_schema_strict():\n path = os.path.join(extensiondir, 'release-schema.json')\n if os.path.isfile(path):\n with open(path) as f:\n data = json.load(f)\n\n original...
[ "0.6704261", "0.6257809", "0.62339044", "0.6202627", "0.6178689", "0.6136276", "0.61212945", "0.6067049", "0.6038067", "0.6000049", "0.5999241", "0.59988236", "0.59154654", "0.5914625", "0.5912379", "0.5896713", "0.58911383", "0.5890401", "0.58775854", "0.58499223", "0.582961...
0.76797485
0
Creates the initial alignment values for the `AttentionWrapper` class. This is important for AttentionMechanisms that use the previous alignment to calculate the alignment at the next time step (e.g. monotonic attention). The default behavior is to return a tensor of all zeros.
def initial_alignments(self, batch_size, dtype): max_time = self._word_alignments_size * self._alignments_size return _zero_state_tensors(max_time, batch_size, dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_default_paddings(self, inputs: JTensor) -> JTensor:\n in_shape = list(inputs.shape)\n assert len(in_shape) > 1\n in_shape[-1] = 1\n return jnp.zeros(in_shape, dtype=inputs.dtype)", "def _GetDefaultPaddings(self, inputs):\n return tf.zeros(\n tf.concat([tf.shape(inputs)[:-1], [1]], ...
[ "0.6028425", "0.59444785", "0.59444785", "0.5852683", "0.5841245", "0.5833352", "0.5652988", "0.5560669", "0.55605567", "0.55293924", "0.5496568", "0.5492979", "0.5479976", "0.54668605", "0.5441797", "0.5423778", "0.5401625", "0.5400675", "0.5371976", "0.5336401", "0.53245115...
0.74215364
0
Computes the attention and alignments for a given attention_mechanism.
def _compute_attention(attention_mechanism, batch_size, cell_output, previous_alignments, attention_layer): line_alignments, word_alignments, hier_alignments = attention_mechanism( cell_output, batch_size, previous_alignments=previous_alignments) # Reshape f...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_attention(attention_mechanism, initial_state, previous_alignments,\n attention_layer):\n alignments, final_state = attention_mechanism(\n initial_state, previous_alignments=previous_alignments)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n ...
[ "0.7185696", "0.6218817", "0.6158965", "0.6057099", "0.6030939", "0.60308385", "0.5955116", "0.5597623", "0.55930084", "0.5510469", "0.54661316", "0.5414076", "0.5378174", "0.53511596", "0.53483367", "0.525885", "0.5242506", "0.5172489", "0.5112989", "0.5112013", "0.51053816"...
0.727797
0
Construct the `AttentionWrapper`. NOTE If you are using the `BeamSearchDecoder` with a cell wrapped in
def __init__(self, cell, attention_mechanism, rl=False, attention_layer_size=None, alignment_history=False, cell_input_fn=None, output_attention=True, initial_cell_state=None, name=None...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,\n cell,\n attention_mechanism,\n attention_layer_size=None,\n alignment_history=False,\n cell_input_fn=None,\n output_attention=True,\n initial_cell_state=None,\n name=...
[ "0.72406137", "0.61116153", "0.6106595", "0.5868271", "0.58216625", "0.574263", "0.5419795", "0.5395449", "0.53780705", "0.5358682", "0.5326316", "0.5276871", "0.52745825", "0.5269008", "0.526074", "0.51900357", "0.5189963", "0.5186625", "0.5131551", "0.5088417", "0.50647956"...
0.7107153
1
Returns `seq` as tuple or the singular element. Which is returned is determined by how the AttentionMechanism(s) were passed to the constructor.
def _item_or_tuple(self, seq): t = tuple(seq) if self._is_multi: return t else: return t[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def to_sequence...
[ "0.6926655", "0.6926655", "0.58118045", "0.57991827", "0.57862526", "0.56921035", "0.5684707", "0.5637408", "0.5624004", "0.5592931", "0.551967", "0.54796165", "0.5422755", "0.53580755", "0.53534436", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53...
0.6938356
0
The `state_size` property of `AttentionWrapper`.
def state_size(self): return AttentionWrapperState( cell_state=self._cell.state_size, time=tensor_shape.TensorShape([]), attention=self._attention_layer_size, alignments=self._item_or_tuple( a.alignments_size for a in self._attention_mechanisms), alignment_history...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_size(self):\n raise NotImplementedError(\"Please implement this method\")", "def state_size(self):\n return AttentionWrapperState(\n cell_state=self._cell.state_size,\n time=tensor_shape.TensorShape([]),\n attention=self._attention_layer_size,\n ...
[ "0.8413178", "0.83095956", "0.76917756", "0.76917756", "0.76524323", "0.74709535", "0.73154217", "0.724257", "0.7135943", "0.70932657", "0.6971657", "0.69473106", "0.6929372", "0.69267815", "0.6837609", "0.67772937", "0.6741656", "0.6741119", "0.6716011", "0.66940117", "0.669...
0.83416754
1
This will cache the contents of a template fragment for a given amount of time, but with the extra bonus of limiting the dogpile/stampeding effect. You can easily replace the default template cache, just change the load statement from ``{% load cache %}`` to ``{% load cors_cache %}``.
def do_cache(parser, token, endparse='endcache', noda=CacheNode): nodelist = parser.parse((endparse,)) parser.delete_first_token() tokens = token.contents.split() if len(tokens) < 3: raise TemplateSyntaxError(u"'%r' tag requires at least 2 arguments." % tokens[0]) try: expire_time = ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_cached(self, cache_key, render_cls, max_age, cache_time=0, *args, **kwargs):\r\n\r\n # Default the cache to be the same as our max age if not\r\n # supplied.\r\n cache_time = cache_time or max_age\r\n\r\n # Postfix the cache key with the subreddit name\r\n # This scope...
[ "0.59044135", "0.571624", "0.56906414", "0.55681616", "0.5532053", "0.55177927", "0.5490605", "0.54303783", "0.541575", "0.54028773", "0.5383336", "0.5373419", "0.53715813", "0.53494227", "0.53489083", "0.53416514", "0.52813894", "0.5276173", "0.52550995", "0.5205229", "0.520...
0.5767859
1
Outputs a .txt file that contains the names of my songs
def generate_playlist(): with open(r'C:\Users\adria\OneDrive\Desktop\Muzica.txt', 'w+', encoding='utf-8') as playlist: playlist_songs = os.listdir('D:\\Muzica\\') for song in playlist_songs: playlist.write(song + '\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_artist_songs(genius, name, max_songs, out_dir):\n\tartist = genius.search_artist(name, max_songs=max_songs, sort=\"title\")\n\tif artist is not None and artist.songs is not None:\n\t\tsongs = list(filter(lambda x: x is not None, map(lambda x: x.lyrics, artist.songs)))\n\t\twith open(out_dir + name + '.txt...
[ "0.69582075", "0.68970346", "0.678131", "0.6512336", "0.648723", "0.6459645", "0.63525665", "0.6315502", "0.6313916", "0.628889", "0.6231965", "0.6108531", "0.61006314", "0.60837126", "0.6061707", "0.6027118", "0.60271174", "0.60226905", "0.5982154", "0.59551275", "0.5949267"...
0.77204114
0
get processes tables from overcloud node
def get_overcloud_node_processes_table(ssh_client: ssh.SSHClientType): output = sh.execute( "ps -axw -o \"%U\" -o \"DELIM%p\" -o \"DELIM%P\" -o \"DELIM%C\" -o " "\"DELIM%z\" -o \"DELIM%x\" -o \"DELIM%c\" -o \"DELIM%a\" |grep -v " "ps|sed 's/\"/''/g'", ssh_client=ssh_client).stdout ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tables(self) -> pd.DataFrame:\n return self.server._execute_extract(\n \"SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = '{}'\".format(\n self.name\n )\n )", "def get_tables(self):\n logging.debug(f\"\"\"get_tables\"\"\")\n c...
[ "0.5725424", "0.5656669", "0.559338", "0.5493022", "0.5485279", "0.54482675", "0.54397696", "0.54324937", "0.5425778", "0.54190934", "0.54075503", "0.53506416", "0.5315462", "0.5313187", "0.5285384", "0.52728575", "0.5259367", "0.5251954", "0.52515304", "0.52458316", "0.52287...
0.7119928
0
Checks that the oc_procs_df dataframe has OVN processes running on the expected overcloud node or nodes
def ovn_overcloud_processes_validations(self): if not neutron.has_ovn(): LOG.info("Networking OVN not configured") return True for process_dict in self.ovn_processes_to_check_per_node: if not self.oc_procs_df.query('PROCESS=="{}"'.format( process_...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_process_running_on_overcloud(process):\n oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n if not oc_procs_df.query('PROCESS==\"{}\"'.format(process)).empty:\n return True\n else:\n return Fals...
[ "0.747246", "0.6814092", "0.63278425", "0.6067881", "0.5803084", "0.57029784", "0.56655276", "0.55178803", "0.55107725", "0.54893816", "0.54448056", "0.5434573", "0.5392496", "0.5376301", "0.5372792", "0.5320815", "0.53180724", "0.53141224", "0.52915174", "0.5282118", "0.5265...
0.82281446
0
Arguments and in_shapes are pytrees.
def test_pytree(self): # Arguments are of the form [([x00, x01], [x10]), dict(a=ya, b=yb)] def add_all_jax(x_pair_of_list, y_dict): x_list_0, x_list_1 = x_pair_of_list return functools.reduce(operator.add, x_list_0 + x_list_1 + [y_dict["a"], y_dict["b"]]) self.Che...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n ...", "def data_shapes(self):", "def __call__(self, shape):\n raise NotImplementedError()", "def shape(self):", "def shape(self):", "def graph_implementation(arg_objs, shape, data=Non...
[ "0.6255682", "0.57242876", "0.53818107", "0.5377391", "0.5377391", "0.53602016", "0.5299667", "0.5298952", "0.5294987", "0.5235773", "0.5211122", "0.52006346", "0.5196273", "0.51867396", "0.5165427", "0.5116953", "0.510076", "0.5098263", "0.5088183", "0.5087767", "0.5056091",...
0.68708235
0
create a __new__ function that has cls as first arguemnt and fields as requiredkeyword arguments return a Record instance from asyncpg module
def _new_fn(cls_name, fields, *, globals=None): body_lines = [] body_lines.append("mapping = collections.OrderedDict({" + ", ".join(f"'{f.name}': {i}" for i, f in enumerate(fields)) + "})") body_lines.append('elems = (' + ", ".join(f.name for f in fields) + ')') body_lines.append("return Record(mappin...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self,\n record_class: Optional[Type[_Record]] = None) -> None:\n self.fields: List[tsdb.Field] = []\n self._field_index: tsdb.FieldIndex = {}\n self.data: tsdb.Records = []\n self.projection = None\n...
[ "0.6626878", "0.6611793", "0.65634555", "0.64572716", "0.6446112", "0.63184273", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", ...
0.7193039
0
Compress using ZLIB algorithm and encode the given value in base64.
def compress_encode(value): return base64.b64encode(zlib.compress(value.encode("ascii"))).decode("ascii")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(Value):\n return base64.b64encode(zlib.compress(pickle.dumps(Value),9))", "def gzdeflate():\n return zlib.compress(val)", "def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)", "def compress_zlib(self, string):\n #encode the input sting...
[ "0.7250324", "0.6997009", "0.69732076", "0.68763185", "0.6822638", "0.6522047", "0.6522047", "0.651008", "0.6489224", "0.6446565", "0.6415703", "0.6200399", "0.6144001", "0.5902229", "0.58805466", "0.5842919", "0.5842919", "0.58235097", "0.58078873", "0.57558864", "0.57503444...
0.8424179
0
Assign scores to trajectories
def assign_score(self, trajs): traj_scores = [] for traj in trajs: obs = torch.stack(traj['states']).squeeze(dim=1) if isinstance(traj['states'], list) else traj['states'] rewards = self._single_traj_score(obs) traj_scores.append(rewards.sum().item()) return ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_scores(self, scores):\n self.score = {k: v for k, v in scores.items()}", "def scores(self, value):\n self._scores = value", "def setTreasureScore(self, scores):\n if not self.hasLocalToon: return\n self.notify.debug(\"setTreasureScore: %s\" % scores)\n\n for i in rang...
[ "0.6659069", "0.6490567", "0.64631295", "0.6377789", "0.6331852", "0.6191901", "0.61868435", "0.61754644", "0.6111496", "0.6064884", "0.6040767", "0.60371596", "0.6019798", "0.60130745", "0.59846324", "0.5944102", "0.5935818", "0.59234035", "0.59189606", "0.5904517", "0.59045...
0.73590773
0
Stores a file into the database object file = The file that should be stored, should be a python filelike object encrypt = Set to true if the file should be encrypted, requires Crypto.Cipher to be installed, otherwise it does nothing compress = compress the file using zlib compression
def store(self, file, encrypt=False, compress=False): estring = file.read() self.size = file.size self.nicename = file.name if DBF_SETTINGS["DATABASE_FILES_CACHE"] and DBF_SETTINGS["DATABASE_FILES_CACHE_UNENCRYPTED"]: # Pre-fill the cache, the reasoning being that the file will probably be needed # immed...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store(self, filename):", "def crypt_file(self, file_path, encrypted=False):\n\n with open(file_path, 'rb+') as f:\n _data = f.read()\n\n if not encrypted:\n## print(f'File contents pre encryption: {_data}')\n data = self.cryptor.encrypt(_data)\n## ...
[ "0.6278036", "0.61073726", "0.588124", "0.57019895", "0.56977314", "0.56578296", "0.5633474", "0.5595208", "0.5583352", "0.55689156", "0.5562824", "0.546508", "0.543627", "0.54322916", "0.5420462", "0.5410077", "0.54031205", "0.5371673", "0.53660554", "0.5350774", "0.53082126...
0.74104655
0
Create a Numpy array with size equal to the sum of the sizes of all the NDArrays in the list of NDArrays l.
def _zeros_like_nd_list(l, dtype): total_size = np.sum([x.size for x in l]) return np.zeros(total_size, dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listoflistToarray(l):\n max_dim=max([len(c) for c in l])\n all_array=[np.pad(c,(0,max_dim-len(c)),\"constant\",constant_values=(0,0)) for c in l]\n return np.array(all_array)", "def n(l):\n return np.array(l,dtype=object)", "def _copy_to_numpy_array(l, a):\n total_size = np.sum([x.size for x...
[ "0.6617197", "0.6432536", "0.62234515", "0.5939849", "0.5882715", "0.5754678", "0.5660762", "0.5622342", "0.5540883", "0.5535033", "0.5450192", "0.54291904", "0.5417099", "0.5367531", "0.5318646", "0.52935463", "0.5293152", "0.52884835", "0.5279245", "0.5263805", "0.5259671",...
0.6924306
0
Copy the values from the given ndarray into the given (preallocated) numpy array. This can be used to avoid extra memory allocation that ndarray.asnumpy() performs.
def _copy_into(ndarray, nparray): assert nparray.size == ndarray.size assert nparray.flags.f_contiguous and nparray.flags.behaved # NOTE: The copy=False variant of NDArray.astype does not seem to work if ndarray.dtype != nparray.dtype: ndarray = ndarray.astype(nparray.dtype) mx.base.check_ca...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyto(self, nparray):\n np.copyto(self.__np_array, nparray)", "def copy(a):\n return array(a, copy=True)", "def copyto(self, nparray):\n if self.__parity[0]==0:\n np.copyto(self.__np_array1, nparray)\n else:\n np.copyto(self.__np_array2, nparray)", "def _cop...
[ "0.69130385", "0.63658416", "0.63439447", "0.6323527", "0.6044379", "0.6023708", "0.60227984", "0.601236", "0.5889105", "0.5846941", "0.5768551", "0.54975843", "0.5460591", "0.543775", "0.54138625", "0.5381993", "0.5331947", "0.5294816", "0.5264773", "0.5249572", "0.52417374"...
0.7299799
0
Copy values from each NDArray in the list l to the numpy array a (in order).
def _copy_to_numpy_array(l, a): total_size = np.sum([x.size for x in l]) assert total_size == a.size j = 0 for x in l: # a[j:j+x.size] = x.asnumpy().reshape((x.size,)) _copy_into(x, a[j:j + x.size]) j += x.size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _copy_from_numpy_array(a, l):\n total_size = np.sum([x.size for x in l])\n assert total_size == a.size\n j = 0\n for x in l:\n x[:] = a[j:j + x.size].reshape(x.shape)\n j += x.size", "def listoflistToarray(l):\n max_dim=max([len(c) for c in l])\n all_array=[np.pad(c,(0,max_dim...
[ "0.8242542", "0.63269496", "0.6141394", "0.5692313", "0.55791426", "0.55791426", "0.55682117", "0.54690933", "0.5439151", "0.5422417", "0.5382426", "0.5348696", "0.5288169", "0.5280279", "0.525362", "0.5152643", "0.514693", "0.5124691", "0.51072955", "0.5106846", "0.509638", ...
0.8206372
1
Copy values from subarrays of the numpy array a to the NDArrays in the list l. The sizes of the sub arrays correspond to the sizes of the NDArrays, so that this performs a copy in the reverse direction of copy_to_numpy_array(). Entries of l can have different dtype than a.
def _copy_from_numpy_array(a, l): total_size = np.sum([x.size for x in l]) assert total_size == a.size j = 0 for x in l: x[:] = a[j:j + x.size].reshape(x.shape) j += x.size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _copy_to_numpy_array(l, a):\n total_size = np.sum([x.size for x in l])\n assert total_size == a.size\n j = 0\n for x in l:\n # a[j:j+x.size] = x.asnumpy().reshape((x.size,))\n _copy_into(x, a[j:j + x.size])\n j += x.size", "def to_numpy(a: List[tvm.nd.NDArray]) -> List[np.nda...
[ "0.824406", "0.5672978", "0.56350243", "0.5510067", "0.5456553", "0.52112097", "0.51868254", "0.5168819", "0.51417094", "0.5063647", "0.5061758", "0.49632236", "0.49518946", "0.494873", "0.4942277", "0.49309397", "0.49309397", "0.49226636", "0.48952842", "0.4880583", "0.48740...
0.8338329
0
Make a deep copy of the input arg_dict (dict param_name to mx.nd)
def _deep_copy_arg_dict(input_arg_dict): output_arg_dict = {} for name, param in input_arg_dict.items(): output_arg_dict[name] = param.copy() return output_arg_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ndarray_copy(func):\n\n def wrapper(*args, **kw):\n args = list(args)\n\n # copy args\n for idx, arg in enumerate(args):\n if type(arg) == np.ndarray:\n args[idx] = arg.copy()\n\n # Copy key args\n for key, value in kw.items():\n if typ...
[ "0.6190199", "0.5896156", "0.5724149", "0.56482667", "0.56246066", "0.56068045", "0.55655664", "0.5564277", "0.5551959", "0.55463356", "0.55164564", "0.54720664", "0.5424602", "0.5411418", "0.5409501", "0.54047245", "0.537435", "0.5354888", "0.5348229", "0.5329177", "0.529733...
0.76881003
0
In order to initialize LBFGS from multiple starting points, this function makes it possible to randomize, inplace, an arg_dict (as used by executors to communicate parameters to LBFGS). The randomization is centered around mean_arg_dict, with standard deviation std.
def _inplace_arg_dict_randomization(arg_dict, mean_arg_dict, bounds, std=STARTING_POINT_RANDOMIZATION_STD): # We check that arg_dict and mean_arg_dict are compatible assert arg_dict.keys() == mean_arg_dict.keys() for name, param in arg_dict.items(): assert param.shape == mean_arg_dict[name].shape ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_random_params(scale, layer_sizes, seed=0):\n rs = npr.RandomState(seed)\n return [(rs.randn(insize, outsize) * scale, # weight matrix\n rs.randn(outsize) * scale) # bias vector\n for insize, outsize in zip(layer_sizes[:-1], layer_sizes[1:])]", "def apply_lbfgs_wi...
[ "0.61080253", "0.60588044", "0.60554767", "0.60480046", "0.59878653", "0.5974961", "0.5911697", "0.5907807", "0.5863012", "0.58475083", "0.58475083", "0.58475083", "0.58475083", "0.58475083", "0.58393437", "0.58366144", "0.5830874", "0.58203185", "0.5815766", "0.5807405", "0....
0.70326656
0
When dealing with nonconvex problems (e.g., optimization the marginal likelihood), we typically need to start from various starting points. This function applies this logic around apply_lbfgs, randomizing the starting points around the initial values provided in arg_dict (see below "copy_of_initial_arg_dict"). The firs...
def apply_lbfgs_with_multiple_starts( exec_func, arg_dict, grad_dict, bounds, n_starts=N_STARTS, **kwargs): assert n_starts >= 1 copy_of_initial_arg_dict = _deep_copy_arg_dict(arg_dict) best_objective_over_restarts = None best_arg_dict_over_restarts = copy_of_initial_arg_dict # Loop over ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_valid_initial_params(rng, model, *model_args, init_strategy=init_to_uniform,\n param_as_improper=False, prototype_params=None, **model_kwargs):\n init_strategy = jax.partial(init_strategy, skip_param=not param_as_improper)\n\n def cond_fn(state):\n i, _, _, is_val...
[ "0.57707477", "0.56678426", "0.5657085", "0.5636293", "0.5565462", "0.5560217", "0.5544692", "0.5538522", "0.5508883", "0.54963434", "0.5412181", "0.5406637", "0.537584", "0.5374977", "0.5325472", "0.53251934", "0.53189695", "0.53159976", "0.530835", "0.5276131", "0.5253891",...
0.70813787
0
Initialize the table, create default roles, set profile image
def __init__(self, **kwargs): super(User, self).__init__(**kwargs) Role.insert_roles() if self.role is None: if self.email == current_app.config["FLASKY_ADMIN"]: self.role = Role.query.filter_by(permissions=0xff).first() if self.role is None: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_role_table():\n roles = [\n {\n \"name\": \"user\",\n \"description\": \"registered user permission\",\n \"raw_permissions\": Role.Permissions.REGISTERED.value\n },\n {\n \"name\": \"editor\",\n ...
[ "0.7216759", "0.6458455", "0.6416841", "0.6405134", "0.6322869", "0.6314916", "0.61332387", "0.61158174", "0.60757625", "0.604426", "0.6002373", "0.5999153", "0.59768623", "0.5972818", "0.5942051", "0.5905494", "0.5901315", "0.5885008", "0.58837014", "0.58837014", "0.5876468"...
0.6746913
1
Returns the temp_files class where all decompressed data is stored. If you want a file, request it from this class and it will look for it, decompress it if it exists, and return it. It will also deal with cleanup when nearing the memory cap.
def temp_files(self) -> misc_.TempFilesContainer: return self._temp_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_temproot(cls):\n import tempfile\n\n return local(tempfile.gettempdir())", "def _tempfile(self):\n fd, path = tempfile.mkstemp(dir = os.path.join(self.root, \"temporary\"))\n try:\n return os.fdopen(fd, \"wb\"), path\n except:\n os.unlink(path)\n ...
[ "0.59913236", "0.58054686", "0.5762157", "0.5731633", "0.5715007", "0.57014346", "0.5626013", "0.561726", "0.5587675", "0.5576853", "0.5558325", "0.5551208", "0.5544451", "0.550435", "0.547272", "0.5469176", "0.5460168", "0.5456278", "0.54530966", "0.54210013", "0.54140574", ...
0.68818074
0
Returns the game identifier for the game currently loaded. Returns None if load_game has not been called.
def game_identifier(self) -> Union[str, None]: if self.game_functions is not None: if hasattr(self.game_functions, 'game_identifier'): return self.game_functions.game_identifier else: raise AttributeError else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_game_id(self) -> str:\n return self.game_name_entry.get()", "def get_last_game_id():\n\t\ttry:\n\t\t\tf = open(game_id_file, 'r')\n\t\t\tid = int(f.read())\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\tprint('IOError raised, returning zero (0)')\n\t\t\treturn 0\n\t\treturn id", "def get_id():\n ...
[ "0.7608188", "0.68220633", "0.66524506", "0.6388743", "0.62113184", "0.6166356", "0.61501443", "0.6094548", "0.60707545", "0.5975456", "0.58981115", "0.57935846", "0.5776967", "0.57671946", "0.5766691", "0.5746554", "0.56954575", "0.56759334", "0.5648986", "0.5631534", "0.560...
0.77910143
0
Returns the dictionary mapping file name string to Forge class.
def forge_files(self) -> Dict[str, BaseForge]: return self._forge_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filenames(self) -> dict[str, str]:\r\n ...", "def get_cls_dict(config_path):\n return {i: n for i, n in enumerate(get_names(config_path))}", "def pre_lookup(self, file):\n return {}", "def file_parser(file):\n\n # Copy of the file instance to save it\n new_file = file\n dict_fil...
[ "0.56962377", "0.5674024", "0.5554048", "0.54324174", "0.53717715", "0.53676784", "0.53674424", "0.53652143", "0.5341359", "0.5310244", "0.5296541", "0.5288931", "0.5247922", "0.5225876", "0.52206045", "0.5177535", "0.51539", "0.5116209", "0.51110125", "0.5104048", "0.5101229...
0.6105684
0
NewsList a model defined in Swagger
def __init__(self, news: List[News]=None): self.swagger_types = { 'news': List[News] } self.attribute_map = { 'news': 'news' } self._news = news
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_news_list():\r\n\tnews_list = Page.objects.filter(tags='news').order_by('-created')\r\n\treturn {'news_list': news_list}", "def news(self) -> List[News]:\n return self._news", "def newsList(request):\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.obj...
[ "0.67651105", "0.6221242", "0.6082616", "0.60183597", "0.60091746", "0.59793127", "0.59738564", "0.5931639", "0.58304185", "0.5824736", "0.58020616", "0.57277834", "0.5725878", "0.5699052", "0.5653614", "0.5636298", "0.55957717", "0.5594664", "0.55653083", "0.55634767", "0.55...
0.79788524
0
Gets the news of this NewsList. List of all sites.
def news(self) -> List[News]: return self._news
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n return GlobalNews.retrieve()", "def get_local_news_items(self):\n catalog = api.portal.get_tool(name='portal_catalog')\n default_lang = api.portal.get_tool(\n \"portal_languages\").getDefaultLanguage()\n results = catalog.searchResults(\n por...
[ "0.707582", "0.64522296", "0.6345483", "0.6317189", "0.6273085", "0.62567925", "0.621038", "0.61679095", "0.61474234", "0.6131414", "0.59921557", "0.59887254", "0.5983498", "0.59675586", "0.59466046", "0.5912889", "0.5902706", "0.5902237", "0.5902237", "0.58978075", "0.586384...
0.7832009
0
Sets the news of this NewsList. List of all sites.
def news(self, news: List[News]): self._news = news
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def news(self) -> List[News]:\n return self._news", "def insert_into_news_pool(self, news):\n self.news_pool.append(news)", "def __init__(self, news: List[News]=None):\n self.swagger_types = {\n 'news': List[News]\n }\n\n self.attribute_map = {\n 'news':...
[ "0.655942", "0.5809342", "0.57910275", "0.5721592", "0.5674233", "0.5635913", "0.56231123", "0.55927944", "0.54833764", "0.5420062", "0.541851", "0.5358861", "0.52144605", "0.5196891", "0.5148625", "0.5148489", "0.51216763", "0.5053114", "0.50516135", "0.50410926", "0.5041092...
0.8127856
0
You can set attributes for the station by providing a dictionary Parameters. project & uri must be a list lat, lon, eas, must be convertible to float everything else is a string.
def setStation(self, attrib=None): if not isinstance(attrib, dict): return # minimal sanity check checkFloat = ['lat', 'lon', 'eas'] checkList = ['project', 'uri'] # create 'keys' without the underscore keys = [k.strip('_') for k in list(self.__dict__)] ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_data(self, record):\n state = {}\n if 'lon' in record:\n state[ATTR_LONGITUDE] = record['lon']\n if 'lat' in record:\n state[ATTR_LATITUDE] = record['lat']\n if 'alt' in record:\n state[ATTR_ELEVATION] = record['alt']\n if 'ubi' in record:...
[ "0.571317", "0.55146784", "0.5513319", "0.5414872", "0.53760904", "0.53628534", "0.5342563", "0.5324857", "0.52393043", "0.518832", "0.5161087", "0.51601505", "0.5153292", "0.5135026", "0.51113105", "0.5096146", "0.5095356", "0.50654036", "0.5047394", "0.5043429", "0.50406104...
0.7062441
0
Query the sparql endpoint for data products submitted by this station adjust latitude and longitude and store a list of data specifications and data objects (PID's)
def _setData(self): if not self.stationId: return """ # get the ressource url and adjust lat and lon from data portal query = sparqls.stationResource(self.stationId) key, val = RunSparql(query, 'array').run() if val: self.url = v...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_datacube(product,latitude,longitude,time,measurements):\r\n\r\n dc = datacube.Datacube(app=\"Query\")\r\n\r\n xarr = dc.load(\r\n product=product, \r\n longitude=longitude, \r\n latitude=latitude,\r\n # Time format YYYY-MM-DD\r\n time=time, \r\n measurement...
[ "0.5754376", "0.5710004", "0.55562365", "0.55338526", "0.5481048", "0.54356885", "0.53965205", "0.5357878", "0.53457266", "0.53261817", "0.53222287", "0.53217673", "0.52814996", "0.52286285", "0.5211386", "0.5211245", "0.52014244", "0.5185626", "0.51776993", "0.51525617", "0....
0.6863099
0
This function is a short cut to return the getSamplingHeight() for documentation, please refer to .getSamplingHeight()
def sh(self, product=None): return self.getSamplingHeight(product)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dimension_height(self):\n pass", "def stride_height(self):\n\t\treturn self.strides_shape_param('H')", "def get_height(self) -> int:\n return self.rsimulator.get_frame_height()", "def getHeight(self):\n return self.height", "def getHeight(self):\n return self.height", ...
[ "0.7334868", "0.7218686", "0.7208123", "0.7113875", "0.7113875", "0.70994365", "0.7040811", "0.7000414", "0.69715154", "0.69591147", "0.6952543", "0.6943306", "0.6932998", "0.6928708", "0.69241124", "0.69241124", "0.69241124", "0.6922861", "0.6917013", "0.69131047", "0.690251...
0.74387443
0
a list of unique values for sampling heights for the specified data product in case of no sampling hights or the product is not found for this station, an empty list is returned. A shortcut function is defined .sh() which calls this function.
def getSamplingHeight(self, product=None): # default return empty list sh = [''] # check if product is availabe for station if not product in self._data.values: return sh # if product is available but no sampling height is defined, return # count returns zer...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sh(self, product=None):\n return self.getSamplingHeight(product)", "def get_sealed_products_data(\n self, set_code: str\n ) -> List[MtgjsonSealedProductObject]:\n LOGGER.info(f\"Getting booster data for {set_code}\")\n products_list = []\n for sealed_product_name, sealed...
[ "0.6176316", "0.54340583", "0.51967186", "0.5194723", "0.51105845", "0.5095346", "0.50621283", "0.49846748", "0.49410042", "0.49322653", "0.48972636", "0.4896876", "0.48816198", "0.48781964", "0.48656213", "0.48565215", "0.4841422", "0.4831362", "0.4813755", "0.48079696", "0....
0.7926699
0
Function to match attributes from converter to instruments attributes
def _match_type_object_attributes_to_instrument_attributes(self, converted_data): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])", "def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n s...
[ "0.6252583", "0.6062729", "0.56403756", "0.5609397", "0.5576679", "0.55505896", "0.547663", "0.547364", "0.5460027", "0.54585034", "0.54178935", "0.5402644", "0.53940463", "0.5393521", "0.5341078", "0.5337362", "0.5318856", "0.52873087", "0.5284203", "0.5272638", "0.5266676",...
0.67192864
0
Get meta data from single cdf file So far only manually for 'SOHO_ERNEHED_L21MIN' and 'SOHO_ERNELED_L21MIN'
def _get_metadata(dataset, path_to_cdf): metadata = [] cdf = cdflib.CDF(path_to_cdf) if dataset=='SOHO_ERNE-HED_L2-1MIN' or dataset=='SOHO_ERNE-LED_L2-1MIN': if dataset=='SOHO_ERNE-HED_L2-1MIN': m = 'H' if dataset=='SOHO_ERNE-LED_L2-1MIN': m = 'L' metadata = {...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metadata(filename):\n import numpy as np\n import pandas as pd\n\n infos = \"\"\"IGRAID 1- 11 Character\nWMOID 13- 17 Integer\nNAME 19- 48 Character\nNAMFLAG 50- 50 Character\nLATITUDE 52- 60 Real\nLATFLAG 62- 62 Character\nLONGITUDE 64- 72 R...
[ "0.6787354", "0.66897845", "0.6515701", "0.6452773", "0.6449452", "0.63816357", "0.6350765", "0.62949073", "0.62506527", "0.6211634", "0.6130489", "0.60176396", "0.6001111", "0.59593594", "0.5942799", "0.59364325", "0.59356487", "0.59337986", "0.59134674", "0.5909135", "0.590...
0.7197249
0
Implementation of the analytical solution to Empirical Variational Bayes Matrix Factorization. This function can be used to calculate the analytical solution to empirical VBMF.
def EVBMF(Y, sigma2=None, H=None): L,M = Y.shape #has to be L<=M if H is None: H = L alpha = L/M tauubar = 2.5129*np.sqrt(alpha) #SVD of the input matrix, max rank of H _,s,_ = torch.svd(Y) s = s[:H] #Calculate residual residual = 0. if H<L: # residual ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vbmstep(self):\n for k in range(self.k):\n self.beta_k[k] = self.beta_0 + self.counts[k]\n self.m_k[k] = (1 / self.beta_k[k]) * (self.beta_0 * self.m_0 +\n self.counts[k] * self.means[k])\n\n tmp = (self.beta_0 * self.coun...
[ "0.6568823", "0.6196563", "0.6113917", "0.6092184", "0.6078253", "0.6074075", "0.6015432", "0.59387404", "0.59387404", "0.592884", "0.58894825", "0.5857658", "0.5835821", "0.57507086", "0.5727656", "0.5717088", "0.57143795", "0.5711402", "0.5685124", "0.5682099", "0.5679074",...
0.63317454
1
Update the entity list to make things quicker reuse entity ctrls if possible. First, if there are more controls currently in the list than are needed, remove those that are redundant. Then work throught the list of entities, grabbing an existing control and updating it if possible. Create new controls only as necessary...
def update_entity_list(self): list_sizer = self.scroll_entity_list.GetSizer() number_of_ctrls = self.get_number_of_entity_ctrls() number_of_entities = self.get_number_of_entities() difference = number_of_ctrls - number_of_entities if difference > 0: for unused...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateAllEntities():\n entityKeys=list(registeredEntities.keys())\n for currKey in entityKeys:\n try:\n currEntity=registeredEntities[currKey]\n currEntity.update()\n except KeyError:\n #this should only be called if an entity is deleted (like if a rock got ...
[ "0.57126147", "0.5587496", "0.5402336", "0.5387726", "0.5372032", "0.53281385", "0.52710485", "0.5269086", "0.52376354", "0.5144977", "0.5120092", "0.5041901", "0.5036595", "0.5007323", "0.49805024", "0.4979292", "0.49675938", "0.4917916", "0.49164057", "0.49124953", "0.49018...
0.85483783
0
r"""Collapses cube coordinates and calculate percentiled data. Calculate percentiled data over a given coordinate by collapsing that coordinate. Typically used to convert realization data into percentiled data, but may calculate over any dimension coordinate. Alternatively calling this with a dataset containing probabi...
def process(cube, coordinates=None, ecc_bounds_warning=False, percentiles=None, no_of_percentiles=None): if no_of_percentiles is not None: percentiles = choose_set_of_percentiles(no_of_percentiles, sampling="quantile") # TODO: Correct when form...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_percentiles_cube():\n\n test_data = np.full((5, 4, 4), -1, dtype=float)\n for i in range(5):\n test_data[i].fill(100*i + 200)\n\n percentiles = DimCoord(np.linspace(0, 100, 5), long_name=\"percentiles\",\n units=\"%\")\n grid_x = DimCoord(np.arange(4), standa...
[ "0.6452051", "0.60577875", "0.59541893", "0.589441", "0.5876835", "0.57781804", "0.57659763", "0.5726601", "0.5721728", "0.5720009", "0.5646027", "0.5432934", "0.5351184", "0.53393996", "0.5324208", "0.5303023", "0.52893645", "0.5286576", "0.526351", "0.5245997", "0.51418597"...
0.6905458
0
Check we receive a warning about weldx_widgets not being available.
def test_redirection_weldx_widgets_not_found(): orig_import = __import__ # Store original __import__ def import_mock(name, *args, **kwargs): if "weldx_widgets" in name: raise ModuleNotFoundError("weldx_widgets not found") if "matplotlib" in name: raise ModuleNotFoundErr...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_warnings_active(self) -> bool:", "def has_warnings(self) -> bool:", "def check_alive(cw: CustomWidget) -> NoReturn:\r\n ...", "def check_warnings():\n user_warned = False\n # Warn the user about problematic key bindings that may conflict with\n # vimode.\n # The solution is to remove t...
[ "0.65154344", "0.6439584", "0.6356937", "0.6303061", "0.61383975", "0.6066735", "0.6012545", "0.5965545", "0.5897515", "0.5897374", "0.5860277", "0.5803844", "0.57967067", "0.5743022", "0.5737353", "0.5673264", "0.563405", "0.563405", "0.563183", "0.56248", "0.55991155", "0...
0.6596005
0
Tiles x on dimension dim count times.
def tile(x, count, dim=0): perm = list(range(len(x.size()))) if dim != 0: perm[0], perm[dim] = perm[dim], perm[0] x = x.permute(perm).contiguous() out_size = list(x.size()) out_size[0] *= count x = x.repeat(count, *(1,) * x.dim()).transpose(0, 1).contiguous().view(*out_size) if d...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tile(x, count, dim=0):\n perm = list(range(len(x.shape)))\n if dim != 0:\n perm[0], perm[dim] = perm[dim], perm[0]\n x = x.permute(perm).contiguous()\n out_size = list(x.shape)\n out_size[0] *= count\n batch = x.shape[0]\n x = x.view(batch, -1) \\\n .transpose(0, 1) \\\n...
[ "0.7995799", "0.7743472", "0.72251785", "0.72251785", "0.6892366", "0.68706846", "0.6489282", "0.63585883", "0.6300268", "0.6292242", "0.6080058", "0.606337", "0.60176843", "0.59804887", "0.5967274", "0.5953083", "0.5943857", "0.5932811", "0.59011155", "0.58835983", "0.588350...
0.8115603
0
r""" Generate a square mask for the sequence. The masked positions are filled with float('inf'). Unmasked positions are filled with float(0.0).
def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor: mask = (torch.triu(torch.ones(sz, sz, device=device)) == 1).transpose(0, 1) mask = ( mask.float() .masked_fill(mask == 0, float("-inf")) .masked_fill(mask == 1, float(0.0)) ) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_square_subsequent_mask(self, sz: int):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float(\n '-inf')).masked_fill(mask == 1, float(0.0))\n return mask", "def generate_square_subsequent_mask(self, sz: int)...
[ "0.7153673", "0.68876415", "0.6759619", "0.6754629", "0.6736664", "0.6734393", "0.6703915", "0.653634", "0.64891696", "0.6488388", "0.63707256", "0.62912", "0.60452783", "0.6013539", "0.6002349", "0.5912298", "0.59067696", "0.59046054", "0.59028065", "0.58848965", "0.5863874"...
0.7210654
0
Constructs an instance of ResourceSpec from yaml data.
def FromYaml(cls, yaml_data, api_version=None): if not yaml_data: return None collection = registry.GetAPICollection( yaml_data['collection'], api_version=api_version) attributes = ParseAttributesFromData( yaml_data.get('attributes'), collection.detailed_params) return cls( ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FromData(cls, data):\n if not data:\n return None\n\n attribute_name = data['attribute_name']\n parameter_name = data['parameter_name']\n help_text = data['help']\n completion_id_field = data.get('completion_id_field', None)\n completion_request_params_list = data.get('completion_request...
[ "0.6480427", "0.63688", "0.6167031", "0.61638653", "0.6083558", "0.6032994", "0.5951733", "0.59479475", "0.5927682", "0.5812073", "0.5791038", "0.5777207", "0.5646751", "0.5624482", "0.56070876", "0.5604127", "0.55825603", "0.5564748", "0.5553098", "0.55462116", "0.55418426",...
0.64526975
1
Initializes a ResourceSpec. To use a ResourceSpec, give a collection path such as 'cloudiot.projects.locations.registries', and optionally an API version. For each parameter in the collection path, an attribute is added to the resource spec. Names can be created by default or overridden in the attribute_configs dict, w...
def __init__(self, resource_collection, resource_name='resource', api_version=None, disable_auto_completers=True, plural_name=None, **kwargs): self._name = resource_name self.plural_name = plural_name self.collection = resource_collection self._resources = resources.REGISTR...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def __init__(__self__,\n ...
[ "0.6471468", "0.5993948", "0.5986623", "0.5976555", "0.5975396", "0.5903518", "0.5798204", "0.5712617", "0.56915796", "0.5652143", "0.56374675", "0.56139106", "0.56022555", "0.5597308", "0.5595672", "0.5583856", "0.55816966", "0.5577824", "0.5571314", "0.5570658", "0.5567015"...
0.7300264
0
A map from all attribute names to param names.
def attribute_to_params_map(self): return self._param_names_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}", "def param_name_...
[ "0.71225196", "0.70732814", "0.6803996", "0.6746649", "0.6736647", "0.6706346", "0.6622288", "0.65926117", "0.65598536", "0.64562553", "0.6440021", "0.6430904", "0.64086884", "0.6398749", "0.6383543", "0.63519055", "0.6318669", "0.6314571", "0.63041973", "0.63021564", "0.6300...
0.8711842
0
Chooses attribute name for a param name. If attribute_config gives an attribute name, that is used. Otherwise, if the param is an anchor attribute, 'name' is used, or if not, param_name is used.
def _AttributeName(self, param_name, attribute_config, anchor=False): if attribute_config.attribute_name: return attribute_config.attribute_name if anchor: return 'name' return param_name.replace('Id', '_id').lower()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AttributeName(self, param_name):\n for attribute_name, p in six.iteritems(self.attribute_to_params_map):\n if p == param_name:\n return attribute_name", "def ParamName(self, attribute_name):\n if attribute_name not in self.attribute_to_params_map:\n raise ValueError(\n 'No par...
[ "0.7922777", "0.7051831", "0.6452999", "0.6426694", "0.61500454", "0.60328346", "0.601027", "0.5978671", "0.585918", "0.58558106", "0.57965505", "0.5747554", "0.57251555", "0.57141495", "0.5692946", "0.56223017", "0.5602715", "0.5579822", "0.55760527", "0.5572492", "0.556937"...
0.8635494
0
Given an attribute name, gets the param name for resource parsing.
def ParamName(self, attribute_name): if attribute_name not in self.attribute_to_params_map: raise ValueError( 'No param name found for attribute [{}]. Existing attributes are ' '[{}]'.format(attribute_name, ', '.join(sorted(self.attribute_to_params_map.keys())))) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AttributeName(self, param_name):\n for attribute_name, p in six.iteritems(self.attribute_to_params_map):\n if p == param_name:\n return attribute_name", "def _AttributeName(self, param_name, attribute_config, anchor=False):\n if attribute_config.attribute_name:\n return attribute_confi...
[ "0.7986248", "0.75536495", "0.6849631", "0.66188323", "0.6480736", "0.639842", "0.6344476", "0.6323198", "0.6190031", "0.61724794", "0.61705315", "0.61705315", "0.61705315", "0.61705315", "0.61705315", "0.6147806", "0.6147806", "0.61333215", "0.6112564", "0.6066664", "0.60465...
0.7948914
1
Given a param name, gets the attribute name.
def AttributeName(self, param_name): for attribute_name, p in six.iteritems(self.attribute_to_params_map): if p == param_name: return attribute_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _AttributeName(self, param_name, attribute_config, anchor=False):\n if attribute_config.attribute_name:\n return attribute_config.attribute_name\n if anchor:\n return 'name'\n return param_name.replace('Id', '_id').lower()", "def ParamName(self, attribute_name):\n if attribute_name not ...
[ "0.8341303", "0.7594844", "0.7139493", "0.7003457", "0.6891393", "0.6869919", "0.68379277", "0.6723565", "0.67190063", "0.6652686", "0.6646669", "0.64897734", "0.64897734", "0.64897734", "0.64897734", "0.64897734", "0.648274", "0.6474743", "0.646516", "0.64248013", "0.6418702...
0.89631
0
Initializes a resource given its fallthroughs. If the attributes have a property or arg fallthrough but the full resource name is provided to the anchor attribute flag, the information from the resource name is used over the properties and args. This preserves typical resource parsing behavior in existing surfaces.
def Initialize(self, fallthroughs_map, parsed_args=None): params = {} # Returns a function that can be used to parse each attribute, which will be # used only if the resource parser does not receive a fully qualified # resource name. def LazyGet(name): f = lambda: deps_lib.Get(name, fallthrou...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__,\n resource_name: str,\n args: AclBindingRuleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: Optional[AclArgs] = None,\n ...
[ "0.66332495", "0.6537106", "0.65020514", "0.6459604", "0.62304676", "0.6204393", "0.6201695", "0.618227", "0.61458963", "0.6075082", "0.60665816", "0.6006086", "0.5999377", "0.5999337", "0.5999146", "0.59941", "0.59921694", "0.5991608", "0.5982053", "0.59734714", "0.5961451",...
0.69918567
0
Helper for parsing a list of results from a plural fallthrough.
def _ParseFromPluralValue(self, attribute_to_args_map, base_fallthroughs_map, plural_attribute, parsed_args): attribute_name = plural_attribute.name fallthroughs_map = self.BuildFullFallthroughsMap( attribute_to_args_map, base_fallthroughs_map, plural=True, with_ancho...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plural(record, sequence, zero=None, one=None, two=None, more=''):\n\n l = len (sequence(record))\n \n if l == 0 and zero is not None:\n return zero(record)\n elif l == 1 and one is not None:\n return one(record)\n elif l == 2 and two is not None:\n return two(record)\n el...
[ "0.5154282", "0.5042861", "0.5010712", "0.4999677", "0.49668226", "0.49542627", "0.4946884", "0.4855669", "0.48430166", "0.48237655", "0.47569048", "0.47480172", "0.47471583", "0.473727", "0.4729921", "0.47268867", "0.4725763", "0.47083244", "0.4693469", "0.46605954", "0.4631...
0.58729494
0
Builds map of all fallthroughs including arg names. Fallthroughs are a list of objects that, when called, try different ways of getting values for attributes (see googlecloudsdk.calliope.concepts. deps_lib._Fallthrough). This method builds a map from the name of each attribute to its fallthroughs, including the "primar...
def BuildFullFallthroughsMap(self, attribute_to_args_map, base_fallthroughs_map, plural=False, with_anchor_fallthroughs=True): fallthroughs_map = {} for attribute in self.attributes: fallthroughs_map[attribute.name] = ( self.GetArgAnd...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetArgAndBaseFallthroughsForAttribute(self,\n attribute_to_args_map,\n base_fallthroughs_map,\n attribute,\n plural=False):\n attribute_...
[ "0.66037655", "0.64677685", "0.60660607", "0.5610746", "0.5434453", "0.54003733", "0.52738345", "0.5240895", "0.51854205", "0.5154537", "0.5153767", "0.51498824", "0.5075057", "0.50633425", "0.5063299", "0.49745247", "0.48896033", "0.4885709", "0.48491773", "0.4836923", "0.48...
0.6850987
0
Helper to get anchordepednent fallthroughs for a specific attribute.
def _GetAttributeAnchorFallthroughs(self, anchor_fallthroughs, attribute): parameter_name = self.ParamName(attribute.name) anchor_based_fallthroughs = [ deps_lib.FullySpecifiedAnchorFallthrough( anchor_fallthrough, self.collection_info, parameter_name) for anchor_fallthrough in ancho...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetArgAndBaseFallthroughsForAttribute(self,\n attribute_to_args_map,\n base_fallthroughs_map,\n attribute,\n plural=False):\n attribute_...
[ "0.64066035", "0.62550104", "0.5431331", "0.53608054", "0.5328746", "0.47851092", "0.47500005", "0.46948504", "0.4661473", "0.45768866", "0.45704085", "0.45176873", "0.4516535", "0.44741994", "0.44652265", "0.4453551", "0.4448734", "0.44419", "0.44351763", "0.44257838", "0.44...
0.80926263
0
Helper for adding anchor fallthroughs to the fallthroughs map.
def _AddAnchorFallthroughs(self, anchor, fallthroughs_map): anchor_fallthroughs = fallthroughs_map.get(anchor.name, []) for attribute in self.attributes: if attribute == anchor: continue anchor_based_fallthroughs = self._GetAttributeAnchorFallthroughs( anchor_fallthroughs, attribut...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def BuildFullFallthroughsMap(self, attribute_to_args_map,\n base_fallthroughs_map, plural=False,\n with_anchor_fallthroughs=True):\n fallthroughs_map = {}\n for attribute in self.attributes:\n fallthroughs_map[attribute.name] = (\n sel...
[ "0.6128773", "0.5863155", "0.5690524", "0.5404224", "0.5352024", "0.5344817", "0.5318971", "0.5303139", "0.5210181", "0.52095926", "0.51509917", "0.5122511", "0.5094019", "0.4990103", "0.4977628", "0.4973901", "0.4971675", "0.49588776", "0.49393559", "0.4937994", "0.49252173"...
0.8732982
0
Parses a list of ResourceParameterAttributeConfig from yaml data.
def ParseAttributesFromData(attributes_data, expected_param_names): raw_attributes = [ ResourceParameterAttributeConfig.FromData(a) for a in attributes_data ] registered_param_names = [a.parameter_name for a in raw_attributes] final_attributes = [] # TODO(b/78851830): improve the time complexity here. ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FromYaml(cls, yaml_data, api_version=None):\n if not yaml_data:\n return None\n collection = registry.GetAPICollection(\n yaml_data['collection'], api_version=api_version)\n attributes = ParseAttributesFromData(\n yaml_data.get('attributes'), collection.detailed_params)\n return ...
[ "0.5625115", "0.5370975", "0.5180128", "0.51600766", "0.5148526", "0.5116538", "0.50916874", "0.50824744", "0.5073696", "0.50610805", "0.50529665", "0.50239223", "0.49906877", "0.49881876", "0.49795628", "0.49705", "0.49678966", "0.49577042", "0.4937298", "0.49367967", "0.493...
0.65686333
0
Updates suspicion level of all users
def _update_suspicion_1(self): for bucket in self.used_buckets: multiplier = 1 if bucket.attacked else 0 for user in bucket.users: user.suspicion += multiplier
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_suspicion_1(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def _update_suspicion_2(self):\n\n for bucket in self.buckets:\n multiplier = 1 if ...
[ "0.71854013", "0.6843317", "0.6716192", "0.670385", "0.66000074", "0.6202914", "0.62022865", "0.60776985", "0.595668", "0.59536815", "0.59249663", "0.5896137", "0.58041495", "0.574681", "0.57329714", "0.5728912", "0.56205404", "0.557669", "0.55413103", "0.5537747", "0.5410775...
0.7047431
1
Returns the similarity score between the query data and a snack
def get_score(snack_data, percentage_data, snack, snack_query, protein_query, carb_query, fat_query): start_time = time.time() #Load necessary data """ with open ('../../../Data/percentagesDict.pickle', 'rb') as f: percentage_data = pickle.load(f) with open ('../../../Data/FINAL_snacks_data.pickle', 'rb') as f:...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), l...
[ "0.65184444", "0.63786924", "0.62425673", "0.61836904", "0.6133173", "0.61231256", "0.6054935", "0.59781015", "0.59664744", "0.58743554", "0.58663404", "0.58427", "0.583546", "0.583208", "0.5831294", "0.5812179", "0.58055884", "0.5791727", "0.57866764", "0.5785247", "0.577111...
0.7251874
0
Returns the top n snacks with the highest similarity scores to the query snack
def top_n_scores(snack_data, percentage_data, n, snack_query, protein_query, carb_query, fat_query): start_time = time.time() #Loop through the snacks in dictionary and compute the score for each one scores_list = [] for title, info in snack_data.items(): score = get_score(snack_data, percentage_data, title, s...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def top_n_satisfy2(content, n):\n #print(n)\n sum_satisfy = 0.0\n query_num = 0.0\n for qid in content:\n label_sort = []\n score = []\n all_info = content[qid]\n num_label1 = 0\n for info in all_info:\n if info[0] > 0:\n num_label1 += 1\n ...
[ "0.70479095", "0.7016488", "0.6949201", "0.68908536", "0.67934215", "0.6757787", "0.664486", "0.6607535", "0.65049905", "0.64892024", "0.64891934", "0.6486207", "0.6456038", "0.64506954", "0.6432667", "0.64052105", "0.63956976", "0.6391418", "0.63406956", "0.6337601", "0.6321...
0.7778652
0
Gets list of buses from database
def get_bus_list(): buses = db.session.query(Bus.bus_name).all() return buses
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_bt(self):\n return list(self.collection.find({\"sensor_type\": \"bt\"}, {\"_id\": False})) # Return a list", "def list(cls, context, filters=None, limit=3000, marker=1,\n sort_key='id', sort_dir='asc'):\n db_boars = cls.dbapi.get_boar_list(\n context, limit=limit...
[ "0.67924833", "0.65883785", "0.6407331", "0.62877816", "0.6284882", "0.6264669", "0.61648506", "0.60405076", "0.5935302", "0.59322333", "0.58382916", "0.5751093", "0.5712563", "0.5708815", "0.57086897", "0.5692648", "0.56826633", "0.56777906", "0.56616527", "0.566161", "0.565...
0.7955064
0
Shows info per bus stop
def get_stop_info(stops): api_url = 'http://webservices.nextbus.com/service/publicXMLFeed?command=predictions&a=sf-muni&stopId=' """Stop_dict = {bus_name:'38', minutes: 7, stop_location: 'Geary & Leavenworth'}""" for stop in stops: url = api_url + str(stop) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showInfo(self, stop_number):\n if self.arrivalTime == '': # At origin terminals, there will only be a departure time listed\n if self.getArrivalTime(stop_number) == '00':\n print \"There is a\", self.getDirection(), self.routeID, \"train departing from\", self.getStop(stop_numb...
[ "0.66487247", "0.6538514", "0.6502286", "0.63745207", "0.6317412", "0.5726826", "0.5719049", "0.5625583", "0.55998963", "0.554344", "0.5520937", "0.5518883", "0.55143493", "0.5511896", "0.5466144", "0.5420231", "0.5420191", "0.538806", "0.5379551", "0.5367331", "0.53392893", ...
0.66911435
0
Create a modified image by rotating and repositioning the input image.
def create_modified_image(input_image, rotation_angle, x_offset, y_offset): image = Image.open(input_image) width, height = image.size # Calculate the maximum size needed to fit the rotated image max_size = max(image.size) max_width = max_size if width == max_size else int(max_size * (width / heigh...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def rotate(self, *args, **kwargs):\n return _image.im...
[ "0.7296847", "0.7029329", "0.69576144", "0.6940915", "0.68389374", "0.6762388", "0.6726973", "0.67229176", "0.6662857", "0.66358876", "0.6580777", "0.6566921", "0.6560993", "0.6520975", "0.65166694", "0.64928305", "0.648112", "0.64629215", "0.64446527", "0.64421993", "0.64395...
0.7490708
0
Select the th that maximizes a metric
def select_best_th(metrics_dict: Dict, metric: str): max_metric_ix = np.argmax(metrics_dict[metric]) return metrics_dict['metrics_ths'][max_metric_ix]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # the...
[ "0.6139892", "0.61252534", "0.60230327", "0.60230327", "0.60230327", "0.60230327", "0.60230327", "0.60230327", "0.60004276", "0.5977269", "0.5831864", "0.5795257", "0.5734608", "0.57190204", "0.56623447", "0.5624177", "0.56057686", "0.56052905", "0.5580644", "0.5558987", "0.5...
0.7147471
0
Creates a prediction for a label, given a th. If score > th > 1
def label_df_with_th(df: pd.DataFrame, th: float, score_col: str): df['y_pred_class'] = df[score_col].apply(lambda score: 1 if score >= th else 0) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, prediction, label):\n\n # true positive (tp): we predict a label of 1 (positive), and the true label is 1\n self.tp = np.sum(np.logical_and(prediction == 1, label == 1))\n # true negative (tn): we predict a label of 0 (negative), and the true label is 0\n self.tn = np...
[ "0.6739023", "0.6581369", "0.6510297", "0.63756716", "0.63308835", "0.6283337", "0.62540215", "0.6175885", "0.61675656", "0.61640334", "0.615781", "0.6091183", "0.6068421", "0.60517657", "0.6038007", "0.60357845", "0.60301214", "0.5937004", "0.5917283", "0.59122425", "0.58688...
0.71840984
0
Creates a new database path unique to the exported subset of ids.
def make_new_dbpath(ibs, id_label, id_list): import wbia tag_hash = ut.hashstr_arr(id_list, hashlen=8, alphabet=ut.ALPHABET_27) base_fmtstr = ( ibs.get_dbname() + '_' + id_label + 's=' + tag_hash.replace('(', '_').replace(')', '_') + '_%d' ) dpath = w...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_id_path(base_path, id_) -> Path:\n\n return base_path / (ID_FMT.format(id=id_))", "def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, sel...
[ "0.6192096", "0.58190155", "0.57944864", "0.5495239", "0.5478381", "0.54024965", "0.54021364", "0.53865814", "0.5273472", "0.5242266", "0.5226487", "0.5211098", "0.5210646", "0.51828325", "0.5147821", "0.51385933", "0.5101526", "0.5097332", "0.5086912", "0.508547", "0.5063234...
0.7305864
0
PZ_Master1 had annotmatch rowids that did not agree with the current name labeling. Looking at the inconsistencies in the graph interface was too cumbersome, because over 3000 annots were incorrectly grouped together. This function deletes any annotmatch rowid that is not consistent with the current labeling so we can ...
def fix_annotmatch_pzmaster1(): import wbia ibs = wbia.opendb('PZ_Master1') infr = wbia.AnnotInference(ibs=ibs, aids=ibs.get_valid_aids(), verbose=5) infr.initialize_graph() annots = ibs.annots() aid_to_nid = ut.dzip(annots.aids, annots.nids) if False: infr.reset_feedback() ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_04_remove_annotations(self):\n self.addAnnotation(\"annotation1\", self.host.id, \"HOST\")\n self.removeAnnotation(self.added_annotations[-1].annotation.id)\n del self.added_annotations[-1]", "def remerge_subset():\n import wbia\n\n ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n ib...
[ "0.6065128", "0.57366157", "0.53676903", "0.53666043", "0.5321259", "0.5258625", "0.5243224", "0.52191055", "0.5205449", "0.519409", "0.5154932", "0.51453507", "0.51413393", "0.5103089", "0.50633407", "0.5005262", "0.49942034", "0.4972329", "0.49611598", "0.4933948", "0.48975...
0.73364335
0
Assumes ibs1 is an updated subset of ibs2. Remerges ibs1 back into ibs2.
def remerge_subset(): import wbia ibs1 = wbia.opendb('PZ_PB_RF_TRAIN') ibs2 = wbia.opendb('PZ_Master1') gids1, gids2 = ibs1.images(), ibs2.images() idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids) isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2) assert all( ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def merge_roidb(roidbs):\n roidb = roidbs[0]\n for r in roidbs[1:]:\n roidb.extend(r)\n return roidb", "def merge_roidb(roidbs):\n roidb = roidbs[0]\n for r in roidbs[1:]:\n roidb.extend(r)\n ...
[ "0.5970242", "0.58449215", "0.58449215", "0.5833292", "0.57845783", "0.5681122", "0.5585994", "0.55577934", "0.5522436", "0.5519949", "0.54871976", "0.5459155", "0.54430294", "0.54322654", "0.5431394", "0.54148954", "0.5397526", "0.53864276", "0.5382178", "0.5351669", "0.5346...
0.696387
0
Finds the aids of annotations in ibs1 that are also in ibs2 ibs1 = wbia.opendb('PZ_Master1') ibs2 = wbia.opendb('PZ_MTEST')
def find_overlap_annots(ibs1, ibs2, method='annots'): if method == 'images': images1, images2 = ibs1.images(), ibs2.images() idxs1, idxs2 = ut.isect_indices(images1.uuids, images2.uuids) isect_images1 = images1.take(idxs1) annot_uuids = ut.flatten(isect_images1.annot_uuids) i...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remerge_subset():\n import wbia\n\n ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n ibs2 = wbia.opendb('PZ_Master1')\n\n gids1, gids2 = ibs1.images(), ibs2.images()\n idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)\n isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)\n\n asse...
[ "0.6751747", "0.6087413", "0.601892", "0.58519965", "0.5660577", "0.54375374", "0.54265267", "0.5317513", "0.5268035", "0.5241501", "0.51906496", "0.5165996", "0.5164853", "0.51569146", "0.5151366", "0.51483524", "0.51438683", "0.51413226", "0.5127341", "0.5124664", "0.510170...
0.6915208
0
Constructs the type with the given alias using the given args and kwargs.
def create_object(self, alias: str, *args: Any, **kwargs: Any) -> Any: object_type = self._type_aliases.get(alias) if object_type is None: raise KeyError(f"There is no type registered for alias {alias}") if not callable(object_type): raise TypeError( f"Ask...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t", "def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:\n args = []\n ...
[ "0.6451635", "0.6361811", "0.5905361", "0.5635368", "0.5622488", "0.55912095", "0.55612254", "0.5529319", "0.5490742", "0.5441523", "0.5422917", "0.5418361", "0.5410999", "0.5371319", "0.5332267", "0.53162473", "0.52660453", "0.51575106", "0.51328623", "0.51288486", "0.512559...
0.6978425
0
this starts transcribing for up to (60) seconds
def start_transcribing(): transcribe.main()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transcribe_proc():\n while True:\n # Get result of transcription\n transcribe_result = transcriber.transcribe_stream(\n audio_stream(), sample_rate, sample_width, channels\n )\n\n _LOGGER.debug(\"Transcription result: %s\", transcribe_result)\n\...
[ "0.68453956", "0.60066426", "0.57858366", "0.5739796", "0.57244843", "0.57060385", "0.56948715", "0.5627209", "0.55653596", "0.54652876", "0.54332554", "0.5412271", "0.54015946", "0.53609794", "0.5348195", "0.53372765", "0.53358334", "0.5301576", "0.5300465", "0.52419746", "0...
0.65298873
1
Return function that computes loss only for those targets that are not 1.
def masked_loss_func(loss_function): def masked_loss_fn(predictions, targets): assert targets.ndim == 1 target_mask = T.neq(targets, -1) valid_inds = T.nonzero(target_mask) return loss_function(predictions[valid_inds], targets[valid_inds]) return masked_loss_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss_fn(self, targets, outputs, model):", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def loss_fn(self, pred: Tensor, true: Tensor) -> Tensor:\n pass", "def mask_nan_keep_loss(y_true, y_pred):\n y_pred, y_true, num_notnan = mas...
[ "0.66698176", "0.6661593", "0.65615445", "0.6476919", "0.6280919", "0.62795126", "0.62525606", "0.6203152", "0.6193807", "0.6179458", "0.6176614", "0.6161819", "0.6148114", "0.6142843", "0.6128714", "0.610796", "0.6101929", "0.6089113", "0.6025568", "0.5995906", "0.59949726",...
0.6711504
0
deep_hash creates a hash from the object
def deep_hash(obj): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepco...
[ "0.64543235", "0.6425407", "0.62933004", "0.61203706", "0.6063225", "0.6063225", "0.6063225", "0.6063225", "0.6049848", "0.60369045", "0.5989689", "0.5945062", "0.5930857", "0.5918197", "0.58890283", "0.5871536", "0.5871536", "0.5856316", "0.5843264", "0.58174515", "0.5791371...
0.8473571
0
deep_cmp compares two objects deeply
def deep_cmp(obj1, obj2): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deep_equals(obja, objb, isequal):\n\n objatree = wo.typedtree(obja)\n objbtree = wo.typedtree(objb)\n match = objatree == objbtree\n ok = match == isequal\n\n if ok:\n s = \"pass\"\n else:\n s = \"fail\"\n\n print(f\"{obja} == {objb} is {match} : {s}\")\n return ok", ...
[ "0.6726066", "0.66530275", "0.6527878", "0.64677584", "0.6271268", "0.621182", "0.6145705", "0.61302245", "0.61206347", "0.60844535", "0.60643643", "0.6057797", "0.6032406", "0.6009618", "0.59857124", "0.59731966", "0.5950671", "0.5939408", "0.5894853", "0.58666605", "0.57747...
0.9118865
0
Move files in the current working directory that match a pattern.
def batch_mover(pattern, directory=None): if directory is None: directory = Path().cwd() for i in os.scandir(directory): if file_check(pattern, i.name): pass # shutil.move(i.name, yeah we gotta change a lot here
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_files(sim_dir, dest_dir, file_patterns):\n for f in file_patterns:\n for p in glob.glob1(sim_dir, f):\n try:\n shutil.move(os.path.join(sim_dir, p), os.path.join(dest_dir, p))\n except Exception as e:\n print(\n \"error while...
[ "0.7225139", "0.7025052", "0.6768235", "0.6700694", "0.66823375", "0.6493144", "0.6468564", "0.64624935", "0.64362115", "0.62788254", "0.6260499", "0.62399226", "0.6172828", "0.6145879", "0.614499", "0.6129682", "0.6116104", "0.6089378", "0.60550845", "0.6048404", "0.6036013"...
0.7294452
0
Check that the file exists and matched the desired pattern.
def file_check(pattern, file_to_check): if file_to_check.name.__contains__(pattern): yield True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_contains_pattern(file, pattern):\r\n if not os.path.isfile(file):\r\n raise NameError('file %s does not exist' % file)\r\n return not utils.system('egrep -q \"' + pattern + '\" ' + file,\r\n ignore_status=True)", "def validate_string_match(self, pattern, file):\r\...
[ "0.73465383", "0.7302456", "0.72112775", "0.71983016", "0.6769893", "0.67092055", "0.66988623", "0.65889895", "0.6551649", "0.64450276", "0.6444477", "0.64354426", "0.6429209", "0.6409745", "0.6392553", "0.6392284", "0.63780326", "0.636964", "0.6360524", "0.63543516", "0.6349...
0.74560165
0
Player's Steam persona (profile) name.
def persona_name(self) -> str: return self._persona_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self) -> str:\n try:\n return self.stats[\"Player name\"]\n except KeyError as ke:\n logger.debug(ke, exc_info=True)\n logger.warn(\"unable to get player name\")\n return \"\"", "def get_name(self):\n try:\n return self.profile_...
[ "0.7409105", "0.7398977", "0.7245066", "0.71149844", "0.7076989", "0.69657075", "0.69252086", "0.6905714", "0.6882069", "0.6875132", "0.68576926", "0.68524104", "0.6836869", "0.6832816", "0.6792989", "0.6789445", "0.676719", "0.676719", "0.67574185", "0.6729789", "0.66886723"...
0.74546546
0
Create a team for the context's user. An administrator can also perform the action on a user's behalf.
def create_team_action(request): # Create the team. now = datetime.utcnow() user_id = request.context.user_id user = load_user(request.db, user_id) # Select a round based on the user's badges. round_ids = find_round_ids_with_badges(request.db, user['badges'], now) if len(round_ids) == 0: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n req = team_req.parse_args(strict=True)\n curr_user = api.user.get_user()\n if curr_user[\"teacher\"]:\n raise PicoException(\"Teachers may not create teams\", 403)\n req[\"team_name\"] = req[\"team_name\"].strip()\n if not all(\n [\n ...
[ "0.760319", "0.75646865", "0.7414623", "0.693689", "0.6909473", "0.6858494", "0.6830383", "0.6734105", "0.6727118", "0.668748", "0.6665157", "0.66149485", "0.6561478", "0.65193206", "0.64911103", "0.648043", "0.6461089", "0.64321554", "0.64144987", "0.64086944", "0.63389486",...
0.7725349
0
Evaluate plugin and vcf compatibility
def check_plugin(vcf_reader, plugin): # Always use core plug-in plugins = ['core'] # Collect supplied plugin(s) [plugins.append(item) for item in plugin] # Create set plugins = list(set(plugins)) # Evaluate vcf and plugin compatibility for plugin in plugin...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_client_custom_plugin():\n client = ConfigureClients(plugins=[PluginVipCustomisation])\n assert client.plugins == [PluginVipCustomisation]", "def test_check_status_code_returns_true():\n plugin_instance = PluginVipCustomisation()\n assert plugin_instance._check_status_code('replace me with real xml')...
[ "0.5617833", "0.53539044", "0.5324882", "0.5316147", "0.52079403", "0.520496", "0.5125704", "0.5116243", "0.50876707", "0.49684304", "0.49534646", "0.48966265", "0.48424554", "0.4832243", "0.4799134", "0.47725004", "0.47721884", "0.47630158", "0.4759951", "0.4743248", "0.4736...
0.6621885
0
print the details of the given cluster object
def print_cluster_attributes(self, objects): print("\n") print(("ClusterName".ljust(35),":",objects.ClusterName.value())) print(("Repository Disk".ljust(35),":", \ objects.RepositoryDisk.PhysicalVolume[0].VolumeName.value())) print("\nNodes in the cluster :\n---------------...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def print_cluster(self):\n print('Cluster', self.number)\...
[ "0.7697402", "0.68622345", "0.67253536", "0.66951793", "0.6661364", "0.6646771", "0.6497768", "0.6483208", "0.64796126", "0.64762145", "0.6474442", "0.64543897", "0.6430488", "0.6301079", "0.6287944", "0.6264077", "0.6209574", "0.6173595", "0.612323", "0.6118218", "0.61159134...
0.81189764
0
Returns a dict, key industry, value set of stock symbols Parses the US_Large_Cap.txt file, parses the WIKIdatasetscodes.csv file,
def parse_US_Large_Cap(): stocks = set() industries = defaultdict(set) with open('US_Large_Cap.txt') as f: for line in f: industry_match = re.match(r'.*--\s*(.*)', line) if industry_match: ind = industry_match.group(1) stock_match = re.match(r'...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_stock_list():\n print(\"Reading list of stocks.\")\n stocks = {}\n with open(STOCKS_FILE) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n stocks[row['Symbol']] = (row['Name'], row['Sector'])\n return stocks", "def get_data_from_file(file_name):\...
[ "0.6792169", "0.6174895", "0.6046333", "0.59522957", "0.58431596", "0.57646185", "0.5708081", "0.5615214", "0.5540734", "0.5530439", "0.55191875", "0.54750764", "0.54580003", "0.5432584", "0.54289293", "0.5422499", "0.54044735", "0.53776044", "0.5358801", "0.53523725", "0.535...
0.7746812
0
Adds months to the sourcedate (adjusting the day if necessary)
def add_months(sourcedate, months): month = sourcedate.month - 1 + months year = sourcedate.year + month // 12 month = month % 12 + 1 day = min(sourcedate.day, calendar.monthrange(year, month)[1]) return datetime.date(year, month, day)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_months(sourcedate, months):\n month = sourcedate.month - 1 + months\n year = int(sourcedate.year + month / 12)\n month = month % 12 + 1\n day = min(sourcedate.day, calendar.monthrange(year, month)[1])\n return datetime.date(year, month, day)", "def add_months(sourcedate, months):\r\n\r\n ...
[ "0.8102769", "0.79254377", "0.67087173", "0.6575526", "0.6172759", "0.5974256", "0.5790029", "0.5745349", "0.56915337", "0.5571453", "0.5530026", "0.55111146", "0.54789144", "0.53980106", "0.5394042", "0.5374351", "0.53717643", "0.5355199", "0.5321254", "0.53072524", "0.53038...
0.80970645
1
A mobility(T; Nd, Na; A, B) = 3/2 3/2 T + B (Nd + Na) / T
def mobility(mat, T, A, B): Nd = mat.p_donor_concentration(T=T) Na = mat.n_acceptor_concentration(T=T) T32 = T ** (3 / 2) return A / (T32 + B * (Nd + Na) / T32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mu_na(n: float, a: float) -> float:\n return n * n * a * a * a", "def ilerp(a, b, t):\n return (t - a) / (b - a)", "def C_Na_eq():\n global C_Na, C_Mg, C_dNTP\n return C_Na + 120*sqrt(C_Mg - C_dNTP)", "def t(o, r):\n return (r/o)**2", "def calc_mad(a,b):\n comb = a + b\n idx = np.a...
[ "0.6247191", "0.6061543", "0.5862243", "0.58048445", "0.5770478", "0.57220936", "0.5684574", "0.56817734", "0.5677259", "0.5659736", "0.55972934", "0.55965066", "0.55772066", "0.556147", "0.55419177", "0.552071", "0.5519663", "0.5517092", "0.5505766", "0.5494118", "0.5485629"...
0.74827826
0
Adjust scrollbars according to window and canvassize.
def adjustScrolls(self): cwidth = self._canvas.winfo_width() cheight = self._canvas.winfo_height() self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth) self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight) if cwidth < self.canvwidth or cheight ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AdjustMyScrollbars(self):\r\n\r\n if self._anchor:\r\n xUnit, yUnit = self.GetScrollPixelsPerUnit()\r\n if xUnit == 0:\r\n xUnit = self.GetCharWidth()\r\n if yUnit == 0:\r\n yUnit = self._lineHeight\r\n\r\n x, y = self._anchor.Get...
[ "0.7038642", "0.6943688", "0.69007504", "0.6676013", "0.6641863", "0.6597355", "0.6593239", "0.6341009", "0.63041687", "0.6233112", "0.61331993", "0.6067895", "0.60603666", "0.60564363", "0.6000023", "0.5999915", "0.59684426", "0.5967346", "0.59607744", "0.5949428", "0.594406...
0.7195202
0
return a blank image object
def _blankimage(): img = TK.PhotoImage(width=1, height=1) img.blank() return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_blank_img(self):\n if photos_settings.DEBUG:\n return self.get_placeholder_img()\n\n out = {\n 'blank': True,\n 'width': self.max_width,\n 'height': self.max_height,\n 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % ...
[ "0.7410479", "0.734866", "0.7067597", "0.69549996", "0.6941064", "0.68790126", "0.6719695", "0.6615003", "0.6612284", "0.66046363", "0.6579427", "0.65710306", "0.6570533", "0.6521228", "0.6519786", "0.6500374", "0.64950687", "0.64875454", "0.64772147", "0.64727896", "0.646266...
0.82468075
0
Create an invisible polygon item on canvas self.cv)
def _createpoly(self): return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initiate(self):\n pts = []\n for point in self.points:\n pt = gr.Point(point[0],point[1])\n pts.append(pt)\n\n self.vis = [gr.Polygon(pts)]\n\n self.draw()", "def draw_me(self, canvas):\n return canvas.create_polygon(self.dot, outline=self.poly, fill=s...
[ "0.6772693", "0.67152303", "0.67051923", "0.6295918", "0.62573355", "0.6244688", "0.62337166", "0.6097748", "0.60878134", "0.60610825", "0.6032351", "0.60145247", "0.5891362", "0.5884091", "0.58743083", "0.58682644", "0.58416355", "0.58405304", "0.5811396", "0.58092785", "0.5...
0.69808215
0
Configure polygonitem polyitem according to provided
def _drawpoly(self, polyitem, coordlist, fill=None, outline=None, width=None, top=False): cl = [] for x, y in coordlist: cl.append(x * self.xscale) cl.append(-y * self.yscale) self.cv.coords(polyitem, *cl) if fill is not None: self.cv...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_poly_corners(self, poly_item, marker_dict=None):\n poly = poly_item.polygon()\n\n for i in range(poly.size()):\n point = poly.at(i)\n p = self.addEllipse(-4, -4, 8, 8, self.LUBronze, self.LUBronze)\n p.setZValue(2) # Make sure corners always in front of polyg...
[ "0.70407003", "0.6504912", "0.6365025", "0.625637", "0.6162488", "0.60109514", "0.5975227", "0.59541506", "0.5913706", "0.5852789", "0.5838715", "0.5821715", "0.58086246", "0.57636315", "0.572026", "0.5710127", "0.5707078", "0.5682682", "0.56402135", "0.56137234", "0.5599655"...
0.66038036
1
Create an invisible line item on canvas self.cv)
def _createline(self): return self.cv.create_line(0, 0, 0, 0, fill="", width=2, capstyle = TK.ROUND)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)", "def draw(self, img):\n self._erase_la...
[ "0.6389729", "0.6292108", "0.6281788", "0.618214", "0.60821176", "0.6069137", "0.60624546", "0.60604846", "0.60585237", "0.60570765", "0.6035188", "0.6017805", "0.59448105", "0.59333545", "0.5931279", "0.59004617", "0.5883799", "0.58727956", "0.5841444", "0.5840402", "0.58379...
0.74068683
0
Delay subsequent canvas actions for delay ms.
def _delay(self, delay): self.cv.after(delay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delay():\r\n time.sleep(2)", "def _delay(self, delay=None):\n return self.screen.delay(delay)", "def _delay(self, n=None):", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def delay(ms: int, /) -> None:", "def __delay(msecs):\n time.sleep(msecs / 1000)", ...
[ "0.7070052", "0.6835207", "0.6616521", "0.64635235", "0.6431732", "0.6303699", "0.61440194", "0.61355704", "0.6104973", "0.60513353", "0.6049009", "0.60098773", "0.5986527", "0.5979398", "0.5903434", "0.58919734", "0.5890372", "0.58212", "0.5795427", "0.57597876", "0.5746297"...
0.72006464
0
Check if the string color is a legal Tkinter color string.
def _iscolorstring(self, color): try: rgb = self.cv.winfo_rgb(color) ok = True except TK.TclError: ok = False return ok
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_color(s):\n def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)\n\n try:\n if type(s) == int:\n return in_range(s)\n elif type(s) not in (str, bytes):\n return False\n elif s in webcolors.css3_names_to_hex:\n return True\n elif s[0] == '#':...
[ "0.7850937", "0.7765966", "0.76969033", "0.7676884", "0.75903404", "0.75249046", "0.74149966", "0.7390387", "0.70323974", "0.6862508", "0.6857582", "0.68549395", "0.6794173", "0.6701827", "0.6685319", "0.6660881", "0.654674", "0.6521498", "0.6519961", "0.6518414", "0.6441103"...
0.8371807
0
Write txt at pos in canvas with specified font and color. Return text item and xcoord of right bottom corner of text's bounding box.
def _write(self, pos, txt, align, font, pencolor): x, y = pos x = x * self.xscale y = y * self.yscale anchor = {"left":"sw", "center":"s", "right":"se" } item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align], fill = pencolo...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24, linespace=20):\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, ...
[ "0.703068", "0.70164824", "0.6974559", "0.69286704", "0.68471724", "0.6755969", "0.66728175", "0.6666933", "0.65818965", "0.65559834", "0.65518737", "0.65458", "0.64890784", "0.6485086", "0.6478929", "0.64785075", "0.6454026", "0.6442451", "0.6432531", "0.63997805", "0.637317...
0.81335413
0
Bind fun to mousebuttonrelease event on Myturtle. fun must be a function with two arguments, the coordinates of the point on the canvas where mouse button is released. num, the number of the mousebutton defaults to 1 If a Myturtle is clicked, first _onclickevent will be performed, then _onscreensclickevent.
def _onrelease(self, item, fun, num=1, add=None): if fun is None: self.cv.tag_unbind(item, "<Button%s-ButtonRelease>" % num) else: def eventfun(event): x, y = (self.cv.canvasx(event.x)/self.xscale, -self.cv.canvasy(event.y)/self.yscale) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onscreenclick(self, fun, num=1, add=None):\n if fun is None:\n self.cv.unbind(\"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n ...
[ "0.68110013", "0.65954924", "0.6564144", "0.61202985", "0.6027845", "0.60225904", "0.5944487", "0.59349585", "0.5782131", "0.5691157", "0.5666011", "0.56604856", "0.5641726", "0.5623444", "0.56176275", "0.55965006", "0.5556579", "0.55450773", "0.5525885", "0.551433", "0.54659...
0.7111682
0
Bind fun to mousemoveevent (with pressed mouse button) on Myturtle. fun must be a function with two arguments, the coordinates of the actual mouse position on the canvas. num, the number of the mousebutton defaults to 1 Every sequence of mousemoveevents on a Myturtle is preceded by a mouseclick event on that Myturtle.
def _ondrag(self, item, fun, num=1, add=None): if fun is None: self.cv.tag_unbind(item, "<Button%s-Motion>" % num) else: def eventfun(event): try: x, y = (self.cv.canvasx(event.x)/self.xscale, -self.cv.canvasy(event.y...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onscreenclick(self, fun, num=1, add=None):\n if fun is None:\n self.cv.unbind(\"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n ...
[ "0.65346754", "0.59252113", "0.58134866", "0.567966", "0.55477583", "0.55414116", "0.5523079", "0.5498528", "0.5462336", "0.5461063", "0.54575276", "0.5379014", "0.5379014", "0.53327185", "0.5305558", "0.5290165", "0.5269398", "0.5261307", "0.5252702", "0.52516973", "0.524594...
0.6218892
1
Bind fun to mouseclick event on canvas. fun must be a function with two arguments, the coordinates of the clicked point on the canvas. num, the number of the mousebutton defaults to 1 If a Myturtle is clicked, first _onclickevent will be performed, then _onscreensclickevent.
def _onscreenclick(self, fun, num=1, add=None): if fun is None: self.cv.unbind("<Button-%s>" % num) else: def eventfun(event): x, y = (self.cv.canvasx(event.x)/self.xscale, -self.cv.canvasy(event.y)/self.yscale) fun(x, y) ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onclick(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n ...
[ "0.6381053", "0.63472396", "0.6090573", "0.58589256", "0.5826599", "0.5661302", "0.5654933", "0.56274605", "0.5607574", "0.55640775", "0.5502268", "0.54359365", "0.53825593", "0.5359921", "0.5359921", "0.53023577", "0.52998567", "0.5286697", "0.52823967", "0.52305996", "0.522...
0.72543937
0
Bind fun to keyrelease event of key. Canvas must have focus. See method listen
def _onkeyrelease(self, fun, key): if fun is None: self.cv.unbind("<KeyRelease-%s>" % key, None) else: def eventfun(event): fun() self.cv.bind("<KeyRelease-%s>" % key, eventfun)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_key_release(self, event):", "def key_release_event(self, event):\n pass", "def on_release(self, keyname):\n self.keydown = False\n keyname = str(keyname).strip('\\'')\n log.info('KEY RELEASE ' + keyname)\n if keyname in self.controls_keyrelease:\n key_handl...
[ "0.7828458", "0.73670536", "0.7318554", "0.6937562", "0.68964875", "0.6888811", "0.68864244", "0.6816621", "0.6766921", "0.6632363", "0.6632041", "0.6628382", "0.6627461", "0.66099536", "0.6597856", "0.6597017", "0.651941", "0.6456723", "0.642119", "0.6409312", "0.6390656", ...
0.80060947
0
If key is given, bind fun to keypress event of key. Otherwise bind fun to any keypress. Canvas must have focus. See method listen.
def _onkeypress(self, fun, key=None): if fun is None: if key is None: self.cv.unbind("<KeyPress>", None) else: self.cv.unbind("<KeyPress-%s>" % key, None) else: def eventfun(event): fun() if key is None: ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onkeyrelease(self, fun, key):\n if fun is None:\n self.cv.unbind(\"<KeyRelease-%s>\" % key, None)\n else:\n def eventfun(event):\n fun()\n self.cv.bind(\"<KeyRelease-%s>\" % key, eventfun)", "def onkey(self, fun, key):\n if fun is None:\n ...
[ "0.71817255", "0.7139713", "0.694745", "0.6629521", "0.65834564", "0.6567381", "0.6504404", "0.6493854", "0.6422913", "0.6373026", "0.6367208", "0.6248102", "0.6203105", "0.61477613", "0.61441123", "0.6135494", "0.6100688", "0.60639316", "0.6040798", "0.6035595", "0.6017277",...
0.7777757
0
Install a timer, which calls fun after t milliseconds.
def _ontimer(self, fun, t): if t == 0: self.cv.after_idle(fun) else: self.cv.after(t, fun)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ontimer(self, fun, t=0):\n self._ontimer(fun, t)", "def start(self, sec, callFunc, *args, **kwargs):\n self.cancel()\n \n def doit(args=args, kwargs=kwargs):\n self._timerID = None\n callFunc(*args, **kwargs)\n\n self._timerID = self._tkWdg.after(int(0...
[ "0.7902541", "0.6533716", "0.6487333", "0.64392936", "0.64059657", "0.6389365", "0.6331448", "0.6325191", "0.62742996", "0.6244253", "0.615073", "0.6129014", "0.6125973", "0.6120906", "0.61153", "0.61113065", "0.61081517", "0.6058988", "0.60318214", "0.6030502", "0.60116047",...
0.6822445
1
Configure the image item so as to draw the image object at position (x, y) on the canvas.
def _drawimage(self, item, pos, image): x, y = pos self.cv.coords(item, (x * self.xscale, -y * self.yscale)) self.cv.itemconfig(item, image=image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pos_image(image, x,y):\n image.anchor_x = x\n image.anchor_y = y", "def on_draw_over_image(self):", "def draw_image_on_canvas(self, force_generation=False):\n\n self.canvas_vertex = (self.canvas.canvasx(0), self.canvas.canvasy(0))\n box_coords = (self.canvas_vertex[0], self.canvas_vertex[...
[ "0.7062693", "0.70012945", "0.6607183", "0.6527935", "0.6488746", "0.6411776", "0.639959", "0.6380511", "0.6360058", "0.6359835", "0.6342156", "0.63302064", "0.63219225", "0.62677747", "0.62642443", "0.6258162", "0.6256595", "0.6230513", "0.62140775", "0.6204366", "0.620224",...
0.8295738
0