query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Method tests the request to delete a bucketlist item
def test_delete_bucketlist_item(self):
    """Deleting an existing bucketlist item returns 200 OK, a confirmation
    message, and removes the row from the database."""
    email = "test@test.com"
    _pword = "test"
    # Fixtures: a user owning "test bucketlist" with an item at id=1 are
    # presumably created in the suite's setUp — TODO confirm
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()
    # Sanity check: the item must exist before we try to delete it
    self.assertTrue(item)
    response = self.delete_bucketlist_item(email, _pword, bucketlist.id, item.id)
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(
        result['message'],
        'Bucketlist Item with ID {} deleted'.format(item.id)
    )
    # Re-query to confirm the item is actually gone
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=1).first()
    self.assertFalse(item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_bucketlist_item(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n\n # create a bucketlist by making a POST request\n res = self.client().post(\n '/api/v1/bucketlists/',\n ...
[ "0.8505305", "0.83253217", "0.82858104", "0.8246607", "0.82087", "0.81654054", "0.8043401", "0.78209937", "0.77620775", "0.76105493", "0.7587545", "0.75202996", "0.7477461", "0.7458294", "0.73910975", "0.7342991", "0.73347974", "0.7244719", "0.7221748", "0.7206562", "0.710843...
0.8602903
0
Method tests the error raised when end point for delete a bucket list item contains the wrong id
def test_delete_item_wrong_id(self):
    """Deleting a bucketlist item with a nonexistent id returns 404 and the
    API's "did you mean" error message."""
    email = "test@test.com"
    _pword = "test"
    user = User.query.filter_by(email=email).first()
    bucketlist = BucketList.query.filter_by(user_id=user.id, name="test bucketlist").first()
    # id=0 is never assigned, so this lookup must come back empty
    item = BucketListItem.query.filter_by(bucketlist_id=bucketlist.id, id=0).first()
    self.assertFalse(item)
    response = self.delete_bucketlist_item(email, _pword, bucketlist.id, 0)
    result = json.loads(response.data.decode('utf-8'))
    self.assertEqual(response.status, '404 NOT FOUND')
    # NOTE(review): the expected message hard-codes bucketlist id 1 in the
    # URI; this only holds while the fixture bucketlist has id 1 — confirm
    self.assertEqual(
        result['message'],
        'Bucketlist Item with ID {} not found in the database. You have requested this URI '\
        '[/api/v1/bucketlist/1/items/0] but did you mean /api/v1/bucketlist/<int:bucketlist_id>/items/'\
        ' or /api/v1/bucketlist/<int:bucketlist_id> or /api/v1/bucketlist ?'.format(0)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deletion_handles_no_bucket_found_by_id(self):\n with self.client:\n response = self.client.delete(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n ...
[ "0.8280715", "0.81965166", "0.8153652", "0.80517626", "0.7886369", "0.7813242", "0.7694013", "0.76679814", "0.76200867", "0.74761516", "0.7440213", "0.74288243", "0.74263364", "0.7391026", "0.73868865", "0.7326834", "0.7310586", "0.72898954", "0.72570956", "0.7218091", "0.721...
0.864614
0
Method is used to send a request to the api to add a bucketlist item for testing
def add_bucketlist_item(self, email, password, buckelist_id, item_name):
    """POST a new bucketlist item to the API and return the raw response.

    NOTE(review): parameter name `buckelist_id` is a typo of
    `bucketlist_id`, kept as-is so keyword-argument callers don't break.
    """
    finish_date = str(date(2020, 9, 22))
    url = '/api/v1/bucketlist/{}/items/'.format(buckelist_id)
    payload = json.dumps({"name": item_name, "finished_by": finish_date})
    auth = self.authentication_headers(email=email, password=password)
    return self.client.post(
        url,
        data=payload,
        content_type="application/json",
        headers=auth,
        follow_redirects=True
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_bucketlist_item(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n ...
[ "0.7808094", "0.7646647", "0.76416534", "0.7634788", "0.73911667", "0.7332509", "0.7283901", "0.7266109", "0.72469544", "0.7237784", "0.7166871", "0.7161714", "0.7063952", "0.70266515", "0.7024281", "0.7019768", "0.69053954", "0.6877802", "0.6849187", "0.67823607", "0.6734943...
0.7050521
13
Method is used to send request for put for the bucketlist item to the api
def put_bucketlist_item(self, email, password, bucketlist_id, item_id, data):
    """PUT an update for one bucketlist item and return the raw response."""
    auth = self.authentication_headers(email=email, password=password)
    url = '/api/v1/bucketlist/{}/items/{}'.format(bucketlist_id, item_id)
    return self.client.put(
        url,
        content_type="application/json",
        data=json.dumps(data),
        headers=auth,
        follow_redirects=True
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_put_bucketlist_item(self):\r\n data = {\"name\": \"bucketlist item name\", \"completed\": \"true\"}\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, name=...
[ "0.77104014", "0.75837624", "0.73698086", "0.7226279", "0.7216704", "0.71507794", "0.702163", "0.68962586", "0.6804519", "0.67802167", "0.6777862", "0.66020447", "0.6598506", "0.65621024", "0.64808017", "0.64796245", "0.64737403", "0.64593357", "0.6454596", "0.645011", "0.640...
0.7592196
1
Method is used to send request to delete a bucketlist item
def delete_bucketlist_item(self, email, password, bucketlist_id, item_id):
    """DELETE one bucketlist item via the API and return the raw response."""
    auth = self.authentication_headers(email=email, password=password)
    endpoint = '/api/v1/bucketlist/{}/items/{}'.format(bucketlist_id, item_id)
    return self.client.delete(
        endpoint,
        content_type="application/json",
        headers=auth,
        follow_redirects=True
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_bucketlistitem_by_id(self):\n resp = self.client.post('/bucketlists',\n data=json.dumps(self.bucketlist),\n content_type=\"application/json\", headers={\n \"Authorization\": self.token\n ...
[ "0.77946883", "0.7778827", "0.7758909", "0.7664357", "0.76476765", "0.7614669", "0.7598389", "0.74553555", "0.74454176", "0.714708", "0.69668", "0.6871372", "0.6836843", "0.68232435", "0.67902356", "0.6745601", "0.66664237", "0.66347444", "0.6566819", "0.6558374", "0.6537086"...
0.7630652
5
Spawn a new thread which continually updates things.
def run(self):
    """Continuously read newline-terminated lines from the serial port and
    publish parsed sensor values onto shared state under the lock.

    Runs forever; intended as the body of a background thread. Each line
    looks like "<tag> <value...>", e.g. "PPM 1 2 3" or "Wind 42".
    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to the parse failures it was meant to ignore,
    and the manual acquire/release pair — which leaked the lock if any
    other exception fired in between — is replaced with a `with` block.
    """
    while True:
        # Accumulate bytes until a full line (terminated by '\n') arrives
        buf = ""
        while len(buf) == 0 or buf[-1] != '\n':
            if self.ser.available():
                buf += self.ser.read()
            else:
                delay(1)  # Avoid pegging CPU
        tokens = buf.split(' ')
        tag = tokens[0]
        # `with` guarantees the lock is released even if parsing raises
        with self.lock:
            try:
                if tag == "PPM":
                    self.ppm = [int(i) for i in tokens[1:]]
                elif tag == "Wind":
                    self.wind = int(tokens[1])
                elif tag == "roll":
                    self.roll = float(tokens[1])
                elif tag == "yaw":
                    self.yaw = float(tokens[1])
                elif tag == "Wpot":
                    self.winch = int(tokens[1])
                elif tag == "Rpot":
                    self.rudder = int(tokens[1])
            except (ValueError, IndexError):
                pass  # Malformed line: a cast failed or a field was missing
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\n Thread(target=self.update, args=()).start()\n return self", "def start(self):\n Thread(target=self.update, args=()).start()\n return self", "def start(self):\r\n t = Thread(target=self.update, args=())\r\n t.daemon = True\r\n t.start()\r\n ...
[ "0.67790216", "0.67790216", "0.6629717", "0.6629717", "0.6471286", "0.63497496", "0.6247109", "0.60593855", "0.6020881", "0.60143054", "0.6010006", "0.6010006", "0.59761846", "0.5964021", "0.59637165", "0.59495205", "0.59432256", "0.59210443", "0.5894969", "0.5848531", "0.583...
0.0
-1
Apply type padding in a convolution operation.
def padding_type(spatial, config):
    """Resolve the per-dimension padding for a convolution from its config.

    Returns 0 when the config has no 'padding' entry; otherwise returns a
    list of ints — the explicit list, a 'same'-style half padding computed
    from kernel/stride, or zeros for 'valid'. Raises ValueError for any
    other padding value. `spatial` is a tensor of spatial sizes.
    """
    if 'padding' not in config:
        return 0
    pad = config['padding']
    if isinstance(pad, list):
        amount = torch.tensor(pad)
    elif pad == 'same':
        kernel = torch.tensor(config['kernel_size'])
        stride = torch.tensor(config['stride'])
        amount = (spatial * (stride - 1) - 1 + kernel) // 2
    elif pad == 'valid':
        amount = torch.zeros(spatial.shape).long()
    else:
        raise ValueError('Pad type is invalid')
    return list(amount.numpy())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_same_padder(conv_op: nn.Conv1d | nn.Conv2d | nn.Conv3d, image_size: list[int]):\n # calculate padding required\n padding: list[int] = _get_same_padding_conv_nd(image_size, conv_op.kernel_size, conv_op.dilation, conv_op.stride)\n\n # initialize and return padder\n padder = Pad[\"constantpad\",...
[ "0.6828457", "0.67641896", "0.67054737", "0.6690218", "0.6687156", "0.659067", "0.6519699", "0.6487844", "0.6447366", "0.6444962", "0.6419147", "0.64094675", "0.63942194", "0.63535935", "0.631613", "0.62766844", "0.6266804", "0.6255546", "0.6248054", "0.62391937", "0.6237672"...
0.6141132
35
Safely convert parameter value from .cfg file
def safe_conversion(value):
    """Safely convert a parameter value read from a .cfg file.

    Tries `ast.literal_eval` to turn the string into a Python literal
    (ints, floats, tuples, ...); tuples are converted to lists. If the
    string is not a literal, it is returned unchanged.

    Fix: `ast.literal_eval` raises SyntaxError (not just ValueError) for
    malformed input such as "a b"; the original only caught ValueError,
    so a "safe" conversion could crash.
    """
    try:
        value = ast.literal_eval(value)
        value = list(value) if isinstance(value, tuple) else value
        return value
    except (ValueError, SyntaxError):
        return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _loadParamFromFile(config, section, paramName):\n\n # Get paramName from answer file\n value = config.get(section, paramName)\n\n # Validate param value using its validation func\n param = controller.getParamByName(paramName)\n _validateParamValue(param, value)\n\n # Keep param value in our n...
[ "0.62456924", "0.5969067", "0.58489376", "0.5828713", "0.57924104", "0.57802683", "0.57431704", "0.56006944", "0.5588103", "0.55810744", "0.55671716", "0.5529748", "0.5526934", "0.55040497", "0.55028737", "0.54622763", "0.5404378", "0.5393546", "0.53748494", "0.5366472", "0.5...
0.0
-1
Calculate spatial output shape after convolution.
def out_conv(spatial, config):
    """Calculate the spatial output size after a convolution.

    `config` supplies 'padding' (int, or a 2-list whose entries are
    summed), 'kernel_size', and 'stride'.
    """
    pad = config['padding']
    kernel = config['kernel_size']
    stride = config['stride']
    if isinstance(pad, int):
        total_pad = pad
    else:
        total_pad = pad[0] + pad[1]
    return (spatial + total_pad - kernel) // stride + 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_output_shape(self, input_shape):\r\n return input_shape", "def output_shape(self):\r\n return self.detector.output_shape", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.as...
[ "0.7263656", "0.718158", "0.7174481", "0.7144437", "0.7144437", "0.7144437", "0.7144437", "0.7144437", "0.71191394", "0.711027", "0.70433396", "0.695608", "0.695608", "0.695608", "0.67904943", "0.67573696", "0.6704987", "0.66797125", "0.66329926", "0.65868956", "0.6567974", ...
0.67702305
15
Calculate spatial output shape after transpose convolution.
def out_tconv(spatial, config):
    """Calculate the spatial output size after a transpose convolution.

    `config` supplies 'padding' (int, or a 2-list whose entries are
    summed), 'kernel_size', 'stride', and optionally 'output_padding'
    (defaults to 0).
    """
    pad = config['padding']
    kernel = config['kernel_size']
    stride = config['stride']
    extra = config.get('output_padding', 0)
    total_pad = pad if isinstance(pad, int) else pad[0] + pad[1]
    return (spatial - 1) * stride - total_pad + kernel + extra
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_output_shape(self, input_shape):\r\n return input_shape", "def get_output_shape(self):\n weights = self.W.get_shape().as_list()\n input_size = np.asarray(self.incoming_shape[-3:-1])\n strides = np.asarray(self.strides[-3:-1])\n kernels = np.asarray(weights[0:2])\n ...
[ "0.69743323", "0.68945485", "0.6871195", "0.68555003", "0.68555003", "0.68555003", "0.68555003", "0.68555003", "0.6844321", "0.6745843", "0.6660508", "0.6644264", "0.6641586", "0.66338116", "0.6628068", "0.6628068", "0.6628068", "0.65929323", "0.65625435", "0.65463483", "0.65...
0.6426456
21
Deal with the repeating blocks in the model by keeping track of the encompassed rows and multiplying them before appending.
def format_repeats(file):
    """Expand 'repeat xN' ... 'end' sections while reading a model file.

    `file` must be an iterator of lines. Lines between a 'repeat xN'
    marker and the matching 'end' are replicated N times in place;
    '#'-prefixed comment lines outside repeat sections are dropped.
    Returns the flattened list of stripped lines.
    """
    expanded = []
    for raw in file:
        line = raw.lstrip().replace('\n', '')
        if line.lower().startswith('repeat'):
            count = int(line.split('x')[1])
            block = []
            while True:
                inner = next(file).lstrip().replace('\n', '')
                if inner.lower() == 'end':
                    break
                block.append(inner)
            expanded += block * count
        elif not line.startswith('#'):
            expanded.append(line)
    return expanded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rows(self, row):\n self.row += row", "def addInnerBreaks(self):\n for item in self:\n item.addInnerBreaks()", "def process_duplicate_rows(self):\n pass", "def addBreaks(self):\n for item in self:\n item.addBreaks()", "def block_replicate(data, block_siz...
[ "0.5476102", "0.5351711", "0.53062737", "0.51498103", "0.5123177", "0.5094556", "0.5087477", "0.5058288", "0.5041181", "0.49976635", "0.4990649", "0.497327", "0.49467406", "0.4940926", "0.49374586", "0.4928902", "0.48958787", "0.48951703", "0.4883801", "0.48619694", "0.485302...
0.0
-1
Keep track of how many times a type of layer has appeared and append _counter to their name to maintain module name uniqueness.
def add_counters(dic, arr):
    """Append the current per-type counter to each bracketed layer name.

    Entries like '[conv]' whose inner name is a key of `dic` become
    '[conv_<count>]' and the counter is incremented (mutating `dic`);
    names absent from `dic` pass through unchanged.
    """
    renamed = []
    for entry in arr:
        key = entry[1:-1]
        count = dic.get(key)
        if count is None:
            renamed.append(entry)
        else:
            renamed.append('[%s_%s]' % (key, count))
            dic[key] += 1
    return renamed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_layer_count(layer_type=''):\n _LAYER_TYPES[layer_type] += 1\n return _LAYER_TYPES[layer_type]", "def _auto_name(name, parent):\n if not is_ready(parent):\n parent._pywarm_auto_name_dict = {}\n def _hook(model, x):\n model._pywarm_auto_name_dict = {}\n parent._pywa...
[ "0.6607571", "0.6507959", "0.6308612", "0.630668", "0.630668", "0.630668", "0.6284107", "0.6157651", "0.6157651", "0.6146809", "0.61214304", "0.6117625", "0.60797423", "0.59838635", "0.5966383", "0.5896614", "0.58724344", "0.58569306", "0.5815706", "0.581127", "0.5774243", ...
0.5392121
47
Check if model uses submodules
def defined_submodule(arr):
    """Return True if any entry marks a submodule (ends with '_module]')."""
    for entry in arr:
        if entry.endswith('_module]'):
            return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_submodule(submodule):\n is_correct_subclass = issubclass(submodule, AnalysisModule)\n # Ensure submodule is defined within the package we are inspecting (and not 'base')\n is_correct_module = package.__name__ in submodule.__module__\n return is_correct_subclass and is_correct_m...
[ "0.66678244", "0.6193898", "0.6127741", "0.6090915", "0.60585386", "0.5981495", "0.5975656", "0.59688056", "0.58849907", "0.5884931", "0.5833496", "0.5831226", "0.58079565", "0.5742593", "0.5681119", "0.5659924", "0.56587225", "0.56485283", "0.56155103", "0.5606545", "0.56036...
0.6265166
1
Initialize a MAD4PG network.
def __init__(self, env, args):
    """Initialize a MAD4PG network: one D4PG agent per environment agent,
    a shared replay buffer, and target networks hard-synced at start.

    env  -- environment wrapper exposing state_size/action_size/agent_count
    args -- parsed hyperparameters (C, e, e_min, tau, batch_size, ...)
    """
    self.framework = "MAD4PG"
    self.t_step = 0
    self.episode = 1
    self.avg_score = 0
    # Hard-update period C and epsilon-annealing parameters
    self.C = args.C
    self._e = args.e
    self.e_min = args.e_min
    self.e_decay = args.e_decay
    self.anneal_max = args.anneal_max
    self.update_type = args.update_type
    self.tau = args.tau
    self.state_size = env.state_size
    self.action_size = env.action_size
    # Create all the agents to be trained in the environment
    self.agent_count = env.agent_count
    self.agents = [D4PG_Agent(self.state_size, self.action_size, args, self.agent_count) for _ in range(self.agent_count)]
    self.batch_size = args.batch_size
    # Set up memory buffers, currently only standard replay is implemented
    self.memory = ReplayBuffer(args.device, args.buffer_size, args.gamma, args.rollout, self.agent_count)
    self.memory.init_n_step()
    # Force every target network to exactly match its active network
    for agent in self.agents:
        self.update_networks(agent, force_hard=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_network(self):\n raise NotImplementedError", "def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])", "def __init__(self, network=None):\n\n i...
[ "0.6553901", "0.650665", "0.6231836", "0.607061", "0.60383505", "0.5979345", "0.5966271", "0.5948596", "0.58692914", "0.586917", "0.5827446", "0.58020544", "0.5797991", "0.5792459", "0.5786038", "0.5722977", "0.5710139", "0.5696189", "0.5653719", "0.5616278", "0.56124663", ...
0.0
-1
For each agent in the MAD4PG network, choose an action from the ACTOR
def act(self, obs, training=True):
    """Choose an action from each agent's ACTOR given its observation.

    obs      -- one observation per agent (len must equal agent count)
    training -- when True, add epsilon-scaled Gaussian exploration noise

    Returns actions clipped to [-1, 1].
    """
    assert len(obs) == len(self.agents), "Num OBSERVATIONS does not match \
        num AGENTS."
    # No gradients are needed for action selection
    with torch.no_grad():
        actions = np.array([agent.act(o) for agent, o in zip(self.agents, obs)])
    if training:
        actions += self._gauss_noise(actions.shape)
    return np.clip(actions, -1, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _advance_by_action(game, agents, action):\n getLogger(__name__).debug(\"Agent {} action {}\".format(game.current_agent_id, action))\n agent_id_for_action = game.current_agent_id\n\n game.take_action(action)\n for agent in agents:\n agent.take_action(action, agent.agent_id...
[ "0.70322484", "0.65847504", "0.65847504", "0.65610033", "0.65087974", "0.64800406", "0.64696527", "0.64690334", "0.6465", "0.64002377", "0.6380364", "0.6380364", "0.63314396", "0.6323784", "0.6322388", "0.6242467", "0.6234373", "0.6211339", "0.6201912", "0.6196667", "0.617057...
0.0
-1
Store an experience tuple in the ReplayBuffer
def store(self, experience):
    """Store an experience tuple in the ReplayBuffer.

    `experience` is forwarded untouched — presumably the
    (obs, next_obs, actions, rewards, dones) tuple assembled during
    environment interaction; confirm against ReplayBuffer.store.
    """
    self.memory.store(experience)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_experience(self, s, a, r, t, s2):\n self.replay_buffer.add(s, a, r, t, s2)", "def add(self, experience):\n self.buffer.append(experience)", "def add_experience(self, action, state, reward, terminal):\n self.replay_buffer.add_experience(action, state, reward, terminal)", "def ap...
[ "0.7511635", "0.6776789", "0.67517585", "0.6497582", "0.6471509", "0.6421251", "0.6248629", "0.6102861", "0.5914454", "0.58810997", "0.58605915", "0.5817004", "0.5794464", "0.57156426", "0.5708091", "0.5649744", "0.5612071", "0.5603931", "0.55979264", "0.55325574", "0.5514577...
0.62331957
7
Perform a learning step on all agents in the network.
def learn(self): self.t_step += 1 # Sample from replay buffer, which already has nstep rollout calculated. batch = self.memory.sample(self.batch_size) obs, next_obs, actions, rewards, dones = batch # Gather and concatenate actions because critic networks need ALL # actions as input, the stored actions were concatenated before storing # in the buffer target_actions = [agent.actor_target(next_obs[i]) for i, agent in enumerate(self.agents)] predicted_actions = [agent.actor(obs[i]) for i, agent in enumerate(self.agents)] target_actions = torch.cat(target_actions, dim=-1) predicted_actions = torch.cat(predicted_actions, dim=-1) # Change state data from [agent_count, batch_size] # to [batchsize, state_size * agent_count] # because critic networks need to ALL observations as input obs = obs.transpose(1,0).contiguous().view(self.batch_size, -1) next_obs = next_obs.transpose(1,0).contiguous().view(self.batch_size,-1) # Perform a learning step for each agent using concatenated data as well # as unique-perspective data where algorithmically called for for i, agent in enumerate(self.agents): agent.learn(obs, next_obs, actions, target_actions, predicted_actions, rewards[i], dones[i]) self.update_networks(agent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learn(self):\n for a in self.agents:\n a.learn()", "def learn(self, num_episodes=10000):\n for i in range(num_episodes):\n self.actor()\n self.learner()", "def learn(self, experiences, gamma):\n \n next_actions = []\n actions = []\n ...
[ "0.81697196", "0.7225745", "0.70513433", "0.67381495", "0.6667428", "0.6628232", "0.65659773", "0.6510419", "0.64319664", "0.6419689", "0.6391004", "0.63875294", "0.6357127", "0.63503945", "0.6346165", "0.6324643", "0.62902844", "0.62848055", "0.6270551", "0.6256862", "0.6247...
0.68872505
3
Fills up the ReplayBuffer memory with PRETRAIN_LENGTH number of experiences before training begins.
def initialize_memory(self, pretrain_length, env):
    """Fill the ReplayBuffer with `pretrain_length` random-action
    experiences before training begins.

    Does nothing if the buffer already holds enough experiences. Actions
    are sampled uniformly from [-1, 1] for every agent; on episode end
    the env is reset and n-step bookkeeping restarted.
    """
    if self.memlen >= pretrain_length:
        print("Memory already filled, length: {}".format(len(self.memory)))
        return
    # Report progress roughly 25 times over the fill (at least every 10)
    interval = max(10, int(pretrain_length/25))
    print("Initializing memory buffer.")
    obs = env.states
    while self.memlen < pretrain_length:
        actions = np.random.uniform(-1, 1, (self.agent_count, self.action_size))
        next_obs, rewards, dones = env.step(actions)
        self.store((obs, next_obs, actions, rewards, dones))
        obs = next_obs
        if np.any(dones):
            # Episode ended: reset env and restart the n-step rollout
            env.reset()
            obs = env.states
            self.memory.init_n_step()
        if self.memlen % interval == 1 or self.memlen >= pretrain_length:
            print("...memory filled: {}/{}".format(self.memlen, pretrain_length))
    print("Done!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def populate_buffer(self, num_transitions):\n while len(self.replay_buffer) < self.buffer_sample_size:\n self.play(num_transitions)", "def collect_samples(self):\n self.replay_buffer = self.collect_initial_batch(\n self.replay_buffer, self.acm_pre_train_samples\n )", ...
[ "0.7086755", "0.634814", "0.62348205", "0.61094946", "0.6062285", "0.601058", "0.59450614", "0.59385663", "0.59318626", "0.5931236", "0.5920841", "0.591392", "0.59087026", "0.5903313", "0.58007306", "0.57563967", "0.5704546", "0.56222826", "0.5611808", "0.56116015", "0.559233...
0.6525008
1
This property ensures that the annealing process is run every time that E is called. Anneals the epsilon rate down to a specified minimum to ensure there is always some noisiness to the policy actions. Returns as a property. Uses a modified TANH curve to roll off the values near min/max.
def e(self):
    """Annealed epsilon, recomputed on every access.

    Maps the running average score onto a shifted sigmoid so epsilon
    rolls off smoothly from `_e` toward `e_min` as avg_score approaches
    `anneal_max`, keeping some policy noise at all times.
    """
    low, high = self.e_min, self._e
    x_min, x_max = 0, self.anneal_max
    # Steepness 8 gives a near-flat tail at both ends of the range
    slope = 8 / (x_max - x_min)
    center = (x_max + x_min) / 2
    span = high - low
    score = np.clip(self.avg_score, 0, x_max)
    z = slope * (score - center)
    return low + span / (1 + np.exp(z))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_epsilon_greedy_policy(estimator, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n# print(q_values)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n r...
[ "0.660565", "0.6559329", "0.6557568", "0.64600974", "0.64180017", "0.6403347", "0.6382049", "0.6335735", "0.6307492", "0.62434304", "0.62413085", "0.62142545", "0.6212238", "0.61981845", "0.61455894", "0.61093074", "0.60480744", "0.6001524", "0.5995188", "0.59935886", "0.5979...
0.6084767
16
Handle any cleanup or steps to begin a new episode of training.
def new_episode(self, scores):
    """Handle cleanup and bookkeeping at the start of a new episode.

    Updates the running average score over (at most) the last 50 episode
    scores — used by the epsilon annealing property, and deliberately
    refreshed once per episode rather than per timestep — then restarts
    n-step rollout tracking and bumps the episode counter.
    """
    window = np.clip(len(scores), 1, 50)
    recent = np.array(scores[-window:])
    self.avg_score = recent.mean()
    self.memory.init_n_step()
    self.episode += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _training_after_hook(self):\n pass", "def _end_episode(self):\n self.turn_cnt = 0\n self.episode_cnt += 1\n self.contexts = None\n self.seed_utterances = None\n self.reset_agents()", "def finalize(self) :\n if not self.nTrain:\n self.nTrain = max(...
[ "0.65182483", "0.6447759", "0.62984306", "0.62847793", "0.62345207", "0.62345207", "0.623374", "0.62325156", "0.62036747", "0.6123561", "0.60841036", "0.6069675", "0.60509044", "0.6032578", "0.6024081", "0.6007592", "0.5988098", "0.59657085", "0.5957134", "0.5953631", "0.5944...
0.0
-1
Updates the network using either DDPG-style soft updates (w/ param TAU), or using a DQN/D4PG-style hard update every C timesteps.
def update_networks(self, agent, force_hard=False):
    """Sync an agent's target networks with its active networks.

    Uses DDPG-style soft updates (parameter tau) when configured, unless
    `force_hard` is set; otherwise performs a DQN/D4PG-style hard copy
    every C timesteps (or immediately when forced).
    """
    use_soft = self.update_type == "soft" and not force_hard
    if use_soft:
        self._soft_update(agent.actor, agent.actor_target)
        self._soft_update(agent.critic, agent.critic_target)
    elif force_hard or self.t_step % self.C == 0:
        self._hard_update(agent.actor, agent.actor_target)
        self._hard_update(agent.critic, agent.critic_target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n ...
[ "0.64842385", "0.64803463", "0.61421794", "0.60652184", "0.59276474", "0.5910837", "0.59055424", "0.5872152", "0.5856507", "0.58434814", "0.5840877", "0.5840877", "0.5834914", "0.58075476", "0.5797457", "0.57930094", "0.57774806", "0.5751198", "0.5739794", "0.5714073", "0.566...
0.6279729
2
Slowly updates the network using every-step partial network copies modulated by parameter TAU.
def _soft_update(self, active, target): for t_param, param in zip(target.parameters(), active.parameters()): t_param.data.copy_(self.tau*param.data + (1-self.tau)*t_param.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_target_network(self, tau):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(t * (1-tau) + e * tau)", "def update_target_network(self, tau):\n for p_target, p_local in zip(self.q_network_targ...
[ "0.7093877", "0.7012822", "0.6283256", "0.6237948", "0.6235717", "0.6108989", "0.6108989", "0.5920825", "0.588449", "0.5847011", "0.58270663", "0.5826606", "0.5730364", "0.5714644", "0.5706353", "0.5686066", "0.5660529", "0.5652498", "0.56278974", "0.56147575", "0.5596634", ...
0.5398285
41
Fully copy parameters from active network to target network. To be used in conjunction with a parameter "C" that modulates how many timesteps pass between these hard updates.
def _hard_update(self, active, target):
    """Fully copy all parameters from the active network into the target
    network. Used with parameter C, which modulates how many timesteps
    pass between these hard updates (see update_networks)."""
    target.load_state_dict(active.state_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _soft_update_target_network(self):\n\n # Update the target network\n for target_param, param in zip(self.actor_target_network.parameters(), self.actor_network.parameters()):\n target_param.data.copy_((1-self.args.tau) * target_param.data + self.args.tau * param.data)\n\n # Updat...
[ "0.66702074", "0.6463969", "0.6463969", "0.63160634", "0.6189461", "0.61107165", "0.6051058", "0.599364", "0.5967248", "0.5884471", "0.5775594", "0.57238007", "0.5721437", "0.56973237", "0.56926984", "0.5677991", "0.5631507", "0.5583762", "0.55708534", "0.5542699", "0.5535037...
0.0
-1
Returns the epsilon scaled noise distribution for adding to Actor calculated action policy.
def _gauss_noise(self, shape): n = np.random.normal(0, 1, shape) return self.e*n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epsilon(current_episode, num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def get_epsilongreedy_policy(epsilon):\n \n def epsilongreedy_policy(Qvalues_oa):\n \"\"\"Returns softmax action probabilites from Qvalues\"\"\"\n \n X =...
[ "0.65060216", "0.64552784", "0.64374816", "0.64273673", "0.633723", "0.63370866", "0.63252556", "0.6308487", "0.62902623", "0.62704015", "0.6251092", "0.6214871", "0.6212052", "0.61300147", "0.6115993", "0.60839003", "0.6075641", "0.60724646", "0.6063236", "0.6057245", "0.602...
0.551613
78
Returns length of memory buffer as a property.
def memlen(self):
    """Current number of experiences held in the replay buffer."""
    return len(self.memory)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __len__(self) -> int:\n return len(self.buffer)", "def __len__(self):\n return len(self.buffer)", "def __len__(self):\n return len(self.buffer)", "def __len__(self):\n\t\treturn len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n ...
[ "0.7560014", "0.7533779", "0.7533779", "0.74142253", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", "0.7374192", ...
0.0
-1
Initialize a D4PG Agent.
def __init__(self, state_size, action_size, args, agent_count = 1, l2_decay = 0.0001):
    """Initialize a D4PG agent: distributional value support (atoms),
    actor/target-actor pair, and centralized critic/target-critic pair.

    state_size/action_size -- per-agent dimensions from the environment
    args        -- parsed hyperparameters (learn rates, gamma, atoms, ...)
    agent_count -- critics take ALL agents' states/actions, so their input
                   sizes scale with this (default 1 = single-agent)
    l2_decay    -- Adam weight decay for both optimizers
    """
    self.framework = "D4PG"
    self.device = args.device
    self.eval = args.eval
    self.actor_learn_rate = args.actor_learn_rate
    self.critic_learn_rate = args.critic_learn_rate
    self.gamma = args.gamma
    self.rollout = args.rollout
    # Fixed support for the categorical value distribution: num_atoms
    # evenly spaced points in [vmin, vmax], shaped [1, num_atoms]
    self.num_atoms = args.num_atoms
    self.vmin = args.vmin
    self.vmax = args.vmax
    self.atoms = torch.linspace(self.vmin, self.vmax, self.num_atoms).to(self.device)
    self.atoms = self.atoms.unsqueeze(0)
    # Initialize ACTOR networks #
    self.actor = ActorNet(args.layer_sizes, state_size, action_size).to(self.device)
    self.actor_target = ActorNet(args.layer_sizes, state_size, action_size).to(self.device)
    self.actor_optim = optim.Adam(self.actor.parameters(), lr=self.actor_learn_rate, weight_decay=l2_decay)
    # Initialize CRITIC networks #
    c_input_size = state_size * agent_count
    c_action_size = action_size * agent_count
    self.critic = CriticNet(args.layer_sizes, c_input_size, c_action_size, self.num_atoms).to(self.device)
    self.critic_target = CriticNet(args.layer_sizes, c_input_size, c_action_size, self.num_atoms).to(self.device)
    self.critic_optim = optim.Adam(self.critic.parameters(), lr=self.critic_learn_rate, weight_decay=l2_decay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def agent_init(self):\n pass", "def __init__(self, agent: AEA) -> None:\n self._agent = agent\n super().__init__()", "def __init__(self, agent_id=\"default\", experiment_id=\"default\"):\n self.runtime = runtime()\n self.agent_id = agent_id\n self.experiment_id = exper...
[ "0.7342154", "0.6851554", "0.68496484", "0.673467", "0.67185724", "0.6604147", "0.6552631", "0.6546194", "0.6505822", "0.6481763", "0.6343824", "0.63030684", "0.6272267", "0.6253052", "0.62168545", "0.6205499", "0.6164115", "0.6161617", "0.61410064", "0.61237025", "0.60737205...
0.57521296
37
Returns the projected value distribution for the input state/action pair While there are several very similar implementations of this Categorical Projection methodology around github, this function owes the most
def _categorical(self, rewards, probs, dones): # Create local vars to keep code more concise vmin = self.vmin vmax = self.vmax atoms = self.atoms num_atoms = self.num_atoms gamma = self.gamma rollout = self.rollout # rewards/dones shape from [batchsize,] to [batchsize,1] rewards = rewards.unsqueeze(-1) dones = dones.unsqueeze(-1).type(torch.float) delta_z = (vmax - vmin) / (num_atoms - 1) projected_atoms = rewards + gamma**rollout * atoms * (1 - dones) projected_atoms.clamp_(vmin, vmax) b = (projected_atoms - vmin) / delta_z # It seems that on professional level GPUs (for instance on AWS), the # floating point math is accurate to the degree that a tensor printing # as 99.00000 might in fact be 99.000000001 in the backend, perhaps due # to binary imprecision, but resulting in 99.00000...ceil() evaluating # to 100 instead of 99. Forcibly reducing the precision to the minimum # seems to be the only solution to this problem, and presents no issues # to the accuracy of calculating lower/upper_bound correctly. precision = 1 b = torch.round(b * 10**precision) / 10**precision lower_bound = b.floor() upper_bound = b.ceil() m_lower = (upper_bound + (lower_bound == upper_bound).float() - b) * probs m_upper = (b - lower_bound) * probs projected_probs = torch.tensor(np.zeros(probs.size())).to(self.device) for idx in range(probs.size(0)): projected_probs[idx].index_add_(0, lower_bound[idx].long(), m_lower[idx].double()) projected_probs[idx].index_add_(0, upper_bound[idx].long(), m_upper[idx].double()) return projected_probs.float()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def action_distribution(self, state):\n means, stds = self.__call__(state)\n dist = Normal(means, torch.exp(stds))\n\n return dist", "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n transition_state_probs = self.mdp.getTransitionStatesAndProbs(s...
[ "0.6413469", "0.5924323", "0.58160555", "0.5754944", "0.57004154", "0.56944776", "0.5661935", "0.56419706", "0.5618396", "0.56179804", "0.5609571", "0.55921984", "0.558994", "0.55720055", "0.55444455", "0.5505725", "0.55023724", "0.5493882", "0.5492896", "0.5434973", "0.54172...
0.0
-1
Representation of a circuit as an "Structured script". Structure means that the circuit is provided as a list of layers with 1 qubit operations and 2 qubit operations. That is a circuit of the shape 1Q_layer, [2Q_layer,1Q_layer] x num_layers, RO To be of this shape, the number of 1Q layers need to be one more than the number of 2Q layers.
def __init__(self, lines_1Q, lines_2Q, n_qubits): self.n_qubits = n_qubits # 1Qubit- and 2Qubit-operations layers do not match in size if not((len(lines_2Q)+1) == len(lines_1Q)): raise ValueError( '1Qubit- and 2Qubit-operations layers do not match in size') self.depth = 1+2*len(lines_2Q) self.lines_1Q = lines_1Q self.lines_2Q = lines_2Q
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_circuit_instructions(n_qubits, depth, type_circuit):\n\n if type_circuit in [0, 1, 2]:\n\n # if type_circuit == 1:\n # if depth > 8:\n # raise ValueError(\n # \"For type-1 circuits, only at most depth=8 allowed!\"\n # )\n\n # de...
[ "0.63333786", "0.6290255", "0.5760848", "0.57012063", "0.56889766", "0.55815995", "0.5549112", "0.54142976", "0.537557", "0.53703374", "0.53373516", "0.53207034", "0.53111106", "0.5274469", "0.5271391", "0.5248255", "0.52141166", "0.52014184", "0.5191351", "0.51885855", "0.51...
0.5596882
5
Representation of a circuit as "Rotation list". On top of the structure (see Structured script representation), 1Q layers are compiled to the corresponding rotations. The 1Q layers are now represented with a rotation vector. rotations_1Q = [layer, qubit, rotation_vector] rotation_vector = [axis_of_rotation (3 numbers), angle_of_rotation]
def __init__(self, rotations_1Q, lines_2Q, n_qubits): self.rotations_1Q = rotations_1Q self.lines_2Q = lines_2Q self.n_qubits = n_qubits dim_depth, dim_qubits, dim_rot = self.rotations_1Q.shape self.depth = 1+2*len(lines_2Q) # 1Q rotations vector does not match the depth if not((2*dim_depth-1) == self.depth): raise ValueError('1Q rotations vector does not match the depth') # 1Q rotations vector does not match the qubit number if not(dim_qubits == n_qubits): raise ValueError( '1Q rotations vector does not match the qubit number') # 1Q rotations vector does not match the parameter number if not(dim_rot == 4): raise ValueError( '1Q rotations vector does not match the parameter number')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def two_qubit_rotation(self, bits, symbols):\n circuit = cirq.Circuit()\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[0], symbols[0:3]))\n circuit += cirq.Circuit(self.one_qubit_rotation(bits[1], symbols[3:6]))\n circuit += [cirq.ZZ(*bits)**symbols[6]]\n circuit += [cirq...
[ "0.63733256", "0.6243695", "0.61284345", "0.59969985", "0.59389025", "0.5919403", "0.5793986", "0.5788567", "0.57774144", "0.5774565", "0.57473123", "0.57306886", "0.5724324", "0.57041705", "0.5641081", "0.56394804", "0.5630439", "0.5608946", "0.5591003", "0.55356807", "0.550...
0.62612224
1
Representation of a circuit as "Euler angles list". On top of the rotation list (see Rotations list representation), these rotations are converted to Euler angles. The 1Q layers are now represented with an euler vector. rotations_1Q = [layer, qubit, euler_vector] euler_vector = [rot_Z(first), rot_X, rot_Z2(third)]
def __init__(self, euler_1Q, lines_2Q, n_qubits): self.euler_1Q = euler_1Q self.lines_2Q = lines_2Q self.n_qubits = n_qubits dim_depth, dim_qubits, dim_euler = self.euler_1Q.shape self.depth = 1+2*len(lines_2Q) # euler angles vector does not match the depth if not((2*dim_depth-1) == self.depth): raise ValueError('euler angles vector does not match the depth') # euler angles vector does not match the qubit number if not(dim_qubits == n_qubits): raise ValueError( 'euler angles vector does not match the qubit number') # euler angles vector does not match the parameter number if not(dim_euler == 3): raise ValueError( 'euler angles vector does not match the parameter number')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one_qubit_rotation(self, qubit, symbols):\n # print(symbols, \"hi\")\n return [cirq.rx(symbols[0])(qubit),\n cirq.ry(symbols[1])(qubit),\n cirq.rz(symbols[2])(qubit)]", "def euler2quat(angles, rot_seq='zyx'):\n cangle = np.cos(0.5*angles)\n sangle = np.sin(0....
[ "0.63399196", "0.62531626", "0.60332036", "0.6030076", "0.59916806", "0.5967598", "0.59581524", "0.5917287", "0.59120715", "0.5886985", "0.58779275", "0.58498204", "0.584666", "0.58190167", "0.58053577", "0.58050466", "0.5801976", "0.5783403", "0.57816374", "0.5771403", "0.57...
0.59328794
7
Representation of a circuit as "XY list". On top of the Euler list (see Euler list representation), The euler angles are compiled into a single MW gate (rotation around axis in the azimutal plane) through virtual phase updates. The 1Q layers are now represented with an XY vector. rotations_1Q = [layer, qubit, XY_vector] XY_vector = [axis(azimutal angle), rotation_angle]
def __init__(self, XY_rotations, lines_2Q, n_qubits): self.XY_rotations = XY_rotations self.lines_2Q = lines_2Q self.n_qubits = n_qubits dim_depth, dim_qubits, dim_XY = self.XY_rotations.shape self.depth = 1+2*len(lines_2Q) # XY rotations vector does not match the depth if not((2*dim_depth-1) == self.depth): raise ValueError('XY rotations vector does not match the depth') # XY rotations vector does not match the qubit number if not(dim_qubits == n_qubits): raise ValueError( 'XY rotations vector does not match the qubit number') # XY rotations vector does not match the parameter number if not(dim_XY == 2): raise ValueError( 'XY rotations vector does not match the parameter number')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if oct...
[ "0.5938358", "0.5937187", "0.59367967", "0.5919371", "0.57940376", "0.56724787", "0.5659674", "0.5651457", "0.5649507", "0.55863035", "0.55385494", "0.5521582", "0.5468761", "0.5432579", "0.5426138", "0.5414661", "0.5390122", "0.5376509", "0.5373534", "0.5363947", "0.53566766...
0.60099
0
basic RNN returning next hidden state at a specific timestep.
def rnn_cell(hprev, zt, name=None, reuse=False): nin = zt.shape[-1].value nout = hprev.shape[-1].value with tf.variable_scope(name, default_name="rnn", values=[hprev, zt], reuse=reuse): wz = get_variable_wrap("kernel/input", [nin, nout], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.01)) wh = get_variable_wrap("kernel/hidden", [nout, nout],dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.01)) bh = get_variable_wrap("bias", [nout], dtype=tf.float32, initializer=tf.random_normal_initializer(0, 0.01)) return tf.tanh(ed.dot(hprev, wh) + ed.dot(zt, wz) + bh)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_rnn(self, input_tensor):\n\n w_trainable = False\n x_shift_trainable = False\n eta_trainable = True\n\n input_shape = input_tensor.get_shape().as_list()\n input_area = np.prod(input_shape[1:])\n batch_input_shape = (-1, input_area)\n\n filters = self._hparams.filters + self._hparams...
[ "0.61140877", "0.59839666", "0.59752065", "0.5951735", "0.5937966", "0.59245795", "0.5917941", "0.5917941", "0.5909956", "0.5903214", "0.5903214", "0.58980507", "0.589458", "0.58825445", "0.58472407", "0.58389205", "0.5838521", "0.5834483", "0.5830626", "0.58288646", "0.58194...
0.0
-1
LSTM returning hidden state and content cell at a specific timestep.
def lstm_cell(x, h, c, name=None, reuse=False): nin = x.shape[-1].value nout = h.shape[-1].value with tf.variable_scope(name, default_name="lstm", values=[x, h, c], reuse=reuse): wx = get_variable_wrap("kernel/input", [nin, nout * 4], dtype=tf.float32, initializer=tf.orthogonal_initializer(1.0)) wh = get_variable_wrap("kernel/hidden", [nout, nout * 4],dtype=tf.float32, initializer=tf.orthogonal_initializer(1.0)) b = get_variable_wrap("bias", [nout * 4], dtype=tf.float32, initializer=tf.constant_initializer(0.0)) z = ed.dot(x, wx) + ed.dot(h, wh) + b i, f, o, u = tf.split(z, 4, axis=0) i = tf.sigmoid(i) f = tf.sigmoid(f + 1.0) o = tf.sigmoid(o) u = tf.tanh(u) c = f * c + i * u h = o * tf.tanh(c) return h, c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hx, cx = hidden\n gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)\n\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = hard_sigmoid(ingate)\n forgetgate = hard_sigmoid(forgetgate)\n cellgate = F....
[ "0.6892743", "0.6710073", "0.6645775", "0.6560571", "0.65253127", "0.6467472", "0.6402133", "0.63405454", "0.6327336", "0.6319975", "0.62931645", "0.62660724", "0.6201569", "0.61974037", "0.61889523", "0.6188214", "0.6185717", "0.61776847", "0.61347216", "0.61347216", "0.6134...
0.5587781
87
LSTM returning hidden state and content cell at a specific timestep.
def lstm_cell_1(x, h, c, name=None, reuse=False): nin = x.shape[-1].value nout = h.shape[-1].value with tf.variable_scope(name, default_name="lstm_1", values=[x, h, c], reuse=reuse): wx = get_variable_wrap("kernel/input", [nin, nout * 4], dtype=tf.float32, initializer=tf.orthogonal_initializer(1.0)) wh = get_variable_wrap("kernel/hidden", [nout, nout * 4],dtype=tf.float32, initializer=tf.orthogonal_initializer(1.0)) b = get_variable_wrap("bias", [nout * 4], dtype=tf.float32, initializer=tf.constant_initializer(0.0)) z = ed.dot(x, wx) + ed.dot(h, wh) + b i, f, o, u = tf.split(z, 4, axis=0) i = tf.sigmoid(i) f = tf.sigmoid(f + 1.0) o = tf.sigmoid(o) u = tf.tanh(u) c = f * c + i * u h = o * tf.tanh(c) return h, c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hx, cx = hidden\n gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)\n\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = hard_sigmoid(ingate)\n forgetgate = hard_sigmoid(forgetgate)\n cellgate = F....
[ "0.68931335", "0.67093015", "0.6646216", "0.65613323", "0.6523706", "0.6465427", "0.6402848", "0.6338531", "0.6324766", "0.63201183", "0.6290784", "0.6263753", "0.6202274", "0.6196958", "0.6188465", "0.6187984", "0.61834884", "0.61785024", "0.6135082", "0.61321425", "0.613214...
0.0
-1
LSTM returning hidden state and content cell at a specific timestep.
def lstm_cell_2(x, h, c, name=None, reuse=False): nin = x.shape[-1].value nout = h.shape[-1].value with tf.variable_scope(name, default_name="lstm_2", values=[x, h, c], reuse=reuse): wx = get_variable_wrap("kernel/input", [nin, nout * 4], dtype=tf.float32, initializer=tf.orthogonal_initializer(1.0)) wh = get_variable_wrap("kernel/hidden", [nout, nout * 4],dtype=tf.float32, initializer=tf.orthogonal_initializer(1.0)) b = get_variable_wrap("bias", [nout * 4], dtype=tf.float32, initializer=tf.constant_initializer(0.0)) z = ed.dot(x, wx) + ed.dot(h, wh) + b i, f, o, u = tf.split(z, 4, axis=0) i = tf.sigmoid(i) f = tf.sigmoid(f + 1.0) o = tf.sigmoid(o) u = tf.tanh(u) c = f * c + i * u h = o * tf.tanh(c) return h, c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):\n hx, cx = hidden\n gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)\n\n ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)\n\n ingate = hard_sigmoid(ingate)\n forgetgate = hard_sigmoid(forgetgate)\n cellgate = F....
[ "0.6892743", "0.6710073", "0.6645775", "0.6560571", "0.65253127", "0.6467472", "0.6402133", "0.63405454", "0.6327336", "0.6319975", "0.62931645", "0.62660724", "0.6201569", "0.61974037", "0.61889523", "0.6188214", "0.6185717", "0.61776847", "0.61347216", "0.61347216", "0.6134...
0.0
-1
neural network model for mapping
def neural_network(z, dim_out): hidden_dim = 15 net1 = slim.fully_connected(z, hidden_dim, activation_fn=None) net2 = slim.fully_connected(net1, dim_out, activation_fn=tf.tanh) return net2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trainNet():", "def predict_from(self, inputs, to_layers):", "def neural_network(xtrain, ytrain, xtest, ytest,labels_mapping, scaled = False):\n if not scaled :\n scaler = StandardScaler()\n xtrain = scaler.fit_transform(xtrain)\n xtest = scaler.transform(xtest)\n\n nn = MLPClassi...
[ "0.7084198", "0.6653931", "0.6629831", "0.6618596", "0.65436137", "0.6513071", "0.64708334", "0.646356", "0.6437267", "0.6412351", "0.6400347", "0.63606924", "0.6356501", "0.6342798", "0.6280546", "0.6239402", "0.6237478", "0.6236198", "0.6233234", "0.62314695", "0.6226379", ...
0.0
-1
Find a rotation matrix R such that F_inf.dot(R) ~= F_true
def compute_optimal_rotation(L, L_true, scale=True): from scipy.linalg import orthogonal_procrustes R = orthogonal_procrustes(L, L_true)[0] if scale: Lp = L.dot(R) s = (L_true*Lp).sum() / (Lp*Lp).sum() return R*s else: return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isRotationMatrix(self, R):\n Rt = np.transpose(R)\n shouldBeIdentity = np.dot(Rt, R)\n I = np.identity(3, dtype=R.dtype)\n n = np.linalg.norm(I - shouldBeIdentity)\n return n < 1e-6", "def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np....
[ "0.60769", "0.59607196", "0.59561664", "0.58100045", "0.5771714", "0.5771714", "0.5648924", "0.562697", "0.5613362", "0.5532923", "0.5497425", "0.5491512", "0.5427582", "0.5386786", "0.53646296", "0.53314674", "0.5323371", "0.5293043", "0.5282815", "0.5280129", "0.5269544", ...
0.4913564
69
Testing the working of xia workflow celery task queue
def test_execute_xia_automated_workflow(self, mock_run): self.assert_(execute_xia_automated_workflow.run()) self.assert_(execute_xia_automated_workflow.run()) self.assertEqual(mock_run.call_count, 2) self.assert_(execute_xia_automated_workflow.run()) self.assertEqual(mock_run.call_count, 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_celery_tasks(self):\n from celery_runner import celery\n task = celery.send_task(\n 'tasks.log', args=['Hello from the other side!'], kwargs={}\n )\n self.assertTrue(task.id)\n task_state = celery.AsyncResult(task.id).state\n while task_state == PENDING...
[ "0.7558448", "0.7010462", "0.685679", "0.6836651", "0.6805605", "0.6643382", "0.6535218", "0.640248", "0.640248", "0.63725996", "0.63460475", "0.6286987", "0.62630576", "0.6254249", "0.62299985", "0.6221732", "0.6209035", "0.62022513", "0.6200722", "0.617855", "0.61699396", ...
0.581855
81
Forward method for your Runner. Should not be called directly outside of runner. If your model has specific interface, override this method to use it
def forward(self, batch: Mapping[str, Any], **kwargs) -> Mapping[str, Any]: output = self._process_input(batch, **kwargs) output = self._process_output(output) return output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")", "def __call__(self):\n raise NotImplementedError", "def RunModel(self):\n raise UnimplementedMethodException()", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplemente...
[ "0.72703433", "0.7202122", "0.7161987", "0.7095755", "0.7095755", "0.70892507", "0.7041553", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.7017677", "0.6987295", "0.6987295", "0.69869214", "0.69604117", "0.6940787",...
0.0
-1
Inner method to handle specified data batch. Used to make a train/valid/infer stage during Experiment run.
def handle_batch(self, batch: Mapping[str, Any]) -> None: self.batch = {**batch, **self.forward(batch)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def train(self, batch):\n pass", "def _pipeline_general(self,\n data_batch: Dict[str, Any],\n levels: Sequence[int],\n training: bool = False,\n ...
[ "0.71952105", "0.7118524", "0.66174924", "0.6597654", "0.6585971", "0.65415645", "0.65149236", "0.6504006", "0.64783216", "0.6470306", "0.6465952", "0.64400965", "0.6413714", "0.6394377", "0.6394115", "0.6370589", "0.63683504", "0.63560295", "0.63350075", "0.6323913", "0.6297...
0.6063903
55
Checks that X is transposed to [Lat, Lon, Sample, Feature] order
def check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert list(X.dims).index(x_lat_dim) == 0, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE' assert list(X.dims).index(x_lon_dim) == 1, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE' assert list(X.dims).index(x_sample_dim) == 2, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE' assert list(X.dims).index(x_feature_dim) == 3, 'XCast requires a dataset to be transposed to LAT x LON x SAMPLE x FEATURE'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_input_transposed_vector(multiple_linear_regression_data):\n X, y = multiple_linear_regression_data\n x = X.copy().T\n y = pd.DataFrame(y)\n\n # There is a difference with a transposed array\n with pytest.raises(\n AssertionError, match=r\"N >= K: You need at least as many rows .*\"\n...
[ "0.60901403", "0.6069965", "0.57867116", "0.57052827", "0.56909966", "0.5635825", "0.5586665", "0.54974854", "0.54878813", "0.5452838", "0.5424291", "0.54226655", "0.54185116", "0.5416405", "0.5388056", "0.5387666", "0.53499806", "0.5334254", "0.5315664", "0.5304115", "0.5288...
0.7853445
0
Checks that X is 4D, with Dimension Names as specified by x_lat_dim, x_lon_dim, x_sample_dim, and x_feature_dim
def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional' assert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X' assert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on X' assert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X' assert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X'\n\tassert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X'\n\tassert x_sample_dim in X.coords.keys(), 'X...
[ "0.6943855", "0.6864585", "0.6622508", "0.6591821", "0.647431", "0.63964295", "0.6374549", "0.6134668", "0.6122223", "0.5947658", "0.583848", "0.57911134", "0.57625794", "0.565753", "0.56400055", "0.5597365", "0.5592346", "0.5584553", "0.55217546", "0.5503743", "0.54942316", ...
0.7832379
0
Checks that X has coordinates named as specified by x_lat_dim, x_lon_dim, x_sample_dim, and x_feature_dim
def check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert x_lat_dim in X.coords.keys(), 'XCast requires a dataset_lat_dim to be a coordinate on X' assert x_lon_dim in X.coords.keys(), 'XCast requires a dataset_lon_dim to be a coordinate on X' assert x_sample_dim in X.coords.keys(), 'XCast requires a dataset_sample_dim to be a coordinate on X' assert x_feature_dim in X.coords.keys(), 'XCast requires a dataset_feature_dim to be a coordinate on X'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, ...
[ "0.77260077", "0.7620336", "0.7221872", "0.6576378", "0.6310555", "0.6308388", "0.6186717", "0.608041", "0.58673334", "0.5852697", "0.5782202", "0.5687635", "0.5607326", "0.5584896", "0.55696183", "0.5523855", "0.5504535", "0.5483882", "0.54531854", "0.54332227", "0.5432066",...
0.8039974
0
Checks that X's Coordinates are the same length as X's Dimensions
def check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert X.shape[list(X.dims).index(x_lat_dim)] == len(X.coords[x_lat_dim].values), "XCast requires a dataset's x_lat_dim coordinate to be the same length as its x_lat_dim dimension" assert X.shape[list(X.dims).index(x_lon_dim)] == len(X.coords[x_lon_dim].values), "XCast requires a dataset's x_lon_dim coordinate to be the same length as its x_lon_dim dimension" assert X.shape[list(X.dims).index(x_sample_dim)] == len(X.coords[x_sample_dim].values), "XCast requires a dataset's x_sample_dim coordinate to be the same length as its x_sample_dim dimension" assert X.shape[list(X.dims).index(x_feature_dim)] == len(X.coords[x_feature_dim].values), "XCast requires a dataset's x_feature_dim coordinate to be the same length as its x_feature_dim dimension"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n ...
[ "0.7469548", "0.7054452", "0.7046964", "0.6944492", "0.66742504", "0.6647212", "0.6620178", "0.6618185", "0.6572067", "0.6569208", "0.65156883", "0.6446175", "0.6434718", "0.64269143", "0.64210254", "0.6420011", "0.64147735", "0.6348928", "0.6343616", "0.6322013", "0.63205206...
0.7488213
0
Checks that X is an Xarray.DataArray
def check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): assert type(X) == xr.DataArray, 'XCast requires a dataset to be of type "Xarray.DataArray"'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __check_is_xarray(self, data):\n if type(data) is xr.core.dataarray.DataArray or \\\n type(data) is xr.core.dataarray.Dataset:\n\n return True\n else:\n msg = \"Variable {data} is not an xarray DataArray/Dataset\"\n raise Exception(msg)", "def is_dataa...
[ "0.86942047", "0.71866673", "0.7029208", "0.69669193", "0.6887847", "0.67700577", "0.66643125", "0.6535942", "0.63918483", "0.63843584", "0.636618", "0.63450086", "0.63026375", "0.6252001", "0.6233888", "0.6226298", "0.6150564", "0.61219764", "0.6118666", "0.6112856", "0.6090...
0.8198799
1
Checks that X satisfies all conditions for XCAST
def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim): check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim) #check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert type(X) == xr.DataArray, 'XCast requires a dataset to be of type \"Xarray.DataArray\"'", "def _check_support(X: np.ndarray, **kwargs) -> None:\n\n assert (X > 0).all() & isinteger(X), \"x should be greater then 0 and integer.\...
[ "0.6669929", "0.65580815", "0.65137964", "0.6363444", "0.6190184", "0.5916384", "0.5903115", "0.58242035", "0.5805368", "0.5690557", "0.5617207", "0.5617207", "0.5495422", "0.54181933", "0.541543", "0.53723425", "0.52777237", "0.52643865", "0.52449125", "0.52307457", "0.51835...
0.59676224
5
Test for t(tau,x) = tau(1+asin(bx))
def flattenTest(): x = rampfloat(0,0,1,n1,n2) t = rampfloat(0,1,0,n1,n2) smax = 5.0 a = smax/(n1-1) b = 2*PI/(n2-1) bx = mul(b,x) bt = mul(b,t) cosbx = cos(bx) sinbx = sin(bx) acosbx = mul(a,cosbx) asinbx = mul(a,sinbx) p2 = div(mul(bt,acosbx),add(1,asinbx)) el = fillfloat(1,n1,n2) fl = FlattenerCg(8.0,0.01) sf = fl.findShifts(p2,el) # found shifts se = neg(mul(t,asinbx)) # exact shifts plot(sf,jet,-smax,smax) plot(se,jet,-smax,smax)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def akendalltau(x,y):\r\n n1 = 0\r\n n2 = 0\r\n iss = 0\r\n for j in range(len(x)-1):\r\n for k in range(j,len(y)):\r\n a1 = x[j] - x[k]\r\n a2 = y[j] - y[k]\r\n aa = a1 * a2\r\n if (aa): # neither array has a tie\r\n n1 = n1...
[ "0.6248144", "0.6197467", "0.6141509", "0.6100883", "0.60800886", "0.60800886", "0.6000496", "0.59086967", "0.58352697", "0.58224106", "0.5792972", "0.5762635", "0.5752603", "0.57476383", "0.5732413", "0.5732413", "0.57315695", "0.5711668", "0.57024705", "0.5690728", "0.56630...
0.0
-1
Return a new ObjectChange representing a change made to this object. This will typically be called automatically by ChangeLoggingMiddleware.
def to_objectchange(self, action): return ObjectChange( changed_object=self, object_repr=str(self), action=action, object_data=serialize_object(self), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updated_object(self):\n o = deepcopy(self.object)\n o[\"name\"] += \"-copy\"\n return o", "def change_object(self, new_object):\n raise NotImplementedError", "def __call__(self, change: ChangeDict) -> None:\n old = None\n new = None\n ctype = change[\"type\"...
[ "0.61876494", "0.5870512", "0.56965613", "0.5530178", "0.55052364", "0.54316956", "0.54316956", "0.54316956", "0.53980166", "0.5397867", "0.5397867", "0.5397867", "0.5333922", "0.53334", "0.531325", "0.5294916", "0.5289745", "0.528219", "0.5259374", "0.5217978", "0.52005905",...
0.73542356
0
initialize your data structure here.
def __init__(self): self.stack = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_empty(self):\n self._data = []", "def __init__(self):\n self.data = []\n self.record = {}", "def __init__(self):\n self.structure = {}", "def __init__(self):\n self._data = []", "def __init__(self):\n self._data = []", "def __init__(self):\n self...
[ "0.7761043", "0.76102185", "0.7555967", "0.7549892", "0.7549892", "0.7549892", "0.7549892", "0.7549892", "0.7549892", "0.752797", "0.7446006", "0.7446006", "0.7446006", "0.7446006", "0.7446006", "0.743338", "0.743338", "0.7408609", "0.7385719", "0.737986", "0.737986", "0.73...
0.0
-1
Resets the array to its original configuration and return it.
def reset(self) -> List[int]: return self.nums
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self) -> List[int]:\n self.array = deepcopy(self.original)\n return self.array", "def reset(self) -> np.array:\n raise NotImplementedError", "def reset_original(self):\n self._original = [] # Empty out self._originals", "def reset(self):\n b = [0] * self.n\n ...
[ "0.77292603", "0.74091315", "0.680525", "0.661002", "0.65122974", "0.6499901", "0.64658946", "0.6412889", "0.64038277", "0.63267505", "0.6321126", "0.6252074", "0.6242088", "0.6207423", "0.62034124", "0.620192", "0.6169583", "0.61650425", "0.6147524", "0.6144126", "0.61250526...
0.0
-1
Returns a random shuffling of the array.
def shuffle(self) -> List[int]: index_set = set() index_list = [] while len(index_set) < len(self.nums): index = random.randint(0,len(self.nums) - 1) if index not in index_set: index_set.add(index) index_list.append(index) return [self.nums[i] for i in index_list]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shuffle(self) -> List[int]:\n for i in range(len(self.array)):\n tmp_idx = randrange(i, len(self.array))\n self.array[i], self.array[tmp_idx] = self.array[tmp_idx], self.array[i]\n return self.array", "def shuffle(self):\n for i in xrange(self.n - 1):\n ...
[ "0.74818784", "0.7243617", "0.72220534", "0.72162074", "0.7185313", "0.7155056", "0.71378976", "0.7131264", "0.7099804", "0.70315886", "0.7027391", "0.70198023", "0.7006563", "0.6986513", "0.69661355", "0.69302654", "0.6921108", "0.6921108", "0.6921108", "0.69205916", "0.6900...
0.0
-1
Initialize ProjectForm, adding crispy_forms helper and layout.
def __init__(self, *args, editable=False, request=None, **kwargs): super().__init__(*args, **kwargs) self.helper = FormHelper(self) user_authenticated = request and request.user.is_authenticated if hasattr(self.instance, 'pk') and self.instance.pk is not None: new = False slug_field = HTML(""" <div id="div_slug" class="row form-group"> <label for="id_url" class="col-md-2 col-form-label form-control-label"> URL </label> <input type="text" name="slug" maxlength="50" id="id_slug" class="textinput textInput form-control" hidden value="{{object.slug}}"> <div class="col-md w-100"> <div class="input-group"> <input type="text" name="slug" maxlength="50" id="id_url" class="textinput textInput form-control" disabled value="http://cobwebarchive.org{{object.get_absolute_url}}"> </div> </div> </div> """) form_buttons_kwargs = { 'confirm_title': 'Save changes', 'confirm_text': 'Click the submit button to save changes to this project or click on cancel to return to Cobweb without saving.', } else: new = True self.fields['slug'].label = "Choose a Cobweb URL" slug_field = PrependedAppendedText('slug', prepended_text='http://cobwebarchive.org/proj/') form_buttons_kwargs = { 'confirm_title': 'Add new project', 'confirm_text': 'Click the submit button to add this project to Cobweb or click on cancel to return to Cobweb without saving.', } self.helper.layout = Layout( slug_field, HField('title', edit=editable), FormSection( Row(Column(HField('description', edit=editable))), HField('collecting_scope', edit=editable), Row( Field('status', edit=editable, wrapper_class='col-md-5'), Field('administrators', edit=editable, wrapper_class='col-md-7', show=user_authenticated), ), ), FormSection( Row( Column(Field('nomination_policy', edit=editable), css_class='col-md-5'), Column( Field('nominators', edit=editable, show=user_authenticated), Field('nominator_blacklist', edit=editable, show=editable), css_class='col-md-7' ), ), ), FormSection(select_field('tags', edit=editable)), 
form_buttons(**form_buttons_kwargs) if editable else HTML(''), TIMESTAMPS, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setupUi(self)", "def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setupUi(self)", "def get_add_project_form():\n\n return render_template(\"project_add.html\")", "def get_pr...
[ "0.6280024", "0.6280024", "0.6275415", "0.61467266", "0.5942895", "0.5872091", "0.5814412", "0.5774404", "0.57378775", "0.56497526", "0.56302845", "0.56079954", "0.56048656", "0.56025636", "0.5512409", "0.5445713", "0.5410989", "0.53841966", "0.5374642", "0.5372924", "0.53698...
0.54038054
17
Publisher unique numerical id. Getter only
def id(self): return self._id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_id(self) -> str:\n return pulumi.get(self, \"unique_id\")", "def unique_id(self):\n return self._id", "def unique_id(self):\n return self._id", "def id(self):\n\n return sha256(self.pub.export()).digest()", "def unique_id(self):\n return self._unique_id", "de...
[ "0.74543864", "0.74381155", "0.74381155", "0.74232537", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684", "0.73660684"...
0.0
-1
Publisher unique service advertise identifier. Getter only
def advertise_id(self): return self._advertise_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advertisement_id(self):\n return uuid.uuid4()", "def service_id(self) -> str:\n return pulumi.get(self, \"service_id\")", "def service_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_id\")", "def service_id(self) -> pulumi.Output[str]:\n return pulumi.get(s...
[ "0.6714376", "0.67001706", "0.65665245", "0.65665245", "0.6548375", "0.65236896", "0.6251711", "0.6107556", "0.60676295", "0.60120785", "0.5996947", "0.59193593", "0.59193593", "0.5900588", "0.5853006", "0.5787707", "0.57624674", "0.57600904", "0.5751242", "0.5715866", "0.570...
0.73240006
0
Queue size of the ROS Topic
def queue_size(self): return self._queue_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getqueuesize(self):\n return self._queuesize", "def queue_size(self) -> ConfigNodePropertyInteger:\n return self._queue_size", "def ctrlqueue_queue_size(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(10), ctypes.c_int32(0))", "def queue_size(self) -> int:\n ret...
[ "0.7185033", "0.7173836", "0.71359736", "0.71171594", "0.7078327", "0.6965806", "0.6958423", "0.6958423", "0.69455695", "0.69455695", "0.69455695", "0.69455695", "0.69437385", "0.6941902", "0.68795055", "0.68710214", "0.68147635", "0.6792856", "0.674943", "0.6711253", "0.6707...
0.70792633
4
Publishing ROS message type property. e.g. 'std_msgs/String'
def message_type(self): return self._message_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):\n data = {} if data is None else data\n metadata = {} if metadata is None else metadata\n content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))\n self.kernel.session.send(\n self.k...
[ "0.6201101", "0.6103247", "0.6074486", "0.5716341", "0.56712085", "0.5609717", "0.56032944", "0.55876714", "0.5475563", "0.5447158", "0.54009306", "0.5368012", "0.5363983", "0.5361609", "0.53507215", "0.5346041", "0.5328286", "0.5312357", "0.5275769", "0.5265659", "0.5265659"...
0.52689123
19
Publishing latch status. Getter only property
def latch(self): return self._latch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def monitor(self):", "def _get_status(self):\n return self.__status", "def status(self):\n raise NotImplementedError()", "def power_status(self):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def set_On(self):\n if not(self._locked):\n self.__dict__['...
[ "0.60354245", "0.5962349", "0.5958736", "0.59539264", "0.5925358", "0.59236515", "0.5900095", "0.57918376", "0.57918376", "0.5746601", "0.5694383", "0.56685543", "0.56662333", "0.5662871", "0.56223184", "0.5591586", "0.5569208", "0.5568028", "0.556712", "0.5565975", "0.556073...
0.73604715
0
Getter only property. Returns publishing topic name.
def topic(self): return self._topic_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def topic(self) -> str:\n return self._topic", "def pubsub_topic(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pubsub_topic\")", "def topic(self):\n return self.config.get('topic', f'{NAMESPACE}/{self.id}')", "def get_topic(self):\n return self.topic", "def pubsub_top...
[ "0.7873912", "0.77815044", "0.7672942", "0.7589836", "0.7427634", "0.73527247", "0.7141051", "0.7079695", "0.70687497", "0.70312816", "0.7003432", "0.6849306", "0.6837315", "0.6716", "0.66638047", "0.6615674", "0.65900433", "0.6494008", "0.6449045", "0.6389532", "0.6370327", ...
0.8160925
0
Publish a ROS message
def publish(self, message): logger.info("Publishing to topic [{0}]: {1}".format(self._topic_name, message)) self._executor.send(json.dumps({ 'op': 'publish', 'id': 'publish:{0}:{1}'.format(self._topic_name, self._id), 'topic': self._topic_name, 'msg': message }))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, message: str) -> None:", "def publish(self, data=None):\n rospy.loginfo(\"Message published on topic %s\", self.topic)", "async def publish(self, body, routing_key=None):\n pass # pragma: no cover", "def _publish(self, topic_name, message):\n msg = {\n 'op':...
[ "0.79041046", "0.7305027", "0.7107372", "0.7052109", "0.7042832", "0.70376146", "0.69866174", "0.69642437", "0.69358927", "0.69347024", "0.6902832", "0.68914086", "0.6865876", "0.68259144", "0.67583984", "0.6745768", "0.674358", "0.67271394", "0.6675353", "0.6673596", "0.6660...
0.7000611
6
Reduce the usage of the publisher. If the usage is 0, unadvertise this topic.
def unregister(self): self._executor.unregister_publisher(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _remove_pub(pub):\n # counting publisher instance per topic name\n TopicBack.pub_instance_count[pub.name] -= 1\n\n # Be aware of https://github.com/ros/ros_comm/issues/111\n return pub.unregister()", "def suppress_topics ( *topics ) :\n if topics and 1 == len( topics ) :\n ...
[ "0.59047306", "0.5432222", "0.520002", "0.51949817", "0.51929474", "0.5141639", "0.5133911", "0.5067576", "0.5030349", "0.49800456", "0.49592015", "0.4953709", "0.49524015", "0.49356797", "0.49280757", "0.4905593", "0.48718688", "0.4860922", "0.48507154", "0.48441455", "0.483...
0.43292674
79
Creates estimator for predicting hashtag based on graph construction
def __init__(self, minimal_random_walk_change_difference_value: float, damping_factor: float, max_iterations: int, verbose: bool = False): self.graph: nx.Graph = None self._hashtags_tf_idf_vectorizer: TfidfVectorizer = None self._hashtags_tf_idf_representation: np.ndarray = None self._hashtag_labels: Union[set, np.ndarray] = None self._users_labels: Union[set, np.ndarray] = None self._tweet_labels: Union[set, np.ndarray] = None self._transition_matrix: np.ndarray = None self._hashtag_encoder: ModifiedOneHotEncoder = ModifiedOneHotEncoder() self.minimal_random_walk_change_difference_value = minimal_random_walk_change_difference_value self.damping_factor = damping_factor self.max_iterations = max_iterations self.verbose = verbose
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_estimator(self):\n # Hyperparameters to create the Estimator\n hparams = tf.contrib.training.HParams(\n job_dir='test_dir',\n save_checkpoints_steps=1,\n keep_checkpoint_max=1,\n num_layers=2,\n dnn_dropout=0.7,\n dnn_optimizer='test_optimizer',\n ...
[ "0.57602215", "0.56859493", "0.5646092", "0.5621122", "0.5566403", "0.55499375", "0.55365247", "0.5525229", "0.547958", "0.54549783", "0.5403307", "0.5403233", "0.5356686", "0.53558606", "0.53558564", "0.53543764", "0.53396976", "0.5337955", "0.5331275", "0.53096443", "0.5309...
0.0
-1
Transforms single row of pandas `original_tweets_with_lemmas.p` to graph. Suffixes in node names are needed due to intersection between hashtags and user names.
def _transform_single_row(self, hashtag_agg: Dict, row: pd.Series): user_name = row["username"] + "_user" tweet_id = str(row["id"]) + "_tweet" tags = row["hashtags"] self._users_labels.add(user_name) self._tweet_labels.add(tweet_id) if not self.graph.has_node(user_name): self.graph.add_node(user_name, type="username") if not self.graph.has_node(tweet_id): self.graph.add_node(tweet_id, type="tweet_id") for hashtag_index in tags: tag = hashtag_index["text"] + "_tag" hashtag_agg[tag] += row["lemmas"] if not self.graph.has_node(tag): self.graph.add_node(tag, type="hashtag") if not self.graph.has_edge(tag, user_name): self.graph.add_edge(tag, user_name) if not self.graph.has_edge(tag, tweet_id): self.graph.add_edge(tag, tweet_id) self._hashtag_labels.add(tag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_node_ids(df):\n le = LabelEncoder()\n all_node_names = list(set(df['from_name'].values.tolist() + df['to_name'].values.tolist()))\n le.fit(all_node_names)\n df['from_id'] = le.transform(df['from_name'])\n df['to_id'] = le.transform(df['to_name'])\n return df, le", "def lemmatize(data:...
[ "0.53392446", "0.53363657", "0.53016007", "0.52973044", "0.5226778", "0.5155888", "0.51276714", "0.5085444", "0.4898099", "0.4861826", "0.48439053", "0.481717", "0.48156443", "0.4814153", "0.47771806", "0.4760283", "0.475947", "0.47401556", "0.47281075", "0.46995685", "0.4695...
0.58514464
0
Adds edges between hashtag nodes if they share the same user.
def _refine_matrix_with_additional_connections(self): new_graph = self.graph.copy() for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose): if self.graph.node[node]["type"] == "hashtag": for neighbour in self.graph.neighbors(node): if self.graph.node[neighbour]["type"] == "username": for other_node in self.graph.neighbors(neighbour): if self.graph.node[other_node]["type"] == "hashtag" \ and not self.graph.has_edge(node, other_node) \ and not node == other_node: new_graph.add_edge(node, other_node) self.graph = new_graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToGraph(tid,uid,mentions) :\n global G,found\n\n user = r.get(int(uid))\n \n if user == None :\n return\n\n user = re.findall('\"((?:(?!(?:\",\")).)*)\"', user)\n \n # lower the hashtags\n mentions = [t.lower() for t in mentions if t not in [\"\"]]\n \n usern = user[1].lower()\n\n G.add_node(us...
[ "0.6894521", "0.68589467", "0.5790534", "0.571554", "0.5655163", "0.5619311", "0.56077766", "0.5606925", "0.5577704", "0.5519251", "0.5487911", "0.54744655", "0.54540205", "0.5451284", "0.5424921", "0.53971547", "0.53903186", "0.53803366", "0.5375972", "0.53607416", "0.535052...
0.6445627
2
Builds tri partite graph of Users Hashtags Tweets. Hashtags are connected if has the same user.
def fit(self, x: pd.DataFrame, y=None, **fit_params) -> "Method": self.graph = nx.Graph() minimal_hashtag_occurence = fit_params["minimal_hashtag_occurence"] x = self.drop_tweets_with_hashtags_that_occurred_less_than(x, minimal_hashtag_occurence) hashtag_agg = defaultdict(list) self._hashtag_labels = set() self._users_labels = set() self._tweet_labels = set() if self.verbose: print("Building graph ...") tqdm.tqdm.pandas() x.progress_apply(lambda r: self._transform_single_row(hashtag_agg, r), axis=1) else: x.apply(lambda r: self._transform_single_row(hashtag_agg, r), axis=1) self._refine_matrix_with_additional_connections() self._hashtag_labels = np.asarray(list(sorted(self._hashtag_labels))) self._users_labels = np.asarray(list(sorted(self._users_labels))) self._tweet_labels = np.asarray(list(sorted(self._tweet_labels))) if self.verbose: print("Building incidence matrix ...") incidence_matrix = self._get_binary_incidence_matrix()[ :len(self._hashtag_labels), len(self._hashtag_labels):] weighted_adjacency_matrix_of_tags = incidence_matrix.dot(incidence_matrix.T) weighted_adjacency_matrix_of_tags.setdiag(0) if self.verbose: print("Building hashtag graph ...") hashtag_graph = nx.from_scipy_sparse_matrix(weighted_adjacency_matrix_of_tags) weighted_degree = np.asarray( list(map(itemgetter(1), hashtag_graph.degree(weight="weight")))) matrix_weighted_degree = sps.diags([1 / (weighted_degree + 1e-8)], [0]) self._transition_matrix = weighted_adjacency_matrix_of_tags.dot( matrix_weighted_degree) if self.verbose: print("Calculating tf idf ...") document_list = [' '.join(hashtag_agg[key]) for key in self._hashtag_labels] # it has normalization inside, so no L2 is necessary self._hashtags_tf_idf_vectorizer = TfidfVectorizer(norm="l2") self._hashtags_tf_idf_representation = self._hashtags_tf_idf_vectorizer.fit_transform( document_list) if self.verbose: print("Fitting hashtag encoders ...") # [:-4] because each hashtag has "_tag" postfix to distinguish it in the graph 
self._hashtag_encoder.fit([lab[:-4] for lab in self._hashtag_labels]) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToGraph(tid,uid,mentions) :\n global G,found\n\n user = r.get(int(uid))\n \n if user == None :\n return\n\n user = re.findall('\"((?:(?!(?:\",\")).)*)\"', user)\n \n # lower the hashtags\n mentions = [t.lower() for t in mentions if t not in [\"\"]]\n \n usern = user[1].lower()\n\n G.add_node(us...
[ "0.7264354", "0.67232686", "0.65345025", "0.6294691", "0.60873413", "0.5904825", "0.5853261", "0.5806474", "0.57820344", "0.57578456", "0.5725518", "0.57033443", "0.5695641", "0.5693837", "0.56878614", "0.5613401", "0.5573096", "0.5560261", "0.55522144", "0.5523659", "0.54931...
0.56399894
15
For a given tweet represented as a list of lemmas recommends hashtags.
def transform(self, x: Union[Tuple[Tuple[str, ...], ...], Tuple[str, ...]], **kwargs) -> np.ndarray: lemmatised = list(x[:]) if isinstance(lemmatised[0], str): for i, xi in enumerate(lemmatised): lemmatised[i] = get_wcrft2_results_for_text(xi) if isinstance(lemmatised[0], list): for i, xi in enumerate(lemmatised): lemmatised[i] = ' '.join(xi) query_hashtags = kwargs.get("query", None) if query_hashtags is not None: assert len(query_hashtags) == len(x), \ "If at least 1 query is given, the array should have the same dimension as input `x`" if isinstance(query_hashtags, str): query_hashtags = [query_hashtags] * len(lemmatised) # as in fit, vectorizer has normalization inside ... tf_idf_vectors = self._hashtags_tf_idf_vectorizer.transform(lemmatised) # ... so this simplifies to cosine similarity - no normalisation required similarities = self._hashtags_tf_idf_representation.dot(tf_idf_vectors.T).T.toarray() preference_vectors = self._get_preference_vectors(similarities, query_hashtags) similarity_rank_vertices = self._random_walk(preference_vectors) best_indices = np.argsort(-similarities * similarity_rank_vertices, axis=1) result = self._hashtag_labels[best_indices].tolist() return self.post_process_result(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hashtags(self, candidate_list):\n if Tweet.hashtagre is None:\n Tweet.hashtagre = re.compile('|'.join(map(re.escape, candidate_list)))\n return [\n [m.group(0).replace('#', '', 1), m.span()]\n for m in Tweet.hashtagre.finditer(self.text)\n ]", "def getHas...
[ "0.7114178", "0.6828377", "0.6736826", "0.6493812", "0.64422315", "0.632229", "0.6310081", "0.6094305", "0.6063977", "0.6057257", "0.5984623", "0.5919002", "0.5900357", "0.58999604", "0.58835715", "0.5878227", "0.5875355", "0.5873595", "0.58567333", "0.58058137", "0.579629", ...
0.0
-1
Creates sparse matrix of preference vectors for each of N samples to recommend which are used to initialize random walk algorithm. If a query hashtag for a particular tweet is given, then it is used to create preference vector. The most similar hashtag is used otherwise.
def _get_preference_vectors(self, tweet_content_similarities: np.ndarray, query_hashtags: Optional[Tuple[str]]) -> sps.csr_matrix: def _get_using_similarities(similarity_vector): query_hashtag_index = np.argmax(similarity_vector) vec = np.zeros((len(self._hashtag_labels),)) vec[query_hashtag_index] = 1 return vec preference_vectors = [] for i in range(len(tweet_content_similarities)): if query_hashtags is None or query_hashtags[i] is None: preference_vector = _get_using_similarities(tweet_content_similarities[i]) else: try: preference_vector = np.asarray(self._hashtag_encoder.transform([query_hashtags[i]]))[0] except ValueError: warnings.warn( "Unknown hashtag: {}. Using the closest hashtag in terms of content similarity".format( query_hashtags[i])) preference_vector = _get_using_similarities(tweet_content_similarities[i]) preference_vectors.append(preference_vector) preference_vectors = np.vstack(preference_vectors) preference_vectors = sps.csr_matrix(preference_vectors, preference_vectors.shape, dtype=np.float32) return preference_vectors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initialize(self):\n for doc_index, doc in enumerate(self.document):\n temp_word_topic_matrix = []\n for word in doc:\n if word in self.word2id.keys():\n start_topic_index = np.random.randint(0, self.K)\n temp_word_topic_matrix.a...
[ "0.58001864", "0.56129664", "0.54644907", "0.54111147", "0.54101944", "0.53472745", "0.5298088", "0.52882266", "0.52852654", "0.52436393", "0.5239852", "0.52355725", "0.5232348", "0.5206258", "0.5178139", "0.51459336", "0.51324886", "0.51294774", "0.51167494", "0.51034796", "...
0.6474532
0
Performs random walk algorithm on graph using transition matrix calculated in `fit`, given similarities of input tweet to hashtags representations calculated as tf idf in `fit` method. Random walk lasts until no changes are noticed in node values or algorithm exceeded upper limit of possible iterations.
def _random_walk(self, preference_vectors: sps.csr_matrix) -> np.ndarray: similarity_rank_vertices = preference_vectors nb_iteration = 0 while True: previous_similarity_rank_vertices = similarity_rank_vertices if self.verbose: print("Step: {}".format(nb_iteration + 1)) similarity_rank_vertices = self.damping_factor * similarity_rank_vertices.dot(self._transition_matrix) + ( 1 - self.damping_factor) * preference_vectors diff = np.sum( np.abs(similarity_rank_vertices - previous_similarity_rank_vertices)) if nb_iteration > 0 and diff < self.minimal_random_walk_change_difference_value: if self.verbose: print("Converged with error: {:.6f}".format(diff)) break nb_iteration += 1 if nb_iteration > self.max_iterations: if self.verbose: print("Random walk did not converge, current error: {:.6f}".format( diff)) break return similarity_rank_vertices.toarray()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test4(self):\n with self.test_session() as sess:\n table = np.array([[[0.9, 0.1, 0],\n [0, 0.9, 0.1],\n [0, 0, 1.0]]] * 3)\n\n for cell_transform in ['default', 'flatten', 'replicate']:\n cell = MarkovChainC...
[ "0.59803855", "0.58579344", "0.5386858", "0.53660476", "0.53021395", "0.5288456", "0.5247692", "0.52213484", "0.5216367", "0.5207188", "0.5196385", "0.5184978", "0.5155157", "0.5146504", "0.51292765", "0.5090521", "0.50815225", "0.50664717", "0.50661576", "0.5053628", "0.5053...
0.49008697
53
Removes ending suffix that was used to distinguish between nodes with the same name but different category.
def post_process_result(self, result: np.ndarray) -> np.ndarray: to_cut = len("_tag") return np.asarray([[tag[:-to_cut] for tag in list_of_tags] for list_of_tags in result])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removesuffix(self, x) -> String:\n pass", "def _suffix(self) -> str:\n return \"\"", "def strip_suffix(s, suffixes):\n for suffix in suffixes:\n if s.endswith(suffix):\n return s.rstrip(suffix)\n return s", "def remove_suffix(x, suffix=\" \"):\n if x.endswith(suffix):...
[ "0.6892483", "0.672639", "0.66920334", "0.66891456", "0.6561912", "0.6489998", "0.6470396", "0.6446932", "0.64206946", "0.6388689", "0.6340012", "0.63353306", "0.6307654", "0.6175348", "0.61530787", "0.6142381", "0.6139506", "0.611326", "0.60891044", "0.5997538", "0.59363437"...
0.0
-1
activates mujoco with license at `file_path` this does not check the return code, per usage example at simulate.cpp and test.cpp.
def register_license(file_path): result = mjlib.mj_activate(file_path) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_license(self):\n param = self.module.params[\"param\"]\n license_file_path = param['license_file_path']\n if license_file_path and os.access(license_file_path, os.F_OK) and os.access(license_file_path, os.R_OK):\n self.client.upload_license(license_file_path)\n ...
[ "0.6253657", "0.6131019", "0.5873717", "0.56893003", "0.5666623", "0.56539637", "0.5633931", "0.5633391", "0.5620481", "0.56039864", "0.56034553", "0.5600046", "0.5559953", "0.55529106", "0.5540216", "0.55401397", "0.5527236", "0.5520839", "0.5341878", "0.53384465", "0.529629...
0.7091268
0
Return (qposadr, qveladr, dof) for the given joint name. If dof is 4 or 7, then the last 4 degrees of freedom in qpos represent a unit quaternion.
def joint_adr(self, joint_name): jntadr = mjlib.mj_name2id(self.ptr, C.mjOBJ_JOINT, joint_name) assert (jntadr >= 0) dofmap = {C.mjJNT_FREE: 7, C.mjJNT_BALL: 4, C.mjJNT_SLIDE: 1, C.mjJNT_HINGE: 1} qposadr = self.jnt_qposadr[jntadr][0] qveladr = self.jnt_dofadr[jntadr][0] dof = dofmap[self.jnt_type[jntadr][0]] return (qposadr, qveladr, dof)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getWireframeJoints(self, q):\n xline = [0. for _ in self.Joints]\n yline = [0. for _ in self.Joints]\n zline = [0. for _ in self.Joints]\n for cnt, jnt in enumerate(self.Joints.keys()):\n xyz = self.joint_syms[jnt][\"func_xyz_coj\"](*q)\n xline[cnt] = xyz[0, 0]...
[ "0.5734758", "0.5489396", "0.5309817", "0.5117031", "0.50999624", "0.5072133", "0.50482094", "0.50176036", "0.4997793", "0.49432415", "0.4891485", "0.4865569", "0.47592014", "0.47541997", "0.4749527", "0.47479632", "0.47286186", "0.4689717", "0.4671337", "0.46211225", "0.4620...
0.6961554
0
Reverse order of given text characters.
def encode(text: str) -> str: reversed_text = "".join(char for char in text[-1::-1]) return reversed_text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task10_string_reversed(text):\n return text[::-1]", "def string_reverse(text):\n rev_text = text[::-1]\n return rev_text", "async def reverse(self, ctx, *, text: str):\n t_rev = text[::-1].replace(\"@\", \"@\\u200B\").replace(\"&\", \"&\\u200B\")\n await ctx.send(f\"🔁 {t_rev}\")", ...
[ "0.7165525", "0.70656836", "0.68825185", "0.6665356", "0.6664884", "0.6568191", "0.6480333", "0.644043", "0.64028203", "0.6399076", "0.6385635", "0.63255966", "0.62972546", "0.6293171", "0.62818104", "0.6206855", "0.6206855", "0.618907", "0.6167491", "0.6159032", "0.61581963"...
0.62933666
13
Obtain original text from a reversed text.
def decode(text: str) -> str: # Reverse of reverse is original text. return encode(text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def string_reverse(text):\n rev_text = text[::-1]\n return rev_text", "def task10_string_reversed(text):\n return text[::-1]", "def string_mirror(text):\n rev_text = text[::-1]\n mirror_text = text + rev_text\n return mirror_text", "def reverse(text):\n #The empty String translates to Fa...
[ "0.6893517", "0.68568224", "0.6491691", "0.6230875", "0.61860424", "0.6106706", "0.6054018", "0.59995306", "0.59206975", "0.5907858", "0.5739098", "0.57272434", "0.5710309", "0.57061905", "0.5688273", "0.56567407", "0.5605282", "0.5591018", "0.55687314", "0.55279493", "0.5506...
0.6215529
4
Reencode phylogeny_df to facilitate efficient analysis and transformation operations. The returned phylogeny dataframe will be topologically sorted (i.e., organisms appear after all ancestors), have contiguous ids (i.e., organisms' ids correspond to row number), contain an integer datatype `ancestor_id` column if the phylogeny is asexual (i.e., a more performant representation of `ancestor_list`). Input dataframe is not mutated by this operation unless `mutate` set True. If mutate set True, operation does not occur in place; still use return value to get transformed phylogeny dataframe.
def alifestd_to_working_format( phylogeny_df: pd.DataFrame, mutate: bool = False, ) -> pd.DataFrame: if not mutate: phylogeny_df = phylogeny_df.copy() phylogeny_df = alifestd_try_add_ancestor_id_col(phylogeny_df, mutate=True) if not alifestd_is_topologically_sorted(phylogeny_df): phylogeny_df = alifestd_topological_sort(phylogeny_df, mutate=True) if not alifestd_has_contiguous_ids(phylogeny_df): phylogeny_df = alifestd_assign_contiguous_ids( phylogeny_df, mutate=True ) return phylogeny_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_tree(tree, subtree_hierarchy):\n new_tree = subtree_hierarchy.copy()\n for bg_pop, row in subtree_hierarchy.iterrows():\n # Remove not showing pops from new_tree\n if row['To_show'] == 'no':\n new_tree = new_tree.drop(bg_pop)\n continue\n\n # Find Parent\...
[ "0.54605395", "0.52514964", "0.52182794", "0.51729226", "0.5131557", "0.4997631", "0.49200213", "0.49033338", "0.47637245", "0.47538468", "0.4739272", "0.46873105", "0.46594372", "0.4646138", "0.46402073", "0.4636189", "0.4631968", "0.46285406", "0.46198457", "0.46106067", "0...
0.8255052
0
Set default values for options.
def initialize_options(self): #Each user option must be listed here with their default value. self.pylint_rcfile = ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)", "def _set_default_options(options):\n\n options_defaults = {\n 'run_storage_base': None,\n 'watch': False...
[ "0.82235515", "0.76142746", "0.7325896", "0.7278611", "0.7278611", "0.7222484", "0.71993154", "0.7152846", "0.709889", "0.70756227", "0.70539", "0.70520246", "0.704013", "0.6966003", "0.6921334", "0.6866702", "0.68231", "0.68159455", "0.6777279", "0.67474914", "0.67449015", ...
0.64381355
45
Compute count of values for Sx=sum 0<=p<x a_n % M
def hist326(T,P,M): H=[0]*M S=0 T+=1 reps,rem=T//P,T%P for x in range(P): n=reps if x<rem: n+=1 H[S]+=n e=e326(x+1) S=(S+e)%M return sum([h*(h-1)/2 for h in H])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def countm(m):\n nfound=0\n\n for i in range(1,m+1):\n for jpk in range(2,(2*i)+1):\n d1=i*i+(jpk)*(jpk) \n if(checkpsq(d1)): \n if(jpk<=i):\n factor=jpk/2 \n else:\n factor=((2*i-jpk)+2)/2 \n nfou...
[ "0.7074451", "0.67225456", "0.6443518", "0.63969105", "0.6153251", "0.60965586", "0.6039756", "0.6003885", "0.59993243", "0.59983164", "0.5964119", "0.5954213", "0.5915635", "0.5903191", "0.5876021", "0.5867894", "0.5854033", "0.58404493", "0.5825537", "0.5816647", "0.5792668...
0.0
-1
Resopnd to keypass and mouse events.
def check_events(ship): for event in pygame.event.get(): if event.type == pygame.QUIT: sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n keys = set()\n mouse = set()\n old_mouse = set()\n\n # Set up game\n init()\n\n # Perform game loop\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n if event.type == pygame.KEYDOWN: keys.add(event.key)\n ...
[ "0.61480045", "0.6093318", "0.59711534", "0.59679854", "0.59173495", "0.5892807", "0.5828488", "0.57922244", "0.5735914", "0.5715611", "0.5710519", "0.5705049", "0.5689077", "0.5688968", "0.5657838", "0.5655574", "0.5652674", "0.56487423", "0.5629702", "0.5615708", "0.5594418...
0.0
-1
Update images on the screen and flip to the new screen.
def update_screen(ai_settings, screen, ship):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_screen(self):\n\t\tself.screen.fill((255, 255, 255))\n\n\t\tself._check_collisions()\n\t\tself._update_objects()\n\t\tself._blit_objects()\n\n\t\tpygame.display.flip()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.ship.blitme()\n for ufo in self.u...
[ "0.7323385", "0.73144567", "0.72792417", "0.7079508", "0.70134467", "0.70087224", "0.6951003", "0.6883352", "0.68315375", "0.68152404", "0.67967147", "0.67831165", "0.675435", "0.6707523", "0.6706954", "0.66996604", "0.66944337", "0.6660561", "0.6639409", "0.65874845", "0.657...
0.6261121
43
Provides list of github organization urls based on authenticated user.
def get_orgs(): url = "https://api.github.com/user/orgs" org_urls = [] orgs = utils.get_json(url) for org in orgs: org_urls.append(org["url"]) return org_urls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]", "def get_organization_links(self):\n yield from self.get_resource_by_item(\"/orgs\")", "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GE...
[ "0.77288854", "0.6896512", "0.6850855", "0.6795222", "0.6704334", "0.6566785", "0.64914256", "0.64815396", "0.64218557", "0.6373243", "0.6368361", "0.6260303", "0.6226184", "0.62181467", "0.6191352", "0.61800104", "0.61728805", "0.6149173", "0.6129998", "0.61161196", "0.61137...
0.7619069
1
Provides a list of Member urls per organizations. param orgs either a list of urls pointing to organizations or a single org name return list of member urls
def list_members(orgs): members =[] if isinstance(orgs, list): #if list of orgs for each org get members list for url in orgs: #append /member to url - member_url is not valid canidate without a member list url = url + "/members" print("Checking " + url) members_data = utils.get_json(url) for member in members_data: members.append(member["url"]) return members else: #build url from input org name and return member list url = "https://api.github.com/orgs/" + orgs + "/members" members_data = utils.get_json(url) #check for invalid GitHub credentials or invalid github org name try: for member in members_data: members.append(member["url"]) return members except TypeError: if(members_data["message"] == "Not Found"): print("That organization doesn't exist try again\n") raise SystemExit elif(members_data["message"] == "Bad credentials"): print("Please verify GitHub credentials are correct in config.py") raise SystemExit else: print (members_data) raise SystemExit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def get_organizatio...
[ "0.73141086", "0.6856046", "0.6766409", "0.66456306", "0.6501711", "0.64493275", "0.6426517", "0.62708795", "0.6261771", "0.6171711", "0.6152523", "0.605219", "0.6030204", "0.6027043", "0.60185575", "0.60124224", "0.5963386", "0.58976614", "0.5893394", "0.5823597", "0.5802018...
0.7285096
1
Provides a list of Member urls that have [attribute] is null. param attribute to check for null value params memberUrls List of member urls to check return list of member urls with null [attribute] field
def check_for_null(attribute, memberUrls): attributeNotFound =[] for url in memberUrls: member_data = utils.get_json(url) if member_data[attribute] is None: #TODO: TBD Could grab email here if speed was an issue attributeNotFound.append(url) return attributeNotFound
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_should_pass_when_list_of_url_are_not_empty(self):\n self.ebooksPage = EbooksPage(self.driver)\n list_of_links = self.ebooksPage.get_all_links()\n\n assert (isinstance(list_of_links, list) and (len(list_of_links) > 0 ))", "def get_parliament_members_urls(self) -> list:\n direc...
[ "0.5205275", "0.51647687", "0.4913061", "0.49125162", "0.49111745", "0.4911004", "0.48863113", "0.4880418", "0.48025796", "0.47962448", "0.47788674", "0.47781992", "0.47770363", "0.47515574", "0.47040161", "0.46784464", "0.45712367", "0.4568166", "0.45629707", "0.45522118", "...
0.841574
0
Define la conexion a la base de datos
def get_db_connection(uri): client = pymongo.MongoClient(uri) return client.cryptongo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connection_database(self):\n # connection to the database\n self.data_base = mysql.connector.connect(user=self.info[0], password=self.info[1],\n host=self.info[2])\n self.cursor = self.data_base.cursor()\n\n # executed \"use Purbeurre\...
[ "0.710717", "0.6864451", "0.6749383", "0.6738874", "0.66128796", "0.6590426", "0.65721476", "0.65279865", "0.651362", "0.6474826", "0.6444196", "0.6436261", "0.64234966", "0.6408445", "0.6395795", "0.63708216", "0.6339452", "0.6337948", "0.6329089", "0.63109976", "0.6289543",...
0.0
-1
Almacena el documento en la BD siempre y cuando no exista.
def save_ticker(connection, ticker_data=None): #evita operaciones si no existe informacion. if not ticker_data: return False ticker_hash = get_ticker_hash(ticker_data) if check_if_exists(connection, ticker_hash): return False #ticker_data['ticker_hash'] = get_ticker_hash(ticker_data) ticker_data['ticker_hash'] = ticker_hash # Almacena el documento en la BD de Mongo por medio de insertOne() connection.tickers.insert_one(ticker_data) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n if self.document.id:\n self.db.insert(self.document)\n else:\n self.db.update(self.document.id,self.document)", "def create_doc(c, d):\n try:\n c.insert_one(d)\n return True\n except:\n return False", "def get_or_create(self, orm,...
[ "0.63891953", "0.5736484", "0.5692139", "0.5690428", "0.5670163", "0.55836314", "0.5490324", "0.5467444", "0.5449755", "0.54327613", "0.54227865", "0.5420944", "0.54042196", "0.539124", "0.5371962", "0.5348681", "0.5317675", "0.5299088", "0.52955973", "0.5288019", "0.5268371"...
0.4767223
88
Volume of originations can be misleading. Normalize it to some degree by considering the number of houses in the same census tract.
def volume_per_100_households(volume, num_households): if num_households: return volume * 100.0 / num_households else: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise(self):\n s = self._sum()\n if s != 0:\n for element, value in self.focals.items():\n self.focals[element] /= s", "def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total", "def test_...
[ "0.60555303", "0.5992782", "0.5814782", "0.5798587", "0.5788853", "0.57513684", "0.57513684", "0.57438356", "0.5718763", "0.5717603", "0.57025325", "0.5634104", "0.56269354", "0.5596752", "0.5582119", "0.5576086", "0.55752635", "0.5556672", "0.55557096", "0.55428916", "0.5540...
0.5479352
28
Get loan originations for a given lender, county combination. This ignores year for the moment.
def loan_originations(request_dict): state_fips = request_dict.get('state_fips', '') county_fips = request_dict.get('county_fips', '') lender = request_dict.get('lender', '') if state_fips and county_fips and lender: records = HMDARecord.objects.filter( countyfp=county_fips, lender=lender, statefp=state_fips, action_taken__lte=6) # actions 7-8 are preapprovals to ignore query = records.values( 'geoid', 'geoid__census2010households__total' ).annotate(volume=Count('geoid')) data = {} for row in query: data[row['geoid']] = { 'volume': row['volume'], 'num_households': row['geoid__census2010households__total'], 'volume_per_100_households': volume_per_100_households( row['volume'], row['geoid__census2010households__total']) } return data else: return HttpResponseBadRequest( "Missing one of state_fips, county_fips, lender")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_land_conso_per_year(self, level, group_name=None):\n fields = Cerema.get_art_field(self.analyse_start_date, self.analyse_end_date)\n qs = self.get_cerema_cities(group_name=group_name)\n qs = qs.values(level)\n qs = qs.annotate(**{f\"20{field[3:5]}\": Sum(field) / 10000 for field...
[ "0.54118204", "0.5000237", "0.4949208", "0.4931147", "0.4911661", "0.4811012", "0.47071233", "0.47071233", "0.47031814", "0.4534821", "0.45185816", "0.45088586", "0.45060778", "0.44371125", "0.4435685", "0.44336432", "0.4428876", "0.44172257", "0.44085371", "0.43731523", "0.4...
0.6544321
0
Return string representation of the instance.
def __str__(self): return self.branch_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_string(self):\n return self.__repr__()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return s...
[ "0.85652095", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", "0.8392225", ...
0.0
-1
Return repr representation of the instance.
def __repr__(self): return "<Branch: %s>" % self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.ge...
[ "0.8206054", "0.8206054", "0.8206054", "0.81909746", "0.8183906", "0.8177763", "0.8165392", "0.8165392", "0.8165392", "0.8165392", "0.81573534", "0.81573534", "0.81573534", "0.81428844", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81360096", "0.81...
0.0
-1
Loads branch data from Derek.
def _load(self): context = { "username": self.username, "reponame": self.reponame, "name": self.name } LOG.debug("Loading %s" % self.branch_id) doc = self._client.getjson(path="/users/%(username)s/repos/%(reponame)s" "/branches/%(name)s" % context) LOG.debug("doc loaded: %r" % doc) slice_id = "%(username)s/%(reponame)s/%(slice_id)s" % { "username": self.username, "reponame": self.reponame, "slice_id": doc["slice_id"] } self._slice = self._client.slice(slice_id) self._packages = doc["packages"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self) -> None:", "def load_data(self):", "def branches(self):\r\n url = self.base_url + 'branches/'\r\n return json.loads(self.bb.load_url(url))", "def _finish_init(self):\n\n # This is usually done in set_other(), but we already set it as part of\n # the constructor...
[ "0.6128238", "0.592409", "0.57337046", "0.5703952", "0.56251985", "0.56168956", "0.5596647", "0.55911064", "0.5567281", "0.5439576", "0.5429689", "0.54277277", "0.5410592", "0.5379778", "0.5364775", "0.53645855", "0.53460085", "0.53446186", "0.5340837", "0.5321691", "0.531625...
0.64722985
0
Merge packages from another branch.
def merge(self, branch): if branch.username != self.username or branch.reponame != self.reponame: raise BranchError("Branch to merge must be in the same repository") context = { "username": self.username, "reponame": self.reponame, "name": self.name } LOG.debug("Merging from %r to %r" % (branch, self)) self._client.postjson(path="/users/%(username)s/repos/%(reponame)s/" "branches/%(name)s/merge" % context, payload={"from_branch": branch.name})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge(self, branch_names):\n\n self.git(\"merge\", *branch_names)", "def merge(): #Status: WIP\r\n pass", "def merge(self, ref, *args):\n return self.cmd('merge', ref, *args)", "def main():\n\n local_pkgs = set(os.listdir(GIT_FOLDER))\n local_pkgs = set([it.replace('.git', '') f...
[ "0.65263903", "0.6383677", "0.5910188", "0.5779606", "0.5774576", "0.5738681", "0.57243484", "0.5715652", "0.56322825", "0.56101876", "0.5508702", "0.54770225", "0.5444733", "0.5418547", "0.5407719", "0.5407596", "0.5389976", "0.53783303", "0.5357206", "0.5333331", "0.5327569...
0.61638653
2
Upload packages to branch.
def upload_packages(self, packages): context = { "username": self.username, "reponame": self.reponame, "name": self.name } filepaths = [os.path.join(os.path.dirname(path), pfile['filename']) for path in packages for pfile in deb_changes(path)['files']] filepaths.extend(packages) # get upload token resp = self._client.postjson(path="/users/%(username)s/" "repos/%(reponame)s/" "branches/%(name)s/get_upload_token" % context) token = resp['utoken'] for pfile in filepaths: self._client.upload(path="/upload/%s/send/%s" % (token, os.path.basename(pfile)), filepath=pfile) self._client.post(path="/upload/%s/dput" % token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_package(self, __contents):\n raise NotImplementedError", "def deploy():\n build()\n collect()\n commit()\n push()", "def push(self, base_repo, branch=\"master\"):\n base_repo.push_to(self, branch)", "def push(self):\n out, err, code = self.command( [\"git\", \"push...
[ "0.6992921", "0.65332377", "0.6517984", "0.64805365", "0.6429827", "0.6415388", "0.64010024", "0.6369851", "0.6338582", "0.62850165", "0.6262531", "0.6237359", "0.6235568", "0.6226121", "0.61871606", "0.6132188", "0.60597724", "0.60434294", "0.60356283", "0.6010991", "0.59812...
0.6928756
1
Look up package ID from list of package infos.
def get_pkg_id(pkgs, name, version): for pinfo in pkgs: if pinfo["name"] == name and pinfo["version"] == version: return "%(name)s/%(version)s/%(id)s" % pinfo raise DerekError("No package %s %s in the branch" % (name, version))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bd_selectPackageList_byID(self, _c, _pckgID):\n\n result = {}\n\n _c.execute(\"SELECT id, num, desc, status, source_env, dest_env, app, last_rev FROM package WHERE id=? ORDER BY num DESC\", [_pckgID]) \n data = _c.fetchone()\n\n if data:\n result['id'] = data[0] ...
[ "0.6142764", "0.59174037", "0.5913275", "0.58575547", "0.5794914", "0.57480824", "0.5745953", "0.56321526", "0.5583289", "0.55084556", "0.5450892", "0.5416782", "0.5412792", "0.5388353", "0.5381202", "0.5359032", "0.5345228", "0.5315372", "0.5281844", "0.5267592", "0.5251155"...
0.6708183
0
Return current list of packages in the branch.
def packages(self): if self._packages: return self._packages self._load() return self._packages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def packages(self):\n return []", "def packages(self):\r\n return self._packages", "def get_packages_in_current_dir() -> list:\n from os import listdir\n\n pkgs = []\n ext = ('.tgz', '.txz')\n for file_in_current_dir in sorted(listdir()):\n if file_in_current_dir.endswith(ext):...
[ "0.7107418", "0.6885296", "0.68503803", "0.68283534", "0.6706045", "0.6589522", "0.65444344", "0.65231556", "0.65145665", "0.63925606", "0.6339857", "0.6297373", "0.6296177", "0.62841654", "0.62576807", "0.621807", "0.61917984", "0.61838996", "0.6180382", "0.61489815", "0.612...
0.6295743
13
test upsert user template as anonymous raises access control error
def test_upsert_user_template_as_anonymous_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.anonymous_user) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.user1_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template,...
[ "0.79757524", "0.78699815", "0.77971363", "0.7779306", "0.7315423", "0.7268201", "0.72340584", "0.71491605", "0.711088", "0.70220673", "0.66764414", "0.6670372", "0.66626596", "0.6651719", "0.656055", "0.65165913", "0.6485727", "0.64777356", "0.64196575", "0.6418435", "0.6411...
0.81394374
0
test upsert user template as anonymous with access right raises access control error
def test_upsert_user_template_as_anonymous_with_access_right_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.anonymous_user) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.user1_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_user_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.user1_template, request=mock_requ...
[ "0.80965334", "0.7977133", "0.7963794", "0.7793425", "0.7664125", "0.7498813", "0.72754526", "0.7163729", "0.71102697", "0.708983", "0.6803297", "0.6699159", "0.6673843", "0.6615065", "0.66076845", "0.66003054", "0.6594539", "0.65447944", "0.6544686", "0.65272033", "0.6497763...
0.82011104
0
test upsert global template as anonymous raises access control error
def test_upsert_global_template_as_anonymous_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.anonymous_user) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.global_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_templa...
[ "0.7650722", "0.756786", "0.73319674", "0.72709674", "0.71599734", "0.6970519", "0.68684846", "0.67423", "0.65906715", "0.6536628", "0.64568055", "0.64072675", "0.6396326", "0.6376108", "0.6357626", "0.63539773", "0.6263191", "0.6222696", "0.6143397", "0.6139016", "0.6118972"...
0.7757241
0
test upsert global template as anonymous raises access control error
def test_upsert_global_template_as_anonymous_with_access_right_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.anonymous_user) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.global_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_global_template_as_anonymous_raises_access_control_error(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n with self.assertRaises(AccessControlError):\n template_api.upsert(\n self.fixture.global_template, request=mock_r...
[ "0.7756423", "0.75673836", "0.7331551", "0.7270877", "0.7159347", "0.6970686", "0.6868954", "0.6742805", "0.65919876", "0.6536369", "0.6457105", "0.6407503", "0.6395695", "0.63756883", "0.6357793", "0.6353528", "0.62642133", "0.6223189", "0.6144017", "0.61375785", "0.61193377...
0.76497203
1
test upsert own template as user saves
def test_upsert_own_template_as_user_saves(self): mock_request = create_mock_request(user=self.user1) template_api.upsert(self.fixture.user1_template, request=mock_request)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user1_template, request=mock_request)", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staf...
[ "0.8351025", "0.80516064", "0.8045996", "0.8009917", "0.7813934", "0.6972186", "0.678505", "0.6546894", "0.63948953", "0.63871586", "0.6360882", "0.6320311", "0.63129383", "0.61979985", "0.6153889", "0.61455595", "0.61436296", "0.6135426", "0.6107875", "0.6063069", "0.6057141...
0.866422
0
test upsert other users template as user raises access control error
def test_upsert_other_users_template_as_user_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.user1) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.user2_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_upsert_own_template_as_user_saves(self):\n mock_request = create_mock_request(user=se...
[ "0.820034", "0.8083881", "0.8039612", "0.80145544", "0.80041873", "0.76042765", "0.7525442", "0.7524677", "0.7430861", "0.7153691", "0.7078404", "0.69165915", "0.67743903", "0.6736194", "0.6717341", "0.6690355", "0.6580235", "0.65308565", "0.6507076", "0.6504665", "0.6474388"...
0.82194316
0
test upsert global template as user raises access control error
def test_upsert_global_template_as_user_raises_access_control_error(self): mock_request = create_mock_request(user=self.user1) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.global_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_global_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=s...
[ "0.8106673", "0.76909196", "0.7503215", "0.7482491", "0.7482349", "0.747998", "0.74361557", "0.7395664", "0.7269864", "0.72538686", "0.72130823", "0.7131496", "0.701815", "0.6556384", "0.65447783", "0.6527001", "0.64378923", "0.6381135", "0.6367743", "0.6320074", "0.63030666"...
0.82474107
0
test upsert own template as staff saves
def test_upsert_own_template_as_staff_saves(self): mock_request = create_mock_request(user=self.staff_user1) template_api.upsert(self.fixture.user1_template, request=mock_request)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_global_template_as_staff_saves(self):\n mock_request = create_mock_request(user=self.staff_user1)\n template_api.upsert(self.fixture.global_template, request=mock_request)", "def test_upsert_own_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self...
[ "0.8371305", "0.80961037", "0.8029739", "0.7766117", "0.7704741", "0.70811933", "0.6713066", "0.648494", "0.64786816", "0.6461537", "0.6387213", "0.638419", "0.63225466", "0.6304995", "0.63047177", "0.62928706", "0.62527716", "0.6247133", "0.6220966", "0.62020767", "0.6166314...
0.85387015
0
test upsert other users template as staff raises access control error
def test_upsert_other_users_template_as_staff_raises_access_control_error( self, ): mock_request = create_mock_request(user=self.staff_user1) with self.assertRaises(AccessControlError): template_api.upsert( self.fixture.user2_template, request=mock_request )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upsert_other_users_template_as_superuser_saves(self):\n mock_request = create_mock_request(user=self.superuser1)\n template_api.upsert(self.fixture.user2_template, request=mock_request)", "def test_upsert_own_template_as_staff_saves(self):\n mock_request = create_mock_request(user=s...
[ "0.8093766", "0.80680555", "0.80376196", "0.7835847", "0.77455807", "0.7744498", "0.76982164", "0.76218945", "0.736171", "0.7214227", "0.7022283", "0.6874125", "0.68300915", "0.68178946", "0.68173504", "0.6804541", "0.67034173", "0.66750884", "0.6632048", "0.6587507", "0.6538...
0.82656085
0