query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Reinvite an already invited user.
def reinvite_user(self, user, email): if self.is_moderator and self.has_perm('accounts.invite_user'): # Reset email, set a new token and update decision datetime user.email = email user.auth_token = generate_unique_id() user.decision_datetime = timezone.now() user.save() return user else: raise PermissionDenied
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def invite_user(request):\r\n params = request.params\r\n\r\n email = params.get('email', None)\r\n user = request.user\r\n\r\n if not email:\r\n # try to get it from the json body\r\n email = request.json_body.get('email', None)\r\n\r\n if not email:\r\n # if still no email, I ...
[ "0.6742321", "0.65622705", "0.6341691", "0.618192", "0.6178779", "0.6154837", "0.61355495", "0.60448194", "0.5970474", "0.59555095", "0.592783", "0.59243935", "0.59088224", "0.590702", "0.5906705", "0.5898827", "0.577978", "0.5737973", "0.5707441", "0.57018846", "0.56371075",...
0.7661665
0
Approve a user's application
def approve_user_application(self, user): if self.is_moderator and \ self.has_perm('accounts.approve_user_application'): user.moderator = self user.moderator_decision = user.APPROVED user.decision_datetime = timezone.now() user.auth_token = generate_unique_id() user.save() return user else: raise PermissionDenied
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def approve(self):\n self._check_if_open()\n data = {\"approved\": True}\n return self.post(\"approve\", data)", "def can_approve(self, user, **data):\n raise Return(False)", "def approve_me(message):\n users = hf.get_users()\n for user in users:\n if user[\"id\"] == me...
[ "0.7413602", "0.71029663", "0.67888886", "0.6747213", "0.67318124", "0.67286676", "0.6719018", "0.66766196", "0.6670902", "0.65097535", "0.64031065", "0.63882875", "0.63717735", "0.63500285", "0.634869", "0.63414145", "0.63078153", "0.6297645", "0.62773865", "0.6240185", "0.6...
0.79233325
0
Reject a user's application
def reject_user_application(self, user): if self.is_moderator \ and self.has_perm('accounts.reject_user_application'): user.moderator = self user.moderator_decision = user.REJECTED user.decision_datetime = timezone.now() user.save() return user else: raise PermissionDenied
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def admin_reject(user):\n if user.comments in (None or \"\"):\n return\n\n subject = \"ECE/CIS Account - Account Application rejected for %s\" % user.username\n application = \"https://www.eecis.udel.edu/NewAccount/\"\n helprequest = \"https://www.eecis.udel.edu/service\"\n sponsor = \"%s@eec...
[ "0.6770561", "0.6585959", "0.65521705", "0.6491148", "0.6487616", "0.646831", "0.63601846", "0.63346696", "0.6136011", "0.61090565", "0.607409", "0.6064712", "0.60478044", "0.59528744", "0.5923107", "0.5874209", "0.5872581", "0.5852314", "0.58413756", "0.58047056", "0.5798305...
0.7710392
0
Return a user's profiency in a particular skill as a percentage, based on the position of the proficiency in PROFICIENCY_CHOICES.
def get_proficiency_percentage(self): choice_values = [choice[0] for choice in self.PROFICIENCY_CHOICES] if '' in choice_values: choice_values.remove('') # Remove the empty proficiency choice choice_values.sort() # Ensure values are in the correct order value = choice_values.index(self.proficiency) + 1 factor = 100 / len(choice_values) percentage = round(value * factor) return percentage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def profit_per_item_percentage(self, pk=None):\n total_profit_percentage = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item...
[ "0.624777", "0.5969812", "0.58800423", "0.57505316", "0.57186955", "0.57097393", "0.57036775", "0.56423104", "0.5622649", "0.5582169", "0.5582169", "0.55723554", "0.5554754", "0.55056655", "0.5428772", "0.53997266", "0.53909737", "0.53796136", "0.53647095", "0.53647095", "0.5...
0.8257786
0
If there is no icon matched use default.
def get_icon(self): try: icon = self.icon.fa_icon except AttributeError: icon = 'fa-globe' return icon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def icon(self):\n return DEFAULT_ICON", "def icon(self):\n return None", "def icon(self):\n return None", "def icon(self, value: str | None) -> None:\n self._icon = value", "def icon(self) -> typing.Union[str, None]:\n return self._icon", "def getIconString(self, iconNa...
[ "0.75174177", "0.68926984", "0.68926984", "0.6637837", "0.65864366", "0.649431", "0.64609027", "0.6416154", "0.63991344", "0.6376866", "0.63664967", "0.63042384", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.6299288", "0.629928...
0.59392136
49
Attempt to match a user link to a recognised brand (LinkBrand).
def save(self, *args, **kwargs): domain = urlsplit(self.url).netloc try: self.icon = LinkBrand.objects.get(domain=domain) except ObjectDoesNotExist: pass super(UserLink, self).save(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def link_match_family(link, family_name): # pylint: disable= too-many-return-statements\n if family_name == \"gaussian\":\n return link in [\"identity\", \"log\", \"inverse\"]\n elif family_name == \"gamma\":\n return link in [\"identity\", \"log\", \"inverse\"]\n elif family_name == \"bern...
[ "0.61408734", "0.6093003", "0.5789055", "0.5748644", "0.57433885", "0.57430315", "0.567563", "0.539753", "0.5388041", "0.53709215", "0.53483826", "0.5343389", "0.522596", "0.519779", "0.51586634", "0.51541173", "0.5086018", "0.504153", "0.50308156", "0.49988824", "0.49988824"...
0.54133815
7
Find any existing links to match to a new (or edited) brand
def save(self, *args, **kwargs): super(LinkBrand, self).save(*args, **kwargs) existing_links = UserLink.objects.filter(url__contains=self.domain) # Filter out any false positives for link in existing_links: domain = urlsplit(link.url).netloc if domain != self.domain: existing_links = existing_links.exclude(pk=link.pk) existing_links.update(icon=self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLinkstoBrands(url):\n brandUrls = {}\n try:\n print(\"Maker link being crawled : \", url)\n request = requests.get(url)\n if request.status_code == 200:\n sourceCode = BeautifulSoup(request.text, \"html.parser\")\n for td in sourceCode.findAll('td'):\n ...
[ "0.6620311", "0.6351018", "0.61144876", "0.5799459", "0.57774466", "0.56885266", "0.56424046", "0.5591962", "0.5496416", "0.5412613", "0.538556", "0.536212", "0.5332424", "0.5328891", "0.53039545", "0.5301546", "0.52905977", "0.5289629", "0.52668715", "0.52547145", "0.5247001...
0.5429173
9
Generate the preparation files for the projects in a run
def format_preparation_files(run_dir, sample_sheet, output_dir, pipeline, verbose): sample_sheet = KLSampleSheet(sample_sheet) df_sheet = sample_sheet_to_dataframe(sample_sheet) if pipeline == 'atropos-and-bowtie2': click.echo('Stats collection is not supported for pipeline ' 'atropos-and-bowtie2') else: stats = run_counts(run_dir, sample_sheet) stats['sample_name'] = \ df_sheet.set_index('lane', append=True)['sample_name'] # returns a map of (run, project_name, lane) -> preparation frame preps = preparations_for_run(run_dir, df_sheet, pipeline=pipeline) os.makedirs(output_dir, exist_ok=True) for (run, project, lane), df in preps.items(): fp = os.path.join(output_dir, f'{run}.{project}.{lane}.tsv') if pipeline == 'fastp-and-minimap2': # stats are indexed by sample name and lane, lane is the first # level index. When merging, make sure to select the lane subset # that we care about, otherwise we'll end up with repeated rows df = df.merge(stats.xs(lane, level=1), how='left', on='sample_name') # strip qiita_id from project names in sample_project column df['sample_project'] = df['sample_project'].map( lambda x: re.sub(r'_\d+$', r'', x)) # center_project_name is a legacy column that should mirror # the values for sample_project. df['center_project_name'] = df['sample_project'] df.to_csv(fp, sep='\t', index=False) if verbose: project_name = remove_qiita_id(project) # assume qiita_id is extractable and is an integer, given that # we have already passed error-checking. qiita_id = project.replace(project_name + '_', '') print("%s\t%s" % (qiita_id, abspath(fp)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in file...
[ "0.64848816", "0.6330899", "0.6330899", "0.6330899", "0.6284369", "0.62667686", "0.6219365", "0.620572", "0.6156918", "0.6099373", "0.60919136", "0.60415375", "0.60354227", "0.60354227", "0.6011478", "0.6007769", "0.5993345", "0.59776956", "0.59592646", "0.59452546", "0.59385...
0.66563916
0
Return tokenized list of strings from raw text input
def tokenize1(text): return TOKEN_PATTERN1.findall(text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_token_list(text):\n return text.split()", "def tokenize(self, input_string: str) -> List[str]:", "def tokenize(text: str):\n result = []\n for s in text:\n result.append(s)\n return result", "def _tokenize(self, text: str) -> List[str]:\n return self.bpe.tokenize(text)", "de...
[ "0.83176076", "0.7947587", "0.7839667", "0.778215", "0.7778221", "0.77569646", "0.77184075", "0.77048725", "0.76263833", "0.7593482", "0.7591023", "0.7581511", "0.756711", "0.75667906", "0.73598903", "0.73473614", "0.73437184", "0.73308897", "0.731043", "0.73101467", "0.72422...
0.69686973
44
Return tokenized list of strings from raw text input using keras functionality
def tokenize_keras(raw_data): from keras.preprocessing.text import text_to_word_sequence return [text_to_word_sequence(d) for d in raw_data]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred ...
[ "0.70793176", "0.6935871", "0.6846465", "0.6740176", "0.66016084", "0.6601571", "0.65836084", "0.6577888", "0.656995", "0.6567432", "0.65455496", "0.654399", "0.6511661", "0.65019155", "0.6500425", "0.649716", "0.64943504", "0.6473806", "0.6438927", "0.64235365", "0.64078707"...
0.8465603
0
Return True if word passes filter
def filter1(word): if not word: return False w = word.lower() if w in STOPWORDS: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_word_filter(self, fn):\n self._apply_filter(lambda ng, f: any(fn(w) for w in ng))", "async def wordfilter(self, ctx):\n pass", "async def wordfilter_test(self, ctx, *, message):\n found = self.test_sentence(message)\n if found:\n await ctx.send(f\"Message contai...
[ "0.74591035", "0.7444302", "0.7269609", "0.71056175", "0.7085896", "0.69943607", "0.6989586", "0.68581426", "0.6818804", "0.67899287", "0.6701206", "0.6658593", "0.66539127", "0.6639391", "0.6632937", "0.66313547", "0.6630455", "0.6528471", "0.65281737", "0.6523878", "0.65221...
0.78754514
0
Return processed list of words from raw text input To be honest, we're currently using sklearn CountVectorizer and keras text_to_word_sequence instead of this function.
def process_text(text, tokenize=tokenize1, filter=filter1, stem=None, lower=True): assert tokenize, "Must provide tokenize method for preprocess_text" if not text: return [] if lower: text = text.lower() words = tokenize(text) if filter: words = [w for w in words if filter(w)] if stem: words = [stem(w) for w in words] return words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = lemmatization(words_list)\n return words_list", "def words(self, text):\n return re.findall(r'\\w+', text)", "def token...
[ "0.73962986", "0.7058597", "0.7052718", "0.7027226", "0.700164", "0.699", "0.69413567", "0.6924657", "0.68807524", "0.6822172", "0.680491", "0.6783017", "0.6780739", "0.6760617", "0.67566645", "0.674435", "0.6730428", "0.67244", "0.6674279", "0.6671103", "0.6661868", "0.665...
0.62860745
65
Return dict of wordtoid from raw text data If max_size is specified, vocab is truncated to set of highest frequency words within size.
def build_vocab(raw_data, max_size=None): data = [w for doc in tokenize_keras(raw_data) for w in doc] counter = collections.Counter(data) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) if max_size: count_pairs = count_pairs[:max_size] words, _ = list(zip(*count_pairs)) word_to_id = dict(zip(words, range(len(words)))) word_to_id[UNKNOWN_WORD] = len(word_to_id) word_to_id[PAD_WORD] = len(word_to_id) return word_to_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vocab(self):\n word2id = {}\n for document in self.docs:\n for word in document:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n return word2id", "def build_vocab(sentences, max_num_words):\n # Build vocabulary\n wo...
[ "0.6375627", "0.6333941", "0.63053066", "0.6265616", "0.6252101", "0.6250396", "0.62214345", "0.6192168", "0.6157442", "0.60978895", "0.6085281", "0.6050244", "0.6039959", "0.6008718", "0.6007198", "0.6006812", "0.59851426", "0.59756815", "0.59671265", "0.5960956", "0.5825441...
0.7943447
0
Convert raw text data into integer ids
def raw_to_ids(raw_data, word_to_id): docs = tokenize_keras(raw_data) uid = word_to_id[UNKNOWN_WORD] return [[word_to_id.get(w, uid) for w in doc] for doc in docs]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def text2ids(self, text: str, length: int):\n # Tokenize\n tokens = self.tokenizer.tokenize(text)\n token_ids = self.tokenizer.tokens2ids(tokens)\n # Padding\n while len(token_ids) < length:\n token_ids.append(0)\n # Truncate\n if len(token_ids) > length:...
[ "0.672705", "0.6686971", "0.66411066", "0.66214275", "0.6610237", "0.6563587", "0.650595", "0.6494439", "0.6406515", "0.6315634", "0.6188173", "0.6168283", "0.6166879", "0.6148879", "0.60975975", "0.6084565", "0.6077067", "0.6060543", "0.60536844", "0.60036033", "0.59868073",...
0.7270317
0
callback for when the detector has found a stop sign. Note that a distance of 0 can mean that the lidar did not pickup the stop sign at all
def stop_sign_detected_callback(self, msg): # distance of the stop sign corners = msg.corners dx = corners[3] - corners[1] dy = corners[2] - corners[0] r = dx/dy # aspect ratio rdist = np.array([.15, .20, .25, .30,.35, .40, .45, .50]) pixelheight = np.array([139, 102, 82, 64, 56, 50, 44, 40]) if dy > pixelheight[-1] and dy < pixelheight[0]: dist = np.interp(dy, pixelheight[::-1], rdist[::-1]) else: return # Get location of camera with respect to the map try: (translation,rotation) = self.tf_listener.lookupTransform('/map', '/camera', rospy.Time(0)) xcam = translation[0] ycam = translation[1] zcam = translation[2] euler = tf.transformations.euler_from_quaternion(rotation) thetacam = euler[2] except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException): return # Get angle of robot with respect to the map try: (translation,rotation) = self.tf_listener.lookupTransform('/map', '/base_footprint', rospy.Time(0)) euler = tf.transformations.euler_from_quaternion(rotation) thetarobot = euler[2] except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException): return # Now we have pose of robot, we want to determine stop sign angle relative # to camera frame thstopsign = (wrapToPi(msg.thetaright) + wrapToPi(msg.thetaleft))/2. 
zstopsign = dist*np.cos(-thstopsign) xstopsign = dist*np.sin(-thstopsign) x = xcam + xstopsign*np.cos(thetacam) - zstopsign*np.sin(thetacam) y = ycam + xstopsign*np.sin(thetacam) + zstopsign*np.cos(thetacam) # Now that we have x and y coord of stop sign in world frame, append coord found = False for i in range(len(self.stopSigns[0])): xcur = self.stopSigns[0][i] ycur = self.stopSigns[1][i] thetarobotcur = self.stopSigns[2][i] distance = np.sqrt((x - xcur)**2 + (y - ycur)**2) n = self.stopSignCounts[i] if distance < .2: if n < 100: # We have found the same stop sign as before xnew = (n/(n+1.))*xcur + (1./(n+1))*x ynew = (n/(n+1.))*ycur + (1./(n+1))*y thetarobotnew = (n/(n+1.))*thetarobotcur + (1./(n+1))*thetarobot self.stopSigns[0][i] = xnew self.stopSigns[1][i] = ynew self.stopSigns[2][i] = thetarobotnew self.stopSignCounts[i] += 1 found = True if not found: # Found a new one, append it self.stopSigns[0].append(x) self.stopSigns[1].append(y) self.stopSigns[2].append(thetarobot) self.stopSignCounts.append(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _detect_stop(func):\n def wrapper(*args,**kwargs):\n self = args[0]\n self.episode_length -= 1\n if self.episode_length <=0:\n \"\"\"if the episode is end\"\"\"\n self.end = True\n else:\n if self.adsorption:\n ...
[ "0.59943485", "0.56399006", "0.5583566", "0.5494318", "0.54302335", "0.54221815", "0.5411941", "0.5367256", "0.53257966", "0.53016096", "0.5254286", "0.52245134", "0.5204961", "0.5185182", "0.5171382", "0.5162159", "0.5156524", "0.51438564", "0.5138958", "0.51210594", "0.5090...
0.70093864
0
Do not return anything, modify board inplace instead.
def gameOfLife(self, board: List[List[int]]) -> None: if not board or len(board)==0: return rows = len(board) cols = len(board[0]) #lives = 0 for i in range(rows): for j in range(cols): lives = self.n_neighbors(board,i,j) # Rule 1 and Rule 3 if board[i][j]==1 and (lives <2 or lives >3): board[i][j]= 2 # -1 signifies the cell is now dead but originally was live. if board[i][j]== 0 and lives ==3: board[i][j]=3 # signifies the cell is now live but was originally dead. for i in range(rows): for j in range(cols): board[i][j] = board[i][j]%2 return board
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyMove(self, (from_row,from_col), (to_row,to_col)):\n newboard = deepcopy(self)\n piece = newboard.board[from_row][from_col]\n newboard.board[from_row][from_col] = None\n newboard.board[to_row][to_col] = piece\n newboard.toplay = 'BLACK' if self.toplay == 'WHITE' else 'WHI...
[ "0.7664789", "0.70971596", "0.70476234", "0.7024398", "0.69861203", "0.6829364", "0.6778242", "0.6727011", "0.66938925", "0.6692377", "0.6625323", "0.6625323", "0.65893996", "0.6565318", "0.6564258", "0.65587217", "0.6539339", "0.65249294", "0.65223134", "0.6520972", "0.65202...
0.0
-1
Helper method to create torch.DoubleTensor from pandas DataFrame. Upon creating the tensor, the data is copied to a new memory location. Hence, modifying the tensor won't affect the pandas DataFrame.
def create_tensor(self, idx): sample_tensor = torch.zeros( (self.n_dim, self.sample_lengths[idx].max()), dtype=torch.double) for i, col in enumerate(self.samples.columns): # In rare cases, a sample has different length across it's dimensions dim_length = len(self.samples[col][idx]) # Create zero-padded torch.Tensor using data from pandas DataFrame sample_tensor[i, 0:dim_length] = torch.tensor( self.samples[col][idx], dtype=torch.double) return sample_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_tensor(self, df, target=False):\n if target:\n return torch.LongTensor(df.values)\n return torch.FloatTensor(df.values)", "def transform(self, dataframe: DataFrame) -> DataFrame:", "def as_dataframe(self, dframe: pd.DataFrame, reset_index: bool = False):\n if rese...
[ "0.61371243", "0.571318", "0.5686274", "0.55425775", "0.55360436", "0.5491242", "0.541506", "0.5355122", "0.5336436", "0.5318567", "0.5315715", "0.5312427", "0.5299223", "0.5269183", "0.52476364", "0.5246885", "0.5219819", "0.52038544", "0.51658046", "0.51648957", "0.51505226...
0.5500474
5
Custom collate_fn that is called with list of multivariate samples to yield a minibatch It preserves the data structure, e.g., if each sample is a dictionary, it outputs a dictionary with the same set of keys but batched Tensors as values (or lists if the values can not be converted into Tensors).
def collate_fn(sample_list): x_ref_batch = [] x_pos_batch = [] x_negs_batch = [] label_batch = [] for sample in sample_list: x_ref_batch.append(sample["x_ref"]) x_pos_batch.append(sample["x_pos"]) x_negs_batch.append(sample["x_negs"]) label_batch.append(sample["label"]) # Use torch API for RNNs to pad samples to fixed length, L, and stack them in batch-tensor of dim (B,n_dim,L). x_ref_batch = pad_sequence( x_ref_batch, batch_first=True, padding_value=0) # (B,L,n_dim) x_ref_batch = x_ref_batch.transpose(1, 2) # (B,n_dim,L) x_pos_batch = pad_sequence( x_pos_batch, batch_first=True, padding_value=0) # (B,L,n_dim) x_pos_batch = x_pos_batch.transpose(1, 2) # (B,n_dim,L) # Pad neg tensors with varying length of first dim L, and produce batch (B,K,n_dim,L') where L' is padded length x_negs_batch = pad_sequence(x_negs_batch, batch_first=True, padding_value=0) # (B, L', K, n_dim) x_negs_batch = x_negs_batch.transpose(1, 2) # (B, K, L', n_dim) x_negs_batch = x_negs_batch.transpose(2, 3) # (B, K, n_dim, L') return { 'x_ref': x_ref_batch, 'x_pos': x_pos_batch, 'x_negs': x_negs_batch, 'label': label_batch }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collate_fn(batch):\n\n flattened_batch = []\n for data in batch:\n num_examples = len(data['image'])\n for i in range(num_examples):\n flattened_batch.append({\n k: v[i] for k, v in data.items()\n })\n\n return default_collate(flattened_batch)", "de...
[ "0.71019036", "0.6699454", "0.66386545", "0.6537352", "0.65236694", "0.6492617", "0.64010304", "0.6400702", "0.6383652", "0.6318756", "0.62709737", "0.6211533", "0.6105718", "0.59736997", "0.59524226", "0.5950687", "0.5934938", "0.5909209", "0.5867133", "0.58421636", "0.57569...
0.67624515
1
Import a function from a full module path
def import_from(full_name): module_name, function_name = full_name.rsplit('.', 1) mod = import_module(module_name) return getattr(mod, function_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_module(self, location, name):", "def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)", "def load_function(path):\r\n module_path, _, name = path.rpartition('.')\r\n return getattr(import_module(module_path), name)"...
[ "0.7085891", "0.698463", "0.698463", "0.698463", "0.69600534", "0.69119257", "0.6854556", "0.6701928", "0.6670122", "0.6668526", "0.6647451", "0.66429377", "0.66402143", "0.6558814", "0.6548901", "0.652248", "0.6475477", "0.6475477", "0.6475477", "0.6468391", "0.6468391", "...
0.73028684
0
Creates a new RedisBloom client.
def __init__(self, *args, **kwargs): Redis.__init__(self, *args, **kwargs) # Set the module commands' callbacks MODULE_CALLBACKS = { self.BF_RESERVE : bool_ok, #self.BF_ADD : spaceHolder, #self.BF_MADD : spaceHolder, #self.BF_INSERT : spaceHolder, #self.BF_EXISTS : spaceHolder, #self.BF_MEXISTS : spaceHolder, #self.BF_SCANDUMP : spaceHolder, #self.BF_LOADCHUNK : spaceHolder, self.BF_INFO : BFInfo, self.CF_RESERVE : bool_ok, #self.CF_ADD : spaceHolder, #self.CF_ADDNX : spaceHolder, #self.CF_INSERT : spaceHolder, #self.CF_INSERTNX : spaceHolder, #self.CF_EXISTS : spaceHolder, #self.CF_DEL : spaceHolder, #self.CF_COUNT : spaceHolder, #self.CF_SCANDUMP : spaceHolder, #self.CF_LOADCHUNK : spaceHolder, self.CF_INFO : CFInfo, self.CMS_INITBYDIM : bool_ok, self.CMS_INITBYPROB : bool_ok, #self.CMS_INCRBY : spaceHolder, #self.CMS_QUERY : spaceHolder, self.CMS_MERGE : bool_ok, self.CMS_INFO : CMSInfo, self.TOPK_RESERVE : bool_ok, self.TOPK_ADD : parseToList, #self.TOPK_QUERY : spaceHolder, #self.TOPK_COUNT : spaceHolder, self.TOPK_LIST : parseToList, self.TOPK_INFO : TopKInfo, } for k, v in six.iteritems(MODULE_CALLBACKS): self.set_response_callback(k, v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_rbclient(self):\n return RBClient(url=self.TEST_SERVER_URL,\n transport_cls=URLMapTransport)", "def create_connection():\n # REDIS_URL is defined in .env and loaded into the environment by Honcho\n redis_url = os.getenv('REDIS_URL')\n # If it's not defined, use t...
[ "0.57075137", "0.5469258", "0.5429889", "0.5429889", "0.5411458", "0.5335622", "0.5313152", "0.5284318", "0.52696276", "0.52184993", "0.51899725", "0.5167407", "0.5128039", "0.51219803", "0.5082389", "0.50596845", "0.5056017", "0.505512", "0.5052801", "0.50405574", "0.5034328...
0.50316715
21
Creates a new Bloom Filter ``key`` with desired probability of false positives ``errorRate`` expected entries to be inserted as ``capacity``. Default expansion value is 2. By default, filter is autoscaling.
def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None): params = [key, errorRate, capacity] self.appendExpansion(params, expansion) self.appendNoScale(params, noScale) return self.execute_command(self.BF_RESERVE, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None):\n params = [key, capacity]\n self.appendExpansion(params, expansion)\n self.appendBucketSize(params, bucket_size)\n self.appendMaxIterations(params, max_iterations)\n\n return self.execute_c...
[ "0.6714165", "0.6105351", "0.6015295", "0.573277", "0.5619034", "0.55716175", "0.55599296", "0.5556067", "0.5556067", "0.5556067", "0.5469874", "0.5467277", "0.54228985", "0.5409745", "0.54063326", "0.5370435", "0.53439593", "0.52427536", "0.5227065", "0.52156657", "0.5140086...
0.7681637
0
Adds to a Bloom Filter ``key`` an ``item``.
def bfAdd(self, key, item): params = [key, item] return self.execute_command(self.BF_ADD, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i...
[ "0.7277136", "0.7030493", "0.6990854", "0.6836308", "0.66995275", "0.66974354", "0.66621774", "0.66100615", "0.65638524", "0.6553088", "0.6477446", "0.6454946", "0.640302", "0.640302", "0.63999856", "0.63677585", "0.63595116", "0.6356604", "0.63035226", "0.6302042", "0.625696...
0.8320729
0
Adds to a Bloom Filter ``key`` multiple ``items``.
def bfMAdd(self, key, *items): params = [key] params += items return self.execute_command(self.BF_MADD, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i...
[ "0.75810397", "0.6952061", "0.6553019", "0.6400322", "0.6362373", "0.63382643", "0.6300669", "0.62907505", "0.62171847", "0.6120615", "0.6111401", "0.61021876", "0.6070599", "0.6009563", "0.59573513", "0.5943457", "0.59371245", "0.5929365", "0.59166753", "0.59138656", "0.5866...
0.588327
20
Adds to a Bloom Filter ``key`` multiple ``items``. If ``nocreate`` remain ``None`` and ``key does not exist, a new Bloom Filter ``key`` will be created with desired probability of false positives ``errorRate`` and expected entries to be inserted as ``size``.
def bfInsert(self, key, items, capacity=None, error=None, noCreate=None, expansion=None, noScale=None): params = [key] self.appendCapacity(params, capacity) self.appendError(params, error) self.appendExpansion(params, expansion) self.appendNoCreate(params, noCreate) self.appendNoScale(params, noScale) self.appendItems(params, items) return self.execute_command(self.BF_INSERT, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add(self, key):\n\t\t#super(CountingBloomFilter, self).add(key)\n\t\t#super(CountingBloomFilter, self).generateStats()\n\t\tfor i in self.getBitArrayIndices(key):\n\t\t\tself.ba[i...
[ "0.6203647", "0.6000729", "0.5843988", "0.57451206", "0.5711649", "0.56672007", "0.5634117", "0.5580528", "0.54817045", "0.541424", "0.5405912", "0.53877556", "0.53802043", "0.5330853", "0.53062", "0.52977407", "0.5295973", "0.5289593", "0.52855736", "0.5276881", "0.5263222",...
0.6028828
1
Checks whether an ``item`` exists in Bloom Filter ``key``.
def bfExists(self, key, item): params = [key, item] return self.execute_command(self.BF_EXISTS, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def contains(self, item):\n for h_num in xrang...
[ "0.77040607", "0.76314753", "0.75677925", "0.7561435", "0.75477517", "0.7416592", "0.7416592", "0.7373087", "0.73644096", "0.7320269", "0.7269573", "0.71763974", "0.7118146", "0.70699257", "0.7025244", "0.6994611", "0.6993277", "0.6964525", "0.6963223", "0.6961354", "0.695409...
0.83642733
0
Checks whether ``items`` exist in Bloom Filter ``key``.
def bfMExists(self, key, *items): params = [key] params += items return self.execute_command(self.BF_MEXISTS, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def item_exists(item_id):\n return item_id in all_items", "def __contains__(self, items):\n if type(items) != list:\n raise PJFInvalidType(items, list)\n ...
[ "0.73868114", "0.73612565", "0.7006057", "0.6978514", "0.6682587", "0.6636079", "0.6593363", "0.6593363", "0.6583643", "0.65689707", "0.6546252", "0.6544462", "0.6530537", "0.6525098", "0.6517376", "0.65107256", "0.64853024", "0.6446855", "0.6445891", "0.64283794", "0.6422517...
0.77390194
0
Begins an incremental save of the bloom filter ``key``. This is useful for large bloom filters which cannot fit into the normal SAVE and RESTORE model. The first time this command is called, the value of ``iter`` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
def bfScandump(self, key, iter): params = [key, iter] return self.execute_command(self.BF_SCANDUMP, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfScandump(self, key, iter):\n params = [key, iter]\n \n return self.execute_command(self.CF_SCANDUMP, *params)", "def save(self) -> None:\n self._bin_iter.save()", "def save(self) -> dict:\n for pair in self._buffer:\n yield pair.save()", "def _iter(self, ke...
[ "0.5560349", "0.5399405", "0.5302348", "0.5262011", "0.5024422", "0.50054467", "0.49560073", "0.49460107", "0.47173524", "0.4702185", "0.4692565", "0.46744853", "0.46606264", "0.4658389", "0.46374193", "0.45671514", "0.45634228", "0.45548865", "0.45259008", "0.45051858", "0.4...
0.6080377
0
Restores a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage. This command will overwrite any bloom filter stored under key. Ensure that the bloom filter will not be modified between invocations.
def bfLoadChunk(self, key, iter, data): params = [key, iter, data] return self.execute_command(self.BF_LOADCHUNK, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeAutoSaveRestoreFilter(filter):", "def addAutoSaveRestoreFilter(filter):", "def highpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(self._highpass_sos)\n print('Zi shape: ', zi.shape, data.shape)\n self._highpass_state = data[0, :] * np.repeat(zi[:, :, np.newaxis],\n ...
[ "0.5884355", "0.57030326", "0.5571721", "0.5458497", "0.54263365", "0.5365855", "0.53208745", "0.5198635", "0.50720936", "0.5028035", "0.5008022", "0.49658895", "0.4942818", "0.4931641", "0.48837966", "0.4855681", "0.4792813", "0.47841853", "0.47665617", "0.4763505", "0.46963...
0.0
-1
Returns capacity, size, number of filters, number of items inserted, and expansion rate.
def bfInfo(self, key): return self.execute_command(self.BF_INFO, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def capacity(self):\n return sum(f.capacity for f in self.filters)", "def capacity(self):\n raise NotImplementedError()", "def capacity(self):\n return self.buffer_capacity.mean(dim=1)", "def Capacity(self) -> int:", "def get_capacity():\n fs.get_capacity()", "def capacity_used(se...
[ "0.75668544", "0.6732171", "0.6672159", "0.6671824", "0.6568066", "0.6558056", "0.64776075", "0.6394633", "0.62894094", "0.6270864", "0.6231363", "0.6148819", "0.6145135", "0.6037437", "0.6036127", "0.59975326", "0.5950704", "0.59222466", "0.59161794", "0.5884257", "0.5837646...
0.0
-1
Creates a new Cuckoo Filter ``key`` an initial ``capacity`` items.
def cfCreate(self, key, capacity, expansion=None, bucket_size=None, max_iterations=None): params = [key, capacity] self.appendExpansion(params, expansion) self.appendBucketSize(params, bucket_size) self.appendMaxIterations(params, max_iterations) return self.execute_command(self.CF_RESERVE, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, capacity=100):\n \n self.capacity = capacity\n self.size = 0\n self._keys = []\n self._entry = [[] for _ in range(capacity)]", "def __init__(self, capacity):\n self.capacity = capacity # Number of buckets in the hash table\n self.storage = [Non...
[ "0.63611585", "0.6257026", "0.6124389", "0.6099812", "0.5895444", "0.5895253", "0.58910316", "0.58668506", "0.58037305", "0.5686704", "0.5662281", "0.5616573", "0.5591175", "0.5572409", "0.5540789", "0.5507857", "0.54945195", "0.5490829", "0.5490829", "0.5490829", "0.5427748"...
0.675632
0
Adds an ``item`` to a Cuckoo Filter ``key``.
def cfAdd(self, key, item): params = [key, item] return self.execute_command(self.CF_ADD, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[it...
[ "0.7453558", "0.6998339", "0.6921159", "0.69202787", "0.6884508", "0.6801688", "0.6676187", "0.655837", "0.65446305", "0.65341115", "0.64497375", "0.63943213", "0.63943213", "0.63741636", "0.63548404", "0.63431346", "0.6286365", "0.6278622", "0.62722087", "0.6255055", "0.6229...
0.7630527
0
Adds an ``item`` to a Cuckoo Filter ``key`` only if item does not yet exist. Command might be slower that ``cfAdd``.
def cfAddNX(self, key, item): params = [key, item] return self.execute_command(self.CF_ADDNX, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add(self, key):\n if key in self:\n ...
[ "0.7416731", "0.7239735", "0.6776708", "0.65681887", "0.65602005", "0.6497649", "0.64799464", "0.6416331", "0.6377396", "0.63700354", "0.6295288", "0.6295288", "0.6282548", "0.62538064", "0.62458456", "0.6231878", "0.6221746", "0.6212229", "0.61935914", "0.6185903", "0.615960...
0.6687297
3
Adds multiple ``items`` to a Cuckoo Filter ``key``, allowing the filter to be created with a custom ``capacity` if it does not yet exist. ``items`` must be provided as a list.
def cfInsert(self, key, items, capacity=None, nocreate=None): params = [key] self.appendCapacity(params, capacity) self.appendNoCreate(params, nocreate) self.appendItems(params, items) return self.execute_command(self.CF_INSERT, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def add_items(self, items):\n for item in it...
[ "0.60938436", "0.6008798", "0.5990414", "0.58841145", "0.58353513", "0.5737808", "0.5727663", "0.57137096", "0.565862", "0.56526506", "0.5644674", "0.56005716", "0.5593892", "0.5568903", "0.55669373", "0.55430824", "0.55396664", "0.55076975", "0.5490069", "0.54595166", "0.539...
0.6239115
0
Adds multiple ``items`` to a Cuckoo Filter ``key`` only if they do not exist yet, allowing the filter to be created with a custom ``capacity` if it does not yet exist. ``items`` must be provided as a list.
def cfInsertNX(self, key, items, capacity=None, nocreate=None): params = [key] self.appendCapacity(params, capacity) self.appendNoCreate(params, nocreate) self.appendItems(params, items) return self.execute_command(self.CF_INSERTNX, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfInsert(self, key, items, capacity=None, nocreate=None):\n params = [key]\n self.appendCapacity(params, capacity)\n self.appendNoCreate(params, nocreate)\n self.appendItems(params, items)\n\n return self.execute_command(self.CF_INSERT, *params)", "def bfInsert(self, key, i...
[ "0.59240794", "0.5758097", "0.56953305", "0.566628", "0.5583825", "0.54426944", "0.5348594", "0.5331868", "0.53105265", "0.5299334", "0.5295137", "0.5279402", "0.5263114", "0.52315265", "0.5224183", "0.52107865", "0.52102387", "0.5190047", "0.51768357", "0.5145545", "0.513705...
0.5301296
9
Checks whether an ``item`` exists in Cuckoo Filter ``key``.
def cfExists(self, key, item): params = [key, item] return self.execute_command(self.CF_EXISTS, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def bfExists(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_EXISTS, *params)", "def item_has_key(self, item, key):\n if key in self._reverse...
[ "0.77499324", "0.746354", "0.74272346", "0.73767465", "0.7316772", "0.7260363", "0.72131723", "0.72131723", "0.72055244", "0.7191276", "0.70799226", "0.7073482", "0.70224476", "0.701648", "0.6999903", "0.69994754", "0.6997449", "0.6991005", "0.6959211", "0.69314", "0.69124043...
0.76099366
1
Deletes ``item`` from ``key``.
def cfDel(self, key, item): params = [key, item] return self.execute_command(self.CF_DEL, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_item(self, key, item):\n self[key].remove(item)\n self._remove_reverse_mapping(item, key)", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.delete(key)", "def __delitem__(self, key):\n self.f_remove(key)", "def delete_item(...
[ "0.78592277", "0.7817014", "0.7817014", "0.7764789", "0.77646744", "0.7654799", "0.76423573", "0.75042117", "0.7504201", "0.74854916", "0.7483856", "0.7433788", "0.74314106", "0.73982894", "0.73960876", "0.7380565", "0.73796886", "0.7366733", "0.73262364", "0.73262364", "0.73...
0.7879571
0
Returns the number of times an ``item`` may be in the ``key``.
def cfCount(self, key, item): params = [key, item] return self.execute_command(self.CF_COUNT, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self, item):\n if item in self: \n return self[item]\n else: \n return 0", "def getKeyCount(self,\n key):\n if (self.hasKey(key) == 1):\n return self.__keyCount[key]\n else:\n return 0", "def count(self, item):...
[ "0.754235", "0.7415456", "0.7260023", "0.71076053", "0.7074687", "0.6912356", "0.6888656", "0.6855236", "0.6669268", "0.666577", "0.6609228", "0.65588135", "0.652157", "0.650688", "0.6440919", "0.6308882", "0.62916195", "0.6257994", "0.62517", "0.6243789", "0.6169287", "0.6...
0.67729896
8
Begins an incremental save of the Cuckoo filter ``key``. This is useful for large Cuckoo filters which cannot fit into the normal SAVE and RESTORE model. The first time this command is called, the value of ``iter`` should be 0. This command will return successive (iter, data) pairs until (0, NULL) to indicate completion.
def cfScandump(self, key, iter): params = [key, iter] return self.execute_command(self.CF_SCANDUMP, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfScandump(self, key, iter):\n params = [key, iter]\n \n return self.execute_command(self.BF_SCANDUMP, *params)", "def save(self) -> dict:\n for pair in self._buffer:\n yield pair.save()", "def __iter__(self):\r\n for item in self._data:\r\n yield it...
[ "0.557857", "0.5078635", "0.49877542", "0.4933926", "0.4932952", "0.48787275", "0.47174305", "0.46994108", "0.4630205", "0.46070197", "0.45404267", "0.45206273", "0.4477999", "0.44704136", "0.4470278", "0.44687983", "0.44679555", "0.4457995", "0.44183904", "0.44140702", "0.43...
0.5583304
0
Restores a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage. This command will overwrite any Cuckoo filter stored under key. Ensure that the Cuckoo filter will not be modified between invocations.
def cfLoadChunk(self, key, iter, data): params = [key, iter, data] return self.execute_command(self.CF_LOADCHUNK, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def removeAutoSaveRestoreFilter(filter):", "def addAutoSaveRestoreFilter(filter):", "def removeAutoSaveFilter(filter):", "def do_reset(self, args):\n\t\tself.parent.filter = {}\n\t\tself.apply_filter()\n\t\tself._update_prompts()", "def highpass_filter_reset(self, data):\n zi = scipy.signal.sosfilt_zi(s...
[ "0.608854", "0.5897821", "0.56328", "0.5349189", "0.5334587", "0.5307931", "0.5285011", "0.52552176", "0.503992", "0.498326", "0.4977669", "0.49759644", "0.48976877", "0.48970455", "0.48909724", "0.48817316", "0.4835844", "0.47675633", "0.47412106", "0.47311813", "0.47181708"...
0.0
-1
Returns size, number of buckets, number of filter, number of items inserted, number of items deleted, bucket size, expansion rate, and max iteration.
def cfInfo(self, key): return self.execute_command(self.CF_INFO, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def length(self):\n # TODO: Count number of key-value entries in each of the buckets\n return self.size\n # for bucket in self.buckets():", "def get_length(self):\n if self.opt.num_buckets > 1:\n return sum([len(bucket) for bucket in self.data])\n else:\n ...
[ "0.6689139", "0.6531792", "0.6451731", "0.639494", "0.6214589", "0.6209066", "0.6115226", "0.61036277", "0.5979696", "0.5947789", "0.5932758", "0.5926838", "0.5926114", "0.59038323", "0.59038323", "0.58981425", "0.5884746", "0.5877877", "0.5871863", "0.5868464", "0.58302355",...
0.0
-1
Initializes a CountMin Sketch ``key`` to dimensions (``width``, ``depth``) specified by user.
def cmsInitByDim(self, key, width, depth): params = [key, width, depth] return self.execute_command(self.CMS_INITBYDIM, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, k:int, **kwargs):\n self.k = k", "def __init__(self, width = 40):\n self.width = width\n self.state = 0\n self.total = 0", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def __init__(self, k_d, k_s=0., p=20., k_m=0., k_a=None)...
[ "0.5303504", "0.5293798", "0.52392584", "0.5238729", "0.52039605", "0.516473", "0.51517797", "0.5144335", "0.5137341", "0.5133678", "0.51300824", "0.5129396", "0.51235527", "0.5118941", "0.508737", "0.5040831", "0.5036856", "0.50319624", "0.50160795", "0.50102866", "0.5008397...
0.7107055
0
Initializes a CountMin Sketch ``key`` to characteristics (``error``, ``probability``) specified by user.
def cmsInitByProb(self, key, error, probability): params = [key, error, probability] return self.execute_command(self.CMS_INITBYPROB, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(X, k):\n if not isinstance(X, np.ndarray) or X.ndim != 2:\n return None, None, None\n if not isinstance(k, int) or k <= 0:\n return None, None, None\n _, d = X.shape\n C, clss = kmeans(X, k)\n pi = 1 / k * np.ones(k)\n m = C\n S = np.array([np.identity(d)] * k)\n ...
[ "0.5790586", "0.5750808", "0.55292356", "0.5522919", "0.5522306", "0.53946745", "0.5369128", "0.53256714", "0.5308711", "0.5307491", "0.5304574", "0.52648705", "0.5253688", "0.5247738", "0.52242655", "0.52212495", "0.52023345", "0.518744", "0.5175941", "0.51755065", "0.516966...
0.6565891
0
Adds/increases ``items`` to a CountMin Sketch ``key`` by ''increments''. Both ``items`` and ``increments`` are lists. Example cmsIncrBy('A', ['foo'], [1])
def cmsIncrBy(self, key, items, increments): params = [key] self.appendItemsAndIncrements(params, items, increments) return self.execute_command(self.CMS_INCRBY, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def incr(self, key, delta=1, callback=None):\n self._incrdecr(\"incr\", key, delta, callback=callback)", "def incr(self, key, delta=1):\n\t\treturn self._incrdecr(\"incr\", key, delta)", "def increase(self, key:str) -> None:\n\n hash_key = self.hash_key(key)\n head = self.array[hash_key] ...
[ "0.5571012", "0.54712886", "0.5445248", "0.53391284", "0.53194755", "0.53172624", "0.5278319", "0.52269477", "0.52106184", "0.51903224", "0.5171463", "0.5151687", "0.5137558", "0.5133662", "0.5103444", "0.5102637", "0.5099352", "0.5090726", "0.5089509", "0.5078452", "0.507450...
0.815581
0
Returns count for an ``item`` from ``key``. Multiple items can be queried with one call.
def cmsQuery(self, key, *items): params = [key] params += items return self.execute_command(self.CMS_QUERY, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfCount(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_COUNT, *params)", "def count(self, item):\n if item in self: \n return self[item]\n else: \n return 0", "def topkCount(self, key, *items):\n params = [key]\n ...
[ "0.7717446", "0.72450763", "0.72318083", "0.68318814", "0.6799004", "0.6794261", "0.66601163", "0.65616006", "0.6446357", "0.64149535", "0.63869673", "0.63530505", "0.6233722", "0.62298226", "0.5982605", "0.597717", "0.59531206", "0.5869809", "0.5767538", "0.57387173", "0.570...
0.0
-1
Merges ``numKeys`` of sketches into ``destKey``. Sketches specified in ``srcKeys``. All sketches must have identical width and depth. ``Weights`` can be used to multiply certain sketches. Default weight is 1. Both ``srcKeys`` and ``weights`` are lists.
def cmsMerge(self, destKey, numKeys, srcKeys, weights=[]): params = [destKey, numKeys] params += srcKeys self.appendWeights(params, weights) return self.execute_command(self.CMS_MERGE, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copySkinWeights(*args, destinationSkin: Union[AnyStr, bool]=\"\", influenceAssociation:\n Union[AnyStr, List[AnyStr], bool]=\"\", mirrorInverse: bool=True, mirrorMode:\n Union[AnyStr, bool]=\"\", noBlendWeight: bool=True, noMirror: bool=True,\n normalize...
[ "0.549154", "0.5410098", "0.520968", "0.49663427", "0.4958788", "0.4947422", "0.48904583", "0.4840362", "0.4815075", "0.47568074", "0.47536424", "0.47400427", "0.4730924", "0.4675684", "0.46377957", "0.46358636", "0.46322548", "0.46215504", "0.4620774", "0.46132562", "0.46081...
0.71798104
0
Returns width, depth and total count of the sketch.
def cmsInfo(self, key): return self.execute_command(self.CMS_INFO, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_depth_shape(self):\n width = -1\n height = -1\n for (serial, device) in self._enabled_devices.items():\n for stream in device.pipeline_profile.get_streams():\n if (rs.stream.depth == stream.stream_type()):\n width = stream.as_video_stream_pr...
[ "0.68444043", "0.6686202", "0.65267235", "0.6432864", "0.6241753", "0.62116355", "0.61252075", "0.60722893", "0.60547465", "0.6041557", "0.5984962", "0.59782183", "0.5959491", "0.5959491", "0.59516287", "0.58823055", "0.5880708", "0.58763885", "0.58660775", "0.5841817", "0.58...
0.0
-1
Creates a new Cuckoo Filter ``key`` with desired probability of false positives ``errorRate`` expected entries to be inserted as ``size``.
def topkReserve(self, key, k, width, depth, decay): params = [key, k, width, depth, decay] return self.execute_command(self.TOPK_RESERVE, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfCreate(self, key, errorRate, capacity, expansion=None, noScale=None):\n params = [key, errorRate, capacity]\n self.appendExpansion(params, expansion)\n self.appendNoScale(params, noScale)\n\n return self.execute_command(self.BF_RESERVE, *params)", "def __init__(self, key_size=10...
[ "0.5268055", "0.5220767", "0.51832116", "0.51610947", "0.5103992", "0.5081258", "0.4993609", "0.4992658", "0.49491334", "0.49001232", "0.4897395", "0.48936903", "0.47643712", "0.4748526", "0.47471568", "0.47449547", "0.4744873", "0.47413242", "0.47407177", "0.473573", "0.4728...
0.0
-1
Adds one ``item`` or more to a Cuckoo Filter ``key``.
def topkAdd(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_ADD, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.CF_ADD, *params)", "def bfAdd(self, key, item):\n params = [key, item]\n \n return self.execute_command(self.BF_ADD, *params)", "def filter_matches_add(self, key, value):\n\t\tif k...
[ "0.72713625", "0.7200462", "0.70165783", "0.67412406", "0.6649933", "0.6583354", "0.6483099", "0.6472438", "0.63995546", "0.6359965", "0.623782", "0.6182844", "0.6171349", "0.6163423", "0.61477727", "0.61084974", "0.61084974", "0.61084974", "0.61075824", "0.60661644", "0.6011...
0.6077388
19
Checks whether one ``item`` or more is a TopK item at ``key``.
def topkQuery(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_QUERY, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, item, key):\n ndx = self._findPostion(key)\n return ndx is not None", "def item_has_key(self, item, key):\n if key in self._reverse_store[item]:\n return True\n else:\n return False", "def has_item(self, usage_key):\r\n try:\r\n ...
[ "0.634382", "0.6114394", "0.611171", "0.57911634", "0.570563", "0.5608097", "0.5595988", "0.5594606", "0.5594606", "0.55808914", "0.5543288", "0.55300564", "0.5517709", "0.5510101", "0.5477933", "0.545195", "0.54484284", "0.5443446", "0.54301524", "0.53889227", "0.5388687", ...
0.60485935
3
Returns count for one ``item`` or more from ``key``.
def topkCount(self, key, *items): params = [key] params += items return self.execute_command(self.TOPK_COUNT, *params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cfCount(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_COUNT, *params)", "def count(self, item):\n if item in self: \n return self[item]\n else: \n return 0", "def getKeyCount(self,\n key):\n if (s...
[ "0.7219808", "0.7189522", "0.71250147", "0.68928266", "0.67245823", "0.6536939", "0.6466635", "0.64591026", "0.643793", "0.63102275", "0.6303591", "0.62909424", "0.62568396", "0.6254312", "0.61354196", "0.61354196", "0.6092566", "0.6090408", "0.6070197", "0.599113", "0.596527...
0.71737325
2
Return full list of items in TopK list of ``key```.
def topkList(self, key): return self.execute_command(self.TOPK_LIST, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_for_key(self, key) -> list:\n return [res[key] for res in self.list]", "def get_list(key):\n ret = hookenv.action_get(key)\n return ret.split() if ret else []", "def topkQuery(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_comman...
[ "0.6962184", "0.657635", "0.63970435", "0.631337", "0.6151614", "0.6138156", "0.6085079", "0.6063242", "0.59806406", "0.59505635", "0.59002477", "0.58530796", "0.5838963", "0.58134776", "0.57977164", "0.57843095", "0.5766967", "0.57466274", "0.57404083", "0.5712583", "0.57029...
0.82646406
0
Returns k, width, depth and decay values of ``key``.
def topkInfo(self, key): return self.execute_command(self.TOPK_INFO, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def k(self):\n return self._k", "def k(self):\n return self._k", "def k(self):\n return self._k", "def _cache_key_from_kvs_key(self, key):\r\n if key.scope == Scope.user_state:\r\n return (key.scope, key.block_scope_id)\r\n elif key.scope == Scope.user_state_summ...
[ "0.537914", "0.537914", "0.537914", "0.53716195", "0.5363166", "0.53627735", "0.53413373", "0.5331749", "0.5288173", "0.52487564", "0.52258396", "0.51713914", "0.5165139", "0.5122881", "0.51025707", "0.5095331", "0.5091592", "0.50683814", "0.5058652", "0.4994853", "0.49159655...
0.0
-1
Return a new pipeline object that can queue multiple commands for later execution. ``transaction`` indicates whether all commands should be executed atomically. Apart from making a group of operations atomic, pipelines are useful for reducing the backandforth overhead between the client and server. Overridden in order to provide the right client through the pipeline.
def pipeline(self, transaction=True, shard_hint=None): p = Pipeline( connection_pool=self.connection_pool, response_callbacks=self.response_callbacks, transaction=transaction, shard_hint=shard_hint) return p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pipeline(self, transaction=True, shard_hint=None):\n p = AsyncPipeline(\n connection_pool=self.client.connection_pool,\n response_callbacks=self._MODULE_CALLBACKS,\n transaction=transaction,\n shard_hint=shard_hint,\n )\n p.index_name = self.inde...
[ "0.6506651", "0.6506439", "0.60975367", "0.5716096", "0.5687337", "0.5610756", "0.5471087", "0.5375021", "0.5373202", "0.5361515", "0.53164816", "0.52698827", "0.5247132", "0.5190451", "0.5172265", "0.51619154", "0.5102271", "0.5086458", "0.5023004", "0.5023004", "0.49856988"...
0.7290739
0
This will call the parent class to validate the connection and initialize the values
def __init__(self, query_params=None, equipment=None, module=None): super().__init__() self.equipment = equipment self.module = module self.query_params = query_params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_connection(self, connection):", "def __init__(self):\n\n\t\tself.connection = self.get_connection()", "def __init__(self, connection):\n self.conn = connection", "def __init__(self, connection):\n\n self._conn = connection", "def init(self, userdata, conn):\r\n pass", "def _...
[ "0.74869585", "0.7371334", "0.7031037", "0.69948906", "0.6977147", "0.68673927", "0.68106675", "0.6798663", "0.6792512", "0.67900044", "0.6789289", "0.6721082", "0.66627276", "0.6661784", "0.66280216", "0.6615395", "0.6609552", "0.65898615", "0.658387", "0.65413547", "0.64912...
0.0
-1
This will return the graph data for the outage module
def get_outage(self): try: assert self._db_connection, { STATUS_KEY: HTTP_500_INTERNAL_SERVER_ERROR, MESSAGE_KEY: DB_ERROR} if self.equipment == COKE_DRUM_VALUE and self.module == OUTAGE_VALUE: """ This will return the graph data for the selected outage module """ query_params = { TAG_NAME_REQUEST: self.query_params.GET[TAG_NAME_REQUEST], START_DATE_REQUEST: self.query_params.GET[START_DATE_REQUEST], END_DATE_REQUEST: self.query_params.GET[END_DATE_REQUEST] } MODULE_LEVEL_MULTILINE_TAG = tuple(LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH) if MULTILINE_REQUEST in self.query_params.GET: """ This will return the graph data for the actual and predicted tags for the selected outage module """ query_params[MULTILINE_REQUEST] = self.query_params.GET[MULTILINE_REQUEST] if query_params: if START_DATE_REQUEST not in query_params or not query_params[START_DATE_REQUEST] and \ MULTILINE_REQUEST not in query_params: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH_NULL_START_DATE.format( self.module, query_params[TAG_NAME_REQUEST], query_params[END_DATE_REQUEST])) elif query_params[START_DATE_REQUEST] and MULTILINE_REQUEST not in query_params: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH.format( self.module, query_params[TAG_NAME_REQUEST], query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) elif query_params[START_DATE_REQUEST] and query_params[MULTILINE_REQUEST]: if query_params[TAG_NAME_REQUEST] in LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH: graph_data = django_search_query_all( DETAILED_OUTAGE_MODULE_MULTILINE_GRAPH.format( self.module, MODULE_LEVEL_MULTILINE_TAG, query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) else: graph_data = django_search_query_all( DETAILED_OUTAGE_GRAPH.format( self.module, query_params[TAG_NAME_REQUEST], query_params[START_DATE_REQUEST], query_params[END_DATE_REQUEST])) df_data = pd.DataFrame(graph_data) min_max = django_search_query_all( MIN_MAX_DATA.format( self.module, 
query_params[TAG_NAME_REQUEST] )) df_min_max_data = pd.DataFrame(min_max) graph = [] if not df_data.empty: df_data = df_data.where(pd.notnull(df_data) == True, None) df_data.sort_values(TIMESTAMP_KEY, ascending=True, inplace=True) df_unit = df_data[UNIT].iloc[0] df_description = df_data[DESCRIPTION].iloc[0] df_timestamp = list(dict.fromkeys(list(df_data[TIMESTAMP_KEY]))) if query_params[TAG_NAME_REQUEST] in LIST_OF_OUTAGE_MODULE_LEVEL_MULTILINE_TAGS_GRAPH: df_result = df_data.groupby(TAG_NAME_REQUEST) actual_north_data = [] predicted_north_data = [] actual_south_data = [] predicted_south_data = [] if len(df_result) == 2: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == query_params[TAG_NAME_REQUEST]][ DESCRIPTION].iloc[0] df_north_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_north_data = list(df_north_actual['north_drum_tag_value']) df_north_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_north_data = list(df_north_predicted['north_drum_tag_value']) df_south_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_south_data = list(df_south_actual['south_drum_tag_value']) df_south_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_south_data = list(df_south_predicted['south_drum_tag_value']) elif len(df_result) == 1: if df_result[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_ACTUAL_TAG: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_ACTUAL_TAG][ DESCRIPTION].iloc[0] df_north_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_north_data = list(df_north_actual['north_drum_tag_value']) df_south_actual = df_result.get_group(OUTAGE_MODULE_LEVEL_ACTUAL_TAG) actual_south_data = list(df_south_actual['south_drum_tag_value']) elif df_result[TAG_NAME_REQUEST] != OUTAGE_MODULE_LEVEL_ACTUAL_TAG: df_description = \ df_data[df_data[TAG_NAME_REQUEST] == OUTAGE_MODULE_LEVEL_PREDICTED_TAG][ DESCRIPTION].iloc[0] df_north_predicted = 
df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_north_data = list(df_north_predicted['north_drum_tag_value']) df_south_predicted = df_result.get_group(OUTAGE_MODULE_LEVEL_PREDICTED_TAG) predicted_south_data = list(df_south_predicted['south_drum_tag_value']) temp = {"north_actual": actual_north_data, "north_predicted": predicted_north_data, "south_actual": actual_south_data, "south_predicted": predicted_south_data, "x_axis": df_timestamp, "unit": df_unit, "description": df_description} else: temp = {"y_axis": list(df_data[TAG_VALUE]), "x_axis": df_timestamp, "unit": df_unit, "description": df_description} if not df_min_max_data.empty: temp["min_data"] = df_min_max_data[MIN_VALUE].iloc[0] temp["max_data"] = df_min_max_data[MAX_VALUE].iloc[0] else: temp["min_data"] = None temp["max_data"] = None graph.append(temp) return graph except AssertionError as e: log_error("Exception due to : %s" + str(e)) return asert_res(e) except Exception as e: log_error("Exception due to : %s" + str(e)) return json_InternalServerError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_outage_graph(request, equipment_name=None, module_name=None):\r\n query_params, obj = None, None\r\n try:\r\n\r\n query_params = request\r\n\r\n except:\r\n pass\r\n\r\n try:\r\n if request.method == GET_REQUEST:\r\n obj = OutageGraph(query_params, equipment_name...
[ "0.7131809", "0.6295313", "0.62823576", "0.62179625", "0.6100377", "0.59277797", "0.5841857", "0.58356446", "0.5830653", "0.5827107", "0.5826806", "0.57876396", "0.5763705", "0.57545763", "0.56935877", "0.5691411", "0.5688853", "0.56795114", "0.5666311", "0.56479836", "0.5642...
0.7193129
0
This function will return the graph data for the selected module
def get_outage_graph(request, equipment_name=None, module_name=None): query_params, obj = None, None try: query_params = request except: pass try: if request.method == GET_REQUEST: obj = OutageGraph(query_params, equipment_name, module_name) return obj.get_outage() log_debug(METHOD_NOT_ALLOWED) return JsonResponse({MESSAGE_KEY: METHOD_NOT_ALLOWED}, status=HTTP_405_METHOD_NOT_ALLOWED) except Exception as e: excMsg = "get_outage_graph_data API : " + str(error_instance(e)) return excMsg finally: if obj: del obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_graph(self) -> dict:\n response = requests.get(self.channel, params=\"get_graph\")\n return json_to_graph(response.content)", "def getGraph(self):\n\t\treturn self.graph", "def get_graphs_data_connection(self):\n return self.m_connection.graphs_data", "def get_graph(**options):\r...
[ "0.67377543", "0.6618313", "0.6539239", "0.6381631", "0.6221923", "0.60826457", "0.60516125", "0.6034938", "0.5980619", "0.5941827", "0.58881456", "0.5873024", "0.5869902", "0.5853487", "0.58521986", "0.58521986", "0.58454376", "0.5823218", "0.57971686", "0.57595277", "0.5745...
0.0
-1
get the available services to be activated read the models dir to find the services installed to be added to the system by the administrator
def available_services(): all_datas = () data = () for class_path in settings.TH_SERVICES: class_name = class_path.rsplit('.', 1)[1] # 2nd array position contains the name of the service data = (class_name, class_name.rsplit('Service', 1)[1]) all_datas = (data,) + all_datas return all_datas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getServices(self):\n pass", "def available_services(cls) -> List[str]:\n ret = []\n for (_, name, _) in pkgutil.iter_modules([str(SERVICES_PATH)]):\n ret.append(name)\n return ret", "def available_services(self) -> list[str]:\r\n return self.services", "def c...
[ "0.73665935", "0.72499305", "0.7065005", "0.69415635", "0.691009", "0.68078464", "0.67918813", "0.6701775", "0.6570361", "0.6570103", "0.6446339", "0.64197904", "0.6369506", "0.6263007", "0.6227629", "0.6224836", "0.6223671", "0.62065744", "0.61650133", "0.61564034", "0.61475...
0.7025927
3
Initialize the parameters of the logistic regression
def __init__(self, input, n_in, n_out,binary=True,stochastic=True): # initialize with 0 the weights W as a matrix of shape (n_in, n_out) self.W = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='W', borrow=True ) # initialize the biases b as a vector of n_out 0s self.b = theano.shared( value=numpy.zeros( (n_out,), dtype=theano.config.floatX ), name='b', borrow=True ) self.Wb = theano.shared( value=numpy.zeros( (n_in, n_out), dtype=theano.config.floatX ), name='Wb', borrow=True ) if (binary): self.wrt = [self.Wb, self.b] self.p_y_given_x = T.nnet.softmax(T.dot(input, self.Wb) + self.b) self.output=T.dot(input, self.Wb) + self.b else: self.wrt = [self.W, self.b] self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b) self.output=self.p_y_given_x # parameters of the model # symbolic expression for computing the matrix of class-membership # probabilities # Where: # W is a matrix where column-k represent the separation hyperplane for # class-k # x is a matrix where row-j represents input training sample-j # b is a vector where element-k represent the free parameter of # hyperplane-k # symbolic description of how to compute prediction as class whose # probability is maximal self.y_pred = T.argmax(self.p_y_given_x, axis=1) # keep track of model input self.input = input # parameters of the model self.params = [self.W,self.b] self.Ws=[self.W,self.Wb]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_initial_params(model: LogisticRegression):\n n_classes = 15 # threat types\n n_features = 33 # Number of features in dataset\n model.classes_ = np.array([i for i in range(15)])\n\n model.coef_ = np.zeros((n_classes, n_features))\n if model.fit_intercept:\n model.intercept_ = np.zeros(...
[ "0.77980274", "0.7470837", "0.72140586", "0.6893265", "0.68832946", "0.6734596", "0.6729252", "0.66861194", "0.6655325", "0.65969175", "0.6565215", "0.65200406", "0.6474855", "0.64675874", "0.6451465", "0.6448672", "0.644657", "0.6324524", "0.6307155", "0.6290791", "0.6231589...
0.0
-1
Return a float representing the number of errors in the minibatch over the total number of examples of the minibatch ; zero one loss over the size of the minibatch
def errors(self, target): return T.mean(T.neq(self.y_pred, T.argmax(target, axis=1)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)", "def nb_errors_nb(self, input_data, target):\n input_data_resize = input_data.view(2000, 1,...
[ "0.64593107", "0.6453829", "0.6450095", "0.6359758", "0.6284254", "0.6248897", "0.61730903", "0.6143888", "0.6141596", "0.61282915", "0.61234075", "0.6117177", "0.6080115", "0.60368556", "0.60218304", "0.60163116", "0.60140604", "0.5989487", "0.59858793", "0.59834915", "0.597...
0.0
-1
Initialize the parameters for the multilayer perceptron
def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, binary, stochastic): self.binary=binary self.stochastic=stochastic # Since we are dealing with a one hidden layer MLP, this will translate # into a HiddenLayer with a tanh activation function connected to the # LogisticRegression layer; the activation function can be replaced by # sigmoid or any other nonlinear function. self.hiddenLayers = [] self.normLayers=[] for i in xrange(n_hiddenLayers): h_input = input if i == 0 else self.hiddenLayers[i-1].output h_in = n_in if i == 0 else n_hidden # if binary==True, we append a binary hiddenlayer if binary==True: self.hiddenLayers.append( HiddenLayer( rng=rng, input=h_input, n_in=h_in, n_out=n_hidden, activation=T.tanh, binary=True, stochastic=stochastic )) self.normLayers.append( BatchNormLayer( input=self.hiddenLayers[i].output, n_in=n_hidden, n_out=n_hidden )) else: self.hiddenLayers.append( HiddenLayer( rng=rng, input=h_input, n_in=h_in, n_out=n_hidden, activation=T.tanh, binary=False, stochastic=False )) # The logistic regression layer gets as input the hidden units # of the hidden layer self.logRegressionLayer = LogisticRegression( input=self.hiddenLayers[-1].output, n_in=n_hidden, n_out=n_out, binary=binary, stochastic=stochastic ) # same holds for the function computing the number of errors self.errors = self.logRegressionLayer.errors # the parameters of the model are the parameters of the two layer it is # made out of self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params self.wrt = sum([x.wrt for x in self.hiddenLayers], []) + self.logRegressionLayer.wrt self.Ws = sum([x.Ws for x in self.hiddenLayers], []) + self.logRegressionLayer.Ws # keep track of model input self.input = input
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def __init__(self, hidden_layer_sizes, activation='relu', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Multi-layer Perceptron\")\n self.hidden_layer_sizes = hidden_layer_sizes\n ...
[ "0.77758974", "0.7704682", "0.76280135", "0.7340647", "0.7220832", "0.71430016", "0.7137226", "0.7131746", "0.7091034", "0.70712316", "0.70580506", "0.6996698", "0.6996188", "0.6979724", "0.68992215", "0.6892215", "0.6837597", "0.6826578", "0.67905426", "0.67721635", "0.67602...
0.0
-1
This class is made to support a variable number of layers.
def __init__(self, numpy_rng, theano_rng=None, n_ins=24, ################################################### hidden_layers_sizes=[24,18,12,6], n_outs=2): self.sigmoid_layers = [] self.rbm_layers = [] self.params = [] self.n_layers = len(hidden_layers_sizes) assert self.n_layers > 0 if not theano_rng: theano_rng = MRG_RandomStreams(numpy_rng.randint(2 ** 30)) # allocate symbolic variables for the data self.x = T.matrix('x') # the data is presented as rasterized images self.y = T.ivector('y') # the labels are presented as 1D vector # of [int] labels self.z = T.matrix('z') #print self.x.type # end-snippet-1 # The DBN is an MLP, for which all weights of intermediate # layers are shared with a different RBM. We will first # construct the DBN as a deep multilayer perceptron, and when # constructing each sigmoidal layer we also construct an RBM # that shares weights with that layer. During pretraining we # will train these RBMs (which will lead to chainging the # weights of the MLP as well) During finetuning we will finish # training the DBN by doing stochastic gradient descent on the # MLP. for i in range(self.n_layers): # construct the sigmoidal layer # the size of the input is either the number of hidden # units of the layer below or the input size if we are on # the first layer if i == 0: input_size = n_ins else: input_size = hidden_layers_sizes[i - 1] # the input to this layer is either the activation of the # hidden layer below or the input of the DBN if you are on # the first layer if i == 0: layer_input = self.x else: layer_input = self.sigmoid_layers[-1].output self.z = layer_input sigmoid_layer = HiddenLayer(rng=numpy_rng, input=layer_input, n_in=input_size, n_out=hidden_layers_sizes[i], activation=T.nnet.sigmoid) # add the layer to our list of layers self.sigmoid_layers.append(sigmoid_layer) # its arguably a philosophical question... but we are # going to only declare that the parameters of the # sigmoid_layers are parameters of the DBN. 
The visible # biases in the RBM are parameters of those RBMs, but not # of the DBN. self.params.extend(sigmoid_layer.params) # Construct an RBM that shared weights with this layer rbm_layer = RBM(numpy_rng=numpy_rng, theano_rng=theano_rng, input=layer_input, n_visible=input_size, n_hidden=hidden_layers_sizes[i], W=sigmoid_layer.W, hbias=sigmoid_layer.b) self.rbm_layers.append(rbm_layer) #print(type(self.sigmoid_layers[-1].output)) # We now need to add a logistic layer on top of the MLP self.logLayer = LogisticRegression( input=self.sigmoid_layers[-1].output, n_in=hidden_layers_sizes[-1], n_out=n_outs) self.params.extend(self.logLayer.params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_layers(self):\n raise NotImplementedError", "def layers(self): # -> LayerView:\n ...", "def num_layers(self): # -> int:\n ...", "def layers(self, x):\n raise NotImplementedError", "def __init__(self, layers):\n\n\t\tself.layers = layers", "def run(layers):", "def __ini...
[ "0.75543296", "0.7163699", "0.7143102", "0.70626694", "0.698838", "0.66191417", "0.6470164", "0.64236826", "0.638904", "0.637632", "0.6366208", "0.6310667", "0.6310052", "0.6291664", "0.62838024", "0.6280735", "0.62782174", "0.6276819", "0.62729853", "0.62683105", "0.6253566"...
0.0
-1
Demonstrates how to train and test a Deep Belief Network. This is demonstrated on MNIST.
def test_DBN(finetune_lr=0.2, pretraining_epochs=30, pretrain_lr=0.1, k=1, training_epochs=200, batch_size = 1): ################################################################################################ ###############################################################load data#################################################################### datasets = numpy.loadtxt("german2.csv", delimiter = "," , usecols=(range(24)) , dtype=theano.config.floatX) labelsets = numpy.loadtxt("german2.csv", delimiter = "," , usecols=(24,) , dtype=int) train_set_x = theano.shared(numpy.asarray(datasets[0:600], dtype=theano.config.floatX)) train_set_y = theano.shared(numpy.asarray(labelsets[0:600], dtype=int)) valid_set_x = theano.shared(numpy.asarray(datasets[0:600], dtype=theano.config.floatX)) valid_set_y = theano.shared(numpy.asarray(labelsets[0:600], dtype=int)) test_set_x = theano.shared(numpy.asarray(datasets[800:999], dtype=theano.config.floatX)) test_set_y = theano.shared(numpy.asarray(labelsets[800:999], dtype=int)) ################################################################################################################################################ # compute number of minibatches for training, validation and testing n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size # numpy random generator numpy_rng = numpy.random.RandomState(123) print '... building the model' # construct the Deep Belief Network ###########################################change feature size : n_ins and label size: n_outs################################################################ dbn = DBN(numpy_rng=numpy_rng, n_ins=24, hidden_layers_sizes=[24,18,12,6], n_outs=2) #print dbn.params[0].eval() ############################################################################################################################################### # start-snippet-2 ######################### # PRETRAINING THE MODEL # ######################### print '... 
getting the pretraining functions' pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x, batch_size=batch_size, k=k) print '... pre-training the model' start_time = timeit.default_timer() ## Pre-train layer-wise for i in range(dbn.n_layers): # go through pretraining epochs for epoch in range(pretraining_epochs): # go through the training set c = [] for batch_index in range(n_train_batches): c.append(pretraining_fns[i](index=batch_index, lr=pretrain_lr)) print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch), print numpy.mean(c) RbmParamList = [] for i in dbn.params: print i.eval().shape RbmParamList.append(i.eval()) with open('RbmParamList.pkl', 'w') as f: pickle.dump(RbmParamList, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n dataset = MNIST(BATCH_SIZE)\n \n inputs = Value(type=tf.float32, shape=(None, 784), cls=None)\n targets = Value(type=tf.int64, shape=(None), cls=10)\n fc_hidden = FCHidden(weights=[300, 150])\n\n config = Config(inputs, targets, fc_hidden, LEARNING_RATE)\n\n network_builder = FFNetworkBuilder(...
[ "0.70206416", "0.6779854", "0.67381644", "0.6688426", "0.6619917", "0.66178626", "0.66168207", "0.65870345", "0.6558734", "0.6551392", "0.654411", "0.6518047", "0.6504657", "0.64782375", "0.64767015", "0.64680374", "0.6459553", "0.63857526", "0.63578683", "0.6350918", "0.6332...
0.5869477
72
Return a mock component of a general model.
def mock_component(): component = Mock() component.free_parameters = flex.double([1.0]) component.free_parameter_esds = None component.n_params = 1 component.var_cov_matrix = sparse.matrix(1, 1) return component
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_model(self) -> None:\n get_model()", "def real_model(request):\n return request.config.option.real_model", "def model(self) -> Type[Model]:", "def model_name(self) -> str:\n return \"mock-model-name\"", "def test_get_model_method(self):\n # arrange\n model_manage...
[ "0.6227962", "0.61123717", "0.60828376", "0.60729104", "0.59207785", "0.58533525", "0.5756956", "0.5729371", "0.5705438", "0.5661981", "0.56480694", "0.55965275", "0.5589341", "0.55885005", "0.553621", "0.55356354", "0.5533403", "0.55187505", "0.55115426", "0.5495379", "0.547...
0.63239694
0
Return a mock component of a general model.
def mock_scaling_component(n_refl): component = mock_component() component.calculate_scales.return_value = flex.double(n_refl, 1.0) component.n_refl = [n_refl] return component
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mock_component():\n component = Mock()\n component.free_parameters = flex.double([1.0])\n component.free_parameter_esds = None\n component.n_params = 1\n component.var_cov_matrix = sparse.matrix(1, 1)\n return component", "def test_get_model(self) -> None:\n get_model()", "def real...
[ "0.63239694", "0.6227962", "0.61123717", "0.60828376", "0.60729104", "0.59207785", "0.58533525", "0.5756956", "0.5729371", "0.5705438", "0.5661981", "0.56480694", "0.55965275", "0.5589341", "0.55885005", "0.553621", "0.55356354", "0.5533403", "0.55187505", "0.55115426", "0.54...
0.0
-1
Return a mock data manager of a general model.
def mock_data_manager(components): dm = Mock() dm.components = components dm.fixed_components = [] return dm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_dummy_data_manager():\n import repoze.filesafe\n repoze.filesafe._local.manager = mgr = DummyDataManager()\n return mgr", "def _get_data_manager(self):\n\n ftype = self.conf['General']['save_as']\n if ftype == 'npz':\n return NPZDataManager(self.conf, self.log)\n ...
[ "0.63638645", "0.62232745", "0.5958567", "0.5882889", "0.5777589", "0.57463723", "0.5660761", "0.5626389", "0.5614345", "0.5575118", "0.55164546", "0.5475766", "0.54707235", "0.54639786", "0.54465616", "0.5437837", "0.5368954", "0.53603864", "0.5357883", "0.5350542", "0.53417...
0.6330217
1
Test for the general active_parameter_manage class.
def test_general_apm(): components = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } apm = active_parameter_manager(components, ["scale", "decay"]) assert "decay" in apm.components_list assert "scale" in apm.components_list assert "absorption" not in apm.components_list assert apm.n_active_params == ( components["scale"].n_params + components["decay"].n_params ) n_cumul = 0 for component in apm.components: assert apm.components[component]["n_params"] == components[component].n_params assert apm.components[component]["start_idx"] == n_cumul assert ( apm.components[component]["end_idx"] == n_cumul + apm.components[component]["n_params"] ) n_cumul += apm.components[component]["n_params"] apm.set_param_vals(flex.double([2.0, 1.5])) assert apm.get_param_vals() == flex.double([2.0, 1.5]) # Test params were updated in components assert list(components["scale"].free_parameters) == [2.0] assert list(components["decay"].free_parameters) == [1.5] # Test selection of parameters decay_params = apm.select_parameters("decay") assert len(decay_params) == 1 assert decay_params[0] == 1.5 # Test calculate model state uncertainties var_cov = flex.double([1.0, 0.5, 0.5, 2.0]) var_cov.reshape(flex.grid(2, 2)) apm.calculate_model_state_uncertainties(var_cov) assert components["scale"].var_cov_matrix[0, 0] == 1.0 assert components["decay"].var_cov_matrix[0, 0] == 2.0 # Test set param esds. apm.set_param_esds(flex.double([0.1, 0.2])) assert components["scale"].free_parameter_esds == flex.double([0.1]) assert components["decay"].free_parameter_esds == flex.double([0.2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_overridable_parameter() -> None:\n param_dict = ParamClass.get_overridable_parameters()\n assert \"name\" in param_dict\n assert \"flag\" in param_dict\n assert \"not_flag\" in param_dict\n assert \"seed\" in param_dict\n assert \"number\" in param_dict\n assert \"integers\" in param_...
[ "0.62300676", "0.5915647", "0.5863919", "0.56514627", "0.55497897", "0.5497675", "0.5491017", "0.54151815", "0.53953904", "0.53859353", "0.5378423", "0.5352581", "0.53417635", "0.53193915", "0.52967614", "0.5275244", "0.5273615", "0.5203125", "0.51854485", "0.51483536", "0.51...
0.5788472
3
Test for the general multi_active_parameter_manage class.
def test_multi_apm(): components_1 = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } components_2 = {"scale": mock_component(), "decay": mock_component()} multi_apm = multi_active_parameter_manager( ScalingTarget(), [components_1, components_2], [["scale", "decay"], ["scale"]], active_parameter_manager, ) # Test correct setup of apm_list attribute. for apm in multi_apm.apm_list: assert isinstance(apm, active_parameter_manager) assert len(multi_apm.apm_list) == 2 assert multi_apm.components_list == ["scale", "decay", "scale"] assert multi_apm.n_active_params == 3 assert multi_apm.apm_data[0] == {"start_idx": 0, "end_idx": 2} assert multi_apm.apm_data[1] == {"start_idx": 2, "end_idx": 3} # Test parameter selection. multi_apm.set_param_vals(flex.double([3.0, 2.5, 2.0])) assert multi_apm.get_param_vals() == flex.double([3.0, 2.5, 2.0]) assert multi_apm.select_parameters(0) == flex.double([3.0, 2.5]) assert multi_apm.select_parameters(1) == flex.double([2.0]) # Test setting parameter esds. multi_apm.set_param_esds(flex.double([0.1, 0.2, 0.3])) assert components_1["scale"].free_parameter_esds == flex.double([0.1]) assert components_1["decay"].free_parameter_esds == flex.double([0.2]) assert components_2["scale"].free_parameter_esds == flex.double([0.3]) # Test setting var_cov matrices for each component. var_cov = flex.double([1.0, 0.5, 0.5, 0.5, 2.0, 0.5, 0.5, 0.5, 3.0]) var_cov.reshape(flex.grid(3, 3)) multi_apm.calculate_model_state_uncertainties(var_cov) assert components_1["scale"].var_cov_matrix[0, 0] == 1.0 assert components_1["decay"].var_cov_matrix[0, 0] == 2.0 assert components_2["scale"].var_cov_matrix[0, 0] == 3.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_scaling_active_parameter_manager():\n components_2 = {\"1\": mock_scaling_component(2), \"2\": mock_scaling_component(2)}\n scaling_apm = scaling_active_parameter_manager(components_2, [\"1\"])\n assert list(scaling_apm.constant_g_values[0]) == list(\n components_2[\"2\"].calculate_scales(...
[ "0.5979494", "0.564233", "0.56316334", "0.56300414", "0.55950177", "0.55662066", "0.5561874", "0.5522249", "0.55102235", "0.5508482", "0.5448773", "0.54069316", "0.5395452", "0.53479075", "0.5345451", "0.532887", "0.53067946", "0.5266664", "0.5219508", "0.52126247", "0.517741...
0.5946211
1
Test the apm factory for concurrent refinement.
def test_ParameterManagerGenerator_concurrent(): components_1 = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } data_manager = mock_data_manager(components_1) pmg = ParameterManagerGenerator( [data_manager], apm_type=active_parameter_manager, target=ScalingTarget(), mode="concurrent", ) apms = pmg.parameter_managers() assert len(apms) == 1 apm = apms[0] assert isinstance(apm, multi_active_parameter_manager) assert "scale" in apm.components_list assert "decay" in apm.components_list assert "absorption" in apm.components_list components_1 = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } components_2 = {"1": mock_component(), "2": mock_component()} data_manager_1 = mock_data_manager(components_1) data_manager_2 = mock_data_manager(components_2) pmg = ParameterManagerGenerator( [data_manager_1, data_manager_2], apm_type=active_parameter_manager, target=ScalingTarget(), mode="concurrent", ) multi_apms = pmg.parameter_managers() assert len(multi_apms) == 1 multi_apm = multi_apms[0] assert isinstance(multi_apm, multi_active_parameter_manager) for apm in multi_apm.apm_list: assert isinstance(apm, active_parameter_manager) assert "scale" in multi_apm.apm_list[0].components_list assert "decay" in multi_apm.apm_list[0].components_list assert "absorption" in multi_apm.apm_list[0].components_list assert "1" in multi_apm.apm_list[1].components_list assert "2" in multi_apm.apm_list[1].components_list # now try fixing a component data_manager.fixed_components = ["absorption"] pmg = ParameterManagerGenerator( [data_manager], apm_type=active_parameter_manager, target=ScalingTarget(), mode="concurrent", ) apms = pmg.parameter_managers() assert len(apms) == 1 apm = apms[0] assert isinstance(apm, multi_active_parameter_manager) assert "scale" in apm.components_list assert "decay" in apm.components_list assert "absorption" not in apm.components_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_multiple_pis_simultaneously_to_vpg_check_reallocation(self):\n proj_obj, fabric_obj, pr_objs = self._create_prerequisites(\n create_second_pr=True)\n test_id = self.id()\n VPG_CLASS = self._api_server.get_resource_class('virtual-port-group')\n org_process_ae_id =...
[ "0.588525", "0.58404994", "0.5700447", "0.5668184", "0.5639154", "0.5630297", "0.5601512", "0.558073", "0.5506044", "0.5464134", "0.5464046", "0.5454045", "0.54427963", "0.5430056", "0.54291415", "0.54067427", "0.54062814", "0.5388401", "0.53826404", "0.53818905", "0.53586805...
0.63022053
0
Test the apm factory for consecutive refinement.
def test_ParameterManagerGenerator_consecutive(): components_1 = { "scale": mock_component(), "decay": mock_component(), "absorption": mock_component(), } data_manager = mock_data_manager(components_1) data_manager.consecutive_refinement_order = [["scale", "decay"], ["absorption"]] # Test single dataset case. pmg = ParameterManagerGenerator( [data_manager], apm_type=active_parameter_manager, target=ScalingTarget(), mode="consecutive", ) apms = list(pmg.parameter_managers()) assert len(apms) == 2 apm = apms[0] assert isinstance(apm, multi_active_parameter_manager) assert "scale" in apm.components_list assert "decay" in apm.components_list assert "absorption" not in apm.components_list apm = apms[1] assert isinstance(apm, multi_active_parameter_manager) assert "scale" not in apm.components_list assert "decay" not in apm.components_list assert "absorption" in apm.components_list # Test multi dataset case. components_2 = {"1": mock_component(), "2": mock_component()} data_manager_2 = mock_data_manager(components_2) data_manager_2.consecutive_refinement_order = [["1"], ["2"]] pmg = ParameterManagerGenerator( [data_manager, data_manager_2], apm_type=active_parameter_manager, target=ScalingTarget(), mode="consecutive", ) apms = list(pmg.parameter_managers()) assert len(apms) == 2 multi_apm = apms[0] assert isinstance(multi_apm, multi_active_parameter_manager) apm_1 = multi_apm.apm_list[0] assert "scale" in apm_1.components_list assert "decay" in apm_1.components_list assert "absorption" not in apm_1.components_list assert multi_apm.apm_list[1].components_list == ["1"] multi_apm = apms[1] assert isinstance(multi_apm, multi_active_parameter_manager) assert multi_apm.apm_list[0].components_list == ["absorption"] assert multi_apm.apm_list[1].components_list == ["2"] # Test multi dataset case with different number of cycles for each data_manager. 
components_2 = {"1": mock_component()} data_manager_2 = mock_data_manager(components_2) data_manager_2.consecutive_refinement_order = [["1"], ["2"]] pmg = ParameterManagerGenerator( [data_manager, data_manager_2], apm_type=active_parameter_manager, target=ScalingTarget(), mode="consecutive", ) assert pmg.param_lists[0] == [["scale", "decay"], ["absorption"]] assert pmg.param_lists[1] == [["1"]] apms = list(pmg.parameter_managers()) assert len(apms) == 2 multi_apm = apms[0] assert isinstance(multi_apm, multi_active_parameter_manager) apm_1 = multi_apm.apm_list[0] assert "scale" in apm_1.components_list assert "decay" in apm_1.components_list assert "absorption" not in apm_1.components_list assert multi_apm.apm_list[1].components_list == ["1"] multi_apm = apms[1] assert isinstance(multi_apm, multi_active_parameter_manager) assert multi_apm.apm_list[0].components_list == ["absorption"] # Only change relative to previous test case. assert multi_apm.apm_list[1].components_list == [] # Test fixing the decay parameter. data_manager.fixed_components = ["decay"] pmg = ParameterManagerGenerator( [data_manager], apm_type=active_parameter_manager, target=ScalingTarget(), mode="consecutive", ) apms = list(pmg.parameter_managers()) assert len(apms) == 2 apm = apms[0] assert isinstance(apm, multi_active_parameter_manager) assert "scale" in apm.components_list assert "decay" not in apm.components_list assert "absorption" not in apm.components_list apm = apms[1] assert isinstance(apm, multi_active_parameter_manager) assert "scale" not in apm.components_list assert "decay" not in apm.components_list assert "absorption" in apm.components_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_general_apm():\n components = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n\n apm = active_parameter_manager(components, [\"scale\", \"decay\"])\n assert \"decay\" in apm.components_list\n assert \"scale\" in a...
[ "0.5912187", "0.57896715", "0.57230085", "0.56915814", "0.5550722", "0.55056584", "0.5462291", "0.54244", "0.54005516", "0.53685725", "0.53471875", "0.53402764", "0.5339042", "0.53294677", "0.528481", "0.5281426", "0.52730227", "0.5270677", "0.52555203", "0.5252101", "0.52428...
0.6325197
0
Test the scalingspecific parameter manager.
def test_scaling_active_parameter_manager(): components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(2)} scaling_apm = scaling_active_parameter_manager(components_2, ["1"]) assert list(scaling_apm.constant_g_values[0]) == list( components_2["2"].calculate_scales() ) assert len(scaling_apm.constant_g_values) == 1 assert scaling_apm.n_obs == [2] # Test that no constant_g_values if both components selected scaling_apm = scaling_active_parameter_manager(components_2, ["1", "2"]) assert scaling_apm.constant_g_values is None # Check that one can't initialise with an unequal number of reflections, # either within the selection or overall. with pytest.raises(AssertionError): components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(1)} scaling_apm = scaling_active_parameter_manager(components_2, ["1", "2"]) with pytest.raises(AssertionError): components_2 = {"1": mock_scaling_component(2), "2": mock_scaling_component(1)} scaling_apm = scaling_active_parameter_manager(components_2, ["1"]) data_manager = mock_data_manager(components_2) pmg = ScalingParameterManagerGenerator( [data_manager], target=ScalingTarget(), mode="concurrent" ) assert isinstance(pmg.apm_type, type(scaling_active_parameter_manager))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_measure_parameters(self):\n pass", "def test_scale(app):\n\n assert False", "def test_general_apm():\n components = {\n \"scale\": mock_component(),\n \"decay\": mock_component(),\n \"absorption\": mock_component(),\n }\n\n apm = active_parameter_manager(com...
[ "0.6702554", "0.6387912", "0.6046434", "0.6030869", "0.59028023", "0.5901838", "0.5870547", "0.58489865", "0.5837942", "0.583072", "0.5788725", "0.57385606", "0.57318795", "0.5716319", "0.56859964", "0.56810164", "0.56458664", "0.5612986", "0.56105846", "0.5608693", "0.556143...
0.7040565
0
Sentiment stats which displays buzz, news score, articles last week, articles weekly average, bullish vs bearish percentages, sector average bullish percentage, and sector average news score
def sentiment_stats(other_args: List[str], ticker: str): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="stats", description=""" Sentiment stats which displays buzz, news score, articles last week, articles weekly average, bullish vs bearish percentages, sector average bullish percentage, and sector average news score. [Source: https://finnhub.io] """, ) try: ns_parser = parse_known_args_and_warn(parser, other_args) if not ns_parser: return d_stats = get_sentiment_stats(ticker) if d_stats: print(f"Buzz: {round(100*d_stats['buzz']['buzz'],2)} %") print(f"News Score: {round(100*d_stats['companyNewsScore'],2)} %") print("") print(f"Articles Last Week: {d_stats['buzz']['articlesInLastWeek']}") print(f"Articles Weekly Average: {d_stats['buzz']['weeklyAverage']}") print("") print(f"Bullish: {round(100*d_stats['sentiment']['bullishPercent'],2)} %") print(f"Bearish: {round(100*d_stats['sentiment']['bearishPercent'],2)} %") print("") print( f"Sector Average Bullish: {round(100*d_stats['sectorAverageBullishPercent'],2)} %" ) print( f"Sector Average News Score: {round(100*d_stats['sectorAverageNewsScore'],2)} %" ) else: print("No sentiment stats found.") print("") except Exception as e: print(e, "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sentiment(self) -> Dict[str, float]:", "def overall_sentiment(self, _testing=False):\n df = self.df.copy()\n\n sentiment_scores = df[self.review_column].apply(self.sentiment_for_one_comment)\n self.sentiment_scores_all = sentiment_scores\n print(\"Average sentiment score: {}\".for...
[ "0.71262354", "0.6855994", "0.67449534", "0.6719744", "0.66272366", "0.6555126", "0.6548427", "0.65435684", "0.6531834", "0.6526213", "0.64024013", "0.63911426", "0.6358385", "0.6352042", "0.6334273", "0.6325912", "0.6321714", "0.6317291", "0.62689185", "0.62674236", "0.62538...
0.60600317
29
_deserialize defines a custom Marshmallow Schema Field that takes in mutlitype input data to applevel objects.
def _deserialize( self, value: Any, attr: str = None, data: Mapping[str, Any] = None, **kwargs ): errors = [] # iterate through the types being passed into UnionField via val_types for field in self.valid_types: try: # inherit deserialize method from Fields class return field.deserialize(value, attr, data, **kwargs) # if error, add error message to error list except ValidationError as error: errors.append(error.messages) raise ValidationError(errors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_deserialize (self):\n pass", "def deserialize(self, data):", "def deserialize(self, data, schema, **kwargs):\n return self.serializer.load(data, schema, **kwargs)", "def SplitDataclassField(default: str):\n\n\n class SplitMarshmallowField(fields.Field):\n \"\"\"Custom marshm...
[ "0.6336738", "0.62138134", "0.62059647", "0.5973148", "0.5872674", "0.5866745", "0.58309895", "0.5817072", "0.5784642", "0.5752867", "0.5675256", "0.56737214", "0.5669053", "0.5660375", "0.564848", "0.55696976", "0.55506283", "0.5532286", "0.5526995", "0.5522127", "0.5515528"...
0.6168223
3
Receive a request from the worker work_socket receive a request on this socket timeout if request isn't received by the timeout, raise six.moves.queue.Empty default = blocks forever This polls on both the worker and up_queue sockets and will throw an exception if there is anything available on the upqueue as this indicates that nothing is running.
def recv(self, work_socket, timeout=None): poller = zmq.Poller() poller.register(self.up_queue_recv_socket, zmq.POLLIN) poller.register(work_socket, zmq.POLLIN) for socket, state in poller.poll(timeout): if socket == self.up_queue_recv_socket and state == zmq.POLLIN: result, e = self.up_queue.get() if e is not None: raise e else: raise cellprofiler_core.pipeline.event.CancelledException( "Unexpected exit during recv" ) if socket == work_socket and state == zmq.POLLIN: return cellprofiler_core.utilities.zmq.communicable.Communicable.recv( work_socket ) raise six.moves.queue.Empty
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_request_thread(self):\n while True:\n try:\n request, client_address = self._request_queue.get(\n timeout=self.timeout_on_get,\n )\n except Queue.Empty:\n # You wouldn't believe how much crap this can end up le...
[ "0.6019085", "0.6004343", "0.58936167", "0.5832211", "0.5812502", "0.58101034", "0.5788062", "0.5670236", "0.5595741", "0.55511177", "0.55298686", "0.54770106", "0.54572827", "0.5441434", "0.5433049", "0.54328305", "0.5424303", "0.5412369", "0.5375704", "0.5372488", "0.534302...
0.7843548
0
Execute a closure on the AnalysisWorker thread fn closure to execute Returns the function's result or throws whatever exception was thrown by the function.
def execute(self, fn, *args, **kwargs): self.ex(fn, *args, **kwargs) return self.ecute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply(self, external_callable, *args, **kwargs):\n self.work_request_queue.put((external_callable, args, kwargs))\n return self.result_queue.get()", "async def call(fn: Callable, *args, **kwargs) -> Any:\n async with websockets.connect(WS_SERVER_URI) as websocket:\n\n task = serialize...
[ "0.58918166", "0.5804437", "0.5800247", "0.57977927", "0.5780088", "0.5774668", "0.56417096", "0.56402177", "0.5606427", "0.5577625", "0.5576409", "0.5576409", "0.55548936", "0.5543454", "0.5538874", "0.55007344", "0.5496635", "0.5471326", "0.54556096", "0.54122365", "0.54101...
0.5575183
12
Do the first part of a functional execution
def ex(self, fn, *args, **kwargs): if len(args) == 0 and len(kwargs) == 0: self.down_queue.put(fn) else: def closure(): return fn(*args, **kwargs) self.down_queue.put(closure)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def firstFunction(self):", "def run_one_step(self):\n pass", "def do_begin(begin):\n if begin:\n do_action(begin)", "def step(self):\n self.function()", "def do_twice(f):\n f()\n f()", "def do_twice(f):\n f()\n f()", "def complete_run():\n pass", "def return_fir...
[ "0.67399466", "0.6280568", "0.587187", "0.5847407", "0.5788252", "0.5788252", "0.57825094", "0.57793474", "0.573395", "0.57085407", "0.56923556", "0.5653058", "0.56507367", "0.56455547", "0.56219184", "0.5605289", "0.5575513", "0.55392635", "0.5532281", "0.5517023", "0.550593...
0.0
-1
Retrieve the results of self.ex()
def ecute(self): msg = self.up_queue_recv_socket.recv() result, e = self.up_queue.get() if e is not None: raise e return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def results(self):\r\n pass", "def get_results(self):\n return self.result", "def _get_result(self):\r\n \r\n return self._result", "def results(self):\n pass", "def getResults():", "def result(self):", "def result(self):", "def result(self):\n if self._resul...
[ "0.6693895", "0.66713417", "0.66669464", "0.6630863", "0.66149294", "0.6539933", "0.6539933", "0.64237297", "0.6279126", "0.6253117", "0.6223495", "0.62201995", "0.6201204", "0.6201204", "0.607676", "0.6055973", "0.6055973", "0.6055973", "0.60555613", "0.598526", "0.598526", ...
0.0
-1
Artificially set up the worker's work socket This sets self.aw.work_socket so that methods other than "run" can be tested in the worker.
def set_work_socket(self): self.analysis_id = uuid.uuid4().hex def do_set_work_socket(aw): aw.work_socket = cellprofiler_core.constants.worker.the_zmq_context.socket( zmq.REQ ) aw.work_socket.connect(self.work_addr) aw.work_request_address = self.work_addr aw.current_analysis_id = self.analysis_id self.awthread.execute(do_set_work_socket, self.awthread.aw)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self) -> None:\n self.running = True\n self.listen()\n self.start_workers()\n\n # Send server socket to workers.\n assert self.socket is not None\n for work_queue in self.work_queues:\n work_queue[0].send(self.family)\n send_handle(work_queu...
[ "0.74050635", "0.6771996", "0.66032565", "0.64634144", "0.6445292", "0.62212235", "0.6142211", "0.6116784", "0.6084497", "0.6056881", "0.5957323", "0.59568125", "0.5929769", "0.5915095", "0.5910893", "0.5893372", "0.5853307", "0.5753932", "0.5749135", "0.57407844", "0.5719958...
0.8267066
0
Announce the work address until we get some sort of a request
def send_announcement_get_work_request(self): self.analysis_id = uuid.uuid4().hex while True: self.announce_socket.send_json(((self.analysis_id, self.work_addr),)) try: return self.awthread.recv(self.work_socket, 250) except six.moves.queue.Empty: continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def request_work(self):\n self.workRequested.emit()", "def make_work_request(self):\n request = StoreRequest()\n self.bb_client.read_wait(request, self.handle_request)", "def answer_waiting_call(self) -> None:", "def do_work(self):", "async def send_referral(self) -> None:\n # p...
[ "0.5951986", "0.5589441", "0.5423254", "0.53269106", "0.52976644", "0.52254903", "0.52201986", "0.520611", "0.52030164", "0.5180417", "0.51489556", "0.5138724", "0.51302093", "0.5128945", "0.510327", "0.50935763", "0.50749195", "0.5074746", "0.5072681", "0.505735", "0.5051909...
0.6637995
0
Get an appropriately initialized measurements structure for the good pipeline
def get_measurements_for_good_pipeline(nimages=1, group_numbers=None): import cellprofiler_core path = os.path.abspath( os.path.join( os.path.dirname(cellprofiler_core.__file__), "..", "tests/data/ExampleSBSImages", ) ) # path = os.path.join(tests.modules.example_images_directory(), "ExampleSBSImages") m = cellprofiler_core.measurement.Measurements() if group_numbers is None: group_numbers = [1] * nimages group_indexes = [1] last_group_number = group_numbers[0] group_index = 1 for group_number in group_numbers: if group_number == last_group_number: group_index += 1 else: group_index = 1 group_indexes.append(group_index) for i in range(1, nimages + 1): filename = "Channel2-%02d-%s-%02d.tif" % ( i, "ABCDEFGH"[int((i - 1) / 12)], ((i - 1) % 12) + 1, ) url = cellprofiler_core.utilities.pathname.pathname2url( os.path.join(path, filename) ) m[ cellprofiler_core.constants.measurement.IMAGE, cellprofiler_core.constants.measurement.C_FILE_NAME + "_DNA", i, ] = filename m[ cellprofiler_core.constants.measurement.IMAGE, cellprofiler_core.constants.measurement.C_PATH_NAME + "_DNA", i, ] = path m[ cellprofiler_core.constants.measurement.IMAGE, cellprofiler_core.constants.measurement.C_URL + "_DNA", i, ] = url m[ cellprofiler_core.constants.measurement.IMAGE, cellprofiler_core.constants.measurement.GROUP_NUMBER, i, ] = group_numbers[i - 1] m[ cellprofiler_core.constants.measurement.IMAGE, cellprofiler_core.constants.measurement.GROUP_INDEX, i, ] = group_indexes[i - 1] jblob = javabridge.run_script( """ importPackage(Packages.org.cellprofiler.imageset); importPackage(Packages.org.cellprofiler.imageset.filter); var imageFile=new ImageFile(new java.net.URI(url)); var imageFileDetails = new ImageFileDetails(imageFile); var imageSeries=new ImageSeries(imageFile, 0); var imageSeriesDetails = new ImageSeriesDetails(imageSeries, imageFileDetails); var imagePlane=new ImagePlane(imageSeries, 0, ImagePlane.ALWAYS_MONOCHROME); var ipd = new ImagePlaneDetails(imagePlane, imageSeriesDetails); 
var stack = ImagePlaneDetailsStack.makeMonochromeStack(ipd); var stacks = java.util.Collections.singletonList(stack); var keys = java.util.Collections.singletonList(imageNumber); var imageSet = new ImageSet(stacks, keys); imageSet.compress(java.util.Collections.singletonList("DNA"), null); """, dict(url=url, imageNumber=str(i)), ) blob = javabridge.get_env().get_byte_array_elements(jblob) m[ cellprofiler_core.constants.measurement.IMAGE, cellprofiler_core.modules.namesandtypes.M_IMAGE_SET, i, blob.dtype, ] = blob pipeline = cellprofiler_core.pipeline.Pipeline() pipeline.loadtxt(six.moves.StringIO(GOOD_PIPELINE)) pipeline.write_pipeline_measurement(m) return m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def measurements(self) -> NONEARRAY:\n pass", "def getMeasures():", "def extract_specs(self):\n vDeflection_unit = \"lcd-info.{}.conversion-set.conversion.force.scaling.unit.unit\".format(\n self.channel_numbers[\"vDeflection\"])\n self.units[\"vDeflection\"] = self.general[vDef...
[ "0.63832515", "0.6135345", "0.6120204", "0.60634154", "0.605351", "0.5972833", "0.5960781", "0.5844192", "0.5828747", "0.5825251", "0.582229", "0.5809576", "0.5800582", "0.5798093", "0.5785926", "0.57514644", "0.5689381", "0.56682414", "0.5606564", "0.5601516", "0.5559532", ...
0.0
-1
Returns the X window id of the window whose title matches regex `title_regex`
def get_window_id(title_regex): cmd = "wmctrl -l" logit(cmd) output = subprocess.check_output(cmd.split()).decode("utf-8").splitlines() logit(output) for line in output: w_id = line.split()[0] title = line.split(" ", 3)[3] if re.match(title_regex, title): return w_id raise Exception(f"Could not find window with title matching regex: {title_regex}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetProcessIdByWindowTitle(window_title: str) -> int:\n result = ctypes.c_uint32(0)\n\n string_buffer_size = len(window_title) + 2 # (+2) for the next possible character of a title and the NULL char.\n string_buffer = ctypes.create_unicode_buffer(string_buffer_size)\n\n def callback(hwnd, size):\n ...
[ "0.67164993", "0.62230086", "0.607506", "0.6065819", "0.60467404", "0.58529276", "0.5789825", "0.57747257", "0.5757205", "0.57141364", "0.5661283", "0.56199354", "0.56173295", "0.5543509", "0.55334383", "0.548825", "0.5437465", "0.5373055", "0.5367433", "0.5316267", "0.531211...
0.8586612
0
Ensure we can create a new user if we have the permission.
def test_create_new_student_user(self): data = { 'username': 'John', 'email': 'John@mailinator.com', 'password': 'test123!', 'phone': '1234567890', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { 'name': "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(json.loads(response.content)['phone'], '1234567890') user = User.objects.get(email="John@mailinator.com") activation_token = ActionToken.objects.filter( user=user, type='account_activation', ) self.assertEqual(1, len(activation_token))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)", "def test_if_not_c...
[ "0.7776861", "0.72906166", "0.7237606", "0.7017382", "0.69414604", "0.6917588", "0.6917588", "0.6917588", "0.67925555", "0.6755567", "0.6740731", "0.6710452", "0.67034537", "0.6681693", "0.6673598", "0.6666447", "0.6623244", "0.66229874", "0.66059834", "0.65804935", "0.656421...
0.0
-1
Ensure we can create a new user if we have the permission.
def test_create_new_user(self): data = { 'username': 'John', 'email': 'John@mailinator.com', 'password': 'test123!', 'phone': '1234567890', 'first_name': 'Chuck', 'last_name': 'Norris', 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(json.loads(response.content)['phone'], '1234567890') user = User.objects.get(email="John@mailinator.com") activation_token = ActionToken.objects.filter( user=user, type='account_activation', ) self.assertEqual(1, len(activation_token))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create(self, request, *args, **kwargs):\n user = request.user\n if user.is_authenticated and not user.has_perm(\"users.add_user\"):\n self.permission_denied(request, message=_(\"You cannot create users.\"))\n return super().create(request, *args, **kwargs)", "def test_if_not_c...
[ "0.7776861", "0.72906166", "0.7237606", "0.7017382", "0.69414604", "0.6917588", "0.6917588", "0.6917588", "0.67925555", "0.6755567", "0.6740731", "0.6710452", "0.67034537", "0.6681693", "0.6673598", "0.6666447", "0.6623244", "0.66229874", "0.66059834", "0.65804935", "0.656421...
0.0
-1
Ensure we can't create a student user without academic_ fields.
def test_create_new_student_user_missing_field(self): data = { 'email': 'John@mailinator.com', 'password': 'test123!', } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_201_CREATED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_can_not_create_education_instance_without_user(self):\n\t\twith self.assertRaises(\n\t\t\tIntegrityError,\n\t\t\tmsg = 'Should raise IntegrityError if user not provided.'\n\t\t\t):\n\n\t\t\tEducation.objects.create(\n\t\t\t\tschool_name=self.school_name,\n\t\t\t\tcourse_name=self.course_name,\n\t\t\t\tsta...
[ "0.67956454", "0.63918", "0.636316", "0.6349266", "0.63289773", "0.6238365", "0.623791", "0.6165642", "0.61546856", "0.61181474", "0.6080576", "0.60766065", "0.60527575", "0.60310954", "0.60178196", "0.6006018", "0.59811884", "0.5958835", "0.59572184", "0.59439963", "0.594344...
0.64296496
1
Ensure we can't create a new user with blank fields
def test_create_new_user_blank_fields(self): self.maxDiff = None data = { 'email': '', 'password': '', } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) content = { 'email': ['This field may not be blank.'], 'password': ['This field may not be blank.'], } self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_blank(self):\n form_data = {\n 'username': 'testuser',\n 'password1': '',\n 'password2': ''\n }\n form = StrictUserCreationForm(data=form_data)\n self.assertFalse(form.is_valid())", "def test_create_user_missing_fields(self):\n payload ...
[ "0.7712952", "0.7632216", "0.7593471", "0.726332", "0.7243656", "0.7158078", "0.7157128", "0.7000987", "0.6996104", "0.6994467", "0.6968338", "0.69449747", "0.6941492", "0.69386184", "0.69285846", "0.69195503", "0.6915723", "0.6893214", "0.68801963", "0.687974", "0.6873898", ...
0.7567398
3
Ensure we can't create a new user without required fields
def test_create_new_user_missing_fields(self): data = {} response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) content = { 'email': ['This field is required.'], 'password': ['This field is required.'] } self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user_missing_fields(self):\n payload = {\n 'email': 'email',\n 'password': ''\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_creating_a_new_user_without_email(sel...
[ "0.7630348", "0.73862165", "0.73674697", "0.73465914", "0.72970724", "0.72438395", "0.7208631", "0.7136524", "0.7124549", "0.7122272", "0.7103522", "0.7093455", "0.7093455", "0.7093455", "0.7093455", "0.7093455", "0.70929193", "0.7077152", "0.707205", "0.7072001", "0.70619893...
0.71747696
7
Ensure we can't create a new user with a weak password
def test_create_new_user_weak_password(self): data = { 'username': 'John', 'email': 'John@mailinator.com', 'password': '19274682736', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { "name": "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) content = {"password": ['This password is entirely numeric.']} self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user_password_too_short(self):\n res = self.client.post(CREATE_USER_URL, {\n **self.mock_user,\n 'password': 'pw'\n })\n\n db_user = get_user_model().objects.filter(\n email=self.mock_user['email']\n )\n\n self.assertEqual(res.stat...
[ "0.7448158", "0.7334659", "0.73337275", "0.7231263", "0.7226161", "0.72220945", "0.714039", "0.7128195", "0.7127693", "0.7122859", "0.710582", "0.7076057", "0.7073139", "0.70351803", "0.6981093", "0.6925133", "0.69214153", "0.6903709", "0.6858862", "0.68386996", "0.6816049", ...
0.6505213
48
Ensure we can't create a new user with an invalid phone number
def test_create_new_user_invalid_phone(self): data = { 'username': 'John', 'email': 'John@mailinator.com', 'password': '1fasd6dq#$%', 'phone': '12345', 'other_phone': '23445dfg', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { "name": "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) content = { "phone": ['Invalid format.'], "other_phone": ['Invalid format.'] } self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_phone_number(self, phone_number):\n if User.objects.filter(phone_number=phone_number).exists():\n raise serializers.ValidationError('Phone Number already registered.')\n return phone_number", "def test_new_user_invalid_email(self):\n with self.assertRaises(ValueError)...
[ "0.7145561", "0.70290124", "0.6975835", "0.68602705", "0.68565595", "0.6848795", "0.684489", "0.6838828", "0.68144745", "0.6813772", "0.6810992", "0.6809171", "0.68087786", "0.68087786", "0.68087786", "0.68087786", "0.68087786", "0.6709067", "0.67028594", "0.6652703", "0.6623...
0.7310096
0
Ensure we can't create a new user with an already existing email
def test_create_new_user_duplicate_email(self): data = { 'username': 'John', 'email': 'John@mailinator.com', 'password': 'test123!', 'phone': '1234567890', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { "name": "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } user = UserFactory() user.email = 'JOHN@mailinator.com' user.save() response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) content = { 'email': [ "An account for the specified email address already exists." ] } self.assertEqual(json.loads(response.content), content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_private_create_user_without_email(self):\n with pytest.raises(ValueError) as exinfo:\n EmailUser.objects._create_user(None, None, False, False)\n self.assertIn('email must be set', str(exinfo.value))", "def test_create_use_with_invalid_email(self):\n with self.assertRaise...
[ "0.8002483", "0.7915221", "0.78696436", "0.78556156", "0.78552705", "0.78552043", "0.7842394", "0.7826193", "0.7826193", "0.7826193", "0.7826193", "0.7826193", "0.78208935", "0.78188866", "0.78091335", "0.78064054", "0.77855194", "0.77645355", "0.7674032", "0.76636356", "0.76...
0.68754745
69
Ensure that the activation email is sent when user signs up.
def test_create_user_activation_email(self): data = { 'username': 'John', 'email': 'John@mailinator.com', 'password': 'test123!', 'phone': '1234567890', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { "name": "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(json.loads(response.content)['phone'], '1234567890') user = User.objects.get(email="John@mailinator.com") activation_token = ActionToken.objects.filter( user=user, type='account_activation', ) self.assertFalse(user.is_active) self.assertEqual(1, len(activation_token)) # Test that one message was sent: self.assertEqual(len(mail.outbox), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Activat...
[ "0.72729725", "0.7271193", "0.72603685", "0.7180653", "0.71139836", "0.710328", "0.708832", "0.70866233", "0.70748895", "0.6895029", "0.6886898", "0.68769574", "0.6810442", "0.68082666", "0.675095", "0.6744684", "0.67334735", "0.6731933", "0.6719185", "0.6693199", "0.6619442"...
0.5958555
89
Ensure that the user is notified that no email was sent.
def test_create_user_activation_email_failure(self, send): data = { 'username': 'John', 'email': 'John@mailinator.com', 'password': 'test123!', 'phone': '1234567890', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { "name": "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(json.loads(response.content)['phone'], '1234567890') user = User.objects.get(email="John@mailinator.com") activation_token = ActionToken.objects.filter( user=user, type='account_activation', ) self.assertFalse(user.is_active) self.assertEqual(1, len(activation_token)) # Test that no email was sent: self.assertEqual(len(mail.outbox), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n blank_contact = self.create_contact(data={'email': ''})\n self.group.contacts.add(bla...
[ "0.7036216", "0.6975554", "0.6825931", "0.67460984", "0.6742591", "0.6721783", "0.66891694", "0.65338993", "0.65338993", "0.65253884", "0.6507732", "0.6412952", "0.63811535", "0.6334888", "0.6278315", "0.62352383", "0.6226457", "0.6224285", "0.61838245", "0.61838245", "0.6182...
0.0
-1
Ensure that the user is automatically activated.
def test_create_user_auto_activate(self, services): data = { 'username': 'John', 'email': 'John@mailinator.com', 'password': 'test123!', 'phone': '1234567890', 'first_name': 'Chuck', 'last_name': 'Norris', 'university': { "name": "random_university" }, 'academic_field': {'name': "random_field"}, 'academic_level': {'name': "random_level"}, 'gender': "M", 'birthdate': "1999-11-11", } response = self.client.post( reverse('user-list'), data, format='json', ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(json.loads(response.content)['phone'], '1234567890') user = User.objects.get(email="John@mailinator.com") activation_token = ActionToken.objects.filter( user=user, type='account_activation', ) self.assertTrue(user.is_active) self.assertEqual(1, len(activation_token)) # Test that no email was sent: self.assertEqual(len(mail.outbox), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activate_user(self, user):\n if not user.active:\n user.active = True\n return True\n return False", "def activate_user(self, user):\n if not user.active:\n user.active = True\n # noinspection PyUnresolvedReferences\n self.save(user)...
[ "0.78858703", "0.7588761", "0.7308194", "0.72612184", "0.7234818", "0.72069454", "0.7042805", "0.7022283", "0.6872894", "0.67527574", "0.66841036", "0.6675689", "0.6663803", "0.6635442", "0.66318125", "0.6628338", "0.6611106", "0.6603589", "0.658556", "0.6558611", "0.65234345...
0.62174755
53
Ensure we can list all users.
def test_list_users(self): self.client.force_authenticate(user=self.admin) response = self.client.get(reverse('user-list')) self.assertEqual(json.loads(response.content)['count'], 2) # Users are ordered alphabetically by email first_user = json.loads(response.content)['results'][0] second_user = json.loads(response.content)['results'][1] self.assertEqual(first_user['email'], self.admin.email) membership = { 'url': 'http://testserver/memberships/' + str(self.membership.id), 'id': self.membership.id, 'name': 'basic_membership', 'available': True, 'available_on_product_types': [], 'available_on_products': [], 'options': [], 'picture': None, 'price': '50.00', 'details': '1-Year student membership', 'duration': '365 00:00:00', 'available_on_retreat_types': [], 'academic_levels': ['http://testserver/academic_levels/' + str(self.academic_level.id)] } self.assertEqual( remove_translation_fields(second_user['membership']), membership ) # Check the system doesn't return attributes not expected attributes = [ 'id', 'url', 'email', 'first_name', 'last_name', 'is_active', 'phone', 'other_phone', 'is_superuser', 'is_staff', 'university', 'last_login', 'date_joined', 'academic_level', 'academic_field', 'gender', 'language', 'birthdate', 'groups', 'user_permissions', 'tickets', 'membership', 'membership_end', 'city', 'personnal_restrictions', 'academic_program_code', 'faculty', 'student_number', 'volunteer_for_workplace', 'hide_newsletter', 'is_in_newsletter', 'number_of_free_virtual_retreat', 'membership_end_notification', 'get_number_of_past_tomatoes', 'get_number_of_future_tomatoes', 'last_acceptation_terms_and_conditions', 'tomato_field_matrix', 'current_month_tomatoes', ] for key in first_user.keys(): self.assertTrue( key in attributes, 'Attribute "{0}" is not expected but is ' 'returned by the system.'.format(key) ) attributes.remove(key) # Ensure the system returns all expected attributes self.assertTrue( len(attributes) == 0, 'The system failed to return some ' 'attributes 
: {0}'.format(attributes) ) self.assertEqual(response.status_code, status.HTTP_200_OK)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users():", "def list_users(self):\n raise NotImplementedError", "def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)", "def test_admin_user_list_all_users(self):\n response = self.client.get...
[ "0.78882915", "0.77124584", "0.76391757", "0.74921876", "0.74234813", "0.73127735", "0.72611696", "0.7228305", "0.721806", "0.71892637", "0.71163815", "0.70789593", "0.7072042", "0.7043019", "0.7032264", "0.70049846", "0.69722825", "0.6963775", "0.6953116", "0.6939012", "0.69...
0.0
-1
Ensure we can list all users.
def test_list_users_with_search(self): self.client.force_authenticate(user=self.admin) response = self.client.get(reverse('user-list') + '?search=chuck') self.assertEqual(json.loads(response.content)['count'], 1) # Users are ordered alphabetically by email first_user = json.loads(response.content)['results'][0] self.assertEqual(first_user['email'], self.admin.email) # Check the system doesn't return attributes not expected attributes = [ 'id', 'url', 'email', 'first_name', 'last_name', 'is_active', 'phone', 'other_phone', 'is_superuser', 'is_staff', 'university', 'last_login', 'date_joined', 'academic_level', 'academic_field', 'gender', 'language', 'birthdate', 'groups', 'user_permissions', 'tickets', 'membership', 'membership_end', 'city', 'personnal_restrictions', 'academic_program_code', 'faculty', 'student_number', 'volunteer_for_workplace', 'hide_newsletter', 'is_in_newsletter', 'number_of_free_virtual_retreat', 'membership_end_notification', 'get_number_of_past_tomatoes', 'get_number_of_future_tomatoes', 'last_acceptation_terms_and_conditions', 'tomato_field_matrix', 'current_month_tomatoes', ] for key in first_user.keys(): self.assertTrue( key in attributes, 'Attribute "{0}" is not expected but is ' 'returned by the system.'.format(key) ) attributes.remove(key) # Ensure the system returns all expected attributes self.assertTrue( len(attributes) == 0, 'The system failed to return some ' 'attributes : {0}'.format(attributes) ) self.assertEqual(response.status_code, status.HTTP_200_OK)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users():", "def list_users(self):\n raise NotImplementedError", "def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)", "def test_admin_user_list_all_users(self):\n response = self.client.get...
[ "0.78882915", "0.77124584", "0.76391757", "0.74921876", "0.74234813", "0.73127735", "0.72611696", "0.7228305", "0.721806", "0.71892637", "0.71163815", "0.70789593", "0.7072042", "0.7043019", "0.7032264", "0.70049846", "0.69722825", "0.6963775", "0.6953116", "0.6939012", "0.69...
0.0
-1
Ensure we can't list users without authentication.
def test_list_users_without_authenticate(self): response = self.client.get(reverse('user-list')) content = {"detail": "Authentication credentials were not provided."} self.assertEqual(json.loads(response.content), content) self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_users_without_permissions(self):\n self.client.force_authenticate(user=self.user)\n\n response = self.client.get(reverse('user-list'))\n\n content = {\n 'detail': 'You do not have permission to perform this action.'\n }\n self.assertEqual(json.loads(respo...
[ "0.7592878", "0.7291725", "0.7268098", "0.72500527", "0.7094657", "0.6990975", "0.6927358", "0.69206643", "0.6902929", "0.68474746", "0.67786574", "0.6738562", "0.67326707", "0.67326707", "0.6700241", "0.664444", "0.6609902", "0.6586935", "0.6504748", "0.6471352", "0.64705676...
0.75815666
1
Ensure we can't list users without permissions.
def test_list_users_without_permissions(self): self.client.force_authenticate(user=self.user) response = self.client.get(reverse('user-list')) content = { 'detail': 'You do not have permission to perform this action.' } self.assertEqual(json.loads(response.content), content) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_admin_user_list_all_users_permission_denied(self):\n self.client.logout()\n self.client.login(\n username=self.invalid_user.username,\n password=self.invalid_user.password\n )\n response = self.client.get(CONSTS.USER_ADMIN_LIST)\n self.assertEqual(r...
[ "0.7638809", "0.69624966", "0.6810429", "0.6799262", "0.67936945", "0.6770797", "0.6763287", "0.6763287", "0.66203946", "0.66203946", "0.6566771", "0.65271413", "0.6525158", "0.64896446", "0.64557964", "0.6414002", "0.63701904", "0.6349665", "0.63055766", "0.62822396", "0.626...
0.7912165
0
Ensure we can send a notification email when a membership is about to end
def test_send_notification_end_membership(self): fixed_time = timezone.now() end_time_membership = fixed_time + relativedelta(days=28) self.user.membership = self.membership self.user.membership_end = end_time_membership self.user.save() with mock.patch( 'store.serializers.timezone.now', return_value=fixed_time ): response = self.client.get( reverse('user-execute-automatic-email-membership-end') ) content = { 'stop': False, 'email_send_count': 1 } self.assertEqual( response.status_code, status.HTTP_200_OK, response.content ) self.assertEqual( json.loads(response.content), content ) self.assertEqual(len(mail.outbox), 1) self.user.refresh_from_db() self.assertEqual(self.user.membership_end_notification, fixed_time) with mock.patch( 'store.serializers.timezone.now', return_value=fixed_time ): response = self.client.get( reverse('user-execute-automatic-email-membership-end') ) content = { 'stop': False, 'email_send_count': 0 } self.assertEqual( response.status_code, status.HTTP_200_OK, response.content ) self.assertEqual( json.loads(response.content), content ) # no new mail self.assertEqual(len(mail.outbox), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_successful_subscriptions_notifies_pm(self) -> None:\n invitee = self.example_user(\"iago\")\n\n current_stream = self.get_streams(invitee)[0]\n invite_streams = self.make_random_stream_names([current_stream])[:1]\n self.common_subscribe_to_streams(\n invitee,\n ...
[ "0.631417", "0.61985207", "0.61863524", "0.6167759", "0.6167759", "0.61630905", "0.6139909", "0.6068332", "0.60387355", "0.60372734", "0.6013381", "0.60123867", "0.6006325", "0.59600574", "0.5957167", "0.5927392", "0.5894092", "0.5890335", "0.5887974", "0.5873129", "0.5872538...
0.7043496
0
Ensure we can resend an activation email on demand
def test_resend_activation_email(self): data = { 'email': self.user.email, } response = self.client.post( reverse('user-resend-activation-email'), data, format='json', ) self.assertEqual( response.status_code, status.HTTP_200_OK, response.content ) self.assertEqual( response.content, b'', ) self.assertEqual(len(mail.outbox), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_resend_activation_email(self):\n user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), send_email=False, **self.user_info)\n self.assertEqual(len(mail.outbox), 0)\n\n profile = self.registration_profile.objects.get(user=user)\n ...
[ "0.76781446", "0.75325173", "0.7526243", "0.73247623", "0.723994", "0.72196436", "0.7116841", "0.69973254", "0.68883735", "0.68695354", "0.68685806", "0.6830412", "0.67408323", "0.66736573", "0.66241586", "0.6598078", "0.65814126", "0.65538317", "0.6545949", "0.6444337", "0.6...
0.7207715
6
Ensure admin can credit tickets to a user
def test_credit_ticket_as_admin(self): user = UserFactory() self.assertEqual(user.tickets, 1) nb_tickets_to_add = 5 data = { 'nb_tickets': nb_tickets_to_add, } self.client.force_authenticate(user=self.admin) response = self.client.post( reverse( 'user-credit-tickets', kwargs={'pk': user.id}, ), data, format='json', ) self.assertEqual( response.status_code, status.HTTP_200_OK, ) self.assertEqual( User.objects.get(pk=user.id).tickets, 1 + nb_tickets_to_add )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_credit_ticket_as_user(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.user)\n response = self.client.post(\n ...
[ "0.76057625", "0.7198469", "0.663993", "0.65313613", "0.6304915", "0.6245688", "0.62093043", "0.60432243", "0.5984338", "0.59788305", "0.5956783", "0.5948479", "0.5885243", "0.58366257", "0.5809721", "0.5722764", "0.569822", "0.5689488", "0.5654343", "0.5625085", "0.56015855"...
0.8034523
0
Ensure user can't credit tickets to a user
def test_credit_ticket_as_user(self): user = UserFactory() self.assertEqual(user.tickets, 1) nb_tickets_to_add = 5 data = { 'nb_tickets': nb_tickets_to_add, } self.client.force_authenticate(user=self.user) response = self.client.post( reverse( 'user-credit-tickets', kwargs={'pk': user.id}, ), data, format='json', ) self.assertEqual( response.status_code, status.HTTP_403_FORBIDDEN, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_credit_ticket_negative_int(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = -5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.p...
[ "0.69946736", "0.6597223", "0.64734346", "0.6470661", "0.63877785", "0.6298837", "0.62688833", "0.62648696", "0.62245417", "0.61735904", "0.6159603", "0.6098433", "0.6039654", "0.60156566", "0.600577", "0.5985433", "0.5973899", "0.5934116", "0.59265006", "0.59215", "0.5908241...
0.6947074
1
Ensure an admin can't credit an invalid (non-integer) number of tickets to a user
def test_credit_ticket_not_int(self): user = UserFactory() self.assertEqual(user.tickets, 1) nb_tickets_to_add = 'this is not an int' data = { 'nb_tickets': nb_tickets_to_add, } self.client.force_authenticate(user=self.admin) response = self.client.post( reverse( 'user-credit-tickets', kwargs={'pk': user.id}, ), data, format='json', ) self.assertEqual( response.status_code, status.HTTP_400_BAD_REQUEST, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_credit_ticket_as_admin(self):\n user = UserFactory()\n self.assertEqual(user.tickets, 1)\n nb_tickets_to_add = 5\n data = {\n 'nb_tickets': nb_tickets_to_add,\n }\n\n self.client.force_authenticate(user=self.admin)\n response = self.client.post(\...
[ "0.7239014", "0.6919179", "0.6916417", "0.662821", "0.65123874", "0.6383882", "0.6308077", "0.617782", "0.6163303", "0.608552", "0.59519124", "0.5946032", "0.5904957", "0.58991903", "0.58230376", "0.5820032", "0.58199584", "0.5809911", "0.58059365", "0.576321", "0.5720081", ...
0.6576258
4