query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Generate a simple plot of the test and training learning curve.
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, train_sizes=np.linspace(.1, 1.0, 20), verbose=1, n_jobs=30, ): fig = plt.figure() plt.title(title) if ylim is not None: plt.ylim(*ylim) plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score") plt.legend(loc="best") return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_learning_curve(model, X_train, X_test, y_train, y_test):\n\n m, train_scores, valid_scores = learning_curve(estimator = model, \n X = X_train, y = y_train.ravel(), train_sizes = np.linspace(0.1,1.0, 80))\n\n train_cv_err = np.mean(train_scores, axis=...
[ "0.77526236", "0.765993", "0.7510692", "0.7418097", "0.74108005", "0.7352996", "0.7338834", "0.71422535", "0.70823896", "0.70736265", "0.7060562", "0.704026", "0.7026929", "0.7022064", "0.7012828", "0.6985284", "0.6938504", "0.6930585", "0.6926712", "0.692088", "0.6913427", ...
0.6493104
68
Modified `get_state` thread implementation.
async def get_discussion(context, author:str, permlink:str, observer:str=''): db = context['db'] author = valid_account(author) permlink = valid_permlink(permlink) observer = valid_account(observer, allow_empty=True) sql = "SELECT * FROM bridge_get_discussion(:author,:permlink,:observer)" rows = await db.query_all(sql, author=author, permlink=permlink, observer=observer) if not rows or len(rows) == 0: return {} root_id = rows[0]['id'] all_posts = {} root_post = _bridge_post_object(rows[0]) root_post['active_votes'] = await find_votes_impl(db, rows[0]['author'], rows[0]['permlink'], VotesPresentation.BridgeApi) root_post = append_statistics_to_post(root_post, rows[0], False) root_post['replies'] = [] all_posts[root_id] = root_post parent_to_children_id_map = {} for index in range(1, len(rows)): parent_id = rows[index]['parent_id'] if parent_id not in parent_to_children_id_map: parent_to_children_id_map[parent_id] = [] parent_to_children_id_map[parent_id].append(rows[index]['id']) post = _bridge_post_object(rows[index]) post['active_votes'] = await find_votes_impl(db, rows[index]['author'], rows[index]['permlink'], VotesPresentation.BridgeApi) post = append_statistics_to_post(post, rows[index], False) post['replies'] = [] all_posts[post['post_id']] = post for key in parent_to_children_id_map: children = parent_to_children_id_map[key] post = all_posts[key] for child_id in children: post['replies'].append(_ref(all_posts[child_id])) #result has to be in form of dictionary of dictionaries {post_ref: post} results = {} for key in all_posts: post_ref = _ref(all_posts[key]) results[post_ref] = all_posts[key] return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_state(self):", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n return self.__state", "def _get_state(self):\n r...
[ "0.71893764", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806", "0.71459806"...
0.0
-1
Downloads all files from the SugarSync account to the provided output folder
def download_files(self, output, replace=False): try: # Create output directory # self._output_path = os.path.join(output, # "sugardl_{}".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))) # os.makedirs(self._output_path) # Just write to the provided output directory self._output_path = output ##### # Authenticate: getting a refresh token, then an access token ##### print("Authenticating..") self._get_refresh_token() self._get_access_token() ##### # User Info ##### self._get_user_info() ##### # Get all folder metadata prior to download ##### self._get_sync_folders() ##### # Download: Recursively download all syncfolder contents ##### for folder in self._folder_metadata: print("== SYNC FOLDER DOWNLOAD: {} ==".format(folder['displayName'])) self._download_folder_contents(folder['contents'], "{}/{}".format(self._output_path, folder['displayName']), start_idx=0, replace=replace) print("") except Exception as e: print("Error in download_files: {}".format(traceback.print_exc())) return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download(urls, dest_folder):\n pass", "def download_output_files(self):\n bucket_list = self.bucket.list(\"output/part\")\n for bucket_entry in bucket_list:\n key_string = str(bucket_entry.key)\n # check if file exists locally, if not: download it\n if not os.p...
[ "0.6863336", "0.6838265", "0.6813811", "0.6791622", "0.6458129", "0.64210194", "0.63175696", "0.6252998", "0.62363803", "0.621832", "0.6204603", "0.6164557", "0.6153396", "0.61488926", "0.6148461", "0.6134311", "0.6120213", "0.60974497", "0.6057297", "0.5996962", "0.5962937",...
0.74477714
0
Retrieves Refresh Token, a prerequisite for the Access Token. Useful so we dont need the user/pass after this
def _get_refresh_token(self): doc = minidom.Document() root = doc.createElement('appAuthorization') doc.appendChild(root) user = doc.createElement('username') user.appendChild(doc.createTextNode(self.username)) root.appendChild(user) pw = doc.createElement('password') pw.appendChild(doc.createTextNode(self.password)) root.appendChild(pw) application = doc.createElement('application') application.appendChild(doc.createTextNode(self.appId)) root.appendChild(application) aki = doc.createElement('accessKeyId') aki.appendChild(doc.createTextNode(self.publicAccessKey)) root.appendChild(aki) pak = doc.createElement('privateAccessKey') pak.appendChild(doc.createTextNode(self.privateAccessKey)) root.appendChild(pak) data = doc.toprettyxml() resp = requests.post(BASE_URL + "app-authorization", data=data, headers=self._default_headers, verify=False) if resp.status_code >= 300: raise Exception("Failed to authorize app: {}".format(resp)) # Save off the refresh token self._refresh_token = resp.headers.get('Location', None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_token(self):\n # basic function to get an access token\n api_response = requests.get(\n self.api_config.get_api_url() + \"authentication/g?username=\" + self.api_config.get_api_username() + \"&password=\" + self.api_config.get_api_password())\n\n if api_response.status_c...
[ "0.79821616", "0.77993184", "0.77876174", "0.7653094", "0.7641783", "0.76409805", "0.7515908", "0.7497555", "0.7450978", "0.74201334", "0.74056983", "0.7358515", "0.73581094", "0.7352093", "0.7338572", "0.7290165", "0.72726715", "0.72268915", "0.72166353", "0.72111404", "0.71...
0.7480263
8
Requests Access Token using the Refresh Token. Access Token is required for all future requests
def _get_access_token(self): self._access_token = None if not self._refresh_token: raise ValueError("Refresh Token not set") doc = minidom.Document() root = doc.createElement('tokenAuthRequest') doc.appendChild(root) aki = doc.createElement('accessKeyId') aki.appendChild(doc.createTextNode(self.publicAccessKey)) root.appendChild(aki) pak = doc.createElement('privateAccessKey') pak.appendChild(doc.createTextNode(self.privateAccessKey)) root.appendChild(pak) rt = doc.createElement('refreshToken') rt.appendChild(doc.createTextNode(self._refresh_token)) root.appendChild(rt) data = doc.toprettyxml() resp = requests.post(BASE_URL + "authorization", data=data, headers=self._default_headers, verify=False) if resp.status_code >= 300: raise Exception("Failed to claim access token: {}".format(resp)) vals = etree_to_dict(ET.XML(resp.content.decode('utf-8'))) self._access_token = resp.headers.get('Location', None) if not self._access_token: raise ValueError("Unable to get access token") self._user_id = os.path.basename(vals.get('authorization').get('user')) # Always set the expiry 30 minutes from now so we dont have to deal with parsing timezones # self._access_token_expiry = dateutil_parser.parse(vals.get('authorization').get('expiration')) self._access_token_expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_access_token(self):\n parameters = {'client_id': self.CLIENT_ID,\n 'auth_code': self.auth_code,\n 'client_secret': self.CLIENT_SECRET,\n 'grant_type': 'authorization_code'}\n url = self.ACCESS_TOKEN_URL % parameters\n data = self._get_refresh_...
[ "0.79307115", "0.78929", "0.77902746", "0.77651143", "0.7737486", "0.7703352", "0.7701342", "0.76643175", "0.76561", "0.7591492", "0.75551724", "0.75300753", "0.75291276", "0.7458949", "0.7428596", "0.74130595", "0.7403479", "0.73890567", "0.73884934", "0.73849046", "0.736639...
0.7272183
23
Retrieves user information to include sync folders
def _get_user_info(self): if not self._refresh_token: raise ValueError("Refresh Token not set") # Add access token to the headers add_headers = dict(self._default_headers) add_headers['Authorization'] = self._access_token resp = requests.get(BASE_URL + "user/{}".format(self._user_id), headers=add_headers, verify=False) if resp.status_code >= 300: raise Exception("Failed to retrieve user info: {}".format(resp)) vals = etree_to_dict(ET.XML(resp.content.decode('utf-8'))) # Print generic user info print("") print("== USER INFO ==") print("Username: {}".format(vals.get('user').get('username'))) print("Nickname: {}".format(vals.get('user').get('nickname'))) print("Usage: {} MB / {} MB".format(int(int(vals.get('user').get('quota').get('usage')) / (1024*1024)), int(int(vals.get('user').get('quota').get('limit')) / (1024*1024)))) print("") # Grab folder ids we care about self._user_sync_folders_url = vals.get('user').get('syncfolders')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_info(self) -> str:\n return self._searcher.get_user_info()", "def get_users_info(): \n \n data = user_obj.get_users_info()\n return data", "def user_info(self):\n response = self.query('user_info')\n return response", "def getUserInfo(self, user):\n return pwd.ge...
[ "0.69573396", "0.68997866", "0.6759884", "0.6710544", "0.6675518", "0.661304", "0.6520475", "0.6491115", "0.6431651", "0.63984233", "0.6313943", "0.63038987", "0.6303114", "0.63017505", "0.6268096", "0.62518907", "0.62275803", "0.61987466", "0.61969614", "0.6171311", "0.61620...
0.7239684
0
Retrieves metadata on all sync folders
def _get_sync_folders(self): if not self._user_sync_folders_url: raise ValueError("User sync folders URL not retrieved") if not self._refresh_token: raise ValueError("Refresh Token not set") # Add access token to the headers add_headers = dict(self._default_headers) add_headers['Authorization'] = self._access_token resp = requests.get(self._user_sync_folders_url, headers=add_headers, verify=False) if resp.status_code >= 300: raise Exception("Failed to claim access token: {}".format(resp)) vals = etree_to_dict(ET.XML(resp.content.decode('utf-8'))) # Print and store relevant sync folder information print("== SYNC FOLDERS ==") for folder in vals.get('collectionContents').get('collection'): print("Folder: {}".format(folder.get('displayName'))) self._folder_metadata.append(folder) print("")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_root_metadata(self):\n r = self._do_request(\n 'get',\n http_server_utils.join_url_components(\n [self._api_drive_endpoint_prefix, 'root']),\n params={'select': 'id,name,fileSystemInfo'})\n return r.json()", "def syncfolder():", "def getFol...
[ "0.648162", "0.60808307", "0.59680235", "0.58834165", "0.58828735", "0.58178836", "0.58129156", "0.57245374", "0.5677313", "0.5626248", "0.5588174", "0.5587416", "0.55705136", "0.55277115", "0.55202806", "0.5497931", "0.54850954", "0.54689896", "0.5424635", "0.5419897", "0.54...
0.7083151
0
Recursively downloads all file content within a folder
def _download_folder_contents(self, folder_uri, relpath, start_idx=0, replace=False): if not self._access_token: raise ValueError("Access Token not set") # Create the folder in the output dir if it doesnt exist if not os.path.exists(relpath): print("Creating folder: {}".format(relpath)) os.makedirs(relpath) # Add access token to the headers add_headers = dict(self._default_headers) add_headers['Authorization'] = self._access_token resp = requests.get(folder_uri, headers=add_headers, params={'start': start_idx}, verify=False) if resp.status_code >= 300: raise Exception("Failed to retrieve folder info: {}".format(resp)) vals = etree_to_dict(ET.XML(resp.content.decode('utf-8'))) # Download all top level files ret_files = vals.get('collectionContents', {}).get('file', list()) if isinstance(ret_files, dict): # Case when there is only a single file files = [] files.append(ret_files) else: files = ret_files for f in files: try: filepath = "{}/{}".format(relpath, f['displayName']) if not replace and os.path.exists(filepath): print("File already exists, skipping {}".format(filepath)) continue print("Downloading: {}".format(filepath)) self._download_file_contents(f, filepath) except: print("Error downloading {}: {}".format(f, traceback.print_exc())) # If there are more than 500 items, recursively call on this same folder but start at start_idx+500 item_count = len(vals.get('collectionContents', {}).get('collection', list())) + len( vals.get('collectionContents', {}).get('file', list())) if item_count >= 500: self._download_folder_contents(folder_uri, relpath, start_idx=start_idx+500, replace=replace) # Download all folders subfolders_ret = vals.get('collectionContents', {}).get('collection', list()) if isinstance(subfolders_ret, dict): # Case when there is only one subfolder subfolders = [] subfolders.append(subfolders_ret) else: subfolders = subfolders_ret for subfolder in subfolders: try: self._download_folder_contents(subfolder['contents'], relpath + "/" + 
subfolder['displayName'], replace=replace) except: print("Error downloading subfolder: {}".format(traceback.print_exc()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_all_files(self, root_url, version):\n file_list = self._http_client.get(root_url + '?ref=refs/tags/' + version)\n for file in file_list.json():\n if file['type'] == 'file':\n download_url = file['download_url']\n download_path = self.get_module_an...
[ "0.72201246", "0.7049657", "0.6965782", "0.6872731", "0.6746397", "0.6685815", "0.6519834", "0.64734966", "0.6453111", "0.64178264", "0.6356167", "0.63462275", "0.6333368", "0.62987703", "0.6254699", "0.62543726", "0.62343174", "0.6186609", "0.61842734", "0.61728805", "0.6165...
0.6973404
2
If we're unable to establish a connection to the Elasticsearch server, CannotLoadConfiguration (which the circulation manager can understand) is raised instead of an Elasticsearchspecific exception.
def test_elasticsearch_error_in_constructor_becomes_cannotloadconfiguration(self): # Unlike other tests in this module, this one runs even if no # ElasticSearch server is running, since it's testing what # happens if there's a problem communicating with that server. class Mock(ExternalSearchIndex): def set_works_index_and_alias(self, _db): raise ElasticsearchException("very bad") with pytest.raises(CannotLoadConfiguration) as excinfo: Mock(self._db) assert "Exception communicating with Elasticsearch server: " in str(excinfo.value) assert "very bad" in str(excinfo.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def check_connection(self, hass: HomeAssistantType):\n from elasticsearch import (\n AuthenticationException,\n AuthorizationException,\n ConnectionError,\n ElasticsearchException,\n SSLError,\n )\n\n client = None\n is_suppor...
[ "0.6263491", "0.62350416", "0.6013181", "0.5960673", "0.59529793", "0.5821141", "0.5678517", "0.5538262", "0.5535928", "0.5493257", "0.54761046", "0.5462948", "0.53717524", "0.5338984", "0.5287106", "0.52525455", "0.5251487", "0.5240034", "0.5202334", "0.5163459", "0.51596135...
0.699837
0
The name of the search index is the prefix (defined in ExternalSearchTest.setup) plus a version number associated with this version of the core code.
def test_works_index_name(self): assert "test_index-v4" == self.search.works_index_name(self._db)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_index_name(self):\r\n return 'index_{}'.format(self.db_field_name)", "def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")", "def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")", "def build_index():\n pas...
[ "0.58981884", "0.5859757", "0.5859757", "0.5726856", "0.56515306", "0.5513793", "0.5505178", "0.54348093", "0.53886336", "0.5384905", "0.5370095", "0.5368973", "0.53638005", "0.5342818", "0.5323321", "0.53231466", "0.53118646", "0.53068525", "0.53068525", "0.52520263", "0.521...
0.6855528
0
When all the filters are applied to `start`, the result is `finish`.
def filters_to(start, finish): for find, replace in filters: start = find.sub(replace, start) assert start == finish
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after_all(self) -> None:\r\n for a_filter in self.filters[::-1]:\r\n a_filter.after_all()", "def analyze(self, start, end):\n return", "def FilterDone(self, last_bits):\n return last_bits", "def __call__(self, start):\r\n return self._iterate(start)", "def catch_up(se...
[ "0.60245126", "0.58253586", "0.5557765", "0.546555", "0.5420422", "0.5409369", "0.53233445", "0.5279904", "0.5211791", "0.51451194", "0.51429945", "0.5127559", "0.50891775", "0.50891775", "0.50891775", "0.50891775", "0.5051931", "0.5035896", "0.49861154", "0.4938378", "0.4924...
0.74531156
0
Iterate over a WorkList until it ends, and return all of the pages.
def pages(worklist): pagination = SortKeyPagination(size=2) facets = Facets( self._default_library, None, None, order=Facets.ORDER_TITLE ) pages = [] while pagination: pages.append(worklist.works( self._db, facets, pagination, self.search )) pagination = pagination.next_page # The last page should always be empty -- that's how we # knew we'd reached the end. assert [] == pages[-1] # Return all the other pages for verification. return pages[:-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pages(self, url_list):\n page_helper = self.get_page\n pool = ThreadPool(self.max_threads)\n results = pool.map(page_helper, url_list)\n pool.close()\n pool.join()\n return results", "def pages(self):\n # The page list comes in three sections. Given radiu...
[ "0.6512649", "0.6426794", "0.6418355", "0.63479626", "0.62923247", "0.6280523", "0.6178995", "0.6132314", "0.61132336", "0.6108371", "0.6048124", "0.6044336", "0.60390985", "0.6030068", "0.5951634", "0.59403145", "0.59379506", "0.58951074", "0.5890618", "0.5873708", "0.587297...
0.7885278
0
Verify that when the books created during test setup are ordered by the given `sort_field`, they show up in the given `order`. Also verify that when the search is ordered descending, the same books show up in the opposite order. This proves that `sort_field` isn't being ignored creating a test that only succeeds by chance.
def assert_order(sort_field, order, **filter_kwargs): expect = self._expect_results facets = Facets( self._default_library, Facets.COLLECTION_FULL, Facets.AVAILABLE_ALL, order=sort_field, order_ascending=True ) expect(order, None, Filter(facets=facets, **filter_kwargs)) facets.order_ascending = False expect(list(reversed(order)), None, Filter(facets=facets, **filter_kwargs)) # Get each item in the list as a separate page. This # proves that pagination works for this sort order for # both Pagination and SortKeyPagination. facets.order_ascending = True for pagination_class in ( Pagination, SortKeyPagination ): pagination = pagination_class(size=1) to_process = list(order) + [[]] while to_process: filter = Filter(facets=facets, **filter_kwargs) expect_result = to_process.pop(0) expect(expect_result, None, filter, pagination=pagination) pagination = pagination.next_page # We are now off the edge of the list -- we got an # empty page of results and there is no next page. assert None == pagination # Now try the same tests but in reverse order. facets.order_ascending = False for pagination_class in ( Pagination, SortKeyPagination ): pagination = pagination_class(size=1) to_process = list(reversed(order)) + [[]] results = [] pagination = SortKeyPagination(size=1) while to_process: filter = Filter(facets=facets, **filter_kwargs) expect_result = to_process.pop(0) expect(expect_result, None, filter, pagination=pagination) pagination = pagination.next_page # We are now off the edge of the list -- we got an # empty page of results and there is no next page. assert None == pagination
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".for...
[ "0.69119376", "0.62695354", "0.59014153", "0.5880185", "0.5848647", "0.5769646", "0.5743111", "0.5740987", "0.56924033", "0.56718487", "0.56502676", "0.5648147", "0.5643026", "0.56352484", "0.56259537", "0.55134785", "0.55029243", "0.5502388", "0.5476563", "0.54709595", "0.54...
0.7052679
0
Simulate the application of a nested filter.
def filter(self, **kwargs): new_filters = self.nested_filter_calls + [kwargs] return MockSearch( self, self._query, new_filters, self.order, self._script_fields )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mock_chain(self, filters, new_filter):\n if filters is None:\n # There are no active filters.\n filters = []\n if isinstance(filters, elasticsearch_dsl_query):\n # An initial filter was passed in. Convert it to a list.\n filters = [filters]\n fi...
[ "0.6055056", "0.5969588", "0.5811234", "0.5677359", "0.5674291", "0.56733555", "0.56623197", "0.56011295", "0.5598666", "0.5580886", "0.55451864", "0.55215704", "0.55069554", "0.54262555", "0.54063", "0.5369425", "0.5340776", "0.53186697", "0.5311007", "0.5300697", "0.5297867...
0.52616525
25
Simulate the creation of an ElasticsearchDSL `Search` object from an ElasticsearchDSL `Query` object.
def query(self, query): return MockSearch( self, query, self.nested_filter_calls, self.order, self._script_fields )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _search(self, query):\n return self._request(query)", "def search_query(\n self,\n index, # type: str\n query, # type: SearchQuery\n *options, # type: SearchOptions\n **kwargs\n ) -> SearchResult:\n\n query = SearchQueryBuilder.create_search_query_object...
[ "0.7029542", "0.6688671", "0.6600027", "0.6517039", "0.6485709", "0.64265573", "0.64017105", "0.6352035", "0.6290121", "0.61653656", "0.614457", "0.61306244", "0.60751146", "0.6055303", "0.60288244", "0.6017776", "0.6013076", "0.59807044", "0.59282154", "0.591098", "0.5907803...
0.75526977
0
Simulate the application of a sort order.
def sort(self, *order_fields): return MockSearch( self, self._query, self.nested_filter_calls, order_fields, self._script_fields )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_sorting(sort=selection_sort, num_items=20, max_value=50):\n # TODO: Repeat until all items are in sorted order\n # TODO: Take first unsorted item\n # TODO: Insert it in sorted order in front of items", "def test_benchmark_sorted(benchmark, benchmark_items_fixture):\n do_benchmark(benchmark_i...
[ "0.6867885", "0.64630604", "0.6454256", "0.63690174", "0.632256", "0.6318335", "0.62491775", "0.62284666", "0.62284666", "0.6171056", "0.616647", "0.61227566", "0.61051816", "0.6099615", "0.6089664", "0.60870415", "0.6045168", "0.6039011", "0.6034501", "0.5993905", "0.5981229...
0.0
-1
Simulate the addition of script fields.
def script_fields(self, **kwargs): return MockSearch( self, self._query, self.nested_filter_calls, self.order, kwargs )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, script, inputs, outputs):", "def _add_fields(self, fields):\n for field in fields:\n self.add(field)", "def script(self):", "def add_inputs(self, inputs):\n self.inputs += inputs", "def add_new_item_field(*fields, **keywords):\n\n for field in fields:\n prin...
[ "0.692667", "0.56195414", "0.5602282", "0.5565711", "0.554856", "0.5542783", "0.5513092", "0.54535246", "0.5447269", "0.5425964", "0.54044926", "0.5340656", "0.5258546", "0.52512413", "0.51859945", "0.51825964", "0.51686376", "0.51534796", "0.5128652", "0.5083519", "0.506479"...
0.0
-1
Verify that both universal methods were called and that the return values were incorporated into the query being built by `search`. This method modifies the `search` object in place so that the rest of a test can ignore all the universal stuff.
def validate_universal_calls(cls): assert True == cls.universal_called assert True == cls.nested_called # Reset for next time. cls.base_called = None cls.nested_called = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_perform_search_old(self):\n es_instance_info = {'cluster_name': 'elasticsearch', 'cluster_uuid': 'kPjOcrpMQaWWm4neFdzLrw', 'name': 'f492663fbfa2', 'tagline': 'You Know, for Search', 'version': {'build_date': '2019-04-05T22:55:32.697037Z', 'build_flavor': 'oss', 'build_hash': 'b7e28a7', 'build_snaps...
[ "0.6531283", "0.64964837", "0.63405144", "0.6176755", "0.6176755", "0.6176755", "0.608852", "0.5953733", "0.59461486", "0.5940335", "0.57574964", "0.571873", "0.5685846", "0.5673872", "0.5671505", "0.5655952", "0.565061", "0.56470966", "0.56348187", "0.5606218", "0.55955267",...
0.0
-1
Build a Query object from a set of facets, then call build() on it.
def from_facets(*args, **kwargs): facets = Facets(self._default_library, *args, **kwargs) filter = Filter(facets=facets) qu = MockQuery("query string", filter=filter) built = qu.build(search) # Return the rest to be verified in a test-specific way. return built
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n for field, options in applicable_filters[\"field_facets\"].items():\n queryset = queryset.facet(field, **options)\n\n for field, options in applicable_filters[\"date_facets\"].items():\n qu...
[ "0.6496745", "0.6315449", "0.6258068", "0.60086054", "0.5911915", "0.57755864", "0.573584", "0.5670942", "0.56401163", "0.5602675", "0.5568983", "0.5543967", "0.553353", "0.54699177", "0.5458636", "0.54096997", "0.5407078", "0.5323815", "0.53104126", "0.52941847", "0.52759683...
0.7220995
0
Helper method for the most common case, where a Filter.build() returns a main filter and no nested filters.
def assert_filter_builds_to(self, expect, filter, _chain_filters=None): final_query = {'bool': {'must_not': [RESEARCH.to_dict()]}} if expect: final_query['bool']['must'] = expect main, nested = filter.build(_chain_filters) assert final_query == main.to_dict() return main, nested
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_filter_chain(self):\n result = None\n for klass in self.filters:\n tmp = klass(self, self.args, result)\n logging.info(\"%s %s\", klass, tmp.active)\n if tmp.active:\n result = tmp\n return result or (lambda x: x)", "def _build(self,...
[ "0.71433246", "0.6450868", "0.6068573", "0.59432274", "0.5906227", "0.5796689", "0.5791745", "0.5788301", "0.5783497", "0.5759372", "0.573049", "0.5677852", "0.5637443", "0.5626888", "0.5609167", "0.55952406", "0.5576881", "0.5536685", "0.5529956", "0.5527361", "0.55241287", ...
0.59127074
4
Validate the 'easy' part of the sort order the tiebreaker fields. Return the 'difficult' part.
def validate_sort_order(filter, main_field): # The tiebreaker fields are always in the same order, but # if the main sort field is one of the tiebreaker fields, # it's removed from the list -- there's no need to sort on # that field a second time. default_sort_fields = [ {x: "asc"} for x in ['sort_author', 'sort_title', 'work_id'] if x != main_field ] assert default_sort_fields == filter.sort_order[1:] return filter.sort_order[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_admin_sort_by(sort_on):\n try:\n sort_attributes = ['title', 'md_pub_date', 'summary']\n if sort_on in sort_attributes:\n return sort_on\n else:\n return 'title'\n except Exception as e:\n print \"Exception: \" + str(e)", "def sorting_by_criter...
[ "0.5422345", "0.51779574", "0.51326853", "0.51216465", "0.5098696", "0.49896", "0.498799", "0.49557397", "0.49503902", "0.49242207", "0.48612294", "0.48536846", "0.48222044", "0.48191088", "0.48098657", "0.4788777", "0.47594061", "0.47372675", "0.4728122", "0.47202504", "0.47...
0.6764114
0
Verify that `filter` is a boolean filter that matches one of a number of possibilities. Return those possibilities.
def dichotomy(filter): assert "bool" == filter.name assert 1 == filter.minimum_should_match return filter.should
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_filter_mixed_function(self):\n for none_type in (False, True):\n for all_type in (False, True):\n for any_type in (False, True, None):\n result = none_type is False and all_type is True \\\n and (any_type is None or any_type is Tru...
[ "0.6741921", "0.6474875", "0.6376854", "0.6179683", "0.6159572", "0.59450597", "0.5884499", "0.5780944", "0.5735032", "0.5734875", "0.5713773", "0.5666095", "0.56029093", "0.5585088", "0.55647486", "0.55589753", "0.55378103", "0.55280507", "0.55214506", "0.5489386", "0.548355...
0.67647254
0
Verify that a filter only matches when there is no value for the given field.
def assert_matches_nonexistent_field(f, field): assert ( f.to_dict() == {'bool': {'must_not': [{'exists': {'field': field}}]}})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_filterval(filterval):\n if filterval != 'description' and filterval != 'fulldescription' and filterval != 'completed':\n return False\n else:\n return True", "def test_filter_function_none(self):\n self.es.register_filter(lambda x: False, ftype='none')\n ...
[ "0.6997897", "0.65928715", "0.65643317", "0.6421701", "0.64098537", "0.6297924", "0.6291485", "0.62772375", "0.6188193", "0.6145177", "0.6119406", "0.6065322", "0.60579246", "0.6054443", "0.60046804", "0.5999436", "0.59982795", "0.5972854", "0.59515387", "0.59507996", "0.5950...
0.7020142
0
A mock of _chain_filters so we don't have to check test results against supercomplicated Elasticsearch filter objects. Instead, we'll get a list of smaller filter objects.
def _mock_chain(self, filters, new_filter): if filters is None: # There are no active filters. filters = [] if isinstance(filters, elasticsearch_dsl_query): # An initial filter was passed in. Convert it to a list. filters = [filters] filters.append(new_filter) return filters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_apply_filter(mocker):\n list_of_filter_dict_keys = [\n 'EqualTo',\n 'Contains',\n 'ContainsAll',\n 'ContainsAny',\n 'ContainsIgnoreCase',\n 'DoesNotContain',\n 'GreaterThan',\n 'GreaterThanOrEqualTo',\n 'DoesNotContainIgnoreCase',\n ...
[ "0.65804183", "0.6574269", "0.6492736", "0.63977313", "0.6358299", "0.631415", "0.6254892", "0.6224293", "0.620702", "0.6090167", "0.6076573", "0.6039105", "0.59843934", "0.5979323", "0.59743553", "0.5895422", "0.58411616", "0.5840082", "0.57979", "0.5793634", "0.5764596", ...
0.8190246
0
Clears the model directory and only maintains the latest `checkpoints` number of checkpoints.
def clear_model_dir(self, checkpoints, logger): files = os.listdir(self.model_dir) last_modification = [(os.path.getmtime(os.path.join(self.model_dir, f)), f) for f in files] # Sort the list by last modified. last_modification.sort(key=itemgetter(0)) # Delete everything but the last 10 files. ckpnt_no = 0 for time, f in last_modification[:-checkpoints]: ckpnt_no += 1 os.remove(os.path.join(self.model_dir, f)) msg = "Deleted %d checkpoints" % (ckpnt_no) logger.debug(msg) print(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_checkpoints(self):\n if tf.gfile.Exists(str(self.info.checkpoint_path)):\n tf.gfile.DeleteRecursively(str(self.info.checkpoint_path))", "def clear_model_checkpoints(self):\n if self.file_prefix is None:\n return\n\n with os.scandir() as path_list:\n ...
[ "0.7887548", "0.78405684", "0.70502526", "0.7037212", "0.69053566", "0.6885986", "0.64904153", "0.6437186", "0.64033055", "0.6368597", "0.6348156", "0.63442576", "0.6341169", "0.6286433", "0.6260607", "0.62325686", "0.6117156", "0.6105783", "0.60879576", "0.6058965", "0.60501...
0.8535333
0
Create a new instance of the factory.
def __init__(self): super().__init__() self.register_as_type(DefaultLoggerFactory.NullLoggerDescriptor, NullLogger) self.register_as_type(DefaultLoggerFactory.ConsoleLoggerDescriptor, ConsoleLogger) self.register_as_type(DefaultLoggerFactory.CompositeLoggerDescriptor, CompositeLogger)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createInstance (self, factory, **kw):\n kw.update(self.__kw)\n return factory(*self.__args, **kw)", "def create(cls):\n pass\n return cls()", "def create_factory(cls, *args):\n raise NotImplementedError", "def create(cls, _):\n return cls", "def factory(self):\...
[ "0.8068922", "0.7944279", "0.7823079", "0.76350766", "0.7491401", "0.7373942", "0.72846454", "0.72057045", "0.7116696", "0.7116696", "0.70897645", "0.70344335", "0.7027849", "0.7027849", "0.70022064", "0.70022064", "0.6944803", "0.6784452", "0.6782056", "0.674577", "0.6729694...
0.0
-1
Rebuilds the surfaces based on the original positions and alpha value. This can be used to reset the states of buttons after returning to a Menu a second time.
def reset(self): self.x = self.x_original self.alpha = self.alpha_original # Button "background" - active self.active_background_surface.set_alpha(self.alpha) # Button "background" - inactive self.inactive_background_surface.set_alpha(self.alpha) # active self.active_text_surface = self.active_font.render(self.text, True, self.color_text) self.active_textRect = self.active_text_surface.get_rect() # inactive self.inactive_text_surface = self.inactive_font.render(self.text, True, self.color_text) self.inactive_textRect = self.inactive_text_surface.get_rect() if self.text_alignment == 'CENTER': self.active_textRect.center = ((self.x + (self.rect.w / 2)), (self.y + (self.rect.h / 2))) self.inactive_textRect.center = ((self.x + (self.rect.w / 2)), (self.y + (self.rect.h / 2))) elif self.text_alignment == 'RIGHT': self.active_textRect.centery = self.y + (self.rect.h / 2) self.active_textRect.right = self.x + self.w - 15 # padding of 15 self.inactive_textRect.centery = self.y + (self.rect.h / 2) self.inactive_textRect.right = self.x + self.w - 15 # padding of 15 else: # LEFT (or invalid) self.active_textRect.centery = self.y + (self.rect.h / 2) self.active_textRect.left = self.x + 15 # padding of 15 self.inactive_textRect.centery = self.y + (self.rect.h / 2) self.inactive_textRect.left = self.x + 15 # padding of 15
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _obje...
[ "0.6098649", "0.5991369", "0.5905092", "0.5815281", "0.5792299", "0.57288677", "0.5722896", "0.5714045", "0.5713487", "0.5713487", "0.57038414", "0.5668134", "0.56377494", "0.5615933", "0.5568823", "0.55665016", "0.5559408", "0.55072635", "0.54790586", "0.54756534", "0.547195...
0.6623888
0
Checks which rendering is needed and renders the button onto the screen surface. Checks whether the button was clicked and if so, returns the text of the button.
def render(self, mouse, events, fading=False): clicked = True if pygame.MOUSEBUTTONDOWN in [e.type for e in events] else False if fading: self.render_fading() elif self.mouse_on_button(mouse): self.render_active() if clicked: # mouse on button + click return self.text else: self.render_inactive() return None # readability - return explicitly None if button was not clicked
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_button(self):\n # Draw the button's outline\n pg.draw.rect(self.screen, self.text_color, pg.Rect(self.rect.left - 1, self.rect.top - 1, self.rect.width + 2, self.rect.height + 2))\n\n # Draw the button\n pg.draw.rect(self.screen, self.button_color, self.rect)\n\n # Blit ...
[ "0.72366494", "0.7209145", "0.6849019", "0.6800909", "0.6409076", "0.6328239", "0.6212745", "0.6210769", "0.61994016", "0.61717445", "0.61338997", "0.61237854", "0.6081018", "0.6080486", "0.6069053", "0.60556203", "0.6053025", "0.6003425", "0.600016", "0.5922334", "0.5922334"...
0.63266563
6
Rendering the inactive button onto the screen surface.
def render_inactive(self): # Rendering button "background" self.screen.blit(self.inactive_background_surface, (self.x, self.y)) # Rendering button text self.screen.blit(self.active_text_surface, self.active_textRect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_active(self):\n # Rendering button \"background\"\n if self.resize_right:\n self.active_background_surface = pygame.Surface((self.w * 1.05, self.h))\n else:\n self.active_background_surface = pygame.Surface((self.w, self.h))\n self.active_background_surf...
[ "0.824468", "0.727387", "0.7242291", "0.69929683", "0.69051856", "0.6741595", "0.6675783", "0.6625641", "0.660019", "0.6552847", "0.65396786", "0.6446757", "0.6421722", "0.64049184", "0.63985157", "0.6314176", "0.6296651", "0.6284545", "0.62568057", "0.6159919", "0.6135134", ...
0.90140796
0
Rendering the active button onto the screen surface.
def render_active(self): # Rendering button "background" if self.resize_right: self.active_background_surface = pygame.Surface((self.w * 1.05, self.h)) else: self.active_background_surface = pygame.Surface((self.w, self.h)) self.active_background_surface.set_alpha(self.alpha) self.active_background_surface.fill(self.color_bg_active) self.screen.blit(self.active_background_surface, (self.x, self.y)) # (0,0) are the top-left coordinates # Rendering button text self.screen.blit(self.inactive_text_surface, self.inactive_textRect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_inactive(self):\n # Rendering button \"background\"\n self.screen.blit(self.inactive_background_surface, (self.x, self.y))\n # Rendering button text\n self.screen.blit(self.active_text_surface, self.active_textRect)", "def draw_button(self):\r\n self.surface.fill(sel...
[ "0.8091009", "0.73016804", "0.7288868", "0.72423387", "0.7085554", "0.6943015", "0.6838501", "0.6808473", "0.67748725", "0.6729412", "0.664229", "0.6615208", "0.6613954", "0.65206283", "0.6385157", "0.63824177", "0.63584137", "0.63529414", "0.62700206", "0.6234393", "0.622167...
0.8646287
0
Renders the animation when a button was clicked
def render_fading(self): self.alpha = self.alpha - self.fading_steps self.inactive_background_surface.set_alpha(self.alpha) if self.sliding_disappearance: self.x -= self.sliding_steps self.active_textRect.x -= self.sliding_steps # Rendering button "background" self.screen.blit(self.inactive_background_surface, (self.x, self.y)) # (0,0) are the top-left coordinates if self.alpha > self.alpha_border: # Render button text until its alpha value is reduced by x self.screen.blit(self.active_text_surface, self.active_textRect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_animation(self):\n self.animation = True\n self.fig = plt.figure()\n anim_running = True\n\n def onClick(event):\n nonlocal anim_running\n if anim_running:\n anim.event_source.stop()\n anim_running = False\n else:\n ...
[ "0.688087", "0.6423931", "0.6393294", "0.62338537", "0.6215481", "0.6131152", "0.61150575", "0.6104077", "0.6104077", "0.60195327", "0.5972781", "0.59392756", "0.5903568", "0.5837927", "0.5831422", "0.57819444", "0.57465935", "0.5713443", "0.56247807", "0.56239694", "0.562179...
0.5403067
61
Checks whether the mouse is on the button and returns a boolean.
def mouse_on_button(self, mouse) -> bool: return self.x + self.w > mouse[0] > self.x and self.y + self.h > mouse[1] > self.y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __check_if_got_pressed(self):\n mouse_x_pos,mouse_y_pos = pg.mouse.get_pos()\n\n if utilitiez.on_object(self.rect.x, self.rect.y, self.rect.width, self.rect.height, mouse_x_pos, mouse_y_pos,\n MOUSE_WIDTH, MOUSE_HEIGHT):\n self.__on_click()", "def isButt...
[ "0.76346713", "0.75949246", "0.75622696", "0.74238867", "0.73842466", "0.73723227", "0.73164", "0.72117794", "0.71592665", "0.7093802", "0.70712423", "0.70122606", "0.68814075", "0.68803525", "0.685196", "0.68248737", "0.682373", "0.6745367", "0.6737135", "0.6720025", "0.6718...
0.8396938
0
Test that a correct description passes the check and that a dot is added.
def test_description(self): self.assertEqual( "Description.", DescribedModel.parse_obj({"name": "Name", "description": "Description"}).description, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_description(question):\n assert \"description\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n # there shouldn't be whitespace at the beginning or end\n assert description.strip() == description\n words = description.split()\n # we should have at leas...
[ "0.7467771", "0.7183706", "0.7170313", "0.69735307", "0.69044524", "0.67804605", "0.66104364", "0.65192705", "0.64968574", "0.6449131", "0.6411322", "0.6408082", "0.63846517", "0.63669723", "0.63474953", "0.6342997", "0.6311893", "0.63043153", "0.6295491", "0.6244171", "0.623...
0.71865505
1
Test that a description with punctuation passes the check.
def test_description_with_punctuation(self): self.assertEqual( "Description?", DescribedModel.parse_obj({"name": "Name", "description": "Description?"}).description, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_description(question):\n assert \"description\" in question[\"instance\"]\n description = question[\"instance\"][\"description\"]\n # there shouldn't be whitespace at the beginning or end\n assert description.strip() == description\n words = description.split()\n # we should have at leas...
[ "0.7259576", "0.70288295", "0.67982197", "0.6772335", "0.66292626", "0.6555726", "0.65338016", "0.65078914", "0.6434341", "0.6406588", "0.6294614", "0.620535", "0.61716986", "0.61261255", "0.6119967", "0.6109077", "0.6107635", "0.61050224", "0.60843354", "0.6074227", "0.60651...
0.794725
0
Test that the description is mandatory.
def test_missing_description(self): self.check_validation_error("description\n field required", name="Name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")", "def testDescription(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"description\")\n\n ...
[ "0.8143264", "0.7655286", "0.7621963", "0.7548257", "0.7531445", "0.74477756", "0.7441477", "0.7300264", "0.72981095", "0.7288913", "0.7284006", "0.7265562", "0.7217067", "0.7123357", "0.7076178", "0.7076178", "0.7076178", "0.7076178", "0.70645094", "0.7035895", "0.7025577", ...
0.8563048
0
Test that the description has a nonzero length.
def test_empty_description(self): self.check_validation_error('description\n string does not match regex ".+"', name="Name", description="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_no_description(self):\n context = TestContext(session_context=ducktape_mock.session_context(),\n cls=DummyTestNoDescription, function=DummyTestNoDescription.test_this)\n assert context.description == \"\"", "def test_missing_description(self):\n self.ch...
[ "0.6839186", "0.6832089", "0.6650879", "0.66502285", "0.6583756", "0.65266544", "0.6453843", "0.6399938", "0.63959414", "0.63910407", "0.63910407", "0.63910407", "0.63910407", "0.63842446", "0.63687706", "0.636613", "0.63494056", "0.6325557", "0.6310538", "0.63036925", "0.630...
0.7342262
0
Extend to setup the model.
def setUp(self): super().setUp() described_model_kwargs = {"name": "Name", "description": "Description"} self.mapped_model = MappedModel[DescribedModel].parse_obj({"described_model_type": described_model_kwargs}) self.expected_described_model = DescribedModel(**described_model_kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_models(self):\n pass", "def init_model(self):\n pass", "def initialize_model(self):\n pass", "def setUpClass(self):\n\n base_model = BaseModel()", "def prepare_model(self, **kwargs):\n pass", "def _setupModel(self, parameters):\r\n ModelFitterCore.setup...
[ "0.79114515", "0.77609944", "0.76080114", "0.75181067", "0.74347234", "0.7401676", "0.72847205", "0.7225797", "0.72077334", "0.7136066", "0.7136066", "0.7136066", "0.7136066", "0.7112577", "0.7076257", "0.70315", "0.70315", "0.70315", "0.70315", "0.6996519", "0.69867325", "...
0.0
-1
Test that values can be retrieved by key using __getitem__().
def test_get_item(self): self.assertEqual(self.expected_described_model, self.mapped_model["described_model_type"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, key):\n pass", "def __getitem__(self, key):", "def __getitem__(self, key):\n raise NotImplementedError()", "def __getitem__(self, key):\n for entry_key, value in self.read(key):\n if entry_key != key:\n raise KeyError(key)\n return value\n raise KeyE...
[ "0.75055385", "0.74885994", "0.73212683", "0.7278315", "0.7257539", "0.7129784", "0.70715296", "0.70595926", "0.70080745", "0.70076877", "0.69866014", "0.69863576", "0.6985903", "0.6985903", "0.6985903", "0.6981361", "0.69438547", "0.69308174", "0.6926797", "0.6906123", "0.68...
0.0
-1
Test that values can be retrieved by key using get().
def test_get(self): self.assertEqual(self.expected_described_model, self.mapped_model.get("described_model_type"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1,2,'three'],\n '4': {1:'one', 2:'two'}}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key])\n\n ...
[ "0.7516749", "0.69722515", "0.69722515", "0.6736516", "0.67187446", "0.67044663", "0.6631371", "0.6624731", "0.6615741", "0.65834504", "0.6528521", "0.64331293", "0.6407079", "0.6395825", "0.6355946", "0.6348174", "0.6346767", "0.62921834", "0.6275098", "0.6267472", "0.625161...
0.0
-1
Test that the items can be retrieved.
def test_items(self): self.assertEqual([("described_model_type", self.expected_described_model)], list(self.mapped_model.items()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_item_list(self):\n resp = self.app.get('/items')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(len(data), 3)", "def test_get_items_page(self, mock_requests_get):\n result = resources.get_items_page(1, \"a...
[ "0.74696356", "0.7277848", "0.7122716", "0.68902785", "0.68500966", "0.6840849", "0.6809992", "0.6799842", "0.6778106", "0.6757121", "0.67133766", "0.6710793", "0.6653335", "0.6622517", "0.6612151", "0.66074485", "0.66016537", "0.65803057", "0.6577676", "0.6576313", "0.654119...
0.62851614
47
Test that the values can be retrieved.
def test_values(self): self.assertEqual([self.expected_described_model], list(self.mapped_model.values()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_many_values(self):\n write this test!", "def test_getknowndata(self):\n result = recordparser.getfields(self.rawdata, self.fieldmap,\n self.sourcekeys)\n self.assertEqual(self.knownvalues, result)", "def test_properties_get(self):\n pass", "def test_getitem...
[ "0.72376066", "0.66556334", "0.66527504", "0.6525868", "0.65233016", "0.65196735", "0.64961874", "0.64638084", "0.64451057", "0.642613", "0.6415883", "0.64122844", "0.64025444", "0.64025444", "0.6368156", "0.63579667", "0.6348108", "0.63410705", "0.63293576", "0.6287174", "0....
0.6480253
7
Test that the format method returns the value.
def test_format(self): class FooEnum(StrEnum): """Concrete string enum.""" FOO = "foo" self.assertEqual("foo", f"{FooEnum.FOO}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_formatResult(self):\r\n x = self.FWP({'x': 3})\r\n self.assertEqual(x.formatResult(3), '3')", "def testFormatLabelAndValue(self):\n\n self.assertEqual('Abc: xyz', self.inv._FormatLabelAndValue('abc', 'xyz', 1))\n self.assertEqual('ABc: xyz', self.inv._FormatLabelAndValue('abc', 'xyz'...
[ "0.7601972", "0.6776671", "0.64708084", "0.64118683", "0.6358639", "0.63170815", "0.6206864", "0.61815", "0.6169668", "0.6160455", "0.61496854", "0.60949385", "0.6047818", "0.6042502", "0.59908664", "0.5988443", "0.59854466", "0.5971309", "0.5958548", "0.5958548", "0.5903482"...
0.0
-1
Returns list of repositories in the target systems defined by the given connection.
def fetch_repos(connection): try: response = connection.get_json('repository') except HTTPRequestError as ex: raise exception_from_http_error(ex) from ex result = response.get('result', []) return [Repository(connection, repo['rid'], data=repo) for repo in result]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_repos(self):\n return [self.config[\"repo\"]]", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def list_repositories(self):\n repos = self.repo_conn.list_repositories()\n return repos", "def find_repos(source_connection):\n\n #TODO\n ...
[ "0.73065317", "0.7184677", "0.7014473", "0.69301844", "0.6757819", "0.66381013", "0.65795696", "0.6515173", "0.6481872", "0.6415119", "0.63748735", "0.63295364", "0.631364", "0.627221", "0.6263997", "0.62445974", "0.6240596", "0.6182124", "0.61526173", "0.6142442", "0.6122399...
0.71246964
2
Wait for clone process to finish
def wait_for_clone(repo, wait_for_ready, http_exc): start_time = time.time() while time.time() - start_time < wait_for_ready: repo.wipe_data() try: if repo.is_cloned: return except HTTPRequestError: _mod_log().debug('Failed to get status of the repository %s', repo.rid) raise SAPCliError(f'Waiting for the repository to be in READY state timed out\n{http_exc}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait(self):\n self.Popen.wait()", "def wait(self):\n pass", "def wait(self):\n pass", "def wait_finish(self):\r\n self.proc.join()", "def wait(self):\n\n ...
[ "0.65956825", "0.65956825", "0.65956825", "0.65956825", "0.6524684", "0.6290063", "0.6290063", "0.6060584", "0.6007381", "0.59562814", "0.59526855", "0.59149146", "0.5834006", "0.5834006", "0.58271825", "0.5822418", "0.57953876", "0.57722795", "0.5770641", "0.5743308", "0.572...
0.6913001
0
Creates and clones the repository in the target systems
def clone(connection, url, rid, vsid='6IT', start_dir='src/', vcs_token=None, error_exists=True, role='SOURCE', typ='GITHUB'): config = {} if start_dir: config['VCS_TARGET_DIR'] = start_dir if vcs_token: config['CLIENT_VCS_AUTH_TOKEN'] = vcs_token repo = Repository(connection, rid) try: repo.create(url, vsid, config=config, role=role, typ=typ) except GCTSRepoAlreadyExistsError as ex: if error_exists: raise ex _mod_log().debug(ex) _mod_log().info(str(ex)) repo.wipe_data() if not repo.is_cloned: repo.clone() else: _mod_log().info('Not cloning the repository "%s": already performed') return repo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_clone_system(self):\n pass", "def create_repo_clone(self, path, https):\n _, _, login, remote_dir = path.split('/', 3) # 3 x '/' before real path\n remote_dir = os.path.dirname(remote_dir) # final segment from clone\n print remote_dir\n cmd = ['ssh', login, 'mkdir', ...
[ "0.66225773", "0.64688706", "0.6379742", "0.6350996", "0.6275477", "0.62710005", "0.62257934", "0.6220888", "0.6183023", "0.6109122", "0.6078347", "0.6077387", "0.6044741", "0.60430527", "0.60360557", "0.6035144", "0.6016773", "0.6012548", "0.60095423", "0.5999956", "0.599848...
0.57568884
46
Checks out the given branch in the given repository on the give system
def checkout(connection, branch, rid=None, repo=None): if repo is None: repo = Repository(connection, rid) return repo.checkout(branch)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gitCheckoutBranch(self, path, branch):\r\n\r\n with workInDirectory(path):\r\n fetch_cmd = [\"git\", \"fetch\"]\r\n if self.verbose:\r\n print(\"Runing Command : {}\".format(\" \".join(fetch_cmd)))\r\n\r\n SubProcessUtility.runCommand(fetch_cmd)\r\n\r\n ...
[ "0.7553268", "0.72212356", "0.7122081", "0.71060395", "0.7037166", "0.69275075", "0.6837741", "0.6769412", "0.6685118", "0.6674647", "0.6668449", "0.65646195", "0.6517232", "0.6484005", "0.64520335", "0.6417905", "0.6379777", "0.6373071", "0.6324217", "0.6288217", "0.6272058"...
0.728686
1
Returns log history of the given repository on the give system
def log(connection, rid=None, repo=None): if repo is None: repo = Repository(connection, rid) return repo.log()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rolling_log_history():\n current_tag = get_current_tag()\n return get_log_history(current_tag)", "def retrieve_git_log(self):\n result = [str(entry).split(\"\\t\")[1]\n for entry in self.repo.head.log()]\n\n return result", "def getLogs():", "def getLogs():", "d...
[ "0.6816535", "0.6790785", "0.64824134", "0.64824134", "0.6458293", "0.644352", "0.63438153", "0.63379043", "0.6326975", "0.6298118", "0.6273212", "0.6261958", "0.62492275", "0.6150543", "0.6111015", "0.6078224", "0.6065768", "0.604785", "0.6034963", "0.6026202", "0.601138", ...
0.6000602
22
Pulls the given repository on the give system
def pull(connection, rid=None, repo=None): if repo is None: repo = Repository(connection, rid) return repo.pull()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pull1(repo, **kwargs):\n ret = do_pull(repo, \"topology.virl\")\n if not ret:\n exit(1)", "def pull(self):\n origin = self.git_repo.remotes.origin\n origin.pull()", "def pull(self, remote, branch, *args):\n return self.cmd('pull', remote, branch, *args)", "def pull(refer...
[ "0.73290116", "0.717763", "0.7176306", "0.7152554", "0.7016408", "0.69733423", "0.69663095", "0.69643307", "0.69623786", "0.69125956", "0.6869978", "0.67377836", "0.66976327", "0.6685887", "0.6681624", "0.666945", "0.66548884", "0.6588705", "0.6575877", "0.65757114", "0.65705...
0.7484018
0
Deletes the given repository on the give system
def delete(connection, rid=None, repo=None): if repo is None: repo = Repository(connection, rid) return repo.delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repository_delete(ctx: click.Context, repository_name):\n subcommand_repository.cmd_delete(ctx.obj, repository_name)", "def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def delete(repo):\n print('Repo: %s' % repo)\n print('Deleted')", "d...
[ "0.8317304", "0.817318", "0.774548", "0.7673443", "0.75179964", "0.6937679", "0.6887084", "0.6834599", "0.6765182", "0.6756616", "0.6703037", "0.66614944", "0.6376965", "0.63704604", "0.6368734", "0.6269005", "0.6256236", "0.62037766", "0.61481625", "0.60742587", "0.6066743",...
0.7290421
5
Get Token for the currently logged in user
def get_user_credentials(connection): response = connection.get_json('user') user_data = response.get('user', None) if user_data is None: raise SAPCliError('gCTS response does not contain \'user\'') config_data = user_data.get('config', None) if config_data is None: return [] user_credentials = [cred for cred in config_data if cred['key'] == 'USER_AUTH_CRED_ENDPOINTS'] return json.loads(user_credentials[0]['value'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_token(self):\n return user.get_token()", "def get(self):\n if current_user and not current_user.is_anonymous:\n user = current_user\n tok = Token(user, 3600)\n return tok\n return jsonify({404: 'User not found'})", "def get_token():\n if g.curre...
[ "0.8868773", "0.8156606", "0.8019521", "0.8019521", "0.7996061", "0.7894515", "0.78614265", "0.77490443", "0.77235836", "0.7714836", "0.7688327", "0.7681948", "0.75940716", "0.75773776", "0.7504578", "0.7444878", "0.74156564", "0.74042356", "0.73997515", "0.7359239", "0.73592...
0.0
-1
Set Token for the currently logged in user
def set_user_api_token(connection, api_url, token): body = { 'endpoint': api_url, 'user': '', 'password': '', 'token': token, 'type': 'token' } connection.post_obj_as_json('user/credentials', body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_UserToken(self, value):\n super(GetCategoriesInputSet, self)._set_input('UserToken', value)", "def login_token(self, token):\n self.token = token # this will also set the refresh_token to None", "def _set_token(self) -> None:\n if 'token' in self.params['user'].keys():\n ...
[ "0.71641374", "0.71383333", "0.71284956", "0.7036171", "0.6982911", "0.69558555", "0.6954422", "0.68936217", "0.68726385", "0.6756373", "0.6738934", "0.6696999", "0.66546893", "0.66532344", "0.66050583", "0.66050583", "0.65756255", "0.6556584", "0.6556224", "0.6555834", "0.64...
0.6654888
12
Delete Token for the currently logged in user
def delete_user_credentials(connection, api_url): body = { 'endpoint': api_url, 'user': '', 'password': '', 'token': '', 'type': 'none' } connection.post_obj_as_json('user/credentials', body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in token...
[ "0.7799497", "0.77679306", "0.7517583", "0.7433433", "0.742697", "0.7416216", "0.74131984", "0.7221469", "0.72203773", "0.71465814", "0.71020013", "0.7097267", "0.70933104", "0.70177597", "0.69026256", "0.6894906", "0.6887351", "0.6852845", "0.6840806", "0.68192285", "0.68083...
0.0
-1
Get configuration property value for given key
def get_system_config_property(connection, config_key): response = connection.get_json(f'system/config/{config_key}') config_value = response.get('result') if config_value is None: raise SAPCliError("gCTS response does not contain 'result'") return config_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, key):\n return self.config.get(key)", "def get_property(self, key):\n _key = DJANGO_CONF[key]\n return getattr(self, _key, CONF_SPEC[_key])", "def _get_config_value(self, section, key):\n return config.get(section, key)", "def get_value(self, key):\n if key no...
[ "0.8216822", "0.82159626", "0.81225705", "0.80191004", "0.78103554", "0.78074646", "0.77338487", "0.7696018", "0.76687634", "0.76586175", "0.7633325", "0.76286393", "0.742231", "0.73506427", "0.7315717", "0.73031425", "0.72236985", "0.7163746", "0.7160865", "0.71571016", "0.7...
0.70693445
25
Create or update the configuration property
def set_system_config_property(connection, config_key, value): body = { 'key': config_key, 'value': value, } response = connection.post_obj_as_json('system/config', body).json() config_value = response.get('result') if config_value is None: raise SAPCliError("gCTS response does not contain 'result'") return config_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_existing_property():\n\n value = 'new'\n\n contents = (\"[Info]\\n\"\n \"sdk = old\")\n\n testutils.deploy_config_raw(contents)\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0", "def test...
[ "0.6991356", "0.68301314", "0.64680314", "0.63569725", "0.63561064", "0.6272596", "0.6241467", "0.6150869", "0.6149671", "0.61463547", "0.610089", "0.6067047", "0.6049001", "0.60469306", "0.6039595", "0.59342664", "0.59069026", "0.58782035", "0.5872062", "0.58575135", "0.5849...
0.5456461
78
fully connected layers or dense layer
def fclayer(in_features, out_features):
    """Build a dense (fully connected) layer with Kaiming-initialised weights.

    :param in_features: size of each input sample
    :param out_features: size of each output sample
    :return: an ``nn.Linear`` module whose weight matrix was re-initialised
        with ``nn.init.kaiming_normal_``
    """
    layer = nn.Linear(in_features, out_features)
    # replace the default initialisation with He/Kaiming normal init
    nn.init.kaiming_normal_(layer.weight)
    return layer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_fully_connected_layer(input_layer,\n layer_size,\n activation=tf.nn.relu,\n layer_name='',\n logs=False):\n if not layer_name:\n layer_name = ''.join(str(x) for x in np.random....
[ "0.71426284", "0.6833023", "0.6805999", "0.67833257", "0.67681617", "0.6711874", "0.6682316", "0.6651481", "0.6631076", "0.6611241", "0.65508956", "0.65147924", "0.6511002", "0.64513654", "0.64409477", "0.63658357", "0.63640463", "0.6321559", "0.62621194", "0.6261182", "0.623...
0.58284247
94
Get the statistics for all builders.
def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:
    """Aggregate build statistics over every builder known to the master.

    :param time_window: builds that started before this moment are ignored
    :return: accumulated ``BuildStats`` across all builders
    """
    print('getting list of builders...')
    stats = BuildStats()
    # the master's root JSON document maps builder names to metadata;
    # only the names are needed here
    for builder in requests.get(BASE_URL).json().keys():
        # TODO: maybe filter the builds to the ones we care about
        stats += get_builder_stats(builder, time_window)
    return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_builder_stats(builder: str, time_window: datetime.datetime) -> BuildStats:\n print('Gettings builds for {}...'.format(builder))\n # TODO: can we limit the data we're requesting?\n url = '{}/{}/builds/_all'.format(BASE_URL, builder)\n stats = BuildStats()\n for build, results in requests.get(...
[ "0.70549256", "0.6995379", "0.66598487", "0.6448039", "0.6427277", "0.6422584", "0.6358498", "0.63564634", "0.6297995", "0.62931466", "0.62858886", "0.6255862", "0.6241165", "0.6240544", "0.61738175", "0.61579835", "0.61428285", "0.6082591", "0.60764337", "0.60596114", "0.602...
0.7632881
0
Get the statistics for one builder.
def get_builder_stats(builder: str, time_window: datetime.datetime) -> BuildStats: print('Gettings builds for {}...'.format(builder)) # TODO: can we limit the data we're requesting? url = '{}/{}/builds/_all'.format(BASE_URL, builder) stats = BuildStats() for build, results in requests.get(url).json().items(): start_time = datetime.datetime.fromtimestamp(float(results['times'][0])) if start_time < time_window: continue successful = results['text'] == ['build', 'successful'] stats.add(successful) return stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_buildbot_stats(time_window : datetime.datetime) -> BuildStats:\n print('getting list of builders...')\n stats = BuildStats()\n for builder in requests.get(BASE_URL).json().keys():\n # TODO: maybe filter the builds to the ones we care about\n stats += get_builder_stats(builder, time_w...
[ "0.7178298", "0.6534593", "0.64914143", "0.6488856", "0.63973767", "0.63478684", "0.6330162", "0.62668484", "0.6216657", "0.61736965", "0.615647", "0.6031543", "0.6031311", "0.6029025", "0.5993337", "0.59911054", "0.5989218", "0.5979158", "0.59560424", "0.5946391", "0.5927121...
0.7503165
0
Create metric descriptors on Stackdriver. Recreating these with every call is fine.
def gcp_create_metric_descriptor(project_id: str):
    """Create the custom Stackdriver metric descriptors.

    Recreating the descriptors with every call is fine, so no existence
    check is performed before creation.

    :param project_id: GCP project that owns the metrics
    """
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(project_id)

    for desc_type, desc_desc in [
        ["buildbots_percent_failed", "Percentage of failed builds"],
        ["buildbots_builds_successful", "Number of successful builds in the last 24h."],
        ["buildbots_builds_failed", "Number of failed builds in the last 24h."],
        ["buildbots_builds_total", "Total number of builds in the last 24h."],
    ]:
        descriptor = monitoring_v3.types.MetricDescriptor()
        descriptor.type = 'custom.googleapis.com/buildbots_{}'.format(desc_type)
        # GAUGE: each written point represents the current value, not a delta
        descriptor.metric_kind = (
            monitoring_v3.enums.MetricDescriptor.MetricKind.GAUGE)
        descriptor.value_type = (
            monitoring_v3.enums.MetricDescriptor.ValueType.DOUBLE)
        descriptor.description = desc_desc
        descriptor = client.create_metric_descriptor(project_name, descriptor)
        print('Created {}.'.format(descriptor.name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recreate_metrics():\n all = monitor_client.list_metric_descriptors(\n project_path, filter_='metric.type=starts_with(\"custom.\")'\n )\n for a in all:\n if \"accumulator\" in str(a) or \"biquery\" in str(a):\n metric_name = monitor_client.metric_descriptor_path(\n ...
[ "0.70970815", "0.6102869", "0.5952238", "0.5923666", "0.58200157", "0.5791009", "0.5774546", "0.575687", "0.57353896", "0.5720316", "0.5650112", "0.5590122", "0.5529896", "0.54889554", "0.54316115", "0.54202217", "0.5382546", "0.5348047", "0.53211987", "0.5313823", "0.530327"...
0.66897804
1
Upload metrics to Stackdriver.
def gcp_write_data(project_id: str, stats: BuildStats):
    """Upload the aggregated build statistics to Stackdriver.

    One time series (with a single point stamped "now") is written per
    metric, matching the GAUGE metric kind of the descriptors.

    :param project_id: GCP project that owns the metrics
    :param stats: statistics to publish
    """
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(project_id)
    now = datetime.datetime.now()

    for desc_type, value in [
        ["buildbots_percent_failed", stats.percent_failed],
        ["buildbots_builds_successful", stats.successful],
        ["buildbots_builds_failed", stats.failed],
        ["buildbots_builds_total", stats.total],
    ]:
        series = monitoring_v3.types.TimeSeries()
        series.metric.type = 'custom.googleapis.com/buildbots_{}'.format(desc_type)
        series.resource.type = 'global'
        point = series.points.add()
        point.value.double_value = value
        point.interval.end_time.seconds = int(now.timestamp())
        client.create_time_series(project_name, [series])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_metrics(metrics_dict, project, dataset, table):\n # Credentials will be loaded from envvar $GOOGLE_APPLICATION_CREDENTIALS.\n bq_client = bigquery.Client(project=project)\n table_ref = bq_client.dataset(dataset).table(table)\n errors = bq_client.insert_rows_json(table_ref, metrics_dict)\n return er...
[ "0.6587916", "0.650881", "0.6472843", "0.6468111", "0.6410721", "0.6152306", "0.6033505", "0.5933085", "0.58030564", "0.5773362", "0.5758092", "0.57214624", "0.56050664", "0.55261976", "0.5523498", "0.5457035", "0.54216254", "0.5412076", "0.54043615", "0.5381582", "0.5369474"...
0.5305429
24
initialize a receptor library by setting the number of receptors, the number of substrates it can respond to, and optional additional parameters in the parameter dictionary
def __init__(self, num_substrates, num_receptors, parameters=None):
    """Initialize a receptor library.

    :param num_substrates: number of substrates the library can respond to
    :param num_receptors: number of receptors in the library
    :param parameters: optional dict with additional parameters
    :raises ValueError: if `fixed_mixture_size` is neither None nor an
        integer between 0 and Ns
    """
    # the call to the inherited method also sets the default parameters from
    # this class
    super(LibraryBinaryNumeric, self).__init__(num_substrates, num_receptors,
                                               parameters)

    # prevent integer overflow in collecting activity patterns
    assert num_receptors <= self.parameters['max_num_receptors'] <= 63

    # check fixed_mixture_size parameter
    fixed_mixture_size = self.parameters['fixed_mixture_size']
    if fixed_mixture_size is False:
        # special case where we accept False and silently convert to None
        self.parameters['fixed_mixture_size'] = None
    elif fixed_mixture_size is not None:
        # if the value is not None it better is an integer
        try:
            fixed_mixture_size = int(fixed_mixture_size)
            if 0 <= fixed_mixture_size <= self.Ns:
                self.parameters['fixed_mixture_size'] = fixed_mixture_size
            else:
                raise ValueError
        except (TypeError, ValueError):
            raise ValueError('`fixed_mixture_size` must either be None or '
                             'an integer between 0 and Ns.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, num_params):\r\n self.num_params = num_params", "def __init__(self, num_params):\r\n self.num_params = num_params", "def __init__(self, *args, **kwargs):\n self.specGenerator = WMSpecGenerator()\n self.count = 0\n self.maxWmSpec = kwargs.setdefault('numOfSp...
[ "0.624694", "0.624694", "0.59270716", "0.58812773", "0.5859252", "0.5857603", "0.5856646", "0.5854381", "0.5844939", "0.58047056", "0.5773737", "0.57722926", "0.57650805", "0.57243747", "0.5716238", "0.56949776", "0.5688408", "0.56387156", "0.5638388", "0.5597038", "0.5542545...
0.7083537
0
create random arguments for creating test instances
def get_random_arguments(cls, fixed_mixture_size=None, **kwargs):
    """Create random arguments for creating test instances.

    :param fixed_mixture_size: if given, stored in the generated parameter
        dictionary under 'fixed_mixture_size'
    :return: dict of keyword arguments suitable for the class constructor
    """
    arguments = super(LibraryBinaryNumeric, cls).get_random_arguments(**kwargs)
    if fixed_mixture_size is None:
        return arguments
    arguments['parameters']['fixed_mixture_size'] = fixed_mixture_size
    return arguments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_scenarios(self, params, num_scenarios, random_seed):\n return None", "def generate_params(self, randomize=True):\n pass", "def fixture_wrapper_arguments():\n n_features = 9\n classes = [\"a\", \"b\", \"c\"]\n\n return n_features, classes", "def seed(*args, **kwargs): # real sign...
[ "0.6887592", "0.6532512", "0.65300083", "0.649554", "0.6424966", "0.6268241", "0.6204779", "0.61991894", "0.6059095", "0.6022314", "0.5994968", "0.5982778", "0.59304875", "0.5911756", "0.588763", "0.5865559", "0.5842821", "0.5842821", "0.5842703", "0.58391935", "0.5827592", ...
0.56847954
31
creates a test instance used for consistency tests
def create_test_instance(cls, **kwargs):
    """Create an instance with random parameters for consistency tests.

    The sensitivity matrix of the returned instance is chosen
    automatically.
    """
    # build an instance with randomly chosen parameters
    instance = super(LibraryBinaryNumeric, cls).create_test_instance(**kwargs)
    # pick an optimal interaction matrix for it
    instance.choose_sensitivity_matrix('auto')
    return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_test_object(self):\n return self.orm_cls.testing_create()", "def create_instance(test_id, config, args):\n return TestT1Detail(test_id, config, args)", "def test_new(self):", "def test_new(self):", "def test_constructor(self):\n pass", "def test_create(self):\n pass", "...
[ "0.73254544", "0.70019776", "0.6976491", "0.6976491", "0.6886069", "0.6675423", "0.6637996", "0.65213585", "0.65042526", "0.64920676", "0.64536", "0.6413658", "0.6351893", "0.6320265", "0.62997496", "0.62697804", "0.6266391", "0.62615484", "0.6233441", "0.6232256", "0.6212402...
0.6345531
13
calculate the number of steps to do for `scheme`
def get_steps(self, scheme):
    """Calculate the number of steps to do for the given scheme.

    The two branches of the original implementation were identical except
    for the parameter prefix, so they are unified here.

    :param scheme: either 'monte_carlo' or 'metropolis'
    :raises ValueError: for any other scheme name
    :return: number of steps as an int
    """
    if scheme not in ('monte_carlo', 'metropolis'):
        raise ValueError('Unknown stepping scheme `%s`' % scheme)

    # `<scheme>_steps` is either an explicit count or 'auto', in which
    # case the count scales with the number of receptors, clipped to the
    # configured bounds
    steps = self.parameters[scheme + '_steps']
    if steps == 'auto':
        steps_min = self.parameters[scheme + '_steps_min']
        steps_max = self.parameters[scheme + '_steps_max']
        # Here, the factor 10 is an arbitrary scaling factor
        steps = np.clip(10 * 2**self.Nr, steps_min, steps_max)

    return int(steps)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_steps_num():\n return 0", "def decode_step_count(self, board=None):\n # TODO decide which one is better.. not crucial\n # steps = 0\n # for key_pow, val_coor in self.read_bits.items():\n # steps += (self.matrix_board[val_coor] * 2) ** key_pow\n # return step...
[ "0.65601104", "0.65067685", "0.6441884", "0.64148223", "0.6364415", "0.62706876", "0.6186873", "0.61510324", "0.6133444", "0.60866106", "0.60067546", "0.60036564", "0.5896404", "0.58475363", "0.58371323", "0.5806053", "0.5803067", "0.58011645", "0.57761735", "0.5773252", "0.5...
0.7198185
0
creates a interaction matrix with the given properties
def choose_sensitivity_matrix(self, density=0, avoid_correlations=False):
    """Create an interaction (sensitivity) matrix with the given properties.

    :param density: fraction of non-zero entries; 0 gives an empty matrix,
        >= 1 a full one, and 'auto' derives an optimal density from theory
    :param avoid_correlations: when True, entries are placed so that
        receptors overlap as little as possible
    """
    shape = (self.Nr, self.Ns)
    if density == 'auto':
        # determine optimal parameters for the interaction matrix
        from .lib_bin_theory import LibraryBinaryUniform
        theory = LibraryBinaryUniform.from_other(self)
        density = theory.get_optimal_library()['density']
    if density == 0:
        # simple case of empty matrix
        self.sens_mat = np.zeros(shape, np.uint8)
    elif density >= 1:
        # simple case of full matrix
        self.sens_mat = np.ones(shape, np.uint8)
    elif avoid_correlations:
        # choose receptor substrate interaction randomly but try to avoid
        # correlations between the receptors
        self.sens_mat = np.zeros(shape, np.uint8)
        num_entries = int(round(density * self.Nr * self.Ns))
        empty_sens_mat = True
        while num_entries > 0:
            # specify the substrates that we want to detect
            if num_entries >= self.Ns:
                i_ids = np.arange(self.Ns)
                num_entries -= self.Ns
            else:
                i_ids = np.random.choice(np.arange(self.Ns), num_entries,
                                         replace=False)
                num_entries = 0
            if empty_sens_mat:
                # set the receptors for the substrates
                a_ids = np.random.randint(0, self.Nr, len(i_ids))
                for i, a in zip(i_ids, a_ids):
                    self.sens_mat[a, i] = 1
                empty_sens_mat = False
            else:
                # choose receptors for each substrate from the ones that
                # are not activated, yet
                for i in i_ids:
                    a_ids = np.flatnonzero(self.sens_mat[:, i] == 0)
                    self.sens_mat[random.choice(a_ids), i] = 1
    else:  # not avoid_correlations:
        # choose receptor substrate interaction randomly and don't worry
        # about correlations
        self.sens_mat = (np.random.random(shape) < density).astype(np.uint8)

    # save the parameters determining this matrix
    self.parameters['sensitivity_matrix_params'] = {
        'density': density,
        'avoid_correlations': avoid_correlations
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interaction_matrix(self):\n\n self.int_M_called = True\n int_M = np.zeros((self.n, self.n))\n for k in range(self.n):\n for j in range(k+1):\n o = self.attribute_interactions(k, j)\n int_M[k, j] = o.rel_total_ig_ab # Store total information gain\n ...
[ "0.6194615", "0.56910884", "0.56631625", "0.5550948", "0.55139613", "0.549342", "0.5372611", "0.5372611", "0.52580357", "0.5247446", "0.5166129", "0.5157592", "0.5156394", "0.514412", "0.51274884", "0.51019424", "0.5082754", "0.50743306", "0.5064459", "0.5027708", "0.49512535...
0.0
-1
return the sorted `sensitivity_matrix` or sorts the internal sensitivity_matrix in place. This function rearranges receptors such that receptors reacting to an equal number of substrates and to similar substrates are close together.
def sort_sensitivity_matrix(self, sensitivity_matrix=None):
    """Sort the rows of a sensitivity matrix.

    Rows are ordered primarily by the number of substrates a receptor
    reacts to and secondarily by the substrate pattern itself, so that
    similar receptors end up next to each other.

    :param sensitivity_matrix: matrix to sort; if None, the internal
        `sens_mat` is sorted in place and nothing is returned
    :return: the sorted matrix, or None when sorting in place
    """
    in_place = sensitivity_matrix is None
    matrix = self.sens_mat if in_place else sensitivity_matrix

    # sort by (row sum, row pattern) — tuples compare lexicographically
    keyed_rows = sorted((sum(row), list(row)) for row in matrix)
    sorted_matrix = np.array([row for _, row in keyed_rows])

    if in_place:
        self.sens_mat = sorted_matrix
    else:
        return sorted_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SortAndFilterSuspects(self, suspects):\n if not suspects or len(suspects) == 1:\n return suspects\n\n suspects.sort(key=lambda suspect: -suspect.confidence)\n max_score = suspects[0].confidence\n min_score = max(suspects[-1].confidence, 0.0)\n if max_score == min_score:\n return []\n\n...
[ "0.54372156", "0.5328664", "0.5222484", "0.4775966", "0.47067013", "0.46552995", "0.46477485", "0.46280968", "0.4625397", "0.46188542", "0.46107998", "0.45985577", "0.45515847", "0.45426014", "0.45174512", "0.45159692", "0.4500111", "0.44911516", "0.44894326", "0.44743133", "...
0.73758173
0
return the number of steps we iterate over
def _iterate_steps(self): mixture_size = self.parameters['fixed_mixture_size'] if mixture_size is None: return 2 ** self.Ns else: return scipy.special.comb(self.Ns, mixture_size, exact=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_steps(self) -> int:\n return len(self.step_points)", "def num_steps(self):\n return self.torsoStepCount() + 1", "def num_steps(self) -> int:\n return self._num_steps", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def n_steps(self) ->...
[ "0.8299782", "0.814433", "0.8137605", "0.79739", "0.79542035", "0.7865278", "0.77747107", "0.74807376", "0.74784815", "0.74234825", "0.7367897", "0.7363563", "0.7347694", "0.7343244", "0.73042005", "0.7163757", "0.7137122", "0.71312535", "0.7065494", "0.7043801", "0.70322317"...
0.6790382
29
iterate over all mixtures, yielding each mixture together with its statistical weight
def _iterate_mixtures(self):
    """Iterate over all mixtures, yielding `(mixture, weight)` pairs.

    The weight is the unnormalized statistical weight exp(c.J.c + h.c)
    of the binary mixture vector `c`.

    :raises RuntimeError: if the iteration would take more steps than
        the `max_steps` parameter allows
    """
    # NOTE(review): `_iterate_steps` is used without parentheses here, so
    # it is presumably decorated as a @property — confirm in full source.
    if self._iterate_steps > self.parameters['max_steps']:
        raise RuntimeError('The iteration would take more than %g steps'
                           % self.parameters['max_steps'])

    hi = self.commonness
    Jij = self.correlations
    mixture_size = self.parameters['fixed_mixture_size']

    if mixture_size is None:
        # iterate over all mixtures
        for c in itertools.product((0, 1), repeat=self.Ns):
            c = np.array(c, np.uint8)
            weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))
            yield c, weight_c
    elif mixture_size == 0:
        # special case which is not covered by the iteration below
        yield np.zeros(self.Ns, np.uint8), 1
    elif mixture_size == self.Ns:
        # special case which is not covered by the iteration below
        yield np.ones(self.Ns, np.uint8), 1
    else:
        # iterate over all mixtures with constant number of substrates
        c = np.zeros(self.Ns, np.uint8)
        for nz in itertools.combinations(range(self.Ns), mixture_size):
            c[:] = 0
            c[np.array(nz)] = 1
            weight_c = np.exp(np.dot(np.dot(Jij, c) + hi, c))
            yield c, weight_c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sample_binary_mixtures(model, steps, dtype=np.uint):\n mixture_size = model.parameters['fixed_mixture_size']\n \n if not model.is_correlated_mixture and mixture_size is None:\n # use simple monte carlo algorithm\n prob_s = model.substrate_probabilities\n \n for _ i...
[ "0.6160292", "0.6110729", "0.59938663", "0.59472424", "0.58536416", "0.58510166", "0.58116955", "0.5767724", "0.57352465", "0.5675324", "0.5663256", "0.5660493", "0.56528705", "0.55985093", "0.5573837", "0.55732846", "0.5572714", "0.55721015", "0.55552113", "0.55295265", "0.5...
0.73259944
0
returns the number of steps that are sampled
def _sample_steps(self): mixture_size = self.parameters['fixed_mixture_size'] if not self.is_correlated_mixture and mixture_size is None: return self.get_steps('monte_carlo') else: return self.get_steps('metropolis')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sampled_timesteps(self) -> int:\n return self.sampled_timesteps", "def number_of_sample_loops(self) -> int:\n return self.__number_of_sample_loops", "def number_of_steps(self) -> int:\n return len(self.step_points)", "def num_trials(self):", "def get_steps_num():\n retur...
[ "0.75682855", "0.75195956", "0.73266625", "0.72938573", "0.7275451", "0.72581595", "0.71938956", "0.7193268", "0.7035799", "0.70202214", "0.69543606", "0.6949648", "0.69149685", "0.6895572", "0.68642855", "0.68349636", "0.6822851", "0.6780394", "0.6769589", "0.6767999", "0.67...
0.65237594
28
sample mixtures with uniform probability yielding single mixtures
def _sample_mixtures(self, steps=None, dtype=np.uint):
    """Sample mixtures with uniform probability, yielding single mixtures.

    :param steps: number of mixtures to draw; defaults to `_sample_steps`
    :param dtype: numpy dtype of the yielded mixture vectors
    """
    count = self._sample_steps if steps is None else steps
    return _sample_binary_mixtures(self, count, dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())", "def _sample_binary_mixtures(model, steps, dtype=np.uint):\n mixture_size = model.parameters['fixed_mixture_size']\n \n if not model.is_correlated_mixture and mixture_si...
[ "0.7214203", "0.70885044", "0.7082292", "0.6908264", "0.68251544", "0.668024", "0.66489446", "0.66139346", "0.6609883", "0.6569002", "0.6538509", "0.6538509", "0.6516518", "0.64834464", "0.64289886", "0.64258486", "0.6417512", "0.6410464", "0.63893205", "0.63742346", "0.63742...
0.7010635
3
calculates statistics of mixtures. Returns a vector with the frequencies at which substrates are present in mixtures and a matrix of correlations among substrates
def mixture_statistics(self, method='auto'):
    """Calculate statistics of mixtures.

    :param method: 'brute-force', 'monte-carlo', 'estimate', or 'auto'
        (which picks a method based on problem size and correlations)
    :raises ValueError: for an unknown method name
    :return: the statistics dict produced by the chosen backend
    """
    if method == 'auto':
        if (self.is_correlated_mixture
                or self.parameters['fixed_mixture_size'] is not None):
            # mixture has correlations => we do Metropolis sampling
            if self.Ns <= self.parameters['brute_force_threshold_Ns']:
                method = 'brute-force'
            else:
                method = 'monte-carlo'
        else:
            # the estimate is exact for mixtures without correlations
            method = 'estimate'

    # both hyphenated and underscored spellings are accepted
    if method in ('brute-force', 'brute_force'):
        return self.mixture_statistics_brute_force()
    if method in ('monte-carlo', 'monte_carlo'):
        return self.mixture_statistics_monte_carlo()
    if method == 'estimate':
        return self.mixture_statistics_estimate()
    raise ValueError('Unknown method `%s` for mixture statistics' % method)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * we...
[ "0.74882096", "0.6869365", "0.63603956", "0.5688248", "0.5665906", "0.54986227", "0.5472821", "0.546352", "0.54602647", "0.54511094", "0.5444543", "0.54376626", "0.5413902", "0.53850305", "0.53826225", "0.5331185", "0.53221416", "0.52429974", "0.52369267", "0.5235607", "0.522...
0.5727949
3
calculates mixture statistics using a brute force algorithm
def mixture_statistics_brute_force(self):
    """Calculate mixture statistics by enumerating all mixtures.

    :return: dict with keys 'mean', 'std', 'var' (per-substrate) and
        'cov' (substrate-substrate covariance matrix)
    """
    norm = 0
    first_moment = np.zeros(self.Ns)
    second_moment = np.zeros((self.Ns, self.Ns))

    # accumulate weighted moments over every mixture
    for c, weight_c in self._iterate_mixtures():
        norm += weight_c
        first_moment += weight_c * c
        second_moment += weight_c * np.outer(c, c)

    # normalize and turn raw moments into mean / covariance
    ci_mean = first_moment / norm
    cij_corr = second_moment / norm - np.outer(ci_mean, ci_mean)
    ci_var = np.diag(cij_corr)

    return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,
            'cov': cij_corr}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_...
[ "0.603266", "0.6025553", "0.5984378", "0.59416246", "0.58981115", "0.5829733", "0.5794666", "0.5727615", "0.57198894", "0.5643767", "0.5639376", "0.56325966", "0.5592785", "0.55927706", "0.55848724", "0.5584382", "0.5581739", "0.55519193", "0.5520184", "0.55121636", "0.551020...
0.70597595
0
calculates mixture statistics using a metropolis algorithm
def mixture_statistics_monte_carlo(self):
    """Calculate mixture statistics using Monte-Carlo sampling.

    Delegates to ``concentration_statistics_monte_carlo`` (defined
    elsewhere); presumably the concentration statistics coincide with
    the mixture statistics for binary mixtures — confirm against the
    base class.
    """
    return self.concentration_statistics_monte_carlo()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * we...
[ "0.7127375", "0.65244097", "0.64762837", "0.62966573", "0.6231615", "0.62085813", "0.6098288", "0.6089708", "0.6025141", "0.6018902", "0.595598", "0.58740145", "0.5856431", "0.584514", "0.5828641", "0.58285016", "0.5824226", "0.5790562", "0.5787351", "0.5778131", "0.57633173"...
0.6240846
4
estimates the mixture statistics
def mixture_statistics_estimate(self):
    """Estimate the mixture statistics analytically.

    For uncorrelated mixtures the result follows directly from the
    substrate probabilities; with correlations, the mean and covariance
    receive a correction linear in the correlation matrix J_ij.

    :return: dict with keys 'mean', 'std', 'var', and 'cov'
    """
    ci_mean = self.substrate_probabilities

    if self.is_correlated_mixture:
        # correct means and covariances to first order in J_ij
        J_ij = self.correlations
        pi_s = ci_mean
        bar_pi_s = 1 - pi_s
        ci_mean = pi_s * (1 + 2*bar_pi_s*np.dot(J_ij, pi_s))
        ci_var = ci_mean * (1 - ci_mean)
        cij_cov = (
            np.diag(ci_var) + 2*np.einsum('ij,i,j->ij', J_ij, ci_var, ci_var)
        )
    else:
        # uncorrelated mixtures
        ci_var = ci_mean * (1 - ci_mean)
        cij_cov = np.diag(ci_var)

    return {'mean': ci_mean, 'std': np.sqrt(ci_var), 'var': ci_var,
            'cov': cij_cov}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iterate over all mixtures\n for c, weight_c in self._iterate_mixtures():\n Z += weight_c \n hist1d += c * we...
[ "0.7492281", "0.6477891", "0.639532", "0.63856214", "0.62459314", "0.6159641", "0.6154703", "0.6007116", "0.5989447", "0.5913637", "0.58870196", "0.5853389", "0.58513975", "0.5809787", "0.57763517", "0.5753387", "0.57354677", "0.5716687", "0.570656", "0.56891644", "0.56604356...
0.6841529
1
return the entropy in the mixture distribution
def mixture_entropy(self):
    """Return the entropy in the mixture distribution.

    Correlated mixtures or a fixed mixture size require numeric
    evaluation (brute force for small problems, Monte-Carlo otherwise);
    the plain uncorrelated case is handled by the parent class.
    """
    size_constrained = self.parameters['fixed_mixture_size'] is not None
    if not (self.is_correlated_mixture or size_constrained):
        # simple case => calculate explicitly via the parent class
        return super(LibraryBinaryNumeric, self).mixture_entropy()

    # complicated case => run brute force or monte carlo
    if self.Ns <= self.parameters['brute_force_threshold_Ns']:
        return self.mixture_entropy_brute_force()
    return self.mixture_entropy_monte_carlo()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mixture_entropy_monte_carlo(self):\n if self.Ns > 63:\n raise ValueError('Mixture entropy estimation only works for fewer '\n 'than 64 substrates.')\n \n # sample mixtures\n base = 2 ** np.arange(0, self.Ns)\n observations = collections....
[ "0.77932525", "0.7712108", "0.7700953", "0.76825845", "0.765476", "0.76226604", "0.7404143", "0.7329493", "0.7265178", "0.72088486", "0.72088397", "0.71819735", "0.7174679", "0.71286225", "0.7125953", "0.70846903", "0.7073766", "0.70677555", "0.70629066", "0.70614755", "0.705...
0.75569916
6
gets the entropy in the mixture distribution using brute force
def mixture_entropy_brute_force(self):
    """Compute the mixture entropy by enumerating all mixtures.

    Naively, p(c) = w(c)/Z with Z = sum_c w(c) and
    H = -sum_c p(c) * log2 p(c).  For numerical stability this is
    rewritten as H = log2(Z) - (1/Z) * sum_c w(c) * log2 w(c).
    """
    partition_sum = 0
    weighted_log_sum = 0

    for _, weight in self._iterate_mixtures():
        # zero-weight mixtures contribute nothing (and log2(0) is undefined)
        if weight > 0:
            partition_sum += weight
            weighted_log_sum += weight * np.log2(weight)

    if partition_sum == 0:
        return 0
    return np.log2(partition_sum) - weighted_log_sum / partition_sum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entropy(temp,pres):\n g_t = liq_g(1,0,temp,pres)\n s = -g_t\n return s", "def calc_entropy(data_set): #calculates total entropy of the dataset\r\n republicans = 0\r\n democrats = 0\r\n total = 0\r\n for data_point in data_set:\r\n party = data_point.dat_party\r\n if party =...
[ "0.69703406", "0.6953341", "0.69284886", "0.69082105", "0.687003", "0.6861443", "0.6851223", "0.6819869", "0.6818582", "0.68088657", "0.677869", "0.67508334", "0.66926396", "0.66545224", "0.6642097", "0.66395456", "0.6627271", "0.6620591", "0.6618552", "0.66109055", "0.658740...
0.7468018
0
gets the entropy in the mixture distribution using Monte-Carlo sampling
def mixture_entropy_monte_carlo(self):
    """Estimate the mixture entropy from Monte-Carlo samples.

    Each sampled mixture is encoded as an integer (one bit per
    substrate), counted in a histogram, and the entropy is estimated
    from the resulting counts.

    :raises ValueError: if Ns > 63, since the bit encoding would
        overflow a 64-bit integer
    """
    if self.Ns > 63:
        raise ValueError('Mixture entropy estimation only works for fewer '
                         'than 64 substrates.')

    # sample mixtures, encoding each as a single integer key
    base = 2 ** np.arange(0, self.Ns)
    observations = collections.Counter()
    for c in self._sample_mixtures():
        observations[np.dot(c, base)] += 1

    # estimate entropy from the histogram
    counts = np.fromiter(observations.values(), np.double, len(observations))

    # Naive implementation of measuring the entropy is
    #   ps = counts / self._sample_steps
    #   H = -np.sum(ps * np.log2(ps))
    # This can be transformed to a more stable implementation:
    log_steps = np.log2(self._sample_steps)
    return -np.sum(counts*(np.log2(counts) - log_steps))/self._sample_steps
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mixture_entropy_brute_force(self):\n Z, sum_wlogw = 0, 0\n\n # Naive implementation of measuring the entropy is\n # p(c) = w(c) / Z with Z = sum_c w(c)\n # H_c = -sum_c p(c) * log2(p(c))\n # This can be transformed to a more stable implementation:\n # H_c =...
[ "0.7468018", "0.69703406", "0.6953341", "0.69284886", "0.69082105", "0.687003", "0.6861443", "0.6851223", "0.6819869", "0.6818582", "0.677869", "0.67508334", "0.66926396", "0.66545224", "0.6642097", "0.66395456", "0.6627271", "0.6620591", "0.6618552", "0.66109055", "0.6587405...
0.68088657
10
calculates the average activity of the receptor as a response to single ligands. `method` can be ['brute_force', 'monte_carlo', 'estimate', 'auto']. If it is 'auto' than the method is chosen automatically based on the problem size.
def receptor_crosstalk(self, method='auto', ret_receptor_activity=False,
                       **kwargs):
    """Calculate the crosstalk (activity covariance) between receptors.

    `method` can be ['brute_force', 'monte_carlo', 'estimate', 'auto'];
    'auto' picks a method based on the problem size.

    :param ret_receptor_activity: also return the mean receptor activity
    :return: q_nm, or (q_n, q_nm) if `ret_receptor_activity` is True
    """
    if method == 'auto':
        if self.Ns <= self.parameters['brute_force_threshold_Ns']:
            method = 'brute_force'
        else:
            method = 'monte_carlo'

    if method == 'estimate':
        # estimate receptor crosstalk directly
        q_nm = self.receptor_crosstalk_estimate(**kwargs)
        if ret_receptor_activity:
            q_n = self.receptor_activity_estimate(**kwargs)
    else:
        # calculate receptor crosstalk from the observed probabilities
        r_n, r_nm = self.receptor_activity(method, ret_correlations=True,
                                           **kwargs)
        q_n = r_n
        # covariance: subtract the product of the marginal activities
        q_nm = r_nm - np.outer(r_n, r_n)
        if kwargs.get('clip', False):
            np.clip(q_nm, 0, 1, q_nm)

    if ret_receptor_activity:
        return q_n, q_nm
    else:
        return q_nm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receptor_activity(self, method='auto', ret_correlations=False, **kwargs):\n if method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n method = 'brute_force'\n else:\n method = 'monte_carlo'\n \n if method...
[ "0.61769646", "0.59880954", "0.58919096", "0.51603234", "0.49008435", "0.48851067", "0.48679084", "0.48065102", "0.47901058", "0.47881097", "0.47419894", "0.47048995", "0.4704445", "0.4698729", "0.46906117", "0.46540016", "0.45315513", "0.45278776", "0.45257092", "0.4525169", ...
0.503741
4
estimates the average activity of the receptor as a response to single ligands. `ret_receptor_activity` determines whether the mean receptor activity will also be returned. `approx_prob` determines whether the probabilities of encountering ligands in mixtures are calculated exactly or only approximative, which should work for small probabilities. `clip` determines whether the estimates will be forced to be in [0, 1]. `ignore_correlations` determines whether correlations in the mixtures will be ignored or not.
def receptor_crosstalk_estimate(self, ret_receptor_activity=False,
                                approx_prob=False, clip=False,
                                ignore_correlations=False):
    """Estimate the crosstalk between receptors responding to mixtures.

    :param ret_receptor_activity: also return the mean receptor activity
    :param approx_prob: use the small-probability approximation instead
        of the exact product formula
    :param clip: force estimates into the interval [0, 1]
    :param ignore_correlations: treat the mixture as uncorrelated even
        when it has correlations
    :return: q_nm, or (q_n, q_nm) if `ret_receptor_activity` is True
    """
    if not ignore_correlations and self.is_correlated_mixture:
        # derive crosstalk from the correlated activity estimate
        # (a dead `raise NotImplementedError` that followed these returns
        # in the original has been removed — it was unreachable)
        r_n, r_nm = self.receptor_activity_estimate(ret_correlations=True,
                                                    approx_prob=approx_prob,
                                                    clip=clip)
        q_nm = r_nm - np.outer(r_n, r_n)
        if clip:
            np.clip(q_nm, 0, 1, q_nm)
        if ret_receptor_activity:
            return r_n, q_nm
        else:
            return q_nm

    S_ni = self.sens_mat
    p_i = self.substrate_probabilities

    if approx_prob:
        # approximate calculation for small p_i
        q_nm = np.einsum('ni,mi,i->nm', S_ni, S_ni, p_i)
        if clip:
            np.clip(q_nm, 0, 1, q_nm)
    else:
        # proper calculation of the probabilities.
        # NOTE: `np.bool` (removed in NumPy 1.24) and `np.product`
        # (removed in NumPy 2.0) were replaced by `bool` and `np.prod`.
        S_ni_mask = S_ni.astype(bool)
        q_nm = np.zeros((self.Nr, self.Nr))
        for n in range(self.Nr):
            for m in range(self.Nr):
                mask = S_ni_mask[n, :] * S_ni_mask[m, :]
                q_nm[n, m] = 1 - np.prod(1 - p_i[mask])

    if ret_receptor_activity:
        q_n = self.receptor_activity_estimate(approx_prob=approx_prob,
                                              clip=clip)
        return q_n, q_nm
    else:
        return q_nm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receptor_activity_estimate(self, ret_correlations=False,\n approx_prob=False, clip=False):\n S_ni = self.sens_mat\n p_i = self.substrate_probabilities\n\n # calculate receptor activity assuming uncorrelated mixtures \n if approx_prob:\n ...
[ "0.64386016", "0.49481305", "0.48385146", "0.4779224", "0.46856108", "0.46765503", "0.4662905", "0.46267968", "0.45856437", "0.45664948", "0.43939775", "0.43856367", "0.43828163", "0.436443", "0.43603247", "0.43355826", "0.43281123", "0.43260527", "0.43019044", "0.42894408", ...
0.5905821
1
calculates the average activity of each receptor `method` can be ['brute_force', 'monte_carlo', 'estimate', 'auto']. If it is 'auto' than the method is chosen automatically based on the problem size.
def receptor_activity(self, method='auto', ret_correlations=False, **kwargs): if method == 'auto': if self.Ns <= self.parameters['brute_force_threshold_Ns']: method = 'brute_force' else: method = 'monte_carlo' if method == 'brute_force' or method == 'brute-force': return self.receptor_activity_brute_force(ret_correlations, **kwargs) elif method == 'monte_carlo' or method == 'monte-carlo': return self.receptor_activity_monte_carlo(ret_correlations, **kwargs) elif method == 'estimate': return self.receptor_activity_estimate(ret_correlations, **kwargs) else: raise ValueError('Unknown method `%s`.' % method)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receptor_score(self, method='auto', multiprocessing=False):\n init_arguments = self.init_arguments\n init_arguments['parameters']['initialize_state']['sensitivity'] = 'exact'\n init_arguments['parameters']['sensitivity_matrix'] = self.sens_mat\n joblist = [(copy.deepcopy(self.init_a...
[ "0.62602246", "0.5744438", "0.531374", "0.5277215", "0.50603354", "0.50047123", "0.49545625", "0.491439", "0.4823283", "0.48020002", "0.47525263", "0.47253197", "0.47229403", "0.46382526", "0.46355662", "0.4616571", "0.45886013", "0.45580858", "0.45328122", "0.45287707", "0.4...
0.6362825
0
calculates the average activity of each receptor
def receptor_activity_brute_force(self, ret_correlations=False): S_ni = self.sens_mat Z = 0 r_n = np.zeros(self.Nr) if ret_correlations: r_nm = np.zeros((self.Nr, self.Nr)) # iterate over all mixtures for c, prob_c in self._iterate_mixtures(): # get the activity vector associated with m a_n = (np.dot(S_ni, c) >= 1) Z += prob_c r_n[a_n] += prob_c if ret_correlations: r_nm[np.outer(a_n, a_n)] += prob_c # return the normalized output r_n /= Z if ret_correlations: r_nm /= Z return r_n, r_nm else: return r_n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avg_num_visits_patient(self):\n pass", "def average(self):\n return self.summation() / self.count()", "def avg_act(self) -> float:\n return torch.mean(self.units.act)", "def averageTime(self):\n \n pass", "def average_reward(self):\n T = len(self)\n return n...
[ "0.6337728", "0.6317526", "0.6239112", "0.6218929", "0.6072164", "0.6055115", "0.60446227", "0.60446227", "0.60446227", "0.6001991", "0.5984581", "0.59661394", "0.5950102", "0.59168255", "0.5873765", "0.58574045", "0.5838681", "0.58351356", "0.58133924", "0.5779711", "0.57638...
0.0
-1
estimates the average activity of each receptor. `ret_correlations` determines whether the correlations between receptors are returned in addition to the mean activations. `approx_prob` determines whether the probabilities of encountering substrates in mixtures are calculated exactly or only approximative, which should work for small probabilities. `clip` determines whether the estimates will be forced to be in [0, 1].
def receptor_activity_estimate(self, ret_correlations=False, approx_prob=False, clip=False): S_ni = self.sens_mat p_i = self.substrate_probabilities # calculate receptor activity assuming uncorrelated mixtures if approx_prob: # approximate calculation for small p_i r_n = np.dot(S_ni, p_i) if clip: np.clip(r_n, 0, 1, r_n) else: # proper calculation of the probabilities r_n = np.zeros(self.Nr) S_ni_mask = S_ni.astype(np.bool) for n in range(self.Nr): r_n[n] = 1 - np.product(1 - p_i[S_ni_mask[n, :]]) if self.is_correlated_mixture: # add linear correction term for correlated mixtures J_ij = self.correlations p_ni = p_i[None, :] * (1 - S_ni) corr1 = 1 + np.einsum('ij,ni,nj->n', J_ij, p_ni, p_ni) corr2 = 1 + np.einsum('ij,i,j->', J_ij, p_i, p_i) barr_n_0 = 1 - r_n barr_n = barr_n_0 * (1 + corr1 - corr2) r_n = 1 - barr_n if clip: np.clip(r_n, 0, 1, r_n) if ret_correlations: # estimate the correlations from the estimated crosstalk q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob, ignore_correlations=True) if approx_prob: r_nm = np.outer(r_n, r_n) + q_nm else: r_nm = 1 - (1 - q_nm)*(1 - np.outer(r_n, r_n)) if self.is_correlated_mixture: # add one correction term for correlated mixtures p_nmi = np.einsum('i,ni,mi->nmi', p_i, 1 - S_ni, 1 - S_ni) corr1 = 1 + np.einsum('ij,nmi,nmj->nm', J_ij, p_nmi, p_nmi) # corr2 = 1 + np.einsum('ij,i,j->', J_ij, p_i, p_i) # this term has already been calculated above and can be reused # convert r_nm_0 (here given as r_nm) into barr_nm_0 barr_nm_0 = 1 - barr_n_0[:, None] - barr_n_0[None, :] + r_nm # correct barr_nm for the correlations J_ij barr_nm = barr_nm_0 * (1 + corr1 - corr2) # convert barr_nm into r_nm r_nm = 1 - barr_n[:, None] - barr_n[None, :] + barr_nm if clip: np.clip(r_nm, 0, 1, r_nm) return r_n, r_nm else: return r_n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receptor_crosstalk_estimate(self, ret_receptor_activity=False,\n approx_prob=False, clip=False,\n ignore_correlations=False):\n if not ignore_correlations and self.is_correlated_mixture:\n r_n, r_nm = self.receptor_activity...
[ "0.59837115", "0.57311416", "0.5284991", "0.5281454", "0.5232522", "0.5206169", "0.5147281", "0.5128546", "0.50398403", "0.50333416", "0.4890107", "0.4862184", "0.48607644", "0.48548672", "0.4842955", "0.48234457", "0.47857088", "0.47831595", "0.47778708", "0.47652474", "0.47...
0.7158738
0
calculate the mutual information. `excitation_method` can be ['brute_force', 'monte_carlo', 'estimate', 'auto'] If it is 'auto' than the excitation_method is chosen automatically based on the problem size. `ret_prob_activity` determines whether the probabilities of the different outputs are returned or not
def mutual_information(self, excitation_method='auto', **kwargs): if excitation_method == 'auto': if self.Ns <= self.parameters['brute_force_threshold_Ns']: excitation_method = 'brute_force' else: excitation_method = 'monte_carlo' if excitation_method == 'brute_force' or excitation_method == 'brute-force': return self.mutual_information_brute_force(**kwargs) elif excitation_method == 'monte_carlo' or excitation_method == 'monte-carlo': return self.mutual_information_monte_carlo(**kwargs) elif excitation_method == 'estimate': return self.mutual_information_estimate(**kwargs) else: raise ValueError('Unknown excitation_method `%s`.' % excitation_method)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutual_information_brute_force(self, ret_prob_activity=False):\n base = 2 ** np.arange(0, self.Nr)\n\n # prob_a contains the probability of finding activity a as an output.\n prob_a = np.zeros(2**self.Nr)\n for c, prob_c in self._iterate_mixtures():\n # get the associated...
[ "0.7677321", "0.6729323", "0.6610971", "0.62965286", "0.5475816", "0.54467", "0.53803253", "0.53383917", "0.5304403", "0.52832675", "0.5273865", "0.52435094", "0.5235883", "0.52353334", "0.51856315", "0.5179638", "0.516652", "0.51329374", "0.5128437", "0.5115408", "0.50964516...
0.78565335
0
calculate the mutual information by constructing all possible mixtures
def mutual_information_brute_force(self, ret_prob_activity=False): base = 2 ** np.arange(0, self.Nr) # prob_a contains the probability of finding activity a as an output. prob_a = np.zeros(2**self.Nr) for c, prob_c in self._iterate_mixtures(): # get the associated output ... a = np.dot(self.sens_mat, c).astype(np.bool) # ... and represent it as a single integer a = np.dot(base, a) prob_a[a] += prob_c # normalize the output to make it a probability distribution prob_a /= prob_a.sum() # calculate the mutual information MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0) if ret_prob_activity: return MI, prob_a else: return MI
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_mixture(self) -> None:\n for mu, sigma in zip(self.mus, self.sigmas):\n self.pdfs.append(norm(mu, sigma))", "def mixture_statistics_brute_force(self):\n \n Z = 0\n hist1d = np.zeros(self.Ns)\n hist2d = np.zeros((self.Ns, self.Ns))\n \n # iter...
[ "0.68045783", "0.6167079", "0.61281043", "0.61196005", "0.60409695", "0.5897231", "0.5874367", "0.5874367", "0.58720165", "0.58610725", "0.5851441", "0.5790839", "0.5739831", "0.57007676", "0.56505895", "0.5636512", "0.56320953", "0.56164134", "0.5601524", "0.5584634", "0.556...
0.6359313
1
calculate the mutual information using a Monte Carlo strategy.
def mutual_information_monte_carlo_extrapolate(self, ret_prob_activity=False): if self.is_correlated_mixture: raise NotImplementedError('Not implemented for correlated mixtures') base = 2 ** np.arange(0, self.Nr) prob_s = self.substrate_probabilities max_steps = self._sample_steps steps, MIs = [], [] # sample mixtures according to the probabilities of finding # substrates count_a = np.zeros(2**self.Nr) step_check = 10000 for step in range(max_steps): # choose a mixture vector according to substrate probabilities m = (np.random.random(self.Ns) < prob_s) # get the associated output ... a = np.dot(self.sens_mat, m).astype(np.bool) # ... and represent it as a single integer a = np.dot(base, a) # increment counter for this output count_a[a] += 1 if step == step_check - 1: # do an extrapolation step # calculate the mutual information from the result pattern prob_a = count_a / step MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0) # save the data steps.append(step) MIs.append(MI) # do the extrapolation if len(steps) >= 3: a2, a1, a0 = MIs[-3:] MI_ext = (a0*a2 - a1*a1)/(a0 - 2*a1 + a2) # MI_ext = self._get_extrapolated_mutual_information(steps, MIs) print((step, MIs[-1], MI_ext)) step_check += 10000 else: # count_a contains the number of times output pattern a was observed. # We can thus construct P_a(a) from count_a. # calculate the mutual information from the result pattern prob_a = count_a / step MI = -sum(pa*np.log2(pa) for pa in prob_a if pa != 0) if ret_prob_activity: return MI, prob_a else: return MI
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_metropolis(self):\n\n # Initialize the posistions for each new Monte Carlo run\n positions = np.random.rand(self.num_p, self.num_d)\n # Initialize the distance matrix\n self.s.positions_distances(positions)\n # check if the wave function is zero\n while True:\n ...
[ "0.67239577", "0.66396886", "0.6380537", "0.6209045", "0.6204946", "0.6074391", "0.6021523", "0.5984577", "0.5833803", "0.58061093", "0.58041483", "0.58038014", "0.58038014", "0.57740736", "0.5725444", "0.57176656", "0.5682403", "0.5668028", "0.5640045", "0.5637014", "0.56200...
0.581717
9
returns a simple estimate of the mutual information. `approx_prob` determines whether the probabilities of encountering substrates in mixtures are calculated exactly or only approximative, which should work for small probabilities.
def mutual_information_estimate(self, approx_prob=False): # this might be not the right approach q_n = self.receptor_activity_estimate(approx_prob=approx_prob) q_nm = self.receptor_crosstalk_estimate(approx_prob=approx_prob) # calculate the approximate mutual information return self._estimate_MI_from_q_values(q_n, q_nm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutual_information_monte_carlo_extrapolate(self, ret_prob_activity=False):\n if self.is_correlated_mixture:\n raise NotImplementedError('Not implemented for correlated mixtures')\n \n base = 2 ** np.arange(0, self.Nr)\n prob_s = self.substrate_probabilities\n\n ...
[ "0.5583002", "0.5569024", "0.55195194", "0.54732513", "0.5439062", "0.5311237", "0.5172197", "0.5165276", "0.51317364", "0.50231713", "0.48275942", "0.4811054", "0.48098183", "0.48028716", "0.4798505", "0.47701705", "0.47494784", "0.47418514", "0.4734358", "0.47275934", "0.47...
0.75532436
0
calculates the usefulness of each receptor, measured by how much information it adds to the total mutual information. `method` determines which method is used to determine the mutual information. `multiprocessing` determines whether multiprocessing is used for determining the mutual informations of all subsystems.
def receptor_score(self, method='auto', multiprocessing=False): init_arguments = self.init_arguments init_arguments['parameters']['initialize_state']['sensitivity'] = 'exact' init_arguments['parameters']['sensitivity_matrix'] = self.sens_mat joblist = [(copy.deepcopy(self.init_arguments), 'mutual_information', {'method': method})] # add one job for each receptor for n in range(self.Nr): init_arguments = self.init_arguments init_arguments['num_receptors'] -= 1 # modify the current state and add it to the job list sens_mat = np.delete(self.sens_mat, n, axis=0) init_arguments['parameters']['sensitivity_matrix'] = sens_mat joblist.append((copy.deepcopy(init_arguments), 'mutual_information', {'method': method})) if multiprocessing: # calculate all results in parallel pool = mp.Pool(processes=self.get_number_of_cores()) results = pool.map(_run_job, joblist) else: # create a generator over which we iterate later results = [_run_job(job) for job in joblist] # find the scores of all receptors scores = results[0] - np.array(results[1:]) return scores
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutual_information(self, excitation_method='auto', **kwargs):\n if excitation_method == 'auto':\n if self.Ns <= self.parameters['brute_force_threshold_Ns']:\n excitation_method = 'brute_force'\n else:\n excitation_method = 'monte_carlo'\n ...
[ "0.6172455", "0.5696483", "0.55845743", "0.5454974", "0.5411215", "0.5398892", "0.53274125", "0.5234939", "0.5197039", "0.5172683", "0.51339173", "0.50398976", "0.4996997", "0.4983228", "0.49595007", "0.49515915", "0.49353585", "0.49350932", "0.49265435", "0.49167734", "0.488...
0.72649634
0
optimizes the current library to maximize the result of the target function. By default, the function returns the best value and the associated interaction matrix as result. `direction` is either 'min' or 'max' and determines whether a minimum or a maximum is sought. `steps` determines how many optimization steps we try `ret_info` determines whether extra information is returned from the optimization `args` is a dictionary of additional arguments that is passed to the target function `method` determines the method used for optimization. Supported are
def optimize_library(self, target, method='descent', direction='max', **kwargs): if method == 'descent': return self.optimize_library_descent(target, direction, **kwargs) elif method == 'descent_multiple' or method == 'descent-multiple': return self.optimize_library_descent_multiple(target, direction, **kwargs) elif method == 'anneal': return self.optimize_library_anneal(target, direction, **kwargs) else: raise ValueError('Unknown optimization method `%s`' % method)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize_library_descent(self, target, direction='max', steps=100,\n multiprocessing=False, ret_info=False,\n args=None):\n # get the target function to call\n target_function = getattr(self, target)\n if args is not None:\n ...
[ "0.7191519", "0.6706924", "0.62114525", "0.5192495", "0.513346", "0.5125227", "0.5031708", "0.5031085", "0.49857607", "0.4900932", "0.4900832", "0.48536816", "0.48484892", "0.48352584", "0.47795326", "0.4738466", "0.47372732", "0.4712714", "0.47082347", "0.46975875", "0.46776...
0.63557446
2
optimizes the current library to maximize the result of the target function using gradient descent. By default, the function returns the best value and the associated interaction matrix as result. `direction` is either 'min' or 'max' and determines whether a minimum or a maximum is sought. `steps` determines how many optimization steps we try `multiprocessing` is a flag deciding whether multiple processes are used to calculate the result. Note that this has an overhead and might actually decrease overall performance for small problems `ret_info` determines whether extra information is returned from the optimization `args` is a dictionary of additional arguments that is passed to the target function
def optimize_library_descent(self, target, direction='max', steps=100, multiprocessing=False, ret_info=False, args=None): # get the target function to call target_function = getattr(self, target) if args is not None: target_function = functools.partial(target_function, **args) # initialize the optimizer value = target_function() value_best, state_best = value, self.sens_mat.copy() if ret_info: # store extra information start_time = time.time() info = {'values': {}} values_count = self.parameters['optimizer_values_count'] values_step = max(1, steps // values_count) if multiprocessing: # run the calculations in multiple processes pool_size = self.get_number_of_cores() pool = mp.Pool(processes=pool_size) if ret_info: values_step = max(1, values_step // pool_size) # iterate for given number of steps for step in range(int(steps) // pool_size): joblist = [] init_arguments = self.init_arguments for _ in range(pool_size): # modify the current state and add it to the job list i = random.randrange(self.sens_mat.size) self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i] params = init_arguments['parameters'] params['sensitivity_matrix'] = self.sens_mat params['initialize_state']['sensitivity'] = 'exact' joblist.append((copy.deepcopy(init_arguments), target)) self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i] # run all the jobs results = pool.map(_run_job, joblist) # find the best result if direction == 'max': res_best = np.argmax(results) if results[res_best] > value_best: value_best = results[res_best] state_best = joblist[res_best][0]['parameters']['sensitivity_matrix'] # use the best state as a basis for the next iteration self.sens_mat = state_best elif direction == 'min': res_best = np.argmin(results) if results[res_best] < value_best: value_best = results[res_best] state_best = joblist[res_best][0]['parameters']['sensitivity_matrix'] # use the best state as a basis for the next iteration self.sens_mat = state_best else: raise ValueError('Unsupported direction `%s`' % 
direction) if ret_info and step % values_step == 0: info['values'][step * pool_size] = results[res_best] else: # run the calculations in this process for step in range(int(steps)): # modify the current state i = random.randrange(self.sens_mat.size) self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i] # get the value of the new state value = target_function() improved = ((direction == 'max' and value > value_best) or (direction == 'min' and value < value_best)) if improved: # save the state as the new best value value_best, state_best = value, self.sens_mat.copy() else: # undo last change self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i] if ret_info and step % values_step == 0: info['values'][step] = value_best # sort the best state and store it in the current object state_best = self.sort_sensitivity_matrix(state_best) self.sens_mat = state_best.copy() if ret_info: info['total_time'] = time.time() - start_time info['states_considered'] = steps info['performance'] = steps / info['total_time'] return value_best, state_best, info else: return value_best, state_best
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize_library_descent_multiple(self, target, direction='max',\n trials=4, multiprocessing=False,\n ret_error=False, **kwargs):\n \n # pass some parameters down to the optimization function to call\n kwargs...
[ "0.651425", "0.60995185", "0.59783125", "0.5357869", "0.5349476", "0.53483707", "0.5300958", "0.5280729", "0.5255571", "0.51577455", "0.51000774", "0.50822264", "0.5080444", "0.50465363", "0.50355375", "0.50104886", "0.4993036", "0.49904832", "0.49328518", "0.49109367", "0.49...
0.77414405
0
optimizes the current library to maximize the result of the target function using gradient descent from `trials` different staring positions. Only the result from the best run will be returned
def optimize_library_descent_multiple(self, target, direction='max', trials=4, multiprocessing=False, ret_error=False, **kwargs): # pass some parameters down to the optimization function to call kwargs['target'] = target kwargs['direction'] = direction # initialize the list of jobs with an optimization job starting from the # current interaction matrix joblist = [(self.init_arguments, 'optimize_library_descent', kwargs)] sens_mat = self.sens_mat #< store matrix to restore it later # set the ensemble of sensitivity matrices to try self.choose_sensitivity_matrix(density='auto') self.parameters['initialize_state']['sensitivity'] = 'ensemble' # add additional jobs with random initial interaction matrices init_arguments = self.init_arguments for _ in range(trials - 1): joblist.append((copy.deepcopy(init_arguments), 'optimize_library_descent', kwargs)) # restore interaction matrix of this object self.sens_mat = sens_mat if multiprocessing: # calculate all results in parallel pool = mp.Pool(processes=self.get_number_of_cores()) result_iter = pool.imap_unordered(_run_job, joblist) else: # create a generator over which we iterate later result_iter = (_run_job(job) for job in joblist) # find the best result by iterating over all results result_best, values = None, [] for result in result_iter: values.append(result[0]) # check whether this run improved the result if result_best is None: result_best = result elif ((direction == 'max' and result[0] > result_best[0]) or (direction == 'min' and result[0] < result_best[0])): result_best = result # sort the best state and store it in the current object state = self.sort_sensitivity_matrix(result_best[1]) self.sens_mat = state.copy() if ret_error: # replace the best value by a tuple of the best value and its error value_best = result_best[0] value_err = np.abs(value_best - np.median(values)) result_best = ((value_best, value_err), ) + result_best[1:] return result_best
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize(\n # trials,\n random_state=SEED):\n\n space = {\n 'max_depth': scope.int(hp.uniform('max_depth', 5, 15)),\n 'subsample': hp.uniform('subsample', 0.03, 1),\n 'learning_rate' : hp.loguniform('learning_rate', np.log(0.005), np.log(0.5)) - 0.0001,...
[ "0.6879043", "0.6779472", "0.6599153", "0.6580296", "0.6474444", "0.6437695", "0.6382017", "0.6354665", "0.62422895", "0.62376463", "0.62362105", "0.6233735", "0.6200726", "0.6200518", "0.61941844", "0.6183736", "0.61756665", "0.61680037", "0.6155385", "0.61435944", "0.614115...
0.6747765
2
optimizes the current library to maximize the result of the target function using simulated annealing. By default, the function returns the best value and the associated interaction matrix as result. `direction` is either 'min' or 'max' and determines whether a minimum or a maximum is sought. `steps` determines how many optimization steps we try `ret_info` determines whether extra information is returned from the optimization `args` is a dictionary of additional arguments that is passed to the target function
def optimize_library_anneal(self, target, direction='max', steps=100, ret_info=False, args=None): # lazy import from .optimizer import ReceptorOptimizerAnnealer # @UnresolvedImport # prepare the class that manages the simulated annealing annealer = ReceptorOptimizerAnnealer(self, target, direction, args, ret_info=ret_info) annealer.steps = int(steps) annealer.Tmax = self.parameters['anneal_Tmax'] annealer.Tmin = self.parameters['anneal_Tmin'] if self.parameters['verbosity'] == 0: annealer.updates = 0 # do the optimization MI, state = annealer.optimize() # sort the best state and store it in the current object state = self.sort_sensitivity_matrix(state) self.sens_mat = state.copy() if ret_info: return MI, state, annealer.info else: return MI, state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optimize_library_descent(self, target, direction='max', steps=100,\n multiprocessing=False, ret_info=False,\n args=None):\n # get the target function to call\n target_function = getattr(self, target)\n if args is not None:\n ...
[ "0.70528316", "0.59501183", "0.5642754", "0.5085643", "0.50599", "0.5013388", "0.5011954", "0.5007961", "0.5003236", "0.49108404", "0.48720664", "0.48720354", "0.48174542", "0.4795054", "0.47906741", "0.47627977", "0.47517008", "0.46950796", "0.46797842", "0.46576664", "0.464...
0.72034293
0
generator function that samples mixtures according to the `model`. `steps` determines how many mixtures are sampled `dtype` determines the dtype of the resulting concentration vector
def _sample_binary_mixtures(model, steps, dtype=np.uint): mixture_size = model.parameters['fixed_mixture_size'] if not model.is_correlated_mixture and mixture_size is None: # use simple monte carlo algorithm prob_s = model.substrate_probabilities for _ in range(int(steps)): # choose a mixture vector according to substrate probabilities yield (np.random.random(model.Ns) < prob_s).astype(dtype) elif mixture_size is None: # go through all mixtures and don't keep the size constant # use metropolis algorithm hi = model.commonness Jij = model.correlations # start with a random concentration vector c = np.random.randint(0, 2, model.Ns).astype(dtype) E_last = -np.dot(np.dot(Jij, c) + hi, c) for _ in range(int(steps)): i = random.randrange(model.Ns) c[i] = 1 - c[i] #< switch the entry Ei = -np.dot(np.dot(Jij, c) + hi, c) if Ei < E_last or random.random() < np.exp(E_last - Ei): # accept the new state E_last = Ei else: # reject the new state and revert to the last one c[i] = 1 - c[i] yield c elif mixture_size == 0: # special case which is not covered by the iteration below c_zero = np.zeros(model.Ns, dtype) for _ in range(model._sample_steps): yield c_zero elif mixture_size == model.Ns: # special case which is not covered by the iteration below c_ones = np.ones(model.Ns, dtype) for _ in range(steps): yield c_ones else: # go through mixtures with keeping their size constant # use metropolis algorithm hi = model.commonness Jij = model.correlations # create random concentration vector with fixed substrate count c = np.r_[np.ones(mixture_size, dtype), np.zeros(model.Ns - mixture_size, dtype)] np.random.shuffle(c) E_last = -np.dot(np.dot(Jij, c) + hi, c) for _ in range(int(steps)): # find the next mixture by swapping two items i0 = random.choice(np.flatnonzero(c == 0)) #< find 0 i1 = random.choice(np.flatnonzero(c)) #< find 1 c[i0], c[i1] = 1, 0 #< swap entries Ei = -np.dot(np.dot(Jij, c) + hi, c) if Ei < E_last or random.random() < np.exp(E_last - Ei): # accept the new state 
E_last = Ei else: # reject the new state and revert to the last one c[i0], c[i1] = 0, 1 yield c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sample_mixtures(self, steps=None, dtype=np.uint):\n if steps is None:\n steps = self._sample_steps\n \n return _sample_binary_mixtures(self, steps, dtype)", "def _iterate_mixtures(self):\n \n if self._iterate_steps > self.parameters['max_steps']:\n ra...
[ "0.7261067", "0.6262869", "0.5684544", "0.56400836", "0.55678874", "0.5419031", "0.5419031", "0.5378836", "0.53703797", "0.5349273", "0.5279914", "0.52595407", "0.5239342", "0.52295643", "0.5188037", "0.51616156", "0.515846", "0.5093254", "0.50914216", "0.5061559", "0.5050774...
0.7653979
0
helper function for optimizing the receptor library using multiprocessing
def _run_job(args): # Note that we do not set the seed of the random number generator because # we already modified the interaction matrix before calling this function # and it does not harm us when all sub processes have the same sequence of # random numbers. # create the object ... obj = LibraryBinaryNumeric(**args[0]) # ... get the method to evaluate ... method = getattr(obj, args[1]) # ... and evaluate it if len(args) > 2: return method(**args[2]) else: return method()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _optimise(self):\n pass", "def computeIntercepts():\n pass", "def propose_optimize():\n pass", "def process():", "def reduce_run():", "def test_3():\n \n\n # Functions wrapped by agents\n def f(in_streams, out_streams):\n multiply_and_add(in_streams[0], out_streams[0],\n ...
[ "0.63033754", "0.59346247", "0.5667658", "0.5634969", "0.5632718", "0.5520529", "0.5416026", "0.5395593", "0.5282536", "0.5226786", "0.52209985", "0.5219927", "0.52054894", "0.5177828", "0.51697385", "0.51571566", "0.51315176", "0.51257294", "0.51191217", "0.5107856", "0.5104...
0.0
-1
test the performance of the brute force and the Monte Carlo method
def performance_test(Ns=15, Nr=3): num = 2**Ns hs = np.random.random(Ns) model = LibraryBinaryNumeric(Ns, Nr, hs) start = time.time() model.mutual_information_brute_force() time_brute_force = time.time() - start print('Brute force: %g sec' % time_brute_force) start = time.time() model.mutual_information_monte_carlo(num) time_monte_carlo = time.time() - start print('Monte carlo: %g sec' % time_monte_carlo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n hash_str = '5411ba21c470e12d49f351a2d240e43618032950'\n salt = '0d71906d0f735e6196c80d0a7cb1748e'\n encrypted_code = 'Ul5SR0ISYFxUXl8OOxITFBFWVlIRQVtRXV4bHQs4ExQREhMUERJDRlhcRxwWZltdQhJaRxFTE0BUQUcUXF1XQV1XFB07EhMUE' \\\n 'RITFBFCQV1fRhsTZVdAQBFhRldSV0BHV0dfGhYbOQ=='\n ...
[ "0.6661952", "0.6618818", "0.64001924", "0.63427407", "0.6263836", "0.6232404", "0.6223552", "0.59330684", "0.59220713", "0.59127235", "0.58657014", "0.58575463", "0.5836921", "0.58361584", "0.58269995", "0.5803118", "0.5761818", "0.573099", "0.57286465", "0.57004774", "0.568...
0.6652606
1
Returns Classroom in good representation for user
def __str__(self): return 'Classroom {} has a capacity of {} persons and ' \ 'has the following equipment: {}.'.format( self.number, str(self.capacity), ', '.join(self.equipment))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n return \"Classroom('{}', {}, {})\".format(self.number, self.capacity,\n str(self.equipment))", "def __str__(self):\n return self.room_name", "def __str__(self):\n return self.room.name", "def json(self):\n return...
[ "0.6388747", "0.58620846", "0.5832608", "0.55203956", "0.5508624", "0.5427314", "0.54227597", "0.5291813", "0.5283396", "0.52249587", "0.5194744", "0.5179897", "0.51505965", "0.51442033", "0.5131981", "0.5128399", "0.51204616", "0.5106739", "0.5105534", "0.5094171", "0.507423...
0.5878091
1
Classroom, Classroom > bool Returns True if first room have bigger capacity then second room
def is_larger(self, room2): return self.capacity > room2.capacity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __gt__(self, other: Card) -> bool:\n return not self.__le__(other)", "def pareto_better(self, other: \"EvalItem\") -> bool:\n return self.size <= other.size and other.result <= self.result", "def __gt__(self, other):\n return self.weight() > other.weight()", "def __gt__(self, other):...
[ "0.61225253", "0.5890521", "0.5887941", "0.58851635", "0.5883985", "0.5871", "0.5871", "0.58323383", "0.57970667", "0.57674", "0.57477105", "0.57477105", "0.5667831", "0.5658871", "0.56532055", "0.5647148", "0.56317246", "0.56295174", "0.5624304", "0.55907315", "0.5584328", ...
0.73677325
0
Classroom, Classroom > list Returns the equipment in first room which is missing in second
def equipment_differences(self, room2): return sorted(list(set(self.equipment).difference(room2.equipment)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_rooms(self, exclude=[]):\n stmt = Session.query(Lesson.room, Lesson.day, Lesson.order,\n Lesson.schedule_id)\n stmt = stmt.group_by(Lesson.room, Lesson.order, Lesson.day, Lesson.schedule_id)\n stmt = stmt.having(func.count(Lesson.room)>1)\n stmt = stmt.filter(not_(L...
[ "0.6358367", "0.6313788", "0.6150663", "0.60647494", "0.5886441", "0.5728688", "0.5633727", "0.5608109", "0.55511093", "0.53285015", "0.5248454", "0.523609", "0.5177248", "0.5149604", "0.51473695", "0.5124947", "0.5094668", "0.50736094", "0.50731784", "0.50704306", "0.5062421...
0.6854201
0
Returns beautiful representation for programmer
def __repr__(self): return "Classroom('{}', {}, {})".format(self.number, self.capacity, str(self.equipment))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n ...
[ "0.7089211", "0.68339634", "0.6794942", "0.67740595", "0.6765707", "0.66930324", "0.66878825", "0.6668747", "0.6645762", "0.6644278", "0.6640405", "0.660601", "0.6604529", "0.657811", "0.6575636", "0.65749127", "0.6557521", "0.65515167", "0.65472", "0.6546359", "0.65233105", ...
0.0
-1
Get arguments from command line
def getArgumentParser(): parser = argparse.ArgumentParser(description="Script for running optimization for the ZH dark photon SR") parser.add_argument('-i', '--infile', dest='infile', help='Input CSV file', default = '/afs/cern.ch/work/s/ssevova/public/dark-photon-atlas/zhdarkphotonml/samples/v09/mc16d_v09_samples.csv') parser.add_argument('-o', '--output', dest='outdir', help='Output directory for plots, selection lists, etc', default='outdir') parser.add_argument('--plotInputs',action='store_true', help='Plot scaled train & test inputs') parser.add_argument('--plotOutputs',action='store_true', help='Plot scaled test outputs for given probability range') parser.add_argument('--lower',help='Lower limit for conditional filtering') parser.add_argument('--upper',help='Upper limit for conditional filtering') return parser
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cli_arguments(self):\n pass", "def get_command_line_args():\n \n parser = argparse.ArgumentParser()\n parser.add_argument('-s', '--symbol', help='symbol query')\n\n # no limit should be input as 'max'\n parser.add_argument('-l',\n '--li...
[ "0.77721745", "0.76546174", "0.7628115", "0.75778574", "0.75472254", "0.7456921", "0.74439055", "0.7440344", "0.74382585", "0.7389421", "0.7364026", "0.73364145", "0.73019594", "0.7261001", "0.72259796", "0.722258", "0.71752733", "0.7173149", "0.7171823", "0.71440315", "0.713...
0.0
-1
Calculate pairwise correlation between features. Extra arguments are passed on to DataFrame.corr()
def correlations(data,data_type, **kwds): # simply call df.corr() to get a table of # correlation values if you do not need # the fancy plotting corrmat = data.corr(**kwds) fig, ax1 = plt.subplots(ncols=1, figsize=(6,5)) opts = {'cmap': plt.get_cmap("RdBu"), 'vmin': -1, 'vmax': +1} heatmap1 = ax1.pcolor(corrmat, **opts) plt.colorbar(heatmap1, ax=ax1) ax1.set_title("Correlations "+data_type) labels = corrmat.columns.values for ax in (ax1,): # shift location of ticks to center of the bins ax.set_xticks(np.arange(len(labels))+0.5, minor=False) ax.set_yticks(np.arange(len(labels))+0.5, minor=False) ax.set_xticklabels(labels, minor=False, ha='right', rotation=70) ax.set_yticklabels(labels, minor=False) plt.tight_layout() plt.savefig("Correlations_"+data_type+".pdf")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pairwise_corr(df1, df2):\n res = []\n for i in range(df2.shape[1]):\n res.append(df1.corrwith(df2.ix[:, i]))\n res = pd.concat(res, axis=1)\n res.columns = df2.columns\n return res", "def calculate_feature_corr(self):\n \n return self.train_data.astype(float).corr(method='...
[ "0.74171937", "0.71074754", "0.6666765", "0.66659594", "0.6619614", "0.6587562", "0.6580525", "0.6578581", "0.65783983", "0.65568876", "0.6511679", "0.6491773", "0.6461151", "0.6446981", "0.64354914", "0.63979685", "0.6396546", "0.6372075", "0.6297028", "0.6269612", "0.623975...
0.0
-1
Parse one access line.
def ParseLine(line): fields = line.split() ip = fields[0] datestr = ' '.join(fields[3:5])[1:-1] timestamp = datetime.strptime( datestr, '%d/%b/%Y:%H:%M:%S %z' ).timestamp() command = fields[5][1:] uri = fields[6] protocol = fields[7][:-1] status = int(fields[8]) size = int(fields[9]) meta = [var.strip('"') for var in fields[11:-1]] return { 'timestamp': timestamp, 'ip': ip, 'command': command, 'uri': uri, 'protocol': protocol, 'status': status, 'size': size, 'meta': meta }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_line(self, line):\n raise NotImplementedError", "def parse(cls, line):\r\n raise NotImplementedError", "def parse_line(self, line, time_shift=0.0):\n # The local variables are rather simple.\n # pylint: disable=too-many-locals\n try:\n (\n mode...
[ "0.6672727", "0.62225854", "0.613628", "0.61219925", "0.59706", "0.5941891", "0.5929608", "0.5854193", "0.5786448", "0.577579", "0.5775706", "0.576628", "0.5740041", "0.5700446", "0.56839246", "0.56678885", "0.56678885", "0.56678885", "0.56678885", "0.56296605", "0.5623283", ...
0.5084293
75