body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
1605276693eb9051c872eef4a87d03c4e37b722287653a36765a8ee550e79efa
def get_one_page(self, url): '\n 请求url返回响应结果\n :param url:\n :return:\n ' try: response = requests.get(url, headers=self.generate_random_ua) if (response.status_code == 200): return response.text except Exception as e: print('连接糗事百科失败,错误原因', e) return None
请求url返回响应结果 :param url: :return:
qiushibaike/qiushibaike.py
get_one_page
jumploop/Python3_WebSpider
1
python
def get_one_page(self, url): '\n 请求url返回响应结果\n :param url:\n :return:\n ' try: response = requests.get(url, headers=self.generate_random_ua) if (response.status_code == 200): return response.text except Exception as e: print('连接糗事百科失败,错误原因', e) return None
def get_one_page(self, url): '\n 请求url返回响应结果\n :param url:\n :return:\n ' try: response = requests.get(url, headers=self.generate_random_ua) if (response.status_code == 200): return response.text except Exception as e: print('连接糗事百科失败,错误原因', e) return None<|docstring|>请求url返回响应结果 :param url: :return:<|endoftext|>
468cd31f5ee4a6c35065727db9860d81356424e4143e3bd354a23a79ac70c5e0
@staticmethod def parse_one_page(contents): '\n 解析页面数据,提取数据\n :param content:\n :return:\n ' html = etree.HTML(contents) items = html.xpath('//div[contains(@id,"qiushi_tag")]') pageStories = [] for item in items: author = item.xpath('.//div[@class="author clearfix"]/a[2]/h2/text()')[0].strip() age = item.xpath('.//div[@class="author clearfix"]/div/text()')[0] content = item.xpath('.//a[1]/div[@class="content"]/span[1]/text()') content = '\n'.join([n.strip() for n in content]) funny_number = item.xpath('.//div[@class="stats"]/span[1]/i/text()')[0] comments_number = item.xpath('.//div[@class="stats"]/span[2]/a/i/text()')[0] pageStories.append([author, age, content, funny_number, comments_number]) return pageStories
解析页面数据,提取数据 :param content: :return:
qiushibaike/qiushibaike.py
parse_one_page
jumploop/Python3_WebSpider
1
python
@staticmethod def parse_one_page(contents): '\n 解析页面数据,提取数据\n :param content:\n :return:\n ' html = etree.HTML(contents) items = html.xpath('//div[contains(@id,"qiushi_tag")]') pageStories = [] for item in items: author = item.xpath('.//div[@class="author clearfix"]/a[2]/h2/text()')[0].strip() age = item.xpath('.//div[@class="author clearfix"]/div/text()')[0] content = item.xpath('.//a[1]/div[@class="content"]/span[1]/text()') content = '\n'.join([n.strip() for n in content]) funny_number = item.xpath('.//div[@class="stats"]/span[1]/i/text()')[0] comments_number = item.xpath('.//div[@class="stats"]/span[2]/a/i/text()')[0] pageStories.append([author, age, content, funny_number, comments_number]) return pageStories
@staticmethod def parse_one_page(contents): '\n 解析页面数据,提取数据\n :param content:\n :return:\n ' html = etree.HTML(contents) items = html.xpath('//div[contains(@id,"qiushi_tag")]') pageStories = [] for item in items: author = item.xpath('.//div[@class="author clearfix"]/a[2]/h2/text()')[0].strip() age = item.xpath('.//div[@class="author clearfix"]/div/text()')[0] content = item.xpath('.//a[1]/div[@class="content"]/span[1]/text()') content = '\n'.join([n.strip() for n in content]) funny_number = item.xpath('.//div[@class="stats"]/span[1]/i/text()')[0] comments_number = item.xpath('.//div[@class="stats"]/span[2]/a/i/text()')[0] pageStories.append([author, age, content, funny_number, comments_number]) return pageStories<|docstring|>解析页面数据,提取数据 :param content: :return:<|endoftext|>
6cb20982d733841adaf6b674acdecba8584ef057abf9023a35a7f2d404ba0af7
def write_to_file_by_csv(self, content): '\n 将数据写入文件\n :param content:\n :return:\n ' with open('result.csv', 'w', newline='', encoding='utf-8') as f: writer = csv.writer(f) writer.writerow(self.fieldnames) writer.writerows(content)
将数据写入文件 :param content: :return:
qiushibaike/qiushibaike.py
write_to_file_by_csv
jumploop/Python3_WebSpider
1
python
def write_to_file_by_csv(self, content): '\n 将数据写入文件\n :param content:\n :return:\n ' with open('result.csv', 'w', newline=, encoding='utf-8') as f: writer = csv.writer(f) writer.writerow(self.fieldnames) writer.writerows(content)
def write_to_file_by_csv(self, content): '\n 将数据写入文件\n :param content:\n :return:\n ' with open('result.csv', 'w', newline=, encoding='utf-8') as f: writer = csv.writer(f) writer.writerow(self.fieldnames) writer.writerows(content)<|docstring|>将数据写入文件 :param content: :return:<|endoftext|>
cf20d2b6a3cccc8ed009f1248e2365f6f3e9c4025bdf0e8ecb5e2b6592c261b9
def write_to_file_by_pandas(self, content): '\n 通过pandas模块将数据写入文件\n :param content:\n :return:\n ' content = [line for line in content] df = pd.DataFrame(content, columns=self.fieldnames) df.to_excel('results.xlsx', index=False)
通过pandas模块将数据写入文件 :param content: :return:
qiushibaike/qiushibaike.py
write_to_file_by_pandas
jumploop/Python3_WebSpider
1
python
def write_to_file_by_pandas(self, content): '\n 通过pandas模块将数据写入文件\n :param content:\n :return:\n ' content = [line for line in content] df = pd.DataFrame(content, columns=self.fieldnames) df.to_excel('results.xlsx', index=False)
def write_to_file_by_pandas(self, content): '\n 通过pandas模块将数据写入文件\n :param content:\n :return:\n ' content = [line for line in content] df = pd.DataFrame(content, columns=self.fieldnames) df.to_excel('results.xlsx', index=False)<|docstring|>通过pandas模块将数据写入文件 :param content: :return:<|endoftext|>
28d90bb2a77dabc51a68a5e3dc43eca233bdd510da58f9b3165133aa47900909
def run(self): '\n 主方法\n :return:\n ' results = [] urls = [self.url.format(i) for i in range(1, 14)] for url in urls: time.sleep(random.randint(1, 3)) content = self.get_one_page(url) item = self.parse_one_page(content) print(item) results.extend(item) self.write_to_file_by_csv(results) self.write_to_file_by_pandas(results)
主方法 :return:
qiushibaike/qiushibaike.py
run
jumploop/Python3_WebSpider
1
python
def run(self): '\n 主方法\n :return:\n ' results = [] urls = [self.url.format(i) for i in range(1, 14)] for url in urls: time.sleep(random.randint(1, 3)) content = self.get_one_page(url) item = self.parse_one_page(content) print(item) results.extend(item) self.write_to_file_by_csv(results) self.write_to_file_by_pandas(results)
def run(self): '\n 主方法\n :return:\n ' results = [] urls = [self.url.format(i) for i in range(1, 14)] for url in urls: time.sleep(random.randint(1, 3)) content = self.get_one_page(url) item = self.parse_one_page(content) print(item) results.extend(item) self.write_to_file_by_csv(results) self.write_to_file_by_pandas(results)<|docstring|>主方法 :return:<|endoftext|>
91aa9b031d9f2d991b1349c39e66ba11ab59dbc73314dbef523223f38a16d1a1
def load_moonshot_semi_supervised(unlabeled_size=0.1, seed=2666): '\n\n Parameters\n ----------\n unlabeled_size :\n (Default value = 0.1)\n seed :\n (Default value = 2666)\n\n Returns\n -------\n\n ' moonshot_labeled = pinot.data.moonshot() moonshot_unlabeled = pinot.data.datasets.UnlabeledDataset().from_txt((os.path.dirname(utils.__file__) + '/moonshot_activity_synthetic.txt'), unlabeled_size, seed=seed)() np.random.seed(seed) moonshot_labeled.extend(moonshot_unlabeled) np.random.shuffle(moonshot_labeled) return moonshot_labeled
Parameters ---------- unlabeled_size : (Default value = 0.1) seed : (Default value = 2666) Returns -------
pinot/data/unlabeled_datasets.py
load_moonshot_semi_supervised
choderalab/pinot
13
python
def load_moonshot_semi_supervised(unlabeled_size=0.1, seed=2666): '\n\n Parameters\n ----------\n unlabeled_size :\n (Default value = 0.1)\n seed :\n (Default value = 2666)\n\n Returns\n -------\n\n ' moonshot_labeled = pinot.data.moonshot() moonshot_unlabeled = pinot.data.datasets.UnlabeledDataset().from_txt((os.path.dirname(utils.__file__) + '/moonshot_activity_synthetic.txt'), unlabeled_size, seed=seed)() np.random.seed(seed) moonshot_labeled.extend(moonshot_unlabeled) np.random.shuffle(moonshot_labeled) return moonshot_labeled
def load_moonshot_semi_supervised(unlabeled_size=0.1, seed=2666): '\n\n Parameters\n ----------\n unlabeled_size :\n (Default value = 0.1)\n seed :\n (Default value = 2666)\n\n Returns\n -------\n\n ' moonshot_labeled = pinot.data.moonshot() moonshot_unlabeled = pinot.data.datasets.UnlabeledDataset().from_txt((os.path.dirname(utils.__file__) + '/moonshot_activity_synthetic.txt'), unlabeled_size, seed=seed)() np.random.seed(seed) moonshot_labeled.extend(moonshot_unlabeled) np.random.shuffle(moonshot_labeled) return moonshot_labeled<|docstring|>Parameters ---------- unlabeled_size : (Default value = 0.1) seed : (Default value = 2666) Returns -------<|endoftext|>
8558f5e8303d32e0afc20e3495ed675c450c4dc2fbd254bcad2dc1023e2f4e72
def load_esol_semi_supervised(unlabeled_size=0.1, seed=2666): '\n\n Parameters\n ----------\n unlabeled_size :\n (Default value = 0.1)\n seed :\n (Default value = 2666)\n\n Returns\n -------\n\n ' esol_labeled = pinot.data.esol() esol_unlabeled = utils.load_unlabeled_data((os.path.dirname(utils.__file__) + '/esol_synthetic_smiles.txt'), unlabeled_size, seed=seed)() np.random.seed(seed) esol_labeled.extend(esol_unlabeled) np.random.shuffle(esol_labeled) return esol_labeled
Parameters ---------- unlabeled_size : (Default value = 0.1) seed : (Default value = 2666) Returns -------
pinot/data/unlabeled_datasets.py
load_esol_semi_supervised
choderalab/pinot
13
python
def load_esol_semi_supervised(unlabeled_size=0.1, seed=2666): '\n\n Parameters\n ----------\n unlabeled_size :\n (Default value = 0.1)\n seed :\n (Default value = 2666)\n\n Returns\n -------\n\n ' esol_labeled = pinot.data.esol() esol_unlabeled = utils.load_unlabeled_data((os.path.dirname(utils.__file__) + '/esol_synthetic_smiles.txt'), unlabeled_size, seed=seed)() np.random.seed(seed) esol_labeled.extend(esol_unlabeled) np.random.shuffle(esol_labeled) return esol_labeled
def load_esol_semi_supervised(unlabeled_size=0.1, seed=2666): '\n\n Parameters\n ----------\n unlabeled_size :\n (Default value = 0.1)\n seed :\n (Default value = 2666)\n\n Returns\n -------\n\n ' esol_labeled = pinot.data.esol() esol_unlabeled = utils.load_unlabeled_data((os.path.dirname(utils.__file__) + '/esol_synthetic_smiles.txt'), unlabeled_size, seed=seed)() np.random.seed(seed) esol_labeled.extend(esol_unlabeled) np.random.shuffle(esol_labeled) return esol_labeled<|docstring|>Parameters ---------- unlabeled_size : (Default value = 0.1) seed : (Default value = 2666) Returns -------<|endoftext|>
47d2dc790cfec4096f40b408ab2e50cfb62bab2b95e8fd1a5e3b770bdda4765c
def version(filename): 'Extract the version number from the dictionary file name.' match = dict_version_re.match(filename) if (match is None): message.warning('Found a dictionary with a malformed name: {}'.format(filename)) return None return tuple((int(n) for n in match.group('version').split('-')))
Extract the version number from the dictionary file name.
luminos/browser/webengine/Spell.py
version
linuxaddict89/luminos
0
python
def version(filename): match = dict_version_re.match(filename) if (match is None): message.warning('Found a dictionary with a malformed name: {}'.format(filename)) return None return tuple((int(n) for n in match.group('version').split('-')))
def version(filename): match = dict_version_re.match(filename) if (match is None): message.warning('Found a dictionary with a malformed name: {}'.format(filename)) return None return tuple((int(n) for n in match.group('version').split('-')))<|docstring|>Extract the version number from the dictionary file name.<|endoftext|>
4e25247a13c08076166afe07a363400cf83a4b7380fed74433a0c991ff1d9027
def dictionary_dir(old=False): "Return the path (str) to the QtWebEngine's dictionaries directory." if (qtutils.version_check('5.10', compiled=False) and (not old)): datapath = standarddir.data() else: datapath = QLibraryInfo.location(QLibraryInfo.DataPath) return os.path.join(datapath, 'qtwebengine_dictionaries')
Return the path (str) to the QtWebEngine's dictionaries directory.
luminos/browser/webengine/Spell.py
dictionary_dir
linuxaddict89/luminos
0
python
def dictionary_dir(old=False): if (qtutils.version_check('5.10', compiled=False) and (not old)): datapath = standarddir.data() else: datapath = QLibraryInfo.location(QLibraryInfo.DataPath) return os.path.join(datapath, 'qtwebengine_dictionaries')
def dictionary_dir(old=False): if (qtutils.version_check('5.10', compiled=False) and (not old)): datapath = standarddir.data() else: datapath = QLibraryInfo.location(QLibraryInfo.DataPath) return os.path.join(datapath, 'qtwebengine_dictionaries')<|docstring|>Return the path (str) to the QtWebEngine's dictionaries directory.<|endoftext|>
06699badca172b0cebd13d6c159964e8a6a2d992c914c5a2201a034c8eba2da8
def local_files(code): 'Return all installed dictionaries for the given code.\n\n The returned dictionaries are sorted by version, therefore the latest will\n be the first element. The list will be empty if no dictionaries are found.\n ' pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code)) matching_dicts = glob.glob(pathname) versioned_dicts = [] for matching_dict in matching_dicts: parsed_version = version(matching_dict) if (parsed_version is not None): filename = os.path.basename(matching_dict) log.config.debug('Found file for dict {}: {}'.format(code, filename)) versioned_dicts.append((parsed_version, filename)) return [filename for (version, filename) in sorted(versioned_dicts, reverse=True)]
Return all installed dictionaries for the given code. The returned dictionaries are sorted by version, therefore the latest will be the first element. The list will be empty if no dictionaries are found.
luminos/browser/webengine/Spell.py
local_files
linuxaddict89/luminos
0
python
def local_files(code): 'Return all installed dictionaries for the given code.\n\n The returned dictionaries are sorted by version, therefore the latest will\n be the first element. The list will be empty if no dictionaries are found.\n ' pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code)) matching_dicts = glob.glob(pathname) versioned_dicts = [] for matching_dict in matching_dicts: parsed_version = version(matching_dict) if (parsed_version is not None): filename = os.path.basename(matching_dict) log.config.debug('Found file for dict {}: {}'.format(code, filename)) versioned_dicts.append((parsed_version, filename)) return [filename for (version, filename) in sorted(versioned_dicts, reverse=True)]
def local_files(code): 'Return all installed dictionaries for the given code.\n\n The returned dictionaries are sorted by version, therefore the latest will\n be the first element. The list will be empty if no dictionaries are found.\n ' pathname = os.path.join(dictionary_dir(), '{}*.bdic'.format(code)) matching_dicts = glob.glob(pathname) versioned_dicts = [] for matching_dict in matching_dicts: parsed_version = version(matching_dict) if (parsed_version is not None): filename = os.path.basename(matching_dict) log.config.debug('Found file for dict {}: {}'.format(code, filename)) versioned_dicts.append((parsed_version, filename)) return [filename for (version, filename) in sorted(versioned_dicts, reverse=True)]<|docstring|>Return all installed dictionaries for the given code. The returned dictionaries are sorted by version, therefore the latest will be the first element. The list will be empty if no dictionaries are found.<|endoftext|>
2d2792aeaf637ce674081958e01b045c74ce5269f6005e5c96b58ae0ad00d193
def local_filename(code): 'Return the newest installed dictionary for the given code.\n\n Return the filename of the installed dictionary with the highest version\n number or None if the dictionary is not installed.\n ' all_installed = local_files(code) return (os.path.splitext(all_installed[0])[0] if all_installed else None)
Return the newest installed dictionary for the given code. Return the filename of the installed dictionary with the highest version number or None if the dictionary is not installed.
luminos/browser/webengine/Spell.py
local_filename
linuxaddict89/luminos
0
python
def local_filename(code): 'Return the newest installed dictionary for the given code.\n\n Return the filename of the installed dictionary with the highest version\n number or None if the dictionary is not installed.\n ' all_installed = local_files(code) return (os.path.splitext(all_installed[0])[0] if all_installed else None)
def local_filename(code): 'Return the newest installed dictionary for the given code.\n\n Return the filename of the installed dictionary with the highest version\n number or None if the dictionary is not installed.\n ' all_installed = local_files(code) return (os.path.splitext(all_installed[0])[0] if all_installed else None)<|docstring|>Return the newest installed dictionary for the given code. Return the filename of the installed dictionary with the highest version number or None if the dictionary is not installed.<|endoftext|>
4386382a6a7715683cc7cda7d185d020555d69add1b3d183570d30e4e77048d6
def init(): 'Initialize the dictionary path if supported.' if qtutils.version_check('5.10', compiled=False): new_dir = dictionary_dir() old_dir = dictionary_dir(old=True) os.environ['QTWEBENGINE_DICTIONARIES_PATH'] = new_dir try: if (os.path.exists(old_dir) and (not os.path.exists(new_dir))): shutil.copytree(old_dir, new_dir) except OSError: log.misc.exception('Failed to copy old dictionaries')
Initialize the dictionary path if supported.
luminos/browser/webengine/Spell.py
init
linuxaddict89/luminos
0
python
def init(): if qtutils.version_check('5.10', compiled=False): new_dir = dictionary_dir() old_dir = dictionary_dir(old=True) os.environ['QTWEBENGINE_DICTIONARIES_PATH'] = new_dir try: if (os.path.exists(old_dir) and (not os.path.exists(new_dir))): shutil.copytree(old_dir, new_dir) except OSError: log.misc.exception('Failed to copy old dictionaries')
def init(): if qtutils.version_check('5.10', compiled=False): new_dir = dictionary_dir() old_dir = dictionary_dir(old=True) os.environ['QTWEBENGINE_DICTIONARIES_PATH'] = new_dir try: if (os.path.exists(old_dir) and (not os.path.exists(new_dir))): shutil.copytree(old_dir, new_dir) except OSError: log.misc.exception('Failed to copy old dictionaries')<|docstring|>Initialize the dictionary path if supported.<|endoftext|>
cc06da47a12d635c870c86ea64d9de9bac629bc7b649a4bea7e5c8b7be89802d
def customer_image_file_path(instance, file_name): 'Generate file path for new customer image' ext = file_name.split('.')[(- 1)] file_name = f'{uuid.uuid4()}.{ext}' return os.path.join('images/', file_name)
Generate file path for new customer image
billing_shop/apps/clients/models/clients.py
customer_image_file_path
sandoval19/build_crew
0
python
def customer_image_file_path(instance, file_name): ext = file_name.split('.')[(- 1)] file_name = f'{uuid.uuid4()}.{ext}' return os.path.join('images/', file_name)
def customer_image_file_path(instance, file_name): ext = file_name.split('.')[(- 1)] file_name = f'{uuid.uuid4()}.{ext}' return os.path.join('images/', file_name)<|docstring|>Generate file path for new customer image<|endoftext|>
0428aa6039ba9cb7173e6d7d9ffc88d41299ddbe8115f6774d5162b057a95259
def generate_unique_anonymous_username(): '\n Generate an unique username for a player. Check in database if the username already exists.\n TODO: check in db if a user with the generated username already exists\n ' unique_id = get_random_string(length=10) new_username = ('u_%s' % unique_id) return new_username
Generate an unique username for a player. Check in database if the username already exists. TODO: check in db if a user with the generated username already exists
web/utils.py
generate_unique_anonymous_username
NejcZupec/tictactoe
1
python
def generate_unique_anonymous_username(): '\n Generate an unique username for a player. Check in database if the username already exists.\n TODO: check in db if a user with the generated username already exists\n ' unique_id = get_random_string(length=10) new_username = ('u_%s' % unique_id) return new_username
def generate_unique_anonymous_username(): '\n Generate an unique username for a player. Check in database if the username already exists.\n TODO: check in db if a user with the generated username already exists\n ' unique_id = get_random_string(length=10) new_username = ('u_%s' % unique_id) return new_username<|docstring|>Generate an unique username for a player. Check in database if the username already exists. TODO: check in db if a user with the generated username already exists<|endoftext|>
5212bc37b7af3f6478baf91708147f5f4e972b7868316ad29e720144910d0f48
def create_new_game(p1_type, p2_type): '\n Generate two random players and create a new Game instance.\n ' player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type) player2 = Player.objects.create(username=generate_unique_anonymous_username(), type=p2_type) return Game.objects.create(player1=player1, player2=player2)
Generate two random players and create a new Game instance.
web/utils.py
create_new_game
NejcZupec/tictactoe
1
python
def create_new_game(p1_type, p2_type): '\n \n ' player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type) player2 = Player.objects.create(username=generate_unique_anonymous_username(), type=p2_type) return Game.objects.create(player1=player1, player2=player2)
def create_new_game(p1_type, p2_type): '\n \n ' player1 = Player.objects.create(username=generate_unique_anonymous_username(), type=p1_type) player2 = Player.objects.create(username=generate_unique_anonymous_username(), type=p2_type) return Game.objects.create(player1=player1, player2=player2)<|docstring|>Generate two random players and create a new Game instance.<|endoftext|>
f4f7acfacc3270e66c237768148fcad3edc883902e78fe9af47ac151817f3410
def download_content(url, dst, proxy=None, verbose=True): "\n\n Download web content.\n\n Parameters\n ----------\n url: str\n Content url.\n\n dst: str\n Destination for file saving.\n\n proxy: dict\n Dictionary with 'https' as key and a string indicating\n the https proxy as value. Defaults to None, indicating\n that the env variable https_proxy will be searched. In\n case of not found, the proxy will be set to empty.\n\n verbose: bool\n Controls whether logging should be done on console.\n\n " import sys import shutil import requests if verbose: requests.packages.urllib3.add_stderr_logger() if verbose: print('Downloading from {}'.format(url)) if (not proxy): try: proxy = dict(https=os.environ['https_proxy']) except: print('https proxy not used.') __res = requests.get(url, verify=False, stream=True, proxies=None) __res.raw.decode_content = True with open(dst, 'wb') as __f: (__l, __t) = (0, __res.headers.get('content-length')) for __chunk in __res.iter_content(chunk_size=4096): if __chunk: __f.write(__chunk) __l += len(__chunk) if verbose: sys.stdout.write('Progress: {}/{}\r'.format(__l, __t)) print()
Download web content. Parameters ---------- url: str Content url. dst: str Destination for file saving. proxy: dict Dictionary with 'https' as key and a string indicating the https proxy as value. Defaults to None, indicating that the env variable https_proxy will be searched. In case of not found, the proxy will be set to empty. verbose: bool Controls whether logging should be done on console.
lib/utils/utils.py
download_content
jonathanzjl/cam-vision
0
python
def download_content(url, dst, proxy=None, verbose=True): "\n\n Download web content.\n\n Parameters\n ----------\n url: str\n Content url.\n\n dst: str\n Destination for file saving.\n\n proxy: dict\n Dictionary with 'https' as key and a string indicating\n the https proxy as value. Defaults to None, indicating\n that the env variable https_proxy will be searched. In\n case of not found, the proxy will be set to empty.\n\n verbose: bool\n Controls whether logging should be done on console.\n\n " import sys import shutil import requests if verbose: requests.packages.urllib3.add_stderr_logger() if verbose: print('Downloading from {}'.format(url)) if (not proxy): try: proxy = dict(https=os.environ['https_proxy']) except: print('https proxy not used.') __res = requests.get(url, verify=False, stream=True, proxies=None) __res.raw.decode_content = True with open(dst, 'wb') as __f: (__l, __t) = (0, __res.headers.get('content-length')) for __chunk in __res.iter_content(chunk_size=4096): if __chunk: __f.write(__chunk) __l += len(__chunk) if verbose: sys.stdout.write('Progress: {}/{}\r'.format(__l, __t)) print()
def download_content(url, dst, proxy=None, verbose=True): "\n\n Download web content.\n\n Parameters\n ----------\n url: str\n Content url.\n\n dst: str\n Destination for file saving.\n\n proxy: dict\n Dictionary with 'https' as key and a string indicating\n the https proxy as value. Defaults to None, indicating\n that the env variable https_proxy will be searched. In\n case of not found, the proxy will be set to empty.\n\n verbose: bool\n Controls whether logging should be done on console.\n\n " import sys import shutil import requests if verbose: requests.packages.urllib3.add_stderr_logger() if verbose: print('Downloading from {}'.format(url)) if (not proxy): try: proxy = dict(https=os.environ['https_proxy']) except: print('https proxy not used.') __res = requests.get(url, verify=False, stream=True, proxies=None) __res.raw.decode_content = True with open(dst, 'wb') as __f: (__l, __t) = (0, __res.headers.get('content-length')) for __chunk in __res.iter_content(chunk_size=4096): if __chunk: __f.write(__chunk) __l += len(__chunk) if verbose: sys.stdout.write('Progress: {}/{}\r'.format(__l, __t)) print()<|docstring|>Download web content. Parameters ---------- url: str Content url. dst: str Destination for file saving. proxy: dict Dictionary with 'https' as key and a string indicating the https proxy as value. Defaults to None, indicating that the env variable https_proxy will be searched. In case of not found, the proxy will be set to empty. verbose: bool Controls whether logging should be done on console.<|endoftext|>
11286e6b7b205b27dccb87e3e2d2031580bb0e893071a29a28e7b1ce4b757a67
def download_yoolov3tiny_weights(dst, proxy=None, verbose=True): "\n\n Download YOLOv3-Tiny weight file from official darknet\n website.\n\n Parameters\n ----------\n dst: str\n Destination for file saving.\n\n proxy: dict\n Dictionary with 'https' as key and a string indicating\n the https proxy as value. Defaults to None, indicating\n that the env variable https_proxy will be searched. In\n case of not found, the proxy will be set to empty.\n\n verbose: bool\n Controls whether logging should be done on console.\n\n " __url = 'https://pjreddie.com/media/files/yolov3-tiny.weights' download_content(__url, dst, proxy, verbose)
Download YOLOv3-Tiny weight file from official darknet website. Parameters ---------- dst: str Destination for file saving. proxy: dict Dictionary with 'https' as key and a string indicating the https proxy as value. Defaults to None, indicating that the env variable https_proxy will be searched. In case of not found, the proxy will be set to empty. verbose: bool Controls whether logging should be done on console.
lib/utils/utils.py
download_yoolov3tiny_weights
jonathanzjl/cam-vision
0
python
def download_yoolov3tiny_weights(dst, proxy=None, verbose=True): "\n\n Download YOLOv3-Tiny weight file from official darknet\n website.\n\n Parameters\n ----------\n dst: str\n Destination for file saving.\n\n proxy: dict\n Dictionary with 'https' as key and a string indicating\n the https proxy as value. Defaults to None, indicating\n that the env variable https_proxy will be searched. In\n case of not found, the proxy will be set to empty.\n\n verbose: bool\n Controls whether logging should be done on console.\n\n " __url = 'https://pjreddie.com/media/files/yolov3-tiny.weights' download_content(__url, dst, proxy, verbose)
def download_yoolov3tiny_weights(dst, proxy=None, verbose=True): "\n\n Download YOLOv3-Tiny weight file from official darknet\n website.\n\n Parameters\n ----------\n dst: str\n Destination for file saving.\n\n proxy: dict\n Dictionary with 'https' as key and a string indicating\n the https proxy as value. Defaults to None, indicating\n that the env variable https_proxy will be searched. In\n case of not found, the proxy will be set to empty.\n\n verbose: bool\n Controls whether logging should be done on console.\n\n " __url = 'https://pjreddie.com/media/files/yolov3-tiny.weights' download_content(__url, dst, proxy, verbose)<|docstring|>Download YOLOv3-Tiny weight file from official darknet website. Parameters ---------- dst: str Destination for file saving. proxy: dict Dictionary with 'https' as key and a string indicating the https proxy as value. Defaults to None, indicating that the env variable https_proxy will be searched. In case of not found, the proxy will be set to empty. verbose: bool Controls whether logging should be done on console.<|endoftext|>
b23020a4a5323bac9d34b65f69c299dd28e196bee8ffce1b50cf72f298e474fb
def print_mat(mat, width=10, prec=4): '\n A nice printer for floating point\n matrices.\n\n Parameters\n ----------\n mat: 2D matrix\n An input 2D matrix to print.\n\n width: int\n Minimum width for each element to print.\n\n prec: int\n Floating point precision for each element\n to print.\n\n ' for __indx in range(mat.shape[0]): __str = ('{:{width}.{prec}f} ' * mat.shape[1]) print(__str.format(*mat[(__indx, :)], width=width, prec=prec))
A nice printer for floating point matrices. Parameters ---------- mat: 2D matrix An input 2D matrix to print. width: int Minimum width for each element to print. prec: int Floating point precision for each element to print.
lib/utils/utils.py
print_mat
jonathanzjl/cam-vision
0
python
def print_mat(mat, width=10, prec=4): '\n A nice printer for floating point\n matrices.\n\n Parameters\n ----------\n mat: 2D matrix\n An input 2D matrix to print.\n\n width: int\n Minimum width for each element to print.\n\n prec: int\n Floating point precision for each element\n to print.\n\n ' for __indx in range(mat.shape[0]): __str = ('{:{width}.{prec}f} ' * mat.shape[1]) print(__str.format(*mat[(__indx, :)], width=width, prec=prec))
def print_mat(mat, width=10, prec=4): '\n A nice printer for floating point\n matrices.\n\n Parameters\n ----------\n mat: 2D matrix\n An input 2D matrix to print.\n\n width: int\n Minimum width for each element to print.\n\n prec: int\n Floating point precision for each element\n to print.\n\n ' for __indx in range(mat.shape[0]): __str = ('{:{width}.{prec}f} ' * mat.shape[1]) print(__str.format(*mat[(__indx, :)], width=width, prec=prec))<|docstring|>A nice printer for floating point matrices. Parameters ---------- mat: 2D matrix An input 2D matrix to print. width: int Minimum width for each element to print. prec: int Floating point precision for each element to print.<|endoftext|>
f146be34658f0bb4f992c8fa2b82cb087fe395fbeeed361e7f95969654315f44
def read_txt_as_strs(txt_path, strip=' ', cmnt=None): '\n\n Read a txt file. Each line will be treated\n as a string.\n\n Empty lines will be skipped. Spaces will be\n automatically stripped.\n\n Parameters\n ----------\n txt_path: str\n Path to the txt file.\n\n strip: bool\n Character(s) stripped from the beginning and\n the end of each line. Defaults to whitespace.\n Use `None` to indicate no-op.\n\n cmnt: str\n Comment character. If a line (after stripping,\n if strip character(s) is not `None`) starts with\n `cmnt` will not be read.\n\n Returns\n ----------\n list\n A list of strings, where each string represents\n a line in the txt file.\n\n ' __lines = [] with open(txt_path, 'r') as __f: for __l in __f.readlines(): if (strip is not None): __l = __l.strip().strip(strip) if (not __l): continue if (cmnt is not None): if __l.startswith(cmnt): continue __lines.append(__l.strip('\n')) return __lines
Read a txt file. Each line will be treated as a string. Empty lines will be skipped. Spaces will be automatically stripped. Parameters ---------- txt_path: str Path to the txt file. strip: bool Character(s) stripped from the beginning and the end of each line. Defaults to whitespace. Use `None` to indicate no-op. cmnt: str Comment character. If a line (after stripping, if strip character(s) is not `None`) starts with `cmnt` will not be read. Returns ---------- list A list of strings, where each string represents a line in the txt file.
lib/utils/utils.py
read_txt_as_strs
jonathanzjl/cam-vision
0
python
def read_txt_as_strs(txt_path, strip=' ', cmnt=None): '\n\n Read a txt file. Each line will be treated\n as a string.\n\n Empty lines will be skipped. Spaces will be\n automatically stripped.\n\n Parameters\n ----------\n txt_path: str\n Path to the txt file.\n\n strip: bool\n Character(s) stripped from the beginning and\n the end of each line. Defaults to whitespace.\n Use `None` to indicate no-op.\n\n cmnt: str\n Comment character. If a line (after stripping,\n if strip character(s) is not `None`) starts with\n `cmnt` will not be read.\n\n Returns\n ----------\n list\n A list of strings, where each string represents\n a line in the txt file.\n\n ' __lines = [] with open(txt_path, 'r') as __f: for __l in __f.readlines(): if (strip is not None): __l = __l.strip().strip(strip) if (not __l): continue if (cmnt is not None): if __l.startswith(cmnt): continue __lines.append(__l.strip('\n')) return __lines
def read_txt_as_strs(txt_path, strip=' ', cmnt=None): '\n\n Read a txt file. Each line will be treated\n as a string.\n\n Empty lines will be skipped. Spaces will be\n automatically stripped.\n\n Parameters\n ----------\n txt_path: str\n Path to the txt file.\n\n strip: bool\n Character(s) stripped from the beginning and\n the end of each line. Defaults to whitespace.\n Use `None` to indicate no-op.\n\n cmnt: str\n Comment character. If a line (after stripping,\n if strip character(s) is not `None`) starts with\n `cmnt` will not be read.\n\n Returns\n ----------\n list\n A list of strings, where each string represents\n a line in the txt file.\n\n ' __lines = [] with open(txt_path, 'r') as __f: for __l in __f.readlines(): if (strip is not None): __l = __l.strip().strip(strip) if (not __l): continue if (cmnt is not None): if __l.startswith(cmnt): continue __lines.append(__l.strip('\n')) return __lines<|docstring|>Read a txt file. Each line will be treated as a string. Empty lines will be skipped. Spaces will be automatically stripped. Parameters ---------- txt_path: str Path to the txt file. strip: bool Character(s) stripped from the beginning and the end of each line. Defaults to whitespace. Use `None` to indicate no-op. cmnt: str Comment character. If a line (after stripping, if strip character(s) is not `None`) starts with `cmnt` will not be read. Returns ---------- list A list of strings, where each string represents a line in the txt file.<|endoftext|>
dd49f3fe44b580c35b0d9d171b7347d5b675d4644a388ae414f1136addfc4a04
def load_img(img_path, target_size, normalize=True): '\n\n Load image for TF prediction mode.\n\n Parameters\n ----------\n img_path: str\n Path to image file.\n\n target_size: int\n Target square size for image resizing.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n Returns\n ----------\n np.ndarray\n Tensor with rank 4, to be used for\n TF model prediction.\n\n ' img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) if (target_size is not None): img = cv2.resize(img, dsize=(target_size, target_size)) if normalize: img = (img.astype(np.float32) / 255.0) return np.expand_dims(img.astype(np.float32), axis=0)
Load image for TF prediction mode. Parameters ---------- img_path: str Path to image file. target_size: int Target square size for image resizing. normalize: bool Whether input image should be divided by 255. Returns ---------- np.ndarray Tensor with rank 4, to be used for TF model prediction.
lib/utils/utils.py
load_img
jonathanzjl/cam-vision
0
python
def load_img(img_path, target_size, normalize=True): '\n\n Load image for TF prediction mode.\n\n Parameters\n ----------\n img_path: str\n Path to image file.\n\n target_size: int\n Target square size for image resizing.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n Returns\n ----------\n np.ndarray\n Tensor with rank 4, to be used for\n TF model prediction.\n\n ' img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) if (target_size is not None): img = cv2.resize(img, dsize=(target_size, target_size)) if normalize: img = (img.astype(np.float32) / 255.0) return np.expand_dims(img.astype(np.float32), axis=0)
def load_img(img_path, target_size, normalize=True): '\n\n Load image for TF prediction mode.\n\n Parameters\n ----------\n img_path: str\n Path to image file.\n\n target_size: int\n Target square size for image resizing.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n Returns\n ----------\n np.ndarray\n Tensor with rank 4, to be used for\n TF model prediction.\n\n ' img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) if (target_size is not None): img = cv2.resize(img, dsize=(target_size, target_size)) if normalize: img = (img.astype(np.float32) / 255.0) return np.expand_dims(img.astype(np.float32), axis=0)<|docstring|>Load image for TF prediction mode. Parameters ---------- img_path: str Path to image file. target_size: int Target square size for image resizing. normalize: bool Whether input image should be divided by 255. Returns ---------- np.ndarray Tensor with rank 4, to be used for TF model prediction.<|endoftext|>
18933248c5606bef95d8395c9a715b9f622c51cb0cddae7e84b3bad1b4750726
def make_predict_inp(img, target_size=None, normalize=True, permute_br=True, letter_box=None, to_channel_first=False): '\n\n Transform an image for prediction mode. Pixel\n values will be rescaled to between 0 and 1.\n\n Parameters\n ----------\n img: np.ndarray\n An input image array. Assumed to be RGB image.\n\n target_size: int\n Target square size for image resizing. Defaults\n to None, i.e. no resizing.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n permute_br: bool\n Whether permutation of the Blue and Red\n channels should be performed. This is generally\n needed when the input image is read using OpenCV,\n since OpenCV uses BGR ordering, while most neural\n networks assumes input to be RGB ordering. Defaults\n to True.\n\n letter_box: tuple or None\n Side length and fill value of the square box when\n performing letter box transformation of the image.\n Default to None indicating no letter box transformation.\n\n to_channel_first: bool\n When set to true, the input image will be\n converted to `channel_first` ordering. Defaults to\n False.\n\n Returns\n ----------\n np.ndarray\n Tensor with rank 4, to be used for\n model prediction.\n\n ' if target_size: img = cv2.resize(img, dsize=(target_size, target_size)) if normalize: img = (img.astype(np.float32) / 255.0) if permute_br: (img[(:, :, 0)], img[(:, :, 2)]) = (img[(:, :, 2)], img[(:, :, 0)].copy()) if letter_box: (img, shift, ratio) = letterbox_image(img, letter_box[0], fill=letter_box[1], normalize=False) img = np.expand_dims(img.astype(np.float32), axis=0) if to_channel_first: img = img.transpose(0, 3, 1, 2) if letter_box: return (img, shift, ratio) return img
Transform an image for prediction mode. Pixel values will be rescaled to between 0 and 1. Parameters ---------- img: np.ndarray An input image array. Assumed to be RGB image. target_size: int Target square size for image resizing. Defaults to None, i.e. no resizing. normalize: bool Whether input image should be divided by 255. permute_br: bool Whether permutation of the Blue and Red channels should be performed. This is generally needed when the input image is read using OpenCV, since OpenCV uses BGR ordering, while most neural networks assumes input to be RGB ordering. Defaults to True. letter_box: tuple or None Side length and fill value of the square box when performing letter box transformation of the image. Default to None indicating no letter box transformation. to_channel_first: bool When set to true, the input image will be converted to `channel_first` ordering. Defaults to False. Returns ---------- np.ndarray Tensor with rank 4, to be used for model prediction.
lib/utils/utils.py
make_predict_inp
jonathanzjl/cam-vision
0
python
def make_predict_inp(img, target_size=None, normalize=True, permute_br=True, letter_box=None, to_channel_first=False): '\n\n Transform an image for prediction mode. Pixel\n values will be rescaled to between 0 and 1.\n\n Parameters\n ----------\n img: np.ndarray\n An input image array. Assumed to be RGB image.\n\n target_size: int\n Target square size for image resizing. Defaults\n to None, i.e. no resizing.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n permute_br: bool\n Whether permutation of the Blue and Red\n channels should be performed. This is generally\n needed when the input image is read using OpenCV,\n since OpenCV uses BGR ordering, while most neural\n networks assumes input to be RGB ordering. Defaults\n to True.\n\n letter_box: tuple or None\n Side length and fill value of the square box when\n performing letter box transformation of the image.\n Default to None indicating no letter box transformation.\n\n to_channel_first: bool\n When set to true, the input image will be\n converted to `channel_first` ordering. Defaults to\n False.\n\n Returns\n ----------\n np.ndarray\n Tensor with rank 4, to be used for\n model prediction.\n\n ' if target_size: img = cv2.resize(img, dsize=(target_size, target_size)) if normalize: img = (img.astype(np.float32) / 255.0) if permute_br: (img[(:, :, 0)], img[(:, :, 2)]) = (img[(:, :, 2)], img[(:, :, 0)].copy()) if letter_box: (img, shift, ratio) = letterbox_image(img, letter_box[0], fill=letter_box[1], normalize=False) img = np.expand_dims(img.astype(np.float32), axis=0) if to_channel_first: img = img.transpose(0, 3, 1, 2) if letter_box: return (img, shift, ratio) return img
def make_predict_inp(img, target_size=None, normalize=True, permute_br=True, letter_box=None, to_channel_first=False): '\n\n Transform an image for prediction mode. Pixel\n values will be rescaled to between 0 and 1.\n\n Parameters\n ----------\n img: np.ndarray\n An input image array. Assumed to be RGB image.\n\n target_size: int\n Target square size for image resizing. Defaults\n to None, i.e. no resizing.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n permute_br: bool\n Whether permutation of the Blue and Red\n channels should be performed. This is generally\n needed when the input image is read using OpenCV,\n since OpenCV uses BGR ordering, while most neural\n networks assumes input to be RGB ordering. Defaults\n to True.\n\n letter_box: tuple or None\n Side length and fill value of the square box when\n performing letter box transformation of the image.\n Default to None indicating no letter box transformation.\n\n to_channel_first: bool\n When set to true, the input image will be\n converted to `channel_first` ordering. Defaults to\n False.\n\n Returns\n ----------\n np.ndarray\n Tensor with rank 4, to be used for\n model prediction.\n\n ' if target_size: img = cv2.resize(img, dsize=(target_size, target_size)) if normalize: img = (img.astype(np.float32) / 255.0) if permute_br: (img[(:, :, 0)], img[(:, :, 2)]) = (img[(:, :, 2)], img[(:, :, 0)].copy()) if letter_box: (img, shift, ratio) = letterbox_image(img, letter_box[0], fill=letter_box[1], normalize=False) img = np.expand_dims(img.astype(np.float32), axis=0) if to_channel_first: img = img.transpose(0, 3, 1, 2) if letter_box: return (img, shift, ratio) return img<|docstring|>Transform an image for prediction mode. Pixel values will be rescaled to between 0 and 1. Parameters ---------- img: np.ndarray An input image array. Assumed to be RGB image. target_size: int Target square size for image resizing. Defaults to None, i.e. no resizing. 
normalize: bool Whether input image should be divided by 255. permute_br: bool Whether permutation of the Blue and Red channels should be performed. This is generally needed when the input image is read using OpenCV, since OpenCV uses BGR ordering, while most neural networks assumes input to be RGB ordering. Defaults to True. letter_box: tuple or None Side length and fill value of the square box when performing letter box transformation of the image. Default to None indicating no letter box transformation. to_channel_first: bool When set to true, the input image will be converted to `channel_first` ordering. Defaults to False. Returns ---------- np.ndarray Tensor with rank 4, to be used for model prediction.<|endoftext|>
7929c0099a2e5f39faee7a38162674eec8bdb8ae6214da16f802f8f6991b5ca1
def predict_top(model, img, top_classes, label_dict): '\n\n Run prediction on input image and get\n prediction scores and class indices for\n `top_classes` classes.\n\n Parameters\n ----------\n model: tf.keras.models.Model\n A keras model.\n\n img: np.ndarray\n Input image in form of 4D tensor.\n\n top_classes: int\n Number of top classes for prediction.\n\n label_dict: dict\n Dictionary with keys the prediction indices\n (int) and values the corresponding class\n labels (str).\n\n Returns\n ----------\n tuple\n Tuple with 3 elements:\n - a list of predicted class labels descending\n order,\n - a list with corresponding prediction\n scores.\n\n ' scores = np.squeeze(model.predict(img)) top_indx = np.argsort(scores)[(- top_classes):] top_scrs = scores[top_indx] top_labs = [label_dict[indx] for indx in top_indx] return (list(reversed(top_labs)), list(reversed(top_scrs)))
Run prediction on input image and get prediction scores and class indices for `top_classes` classes. Parameters ---------- model: tf.keras.models.Model A keras model. img: np.ndarray Input image in form of 4D tensor. top_classes: int Number of top classes for prediction. label_dict: dict Dictionary with keys the prediction indices (int) and values the corresponding class labels (str). Returns ---------- tuple Tuple with 3 elements: - a list of predicted class labels descending order, - a list with corresponding prediction scores.
lib/utils/utils.py
predict_top
jonathanzjl/cam-vision
0
python
def predict_top(model, img, top_classes, label_dict): '\n\n Run prediction on input image and get\n prediction scores and class indices for\n `top_classes` classes.\n\n Parameters\n ----------\n model: tf.keras.models.Model\n A keras model.\n\n img: np.ndarray\n Input image in form of 4D tensor.\n\n top_classes: int\n Number of top classes for prediction.\n\n label_dict: dict\n Dictionary with keys the prediction indices\n (int) and values the corresponding class\n labels (str).\n\n Returns\n ----------\n tuple\n Tuple with 3 elements:\n - a list of predicted class labels descending\n order,\n - a list with corresponding prediction\n scores.\n\n ' scores = np.squeeze(model.predict(img)) top_indx = np.argsort(scores)[(- top_classes):] top_scrs = scores[top_indx] top_labs = [label_dict[indx] for indx in top_indx] return (list(reversed(top_labs)), list(reversed(top_scrs)))
def predict_top(model, img, top_classes, label_dict): '\n\n Run prediction on input image and get\n prediction scores and class indices for\n `top_classes` classes.\n\n Parameters\n ----------\n model: tf.keras.models.Model\n A keras model.\n\n img: np.ndarray\n Input image in form of 4D tensor.\n\n top_classes: int\n Number of top classes for prediction.\n\n label_dict: dict\n Dictionary with keys the prediction indices\n (int) and values the corresponding class\n labels (str).\n\n Returns\n ----------\n tuple\n Tuple with 3 elements:\n - a list of predicted class labels descending\n order,\n - a list with corresponding prediction\n scores.\n\n ' scores = np.squeeze(model.predict(img)) top_indx = np.argsort(scores)[(- top_classes):] top_scrs = scores[top_indx] top_labs = [label_dict[indx] for indx in top_indx] return (list(reversed(top_labs)), list(reversed(top_scrs)))<|docstring|>Run prediction on input image and get prediction scores and class indices for `top_classes` classes. Parameters ---------- model: tf.keras.models.Model A keras model. img: np.ndarray Input image in form of 4D tensor. top_classes: int Number of top classes for prediction. label_dict: dict Dictionary with keys the prediction indices (int) and values the corresponding class labels (str). Returns ---------- tuple Tuple with 3 elements: - a list of predicted class labels descending order, - a list with corresponding prediction scores.<|endoftext|>
d9af481db73556e58e514f0602304e500207388520da0fe3dd04aa54d44cf799
def get_imagenet_dict(txt_path): '\n\n Make ImageNet ground truth dict.\n The ground truth dictionay maps\n a class index to its label.\n\n The .txt file can be found at:\n https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a\n\n Parameters\n ----------\n txt_path: str\n Path to the txt file with ImageNet\n class index-to-label mappings.\n\n Returns\n ----------\n dict\n A dictionay with class indices as\n keys and their corresponding ImageNet\n labels as values.\n\n ' imagenet_dict = {} with open(txt_path, 'r') as f: for __l in f.readlines(): (key, val) = __l.split(':') __v = val.strip()[:(- 1)].replace("'", '') imagenet_dict[int(key)] = __v return imagenet_dict
Make ImageNet ground truth dict. The ground truth dictionay maps a class index to its label. The .txt file can be found at: https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a Parameters ---------- txt_path: str Path to the txt file with ImageNet class index-to-label mappings. Returns ---------- dict A dictionay with class indices as keys and their corresponding ImageNet labels as values.
lib/utils/utils.py
get_imagenet_dict
jonathanzjl/cam-vision
0
python
def get_imagenet_dict(txt_path): '\n\n Make ImageNet ground truth dict.\n The ground truth dictionay maps\n a class index to its label.\n\n The .txt file can be found at:\n https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a\n\n Parameters\n ----------\n txt_path: str\n Path to the txt file with ImageNet\n class index-to-label mappings.\n\n Returns\n ----------\n dict\n A dictionay with class indices as\n keys and their corresponding ImageNet\n labels as values.\n\n ' imagenet_dict = {} with open(txt_path, 'r') as f: for __l in f.readlines(): (key, val) = __l.split(':') __v = val.strip()[:(- 1)].replace("'", ) imagenet_dict[int(key)] = __v return imagenet_dict
def get_imagenet_dict(txt_path): '\n\n Make ImageNet ground truth dict.\n The ground truth dictionay maps\n a class index to its label.\n\n The .txt file can be found at:\n https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a\n\n Parameters\n ----------\n txt_path: str\n Path to the txt file with ImageNet\n class index-to-label mappings.\n\n Returns\n ----------\n dict\n A dictionay with class indices as\n keys and their corresponding ImageNet\n labels as values.\n\n ' imagenet_dict = {} with open(txt_path, 'r') as f: for __l in f.readlines(): (key, val) = __l.split(':') __v = val.strip()[:(- 1)].replace("'", ) imagenet_dict[int(key)] = __v return imagenet_dict<|docstring|>Make ImageNet ground truth dict. The ground truth dictionay maps a class index to its label. The .txt file can be found at: https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a Parameters ---------- txt_path: str Path to the txt file with ImageNet class index-to-label mappings. Returns ---------- dict A dictionay with class indices as keys and their corresponding ImageNet labels as values.<|endoftext|>
d3df50c856658bed26a06cbf0d67e06c509e0d678ee063c5b30b26907792790c
def classify_frame(model, frame, target_size, top_classes, label_dict, normalize=True, permute_br=True, to_channel_first=False, verbose=True): '\n\n Run classification on input frame.\n\n Parameters\n ----------\n model: tf.keras.models.Model\n A keras model.\n\n frame: np.ndarray\n An input image frame.\n\n target_size: int\n Target square image size for resizing.\n None indicates no resizing.\n\n top_classes: int\n Number of top classes for prediction.\n\n label_dict: dict\n Dictionary with keys the prediction indices\n (int) and values the corresponding class\n labels (str).\n\n normalize: bool\n Whether input image should be divided by 255.\n\n permute_br: bool\n Whether permutation of the Blue and Red\n channels should be performed. This is generally\n needed when the input image is read using OpenCV,\n since OpenCV uses BGR ordering, while most neural\n networks assumes input to be RGB ordering. Defaults\n to True.\n\n to_channel_first: bool\n When set to true, the input image will be\n converted to `channel_first` ordering. Defaults to\n False.\n\n verbose: bool\n Controls console print verbosity. Defaults\n to True.\n\n Returns\n ----------\n tuple\n Tuple with 3 elements:\n - a list of predicted class labels with\n prediction scores in descending order,\n - a list with corresponding prediction\n scores.\n\n ' inp = make_predict_inp(frame, target_size, normalize, permute_br, to_channel_first) (top_labs, top_scrs) = predict_top(model, inp, top_classes, label_dict) if verbose: print('\nframe size: {} x {}'.format(inp.shape[1], inp.shape[2])) print('top {} predictions: '.format(top_classes)) for __c in range(top_classes): print('{:25s}: {:.3f}'.format(top_labs[__c][:min(len(top_labs[__c]), 20)], top_scrs[__c])) return (top_labs, top_scrs)
Run classification on input frame. Parameters ---------- model: tf.keras.models.Model A keras model. frame: np.ndarray An input image frame. target_size: int Target square image size for resizing. None indicates no resizing. top_classes: int Number of top classes for prediction. label_dict: dict Dictionary with keys the prediction indices (int) and values the corresponding class labels (str). normalize: bool Whether input image should be divided by 255. permute_br: bool Whether permutation of the Blue and Red channels should be performed. This is generally needed when the input image is read using OpenCV, since OpenCV uses BGR ordering, while most neural networks assumes input to be RGB ordering. Defaults to True. to_channel_first: bool When set to true, the input image will be converted to `channel_first` ordering. Defaults to False. verbose: bool Controls console print verbosity. Defaults to True. Returns ---------- tuple Tuple with 3 elements: - a list of predicted class labels with prediction scores in descending order, - a list with corresponding prediction scores.
lib/utils/utils.py
classify_frame
jonathanzjl/cam-vision
0
python
def classify_frame(model, frame, target_size, top_classes, label_dict, normalize=True, permute_br=True, to_channel_first=False, verbose=True): '\n\n Run classification on input frame.\n\n Parameters\n ----------\n model: tf.keras.models.Model\n A keras model.\n\n frame: np.ndarray\n An input image frame.\n\n target_size: int\n Target square image size for resizing.\n None indicates no resizing.\n\n top_classes: int\n Number of top classes for prediction.\n\n label_dict: dict\n Dictionary with keys the prediction indices\n (int) and values the corresponding class\n labels (str).\n\n normalize: bool\n Whether input image should be divided by 255.\n\n permute_br: bool\n Whether permutation of the Blue and Red\n channels should be performed. This is generally\n needed when the input image is read using OpenCV,\n since OpenCV uses BGR ordering, while most neural\n networks assumes input to be RGB ordering. Defaults\n to True.\n\n to_channel_first: bool\n When set to true, the input image will be\n converted to `channel_first` ordering. Defaults to\n False.\n\n verbose: bool\n Controls console print verbosity. Defaults\n to True.\n\n Returns\n ----------\n tuple\n Tuple with 3 elements:\n - a list of predicted class labels with\n prediction scores in descending order,\n - a list with corresponding prediction\n scores.\n\n ' inp = make_predict_inp(frame, target_size, normalize, permute_br, to_channel_first) (top_labs, top_scrs) = predict_top(model, inp, top_classes, label_dict) if verbose: print('\nframe size: {} x {}'.format(inp.shape[1], inp.shape[2])) print('top {} predictions: '.format(top_classes)) for __c in range(top_classes): print('{:25s}: {:.3f}'.format(top_labs[__c][:min(len(top_labs[__c]), 20)], top_scrs[__c])) return (top_labs, top_scrs)
def classify_frame(model, frame, target_size, top_classes, label_dict, normalize=True, permute_br=True, to_channel_first=False, verbose=True): '\n\n Run classification on input frame.\n\n Parameters\n ----------\n model: tf.keras.models.Model\n A keras model.\n\n frame: np.ndarray\n An input image frame.\n\n target_size: int\n Target square image size for resizing.\n None indicates no resizing.\n\n top_classes: int\n Number of top classes for prediction.\n\n label_dict: dict\n Dictionary with keys the prediction indices\n (int) and values the corresponding class\n labels (str).\n\n normalize: bool\n Whether input image should be divided by 255.\n\n permute_br: bool\n Whether permutation of the Blue and Red\n channels should be performed. This is generally\n needed when the input image is read using OpenCV,\n since OpenCV uses BGR ordering, while most neural\n networks assumes input to be RGB ordering. Defaults\n to True.\n\n to_channel_first: bool\n When set to true, the input image will be\n converted to `channel_first` ordering. Defaults to\n False.\n\n verbose: bool\n Controls console print verbosity. Defaults\n to True.\n\n Returns\n ----------\n tuple\n Tuple with 3 elements:\n - a list of predicted class labels with\n prediction scores in descending order,\n - a list with corresponding prediction\n scores.\n\n ' inp = make_predict_inp(frame, target_size, normalize, permute_br, to_channel_first) (top_labs, top_scrs) = predict_top(model, inp, top_classes, label_dict) if verbose: print('\nframe size: {} x {}'.format(inp.shape[1], inp.shape[2])) print('top {} predictions: '.format(top_classes)) for __c in range(top_classes): print('{:25s}: {:.3f}'.format(top_labs[__c][:min(len(top_labs[__c]), 20)], top_scrs[__c])) return (top_labs, top_scrs)<|docstring|>Run classification on input frame. Parameters ---------- model: tf.keras.models.Model A keras model. frame: np.ndarray An input image frame. target_size: int Target square image size for resizing. 
None indicates no resizing. top_classes: int Number of top classes for prediction. label_dict: dict Dictionary with keys the prediction indices (int) and values the corresponding class labels (str). normalize: bool Whether input image should be divided by 255. permute_br: bool Whether permutation of the Blue and Red channels should be performed. This is generally needed when the input image is read using OpenCV, since OpenCV uses BGR ordering, while most neural networks assumes input to be RGB ordering. Defaults to True. to_channel_first: bool When set to true, the input image will be converted to `channel_first` ordering. Defaults to False. verbose: bool Controls console print verbosity. Defaults to True. Returns ---------- tuple Tuple with 3 elements: - a list of predicted class labels with prediction scores in descending order, - a list with corresponding prediction scores.<|endoftext|>
23ec06474e3a5bd772c46a2928c76cd9c78a6bf91f329568bac3ba1be090b7e4
def load_dkn_weights(w_path, dtype, skip_bytes=20): '\n\n Load Darknet weight file.\n\n Parameters\n ----------\n w_path: str\n Path to the weight file.\n\n dtype: str or datatype\n Data type of stored weights.\n\n skip_bytes: int\n Number of bytes to skip. Darknet weight\n file starts with 5 x int32 (20 bytes) header\n elements.\n\n Returns\n ----------\n np.array\n Weight array.\n\n ' with open(w_path, 'rb') as __wf: __wf.seek(skip_bytes, 0) __content = np.fromfile(__wf, dtype) return __content
Load Darknet weight file. Parameters ---------- w_path: str Path to the weight file. dtype: str or datatype Data type of stored weights. skip_bytes: int Number of bytes to skip. Darknet weight file starts with 5 x int32 (20 bytes) header elements. Returns ---------- np.array Weight array.
lib/utils/utils.py
load_dkn_weights
jonathanzjl/cam-vision
0
python
def load_dkn_weights(w_path, dtype, skip_bytes=20): '\n\n Load Darknet weight file.\n\n Parameters\n ----------\n w_path: str\n Path to the weight file.\n\n dtype: str or datatype\n Data type of stored weights.\n\n skip_bytes: int\n Number of bytes to skip. Darknet weight\n file starts with 5 x int32 (20 bytes) header\n elements.\n\n Returns\n ----------\n np.array\n Weight array.\n\n ' with open(w_path, 'rb') as __wf: __wf.seek(skip_bytes, 0) __content = np.fromfile(__wf, dtype) return __content
def load_dkn_weights(w_path, dtype, skip_bytes=20): '\n\n Load Darknet weight file.\n\n Parameters\n ----------\n w_path: str\n Path to the weight file.\n\n dtype: str or datatype\n Data type of stored weights.\n\n skip_bytes: int\n Number of bytes to skip. Darknet weight\n file starts with 5 x int32 (20 bytes) header\n elements.\n\n Returns\n ----------\n np.array\n Weight array.\n\n ' with open(w_path, 'rb') as __wf: __wf.seek(skip_bytes, 0) __content = np.fromfile(__wf, dtype) return __content<|docstring|>Load Darknet weight file. Parameters ---------- w_path: str Path to the weight file. dtype: str or datatype Data type of stored weights. skip_bytes: int Number of bytes to skip. Darknet weight file starts with 5 x int32 (20 bytes) header elements. Returns ---------- np.array Weight array.<|endoftext|>
64a02305751c0e0c51ff32c729fcf1b30505d35c7dd193f0df6fbf5d567ef6a1
def load_img_folder(folder, ext, permute_br=True, normalize=True, loader=None): '\n\n Load all images inside given folder.\n\n Parameters\n ----------\n folder: str\n Absolute folder to image folder.\n\n ext: str\n Image file extension. Must be recognizable by\n OpenCV.\n\n permute_br: bool\n Whether blue and red channel permutation should\n be performed.\n\n normalize: bool\n Indicating whether the image pixel value should\n be divided by 255.0.\n\n loader: function\n An image loader. Defaults to None, in which\n OpenCV `imread` is used.\n\n Returns\n ----------\n tuple(list)\n The image (np.array) list and path list.\n\n ' __imgs = [] __plst = glob.glob(os.path.join(folder, '*.{}'.format(ext))) for __p in __plst: if loader: __img = loader(__p) else: __img = cv2.imread(__p, cv2.IMREAD_UNCHANGED) if normalize: __img = (__img / 255.0) if permute_br: (__img[(:, :, 0)], __img[(:, :, 2)]) = (__img[(:, :, 2)], __img[(:, :, 0)].copy()) __imgs.append(__img) return (__imgs, __plst)
Load all images inside given folder. Parameters ---------- folder: str Absolute folder to image folder. ext: str Image file extension. Must be recognizable by OpenCV. permute_br: bool Whether blue and red channel permutation should be performed. normalize: bool Indicating whether the image pixel value should be divided by 255.0. loader: function An image loader. Defaults to None, in which OpenCV `imread` is used. Returns ---------- tuple(list) The image (np.array) list and path list.
lib/utils/utils.py
load_img_folder
jonathanzjl/cam-vision
0
python
def load_img_folder(folder, ext, permute_br=True, normalize=True, loader=None): '\n\n Load all images inside given folder.\n\n Parameters\n ----------\n folder: str\n Absolute folder to image folder.\n\n ext: str\n Image file extension. Must be recognizable by\n OpenCV.\n\n permute_br: bool\n Whether blue and red channel permutation should\n be performed.\n\n normalize: bool\n Indicating whether the image pixel value should\n be divided by 255.0.\n\n loader: function\n An image loader. Defaults to None, in which\n OpenCV `imread` is used.\n\n Returns\n ----------\n tuple(list)\n The image (np.array) list and path list.\n\n ' __imgs = [] __plst = glob.glob(os.path.join(folder, '*.{}'.format(ext))) for __p in __plst: if loader: __img = loader(__p) else: __img = cv2.imread(__p, cv2.IMREAD_UNCHANGED) if normalize: __img = (__img / 255.0) if permute_br: (__img[(:, :, 0)], __img[(:, :, 2)]) = (__img[(:, :, 2)], __img[(:, :, 0)].copy()) __imgs.append(__img) return (__imgs, __plst)
def load_img_folder(folder, ext, permute_br=True, normalize=True, loader=None): '\n\n Load all images inside given folder.\n\n Parameters\n ----------\n folder: str\n Absolute folder to image folder.\n\n ext: str\n Image file extension. Must be recognizable by\n OpenCV.\n\n permute_br: bool\n Whether blue and red channel permutation should\n be performed.\n\n normalize: bool\n Indicating whether the image pixel value should\n be divided by 255.0.\n\n loader: function\n An image loader. Defaults to None, in which\n OpenCV `imread` is used.\n\n Returns\n ----------\n tuple(list)\n The image (np.array) list and path list.\n\n ' __imgs = [] __plst = glob.glob(os.path.join(folder, '*.{}'.format(ext))) for __p in __plst: if loader: __img = loader(__p) else: __img = cv2.imread(__p, cv2.IMREAD_UNCHANGED) if normalize: __img = (__img / 255.0) if permute_br: (__img[(:, :, 0)], __img[(:, :, 2)]) = (__img[(:, :, 2)], __img[(:, :, 0)].copy()) __imgs.append(__img) return (__imgs, __plst)<|docstring|>Load all images inside given folder. Parameters ---------- folder: str Absolute folder to image folder. ext: str Image file extension. Must be recognizable by OpenCV. permute_br: bool Whether blue and red channel permutation should be performed. normalize: bool Indicating whether the image pixel value should be divided by 255.0. loader: function An image loader. Defaults to None, in which OpenCV `imread` is used. Returns ---------- tuple(list) The image (np.array) list and path list.<|endoftext|>
41f829b3f8895c13ac6b09a3a89e584075004983f9c3e52f6712f52d830d42e8
def letterbox_image(img, frame_size, fill=0.5, normalize=True): '\n\n Letter box an input image.\n\n Image will be centered into a squared frame,\n where the longer side of the image is resized\n to the frame size and the shorter side is resized\n by keepng the same aspect ratio.\n\n Parameters\n ----------\n img: np.array\n The input image. Assumed to be rank-3, channel-last.\n\n frame_size: int\n Size of the square frame.\n\n fill: float\n Value used to fill empty border. Defaults to 0.5.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n Returns\n ----------\n tuple\n 1. The letter boxed image.\n 2. Horizontal and vertical shift in number of pixels\n with respect to square frame size.\n 3. The resize ratio: box size/ original longer side.\n\n ' if normalize: img = (img.astype(np.float32) / 255.0) __lindx = np.argmax(img.shape[:2]) __ratio = (frame_size / img.shape[:2][__lindx]) __rsize = (np.array(img.shape[:2]) * __ratio).astype(np.int32) __shift = (np.array(([frame_size] * 2), dtype=np.int32) - __rsize) __shift = (__shift / 2).astype(np.int32) __rsimg = torch.nn.functional.interpolate(torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0), size=(__rsize[0], __rsize[1]), mode='bilinear', align_corners=True) __rsimg = __rsimg.squeeze().permute(1, 2, 0) __ltbox = (np.ones((frame_size, frame_size, img.shape[2])) * fill) __ltbox[(__shift[0]:(__shift[0] + __rsize[0]), __shift[1]:(__shift[1] + __rsize[1]), :)] = __rsimg return (__ltbox.astype(np.float32), np.flip(__shift, axis=0).copy(), __ratio)
Letter box an input image. Image will be centered into a squared frame, where the longer side of the image is resized to the frame size and the shorter side is resized by keepng the same aspect ratio. Parameters ---------- img: np.array The input image. Assumed to be rank-3, channel-last. frame_size: int Size of the square frame. fill: float Value used to fill empty border. Defaults to 0.5. normalize: bool Whether input image should be divided by 255. Returns ---------- tuple 1. The letter boxed image. 2. Horizontal and vertical shift in number of pixels with respect to square frame size. 3. The resize ratio: box size/ original longer side.
lib/utils/utils.py
letterbox_image
jonathanzjl/cam-vision
0
python
def letterbox_image(img, frame_size, fill=0.5, normalize=True): '\n\n Letter box an input image.\n\n Image will be centered into a squared frame,\n where the longer side of the image is resized\n to the frame size and the shorter side is resized\n by keepng the same aspect ratio.\n\n Parameters\n ----------\n img: np.array\n The input image. Assumed to be rank-3, channel-last.\n\n frame_size: int\n Size of the square frame.\n\n fill: float\n Value used to fill empty border. Defaults to 0.5.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n Returns\n ----------\n tuple\n 1. The letter boxed image.\n 2. Horizontal and vertical shift in number of pixels\n with respect to square frame size.\n 3. The resize ratio: box size/ original longer side.\n\n ' if normalize: img = (img.astype(np.float32) / 255.0) __lindx = np.argmax(img.shape[:2]) __ratio = (frame_size / img.shape[:2][__lindx]) __rsize = (np.array(img.shape[:2]) * __ratio).astype(np.int32) __shift = (np.array(([frame_size] * 2), dtype=np.int32) - __rsize) __shift = (__shift / 2).astype(np.int32) __rsimg = torch.nn.functional.interpolate(torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0), size=(__rsize[0], __rsize[1]), mode='bilinear', align_corners=True) __rsimg = __rsimg.squeeze().permute(1, 2, 0) __ltbox = (np.ones((frame_size, frame_size, img.shape[2])) * fill) __ltbox[(__shift[0]:(__shift[0] + __rsize[0]), __shift[1]:(__shift[1] + __rsize[1]), :)] = __rsimg return (__ltbox.astype(np.float32), np.flip(__shift, axis=0).copy(), __ratio)
def letterbox_image(img, frame_size, fill=0.5, normalize=True): '\n\n Letter box an input image.\n\n Image will be centered into a squared frame,\n where the longer side of the image is resized\n to the frame size and the shorter side is resized\n by keepng the same aspect ratio.\n\n Parameters\n ----------\n img: np.array\n The input image. Assumed to be rank-3, channel-last.\n\n frame_size: int\n Size of the square frame.\n\n fill: float\n Value used to fill empty border. Defaults to 0.5.\n\n normalize: bool\n Whether input image should be divided by 255.\n\n Returns\n ----------\n tuple\n 1. The letter boxed image.\n 2. Horizontal and vertical shift in number of pixels\n with respect to square frame size.\n 3. The resize ratio: box size/ original longer side.\n\n ' if normalize: img = (img.astype(np.float32) / 255.0) __lindx = np.argmax(img.shape[:2]) __ratio = (frame_size / img.shape[:2][__lindx]) __rsize = (np.array(img.shape[:2]) * __ratio).astype(np.int32) __shift = (np.array(([frame_size] * 2), dtype=np.int32) - __rsize) __shift = (__shift / 2).astype(np.int32) __rsimg = torch.nn.functional.interpolate(torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0), size=(__rsize[0], __rsize[1]), mode='bilinear', align_corners=True) __rsimg = __rsimg.squeeze().permute(1, 2, 0) __ltbox = (np.ones((frame_size, frame_size, img.shape[2])) * fill) __ltbox[(__shift[0]:(__shift[0] + __rsize[0]), __shift[1]:(__shift[1] + __rsize[1]), :)] = __rsimg return (__ltbox.astype(np.float32), np.flip(__shift, axis=0).copy(), __ratio)<|docstring|>Letter box an input image. Image will be centered into a squared frame, where the longer side of the image is resized to the frame size and the shorter side is resized by keepng the same aspect ratio. Parameters ---------- img: np.array The input image. Assumed to be rank-3, channel-last. frame_size: int Size of the square frame. fill: float Value used to fill empty border. Defaults to 0.5. 
normalize: bool Whether input image should be divided by 255. Returns ---------- tuple 1. The letter boxed image. 2. Horizontal and vertical shift in number of pixels with respect to square frame size. 3. The resize ratio: box size/ original longer side.<|endoftext|>
f21f16227c86cfe4135f94a19a1444caa454cadcac7b0f9cc19ee8477098175a
def correct_bboxes(dets, shift, ratio): '\n\n Correct bounding box centers and scales\n to match original input image before\n letter boxing.\n\n Parameters\n ----------\n dets: torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n shift: np.array\n Horizontal and vertical shift in number of pixels\n with respect to square frame size.\n\n ratio\n The resize ratio: box size/ original longer side.\n\n ' dets[(:2, :)] -= torch.from_numpy(shift).float().view(2, 1) dets[(:4, :)] /= ratio
Correct bounding box centers and scales to match original input image before letter boxing. Parameters ---------- dets: torch.tensor A rank-2 tensor, where each col is a size-6 vector representing a detection bounding box. The meaning of each element in the vector is as follows: 1. bbox begin point x coordinate. 2. bbox begin point y coordinate. 3. bbox width. 4. bbox height. 5. max proba = max class proba * objectness score. 6. class index of the corresponding max proba. shift: np.array Horizontal and vertical shift in number of pixels with respect to square frame size. ratio The resize ratio: box size/ original longer side.
lib/utils/utils.py
correct_bboxes
jonathanzjl/cam-vision
0
python
def correct_bboxes(dets, shift, ratio): '\n\n Correct bounding box centers and scales\n to match original input image before\n letter boxing.\n\n Parameters\n ----------\n dets: torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n shift: np.array\n Horizontal and vertical shift in number of pixels\n with respect to square frame size.\n\n ratio\n The resize ratio: box size/ original longer side.\n\n ' dets[(:2, :)] -= torch.from_numpy(shift).float().view(2, 1) dets[(:4, :)] /= ratio
def correct_bboxes(dets, shift, ratio): '\n\n Correct bounding box centers and scales\n to match original input image before\n letter boxing.\n\n Parameters\n ----------\n dets: torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n shift: np.array\n Horizontal and vertical shift in number of pixels\n with respect to square frame size.\n\n ratio\n The resize ratio: box size/ original longer side.\n\n ' dets[(:2, :)] -= torch.from_numpy(shift).float().view(2, 1) dets[(:4, :)] /= ratio<|docstring|>Correct bounding box centers and scales to match original input image before letter boxing. Parameters ---------- dets: torch.tensor A rank-2 tensor, where each col is a size-6 vector representing a detection bounding box. The meaning of each element in the vector is as follows: 1. bbox begin point x coordinate. 2. bbox begin point y coordinate. 3. bbox width. 4. bbox height. 5. max proba = max class proba * objectness score. 6. class index of the corresponding max proba. shift: np.array Horizontal and vertical shift in number of pixels with respect to square frame size. ratio The resize ratio: box size/ original longer side.<|endoftext|>
a1edcf48f0951e192bbb833f8169f0b9d12c562e5c90b0992a09b5f42e5b188e
def nms(dets, nms_thresh): '\n\n Do non-maximum suppression.\n\n Parameters\n ----------\n dets: torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n nms_thresh: float\n NMS threshold.\n\n Returns\n ----------\n torch.tensor\n New bounding box attributed with boxes having\n high IOU with the top prediction sppressed.\n\n ' __book = {} (__, __ord) = torch.sort(dets[(4, :)], descending=True) (__dets, __cls) = (dets[(:, __ord)], set(dets[((- 1), :)])) for (__i, __c) in enumerate(__dets[((- 1), :)]): if (int(__c) not in __book): __book[int(__c)] = __i else: __iou = compute_iou(__dets[(:4, __i)], __dets[(:4, __book[int(__c)])]) if (__iou > nms_thresh): __dets[(4, __i)] = (- 1) return __dets[(:, (__dets[(4, :)] >= 0))]
Do non-maximum suppression. Parameters ---------- dets: torch.tensor A rank-2 tensor, where each col is a size-6 vector representing a detection bounding box. The meaning of each element in the vector is as follows: 1. bbox begin point x coordinate. 2. bbox begin point y coordinate. 3. bbox width. 4. bbox height. 5. max proba = max class proba * objectness score. 6. class index of the corresponding max proba. nms_thresh: float NMS threshold. Returns ---------- torch.tensor New bounding box attributed with boxes having high IOU with the top prediction sppressed.
lib/utils/utils.py
nms
jonathanzjl/cam-vision
0
python
def nms(dets, nms_thresh): '\n\n Do non-maximum suppression.\n\n Parameters\n ----------\n dets: torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n nms_thresh: float\n NMS threshold.\n\n Returns\n ----------\n torch.tensor\n New bounding box attributed with boxes having\n high IOU with the top prediction sppressed.\n\n ' __book = {} (__, __ord) = torch.sort(dets[(4, :)], descending=True) (__dets, __cls) = (dets[(:, __ord)], set(dets[((- 1), :)])) for (__i, __c) in enumerate(__dets[((- 1), :)]): if (int(__c) not in __book): __book[int(__c)] = __i else: __iou = compute_iou(__dets[(:4, __i)], __dets[(:4, __book[int(__c)])]) if (__iou > nms_thresh): __dets[(4, __i)] = (- 1) return __dets[(:, (__dets[(4, :)] >= 0))]
def nms(dets, nms_thresh): '\n\n Do non-maximum suppression.\n\n Parameters\n ----------\n dets: torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n nms_thresh: float\n NMS threshold.\n\n Returns\n ----------\n torch.tensor\n New bounding box attributed with boxes having\n high IOU with the top prediction sppressed.\n\n ' __book = {} (__, __ord) = torch.sort(dets[(4, :)], descending=True) (__dets, __cls) = (dets[(:, __ord)], set(dets[((- 1), :)])) for (__i, __c) in enumerate(__dets[((- 1), :)]): if (int(__c) not in __book): __book[int(__c)] = __i else: __iou = compute_iou(__dets[(:4, __i)], __dets[(:4, __book[int(__c)])]) if (__iou > nms_thresh): __dets[(4, __i)] = (- 1) return __dets[(:, (__dets[(4, :)] >= 0))]<|docstring|>Do non-maximum suppression. Parameters ---------- dets: torch.tensor A rank-2 tensor, where each col is a size-6 vector representing a detection bounding box. The meaning of each element in the vector is as follows: 1. bbox begin point x coordinate. 2. bbox begin point y coordinate. 3. bbox width. 4. bbox height. 5. max proba = max class proba * objectness score. 6. class index of the corresponding max proba. nms_thresh: float NMS threshold. Returns ---------- torch.tensor New bounding box attributed with boxes having high IOU with the top prediction sppressed.<|endoftext|>
0209748ba636f48d0ddb946a50617106be0d99db3e375be6b91dd352b0ee0296
def compute_iou(lhs, rhs): '\n\n Compute the intersection over union of two\n bounding boxes.\n\n Parameters\n ----------\n lhs: torch.tensor\n Bounding box 1.\n\n rhs: torch.tensor\n Bounding box 2.\n\n Returns\n ----------\n float\n The intersection over union.\n\n ' __beg = np.array([max(lhs[0], rhs[0]), max(lhs[1], rhs[1])]) __end = np.array([min((lhs[0] + lhs[2]), (rhs[0] + rhs[2])), min((lhs[1] + lhs[3]), (rhs[1] + rhs[3]))]) __num = np.prod((__end - __beg)) if (__num <= 0): return 0 __den = (((lhs[2] * lhs[3]) + (rhs[2] * rhs[3])) - __num) return (__num / __den)
Compute the intersection over union of two bounding boxes. Parameters ---------- lhs: torch.tensor Bounding box 1. rhs: torch.tensor Bounding box 2. Returns ---------- float The intersection over union.
lib/utils/utils.py
compute_iou
jonathanzjl/cam-vision
0
python
def compute_iou(lhs, rhs): '\n\n Compute the intersection over union of two\n bounding boxes.\n\n Parameters\n ----------\n lhs: torch.tensor\n Bounding box 1.\n\n rhs: torch.tensor\n Bounding box 2.\n\n Returns\n ----------\n float\n The intersection over union.\n\n ' __beg = np.array([max(lhs[0], rhs[0]), max(lhs[1], rhs[1])]) __end = np.array([min((lhs[0] + lhs[2]), (rhs[0] + rhs[2])), min((lhs[1] + lhs[3]), (rhs[1] + rhs[3]))]) __num = np.prod((__end - __beg)) if (__num <= 0): return 0 __den = (((lhs[2] * lhs[3]) + (rhs[2] * rhs[3])) - __num) return (__num / __den)
def compute_iou(lhs, rhs): '\n\n Compute the intersection over union of two\n bounding boxes.\n\n Parameters\n ----------\n lhs: torch.tensor\n Bounding box 1.\n\n rhs: torch.tensor\n Bounding box 2.\n\n Returns\n ----------\n float\n The intersection over union.\n\n ' __beg = np.array([max(lhs[0], rhs[0]), max(lhs[1], rhs[1])]) __end = np.array([min((lhs[0] + lhs[2]), (rhs[0] + rhs[2])), min((lhs[1] + lhs[3]), (rhs[1] + rhs[3]))]) __num = np.prod((__end - __beg)) if (__num <= 0): return 0 __den = (((lhs[2] * lhs[3]) + (rhs[2] * rhs[3])) - __num) return (__num / __den)<|docstring|>Compute the intersection over union of two bounding boxes. Parameters ---------- lhs: torch.tensor Bounding box 1. rhs: torch.tensor Bounding box 2. Returns ---------- float The intersection over union.<|endoftext|>
b1f48b117ee01bbe0a48569ab83d99e7c203e000292cd8edb35299612acf7caa
def detect_frame(model, frame, obj_thresh=0.5, nms_thresh=None, box_correction=None): '\n\n Detect objects in a frame.\n\n Parameters\n ----------\n model: YOLO\n The YOLO detector model.\n\n frame: torch.tensor\n The input frame as a torch rank-4 tensor.\n\n obj_thresh: float\n Threshold on objectiveness and class\n probabilities.\n\n nms_thresh: float\n Threshold on IOU used during nms.\n\n box_correction: tuple or None\n A tuple of (shift, ratio) parameters used to\n perform letter box correction to bring the\n centers and scale of the bounding boxes to\n match the original image before letter boxing.\n This need to be set if bounding boxes are plotted\n into the original image before letter boxing.\n Defaults to None, indicating no correction.\n\n Returns\n ----------\n torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n ' __detections = model(frame) __boxes = [] for __d in __detections: __p = __d.permute(0, 2, 1, 3).contiguous().view(__d.shape[2], (- 1)) (__mprb, __midx) = torch.max(__p[(5:, :)], dim=0) __p[(4, :)] *= __mprb __b = torch.cat([__p[(:5, :)], __midx.type(torch.FloatTensor).unsqueeze(0)], 0) __b = __b[(:, (__b[(4, :)] > obj_thresh))] if __b.numel(): __b[(:2, :)] -= (__b[(2:4, :)] / 2.0) __boxes.append(__b) if (len(__boxes) == 0): return None __dets = torch.cat(__boxes, dim=1) if nms_thresh: __dets = nms(__dets, nms_thresh) if box_correction: correct_bboxes(__dets, *box_correction) return __dets
Detect objects in a frame. Parameters ---------- model: YOLO The YOLO detector model. frame: torch.tensor The input frame as a torch rank-4 tensor. obj_thresh: float Threshold on objectiveness and class probabilities. nms_thresh: float Threshold on IOU used during nms. box_correction: tuple or None A tuple of (shift, ratio) parameters used to perform letter box correction to bring the centers and scale of the bounding boxes to match the original image before letter boxing. This need to be set if bounding boxes are plotted into the original image before letter boxing. Defaults to None, indicating no correction. Returns ---------- torch.tensor A rank-2 tensor, where each col is a size-6 vector representing a detection bounding box. The meaning of each element in the vector is as follows: 1. bbox begin point x coordinate. 2. bbox begin point y coordinate. 3. bbox width. 4. bbox height. 5. max proba = max class proba * objectness score. 6. class index of the corresponding max proba.
lib/utils/utils.py
detect_frame
jonathanzjl/cam-vision
0
python
def detect_frame(model, frame, obj_thresh=0.5, nms_thresh=None, box_correction=None): '\n\n Detect objects in a frame.\n\n Parameters\n ----------\n model: YOLO\n The YOLO detector model.\n\n frame: torch.tensor\n The input frame as a torch rank-4 tensor.\n\n obj_thresh: float\n Threshold on objectiveness and class\n probabilities.\n\n nms_thresh: float\n Threshold on IOU used during nms.\n\n box_correction: tuple or None\n A tuple of (shift, ratio) parameters used to\n perform letter box correction to bring the\n centers and scale of the bounding boxes to\n match the original image before letter boxing.\n This need to be set if bounding boxes are plotted\n into the original image before letter boxing.\n Defaults to None, indicating no correction.\n\n Returns\n ----------\n torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n ' __detections = model(frame) __boxes = [] for __d in __detections: __p = __d.permute(0, 2, 1, 3).contiguous().view(__d.shape[2], (- 1)) (__mprb, __midx) = torch.max(__p[(5:, :)], dim=0) __p[(4, :)] *= __mprb __b = torch.cat([__p[(:5, :)], __midx.type(torch.FloatTensor).unsqueeze(0)], 0) __b = __b[(:, (__b[(4, :)] > obj_thresh))] if __b.numel(): __b[(:2, :)] -= (__b[(2:4, :)] / 2.0) __boxes.append(__b) if (len(__boxes) == 0): return None __dets = torch.cat(__boxes, dim=1) if nms_thresh: __dets = nms(__dets, nms_thresh) if box_correction: correct_bboxes(__dets, *box_correction) return __dets
def detect_frame(model, frame, obj_thresh=0.5, nms_thresh=None, box_correction=None): '\n\n Detect objects in a frame.\n\n Parameters\n ----------\n model: YOLO\n The YOLO detector model.\n\n frame: torch.tensor\n The input frame as a torch rank-4 tensor.\n\n obj_thresh: float\n Threshold on objectiveness and class\n probabilities.\n\n nms_thresh: float\n Threshold on IOU used during nms.\n\n box_correction: tuple or None\n A tuple of (shift, ratio) parameters used to\n perform letter box correction to bring the\n centers and scale of the bounding boxes to\n match the original image before letter boxing.\n This need to be set if bounding boxes are plotted\n into the original image before letter boxing.\n Defaults to None, indicating no correction.\n\n Returns\n ----------\n torch.tensor\n A rank-2 tensor, where each col is a size-6\n vector representing a detection bounding box.\n The meaning of each element in the vector is\n as follows:\n 1. bbox begin point x coordinate.\n 2. bbox begin point y coordinate.\n 3. bbox width.\n 4. bbox height.\n 5. max proba = max class proba * objectness score.\n 6. class index of the corresponding max proba.\n\n ' __detections = model(frame) __boxes = [] for __d in __detections: __p = __d.permute(0, 2, 1, 3).contiguous().view(__d.shape[2], (- 1)) (__mprb, __midx) = torch.max(__p[(5:, :)], dim=0) __p[(4, :)] *= __mprb __b = torch.cat([__p[(:5, :)], __midx.type(torch.FloatTensor).unsqueeze(0)], 0) __b = __b[(:, (__b[(4, :)] > obj_thresh))] if __b.numel(): __b[(:2, :)] -= (__b[(2:4, :)] / 2.0) __boxes.append(__b) if (len(__boxes) == 0): return None __dets = torch.cat(__boxes, dim=1) if nms_thresh: __dets = nms(__dets, nms_thresh) if box_correction: correct_bboxes(__dets, *box_correction) return __dets<|docstring|>Detect objects in a frame. Parameters ---------- model: YOLO The YOLO detector model. frame: torch.tensor The input frame as a torch rank-4 tensor. obj_thresh: float Threshold on objectiveness and class probabilities. 
nms_thresh: float Threshold on IOU used during nms. box_correction: tuple or None A tuple of (shift, ratio) parameters used to perform letter box correction to bring the centers and scale of the bounding boxes to match the original image before letter boxing. This need to be set if bounding boxes are plotted into the original image before letter boxing. Defaults to None, indicating no correction. Returns ---------- torch.tensor A rank-2 tensor, where each col is a size-6 vector representing a detection bounding box. The meaning of each element in the vector is as follows: 1. bbox begin point x coordinate. 2. bbox begin point y coordinate. 3. bbox width. 4. bbox height. 5. max proba = max class proba * objectness score. 6. class index of the corresponding max proba.<|endoftext|>
a3e59ea570463414ef9d26de334c41b1393057cbc4ae9cb2d5c363677e776e26
@nb.njit('uint64(uint8, uint8, uint8)') def separate_n_nb(packed, n, chunk_bits): '\n A relatively inefficient generalization of the "separate bits"\n step of Morton encoding. Assuming that each of the `n` coordinates\n has `chunk_bits` bits, we can "space out" each bit of each coordinate\n `n` spaces at a time.\n\n >>> for i in range(8):\n ... print(i,\n ... format(separate_n_nb(i, 3, 3), \'#012b\'),\n ... format(separate_n_nb(i, 3, 3) << 1, \'#012b\'),\n ... format(separate_n_nb(i, 3, 3) << 2, \'#012b\'))\n 0 0b0000000000 0b0000000000 0b0000000000\n 1 0b0000000001 0b0000000010 0b0000000100\n 2 0b0000001000 0b0000010000 0b0000100000\n 3 0b0000001001 0b0000010010 0b0000100100\n 4 0b0001000000 0b0010000000 0b0100000000\n 5 0b0001000001 0b0010000010 0b0100000100\n 6 0b0001001000 0b0010010000 0b0100100000\n 7 0b0001001001 0b0010010010 0b0100100100\n\n :param packed: packed tensor\n :param n: number of components that we will eventually want to Morton code\n :param chunk_bits: the number of bits that represent each coordinate\n :return: spaced-out bit representation, ready to be interleaved\n ' a = nb.uint64(packed) a = (a & nb.uint64(255)) x = 0 for i in range(chunk_bits): bit_to_set = (nb.uint64(1) << nb.uint64((i * n))) x |= ((a << nb.uint64(((n - 1) * i))) & bit_to_set) return x
A relatively inefficient generalization of the "separate bits" step of Morton encoding. Assuming that each of the `n` coordinates has `chunk_bits` bits, we can "space out" each bit of each coordinate `n` spaces at a time. >>> for i in range(8): ... print(i, ... format(separate_n_nb(i, 3, 3), '#012b'), ... format(separate_n_nb(i, 3, 3) << 1, '#012b'), ... format(separate_n_nb(i, 3, 3) << 2, '#012b')) 0 0b0000000000 0b0000000000 0b0000000000 1 0b0000000001 0b0000000010 0b0000000100 2 0b0000001000 0b0000010000 0b0000100000 3 0b0000001001 0b0000010010 0b0000100100 4 0b0001000000 0b0010000000 0b0100000000 5 0b0001000001 0b0010000010 0b0100000100 6 0b0001001000 0b0010010000 0b0100100000 7 0b0001001001 0b0010010010 0b0100100100 :param packed: packed tensor :param n: number of components that we will eventually want to Morton code :param chunk_bits: the number of bits that represent each coordinate :return: spaced-out bit representation, ready to be interleaved
morton.py
separate_n_nb
AnimatedRNG/lsc
0
python
@nb.njit('uint64(uint8, uint8, uint8)') def separate_n_nb(packed, n, chunk_bits): '\n A relatively inefficient generalization of the "separate bits"\n step of Morton encoding. Assuming that each of the `n` coordinates\n has `chunk_bits` bits, we can "space out" each bit of each coordinate\n `n` spaces at a time.\n\n >>> for i in range(8):\n ... print(i,\n ... format(separate_n_nb(i, 3, 3), \'#012b\'),\n ... format(separate_n_nb(i, 3, 3) << 1, \'#012b\'),\n ... format(separate_n_nb(i, 3, 3) << 2, \'#012b\'))\n 0 0b0000000000 0b0000000000 0b0000000000\n 1 0b0000000001 0b0000000010 0b0000000100\n 2 0b0000001000 0b0000010000 0b0000100000\n 3 0b0000001001 0b0000010010 0b0000100100\n 4 0b0001000000 0b0010000000 0b0100000000\n 5 0b0001000001 0b0010000010 0b0100000100\n 6 0b0001001000 0b0010010000 0b0100100000\n 7 0b0001001001 0b0010010010 0b0100100100\n\n :param packed: packed tensor\n :param n: number of components that we will eventually want to Morton code\n :param chunk_bits: the number of bits that represent each coordinate\n :return: spaced-out bit representation, ready to be interleaved\n ' a = nb.uint64(packed) a = (a & nb.uint64(255)) x = 0 for i in range(chunk_bits): bit_to_set = (nb.uint64(1) << nb.uint64((i * n))) x |= ((a << nb.uint64(((n - 1) * i))) & bit_to_set) return x
@nb.njit('uint64(uint8, uint8, uint8)') def separate_n_nb(packed, n, chunk_bits): '\n A relatively inefficient generalization of the "separate bits"\n step of Morton encoding. Assuming that each of the `n` coordinates\n has `chunk_bits` bits, we can "space out" each bit of each coordinate\n `n` spaces at a time.\n\n >>> for i in range(8):\n ... print(i,\n ... format(separate_n_nb(i, 3, 3), \'#012b\'),\n ... format(separate_n_nb(i, 3, 3) << 1, \'#012b\'),\n ... format(separate_n_nb(i, 3, 3) << 2, \'#012b\'))\n 0 0b0000000000 0b0000000000 0b0000000000\n 1 0b0000000001 0b0000000010 0b0000000100\n 2 0b0000001000 0b0000010000 0b0000100000\n 3 0b0000001001 0b0000010010 0b0000100100\n 4 0b0001000000 0b0010000000 0b0100000000\n 5 0b0001000001 0b0010000010 0b0100000100\n 6 0b0001001000 0b0010010000 0b0100100000\n 7 0b0001001001 0b0010010010 0b0100100100\n\n :param packed: packed tensor\n :param n: number of components that we will eventually want to Morton code\n :param chunk_bits: the number of bits that represent each coordinate\n :return: spaced-out bit representation, ready to be interleaved\n ' a = nb.uint64(packed) a = (a & nb.uint64(255)) x = 0 for i in range(chunk_bits): bit_to_set = (nb.uint64(1) << nb.uint64((i * n))) x |= ((a << nb.uint64(((n - 1) * i))) & bit_to_set) return x<|docstring|>A relatively inefficient generalization of the "separate bits" step of Morton encoding. Assuming that each of the `n` coordinates has `chunk_bits` bits, we can "space out" each bit of each coordinate `n` spaces at a time. >>> for i in range(8): ... print(i, ... format(separate_n_nb(i, 3, 3), '#012b'), ... format(separate_n_nb(i, 3, 3) << 1, '#012b'), ... 
format(separate_n_nb(i, 3, 3) << 2, '#012b')) 0 0b0000000000 0b0000000000 0b0000000000 1 0b0000000001 0b0000000010 0b0000000100 2 0b0000001000 0b0000010000 0b0000100000 3 0b0000001001 0b0000010010 0b0000100100 4 0b0001000000 0b0010000000 0b0100000000 5 0b0001000001 0b0010000010 0b0100000100 6 0b0001001000 0b0010010000 0b0100100000 7 0b0001001001 0b0010010010 0b0100100100 :param packed: packed tensor :param n: number of components that we will eventually want to Morton code :param chunk_bits: the number of bits that represent each coordinate :return: spaced-out bit representation, ready to be interleaved<|endoftext|>
3434840cac354ebbc3ee887e2e90976fb692dea23839f49078cd42ad38207d9a
@nb.njit('uint64(uint8[:], uint8)') def encode_single_coord(coord, chunk_bits): '\n Encodes a coordinate in ℝⁿ in ℝ¹ using Morton ordering, assuming that\n the size of each dimension is 0..2^{chunk_bits}\n\n >>> morton_offsets = set()\n >>> for i in range(16):\n ... for j in range(16):\n ... morton_offsets.add(encode_single_coord(\n ... np.array([i, j], dtype=np.uint8),\n ... 4))\n >>> morton_offsets == {i for i in range(256)}\n True\n\n Here we demonstrate that there is mapping from coordinates in a 16x16 square\n to the numbers 0..255\n\n :param coord: coordinate to encode, numba array of type uint8, size <= 8\n :param chunk_bits: coordinate dimensions\n :return: Morton-coded offset of type uint64\n ' assert (coord.shape[0] <= 8) x = nb.uint64(0) for i in range(coord.shape[0]): x += (separate_n_nb(coord[i], coord.shape[0], chunk_bits) << i) return x
Encodes a coordinate in ℝⁿ in ℝ¹ using Morton ordering, assuming that the size of each dimension is 0..2^{chunk_bits} >>> morton_offsets = set() >>> for i in range(16): ... for j in range(16): ... morton_offsets.add(encode_single_coord( ... np.array([i, j], dtype=np.uint8), ... 4)) >>> morton_offsets == {i for i in range(256)} True Here we demonstrate that there is mapping from coordinates in a 16x16 square to the numbers 0..255 :param coord: coordinate to encode, numba array of type uint8, size <= 8 :param chunk_bits: coordinate dimensions :return: Morton-coded offset of type uint64
morton.py
encode_single_coord
AnimatedRNG/lsc
0
python
@nb.njit('uint64(uint8[:], uint8)') def encode_single_coord(coord, chunk_bits): '\n Encodes a coordinate in ℝⁿ in ℝ¹ using Morton ordering, assuming that\n the size of each dimension is 0..2^{chunk_bits}\n\n >>> morton_offsets = set()\n >>> for i in range(16):\n ... for j in range(16):\n ... morton_offsets.add(encode_single_coord(\n ... np.array([i, j], dtype=np.uint8),\n ... 4))\n >>> morton_offsets == {i for i in range(256)}\n True\n\n Here we demonstrate that there is mapping from coordinates in a 16x16 square\n to the numbers 0..255\n\n :param coord: coordinate to encode, numba array of type uint8, size <= 8\n :param chunk_bits: coordinate dimensions\n :return: Morton-coded offset of type uint64\n ' assert (coord.shape[0] <= 8) x = nb.uint64(0) for i in range(coord.shape[0]): x += (separate_n_nb(coord[i], coord.shape[0], chunk_bits) << i) return x
@nb.njit('uint64(uint8[:], uint8)') def encode_single_coord(coord, chunk_bits): '\n Encodes a coordinate in ℝⁿ in ℝ¹ using Morton ordering, assuming that\n the size of each dimension is 0..2^{chunk_bits}\n\n >>> morton_offsets = set()\n >>> for i in range(16):\n ... for j in range(16):\n ... morton_offsets.add(encode_single_coord(\n ... np.array([i, j], dtype=np.uint8),\n ... 4))\n >>> morton_offsets == {i for i in range(256)}\n True\n\n Here we demonstrate that there is mapping from coordinates in a 16x16 square\n to the numbers 0..255\n\n :param coord: coordinate to encode, numba array of type uint8, size <= 8\n :param chunk_bits: coordinate dimensions\n :return: Morton-coded offset of type uint64\n ' assert (coord.shape[0] <= 8) x = nb.uint64(0) for i in range(coord.shape[0]): x += (separate_n_nb(coord[i], coord.shape[0], chunk_bits) << i) return x<|docstring|>Encodes a coordinate in ℝⁿ in ℝ¹ using Morton ordering, assuming that the size of each dimension is 0..2^{chunk_bits} >>> morton_offsets = set() >>> for i in range(16): ... for j in range(16): ... morton_offsets.add(encode_single_coord( ... np.array([i, j], dtype=np.uint8), ... 4)) >>> morton_offsets == {i for i in range(256)} True Here we demonstrate that there is mapping from coordinates in a 16x16 square to the numbers 0..255 :param coord: coordinate to encode, numba array of type uint8, size <= 8 :param chunk_bits: coordinate dimensions :return: Morton-coded offset of type uint64<|endoftext|>
67cdc69d9e657bf849bbbeee41f035524ee55804420ba176448cb7ea326cddf9
@nb.njit('uint8[:](uint64, uint8, uint8)') def decode_single_coord(offset, n, chunk_bits): '\n The reverse of the Morton encode function above\n\n >>> verify_decode = set()\n >>> for i in range(16):\n ... for j in range(16):\n ... coord = np.array([i, j], dtype=np.uint8)\n ... encoded = encode_single_coord(coord, 4)\n ... decoded = decode_single_coord(encoded, 2, 4)\n ... verify_decode.add(np.array_equal(coord, decoded))\n >>> all(v for v in verify_decode)\n True\n\n :param offset: morton encoded offset\n :param n: dimensionality of coordinates\n :param chunk_bits: size of the coordinate dimensions\n ' coord = np.zeros(n, dtype=np.uint8) for i in range(n): coord[i] = pack_n_nb((offset >> i), n, chunk_bits) return coord
The reverse of the Morton encode function above >>> verify_decode = set() >>> for i in range(16): ... for j in range(16): ... coord = np.array([i, j], dtype=np.uint8) ... encoded = encode_single_coord(coord, 4) ... decoded = decode_single_coord(encoded, 2, 4) ... verify_decode.add(np.array_equal(coord, decoded)) >>> all(v for v in verify_decode) True :param offset: morton encoded offset :param n: dimensionality of coordinates :param chunk_bits: size of the coordinate dimensions
morton.py
decode_single_coord
AnimatedRNG/lsc
0
python
@nb.njit('uint8[:](uint64, uint8, uint8)') def decode_single_coord(offset, n, chunk_bits): '\n The reverse of the Morton encode function above\n\n >>> verify_decode = set()\n >>> for i in range(16):\n ... for j in range(16):\n ... coord = np.array([i, j], dtype=np.uint8)\n ... encoded = encode_single_coord(coord, 4)\n ... decoded = decode_single_coord(encoded, 2, 4)\n ... verify_decode.add(np.array_equal(coord, decoded))\n >>> all(v for v in verify_decode)\n True\n\n :param offset: morton encoded offset\n :param n: dimensionality of coordinates\n :param chunk_bits: size of the coordinate dimensions\n ' coord = np.zeros(n, dtype=np.uint8) for i in range(n): coord[i] = pack_n_nb((offset >> i), n, chunk_bits) return coord
@nb.njit('uint8[:](uint64, uint8, uint8)') def decode_single_coord(offset, n, chunk_bits): '\n The reverse of the Morton encode function above\n\n >>> verify_decode = set()\n >>> for i in range(16):\n ... for j in range(16):\n ... coord = np.array([i, j], dtype=np.uint8)\n ... encoded = encode_single_coord(coord, 4)\n ... decoded = decode_single_coord(encoded, 2, 4)\n ... verify_decode.add(np.array_equal(coord, decoded))\n >>> all(v for v in verify_decode)\n True\n\n :param offset: morton encoded offset\n :param n: dimensionality of coordinates\n :param chunk_bits: size of the coordinate dimensions\n ' coord = np.zeros(n, dtype=np.uint8) for i in range(n): coord[i] = pack_n_nb((offset >> i), n, chunk_bits) return coord<|docstring|>The reverse of the Morton encode function above >>> verify_decode = set() >>> for i in range(16): ... for j in range(16): ... coord = np.array([i, j], dtype=np.uint8) ... encoded = encode_single_coord(coord, 4) ... decoded = decode_single_coord(encoded, 2, 4) ... verify_decode.add(np.array_equal(coord, decoded)) >>> all(v for v in verify_decode) True :param offset: morton encoded offset :param n: dimensionality of coordinates :param chunk_bits: size of the coordinate dimensions<|endoftext|>
29960a3fc043eca1d7f7e320721d2385051c7618be9ef089c7d2e2f4d66c1cca
@nb.njit def morton_encode_nb(coords): '\n >>> x, y = np.arange(8), np.arange(8)\n >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing=\'ij\')\n >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 8, 8)\n\n For the sake of clarity, let\'s inspect these values\n\n >>> with np.printoptions(formatter={\'float\': lambda x: "{0:0.3f}".format(x)}):\n ... print(inp)\n [[[0.000 1.000 2.000 3.000 4.000 5.000 6.000 7.000]\n [1.000 1.414 2.236 3.162 4.123 5.099 6.083 7.071]\n [2.000 2.236 2.828 3.606 4.472 5.385 6.325 7.280]\n [3.000 3.162 3.606 4.243 5.000 5.831 6.708 7.616]\n [4.000 4.123 4.472 5.000 5.657 6.403 7.211 8.062]\n [5.000 5.099 5.385 5.831 6.403 7.071 7.810 8.602]\n [6.000 6.083 6.325 6.708 7.211 7.810 8.485 9.220]\n [7.000 7.071 7.280 7.616 8.062 8.602 9.220 9.899]]]\n\n We rearrange them according to the Morton encoding, and then reshape\n the resulting array into the same dimensions as the chunks. You can see\n that (for the most part) groups similar frequency ranges close to each\n other (TODO: perhaps this isn\'t the best visualization...)\n\n >>> with np.printoptions(formatter={\'float\': lambda x: "{0:0.3f}".format(x)}):\n ... print(morton_encode_nb(inp)[0].reshape(8, 8))\n [[0.000 1.000 1.000 1.414 2.000 3.000 2.236 3.162]\n [2.000 2.236 3.000 3.162 2.828 3.606 3.606 4.243]\n [4.000 5.000 4.123 5.099 6.000 7.000 6.083 7.071]\n [4.472 5.385 5.000 5.831 6.325 7.280 6.708 7.616]\n [4.000 4.123 5.000 5.099 4.472 5.000 5.385 5.831]\n [6.000 6.083 7.000 7.071 6.325 6.708 7.280 7.616]\n [5.657 6.403 6.403 7.071 7.211 8.062 7.810 8.602]\n [7.211 7.810 8.062 8.602 8.485 9.220 9.220 9.899]]\n\n :param coords: coords is [BS, chunk1, chunk2, chunk3...]. 
All chunks\n must have the same size!\n :return: rearranged array of size [BS, product of chunk sizes]\n ' assert (len(coords.shape) <= 9) n = (len(coords.shape) - 1) bs = coords.shape[0] chunk_size = nb.uint8(coords.shape[n]) total_chunk_size = nb.int64((chunk_size ** n)) chunk_bits = log2i((chunk_size - 1)) ind = np.zeros(n, dtype=nb.uint8) output = np.zeros((bs, total_chunk_size), dtype=coords.dtype) for (index, x) in np.ndenumerate(coords): for i in range(n): ind[i] = np.uint8(index[(1 + i)]) morton_offset = encode_single_coord(ind, chunk_bits) output[(index[0], morton_offset)] = x return output
>>> x, y = np.arange(8), np.arange(8) >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 8, 8) For the sake of clarity, let's inspect these values >>> with np.printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}): ... print(inp) [[[0.000 1.000 2.000 3.000 4.000 5.000 6.000 7.000] [1.000 1.414 2.236 3.162 4.123 5.099 6.083 7.071] [2.000 2.236 2.828 3.606 4.472 5.385 6.325 7.280] [3.000 3.162 3.606 4.243 5.000 5.831 6.708 7.616] [4.000 4.123 4.472 5.000 5.657 6.403 7.211 8.062] [5.000 5.099 5.385 5.831 6.403 7.071 7.810 8.602] [6.000 6.083 6.325 6.708 7.211 7.810 8.485 9.220] [7.000 7.071 7.280 7.616 8.062 8.602 9.220 9.899]]] We rearrange them according to the Morton encoding, and then reshape the resulting array into the same dimensions as the chunks. You can see that (for the most part) groups similar frequency ranges close to each other (TODO: perhaps this isn't the best visualization...) >>> with np.printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}): ... print(morton_encode_nb(inp)[0].reshape(8, 8)) [[0.000 1.000 1.000 1.414 2.000 3.000 2.236 3.162] [2.000 2.236 3.000 3.162 2.828 3.606 3.606 4.243] [4.000 5.000 4.123 5.099 6.000 7.000 6.083 7.071] [4.472 5.385 5.000 5.831 6.325 7.280 6.708 7.616] [4.000 4.123 5.000 5.099 4.472 5.000 5.385 5.831] [6.000 6.083 7.000 7.071 6.325 6.708 7.280 7.616] [5.657 6.403 6.403 7.071 7.211 8.062 7.810 8.602] [7.211 7.810 8.062 8.602 8.485 9.220 9.220 9.899]] :param coords: coords is [BS, chunk1, chunk2, chunk3...]. All chunks must have the same size! :return: rearranged array of size [BS, product of chunk sizes]
morton.py
morton_encode_nb
AnimatedRNG/lsc
0
python
@nb.njit def morton_encode_nb(coords): '\n >>> x, y = np.arange(8), np.arange(8)\n >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing=\'ij\')\n >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 8, 8)\n\n For the sake of clarity, let\'s inspect these values\n\n >>> with np.printoptions(formatter={\'float\': lambda x: "{0:0.3f}".format(x)}):\n ... print(inp)\n [[[0.000 1.000 2.000 3.000 4.000 5.000 6.000 7.000]\n [1.000 1.414 2.236 3.162 4.123 5.099 6.083 7.071]\n [2.000 2.236 2.828 3.606 4.472 5.385 6.325 7.280]\n [3.000 3.162 3.606 4.243 5.000 5.831 6.708 7.616]\n [4.000 4.123 4.472 5.000 5.657 6.403 7.211 8.062]\n [5.000 5.099 5.385 5.831 6.403 7.071 7.810 8.602]\n [6.000 6.083 6.325 6.708 7.211 7.810 8.485 9.220]\n [7.000 7.071 7.280 7.616 8.062 8.602 9.220 9.899]]]\n\n We rearrange them according to the Morton encoding, and then reshape\n the resulting array into the same dimensions as the chunks. You can see\n that (for the most part) groups similar frequency ranges close to each\n other (TODO: perhaps this isn\'t the best visualization...)\n\n >>> with np.printoptions(formatter={\'float\': lambda x: "{0:0.3f}".format(x)}):\n ... print(morton_encode_nb(inp)[0].reshape(8, 8))\n [[0.000 1.000 1.000 1.414 2.000 3.000 2.236 3.162]\n [2.000 2.236 3.000 3.162 2.828 3.606 3.606 4.243]\n [4.000 5.000 4.123 5.099 6.000 7.000 6.083 7.071]\n [4.472 5.385 5.000 5.831 6.325 7.280 6.708 7.616]\n [4.000 4.123 5.000 5.099 4.472 5.000 5.385 5.831]\n [6.000 6.083 7.000 7.071 6.325 6.708 7.280 7.616]\n [5.657 6.403 6.403 7.071 7.211 8.062 7.810 8.602]\n [7.211 7.810 8.062 8.602 8.485 9.220 9.220 9.899]]\n\n :param coords: coords is [BS, chunk1, chunk2, chunk3...]. 
All chunks\n must have the same size!\n :return: rearranged array of size [BS, product of chunk sizes]\n ' assert (len(coords.shape) <= 9) n = (len(coords.shape) - 1) bs = coords.shape[0] chunk_size = nb.uint8(coords.shape[n]) total_chunk_size = nb.int64((chunk_size ** n)) chunk_bits = log2i((chunk_size - 1)) ind = np.zeros(n, dtype=nb.uint8) output = np.zeros((bs, total_chunk_size), dtype=coords.dtype) for (index, x) in np.ndenumerate(coords): for i in range(n): ind[i] = np.uint8(index[(1 + i)]) morton_offset = encode_single_coord(ind, chunk_bits) output[(index[0], morton_offset)] = x return output
@nb.njit def morton_encode_nb(coords): '\n >>> x, y = np.arange(8), np.arange(8)\n >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing=\'ij\')\n >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 8, 8)\n\n For the sake of clarity, let\'s inspect these values\n\n >>> with np.printoptions(formatter={\'float\': lambda x: "{0:0.3f}".format(x)}):\n ... print(inp)\n [[[0.000 1.000 2.000 3.000 4.000 5.000 6.000 7.000]\n [1.000 1.414 2.236 3.162 4.123 5.099 6.083 7.071]\n [2.000 2.236 2.828 3.606 4.472 5.385 6.325 7.280]\n [3.000 3.162 3.606 4.243 5.000 5.831 6.708 7.616]\n [4.000 4.123 4.472 5.000 5.657 6.403 7.211 8.062]\n [5.000 5.099 5.385 5.831 6.403 7.071 7.810 8.602]\n [6.000 6.083 6.325 6.708 7.211 7.810 8.485 9.220]\n [7.000 7.071 7.280 7.616 8.062 8.602 9.220 9.899]]]\n\n We rearrange them according to the Morton encoding, and then reshape\n the resulting array into the same dimensions as the chunks. You can see\n that (for the most part) groups similar frequency ranges close to each\n other (TODO: perhaps this isn\'t the best visualization...)\n\n >>> with np.printoptions(formatter={\'float\': lambda x: "{0:0.3f}".format(x)}):\n ... print(morton_encode_nb(inp)[0].reshape(8, 8))\n [[0.000 1.000 1.000 1.414 2.000 3.000 2.236 3.162]\n [2.000 2.236 3.000 3.162 2.828 3.606 3.606 4.243]\n [4.000 5.000 4.123 5.099 6.000 7.000 6.083 7.071]\n [4.472 5.385 5.000 5.831 6.325 7.280 6.708 7.616]\n [4.000 4.123 5.000 5.099 4.472 5.000 5.385 5.831]\n [6.000 6.083 7.000 7.071 6.325 6.708 7.280 7.616]\n [5.657 6.403 6.403 7.071 7.211 8.062 7.810 8.602]\n [7.211 7.810 8.062 8.602 8.485 9.220 9.220 9.899]]\n\n :param coords: coords is [BS, chunk1, chunk2, chunk3...]. 
All chunks\n must have the same size!\n :return: rearranged array of size [BS, product of chunk sizes]\n ' assert (len(coords.shape) <= 9) n = (len(coords.shape) - 1) bs = coords.shape[0] chunk_size = nb.uint8(coords.shape[n]) total_chunk_size = nb.int64((chunk_size ** n)) chunk_bits = log2i((chunk_size - 1)) ind = np.zeros(n, dtype=nb.uint8) output = np.zeros((bs, total_chunk_size), dtype=coords.dtype) for (index, x) in np.ndenumerate(coords): for i in range(n): ind[i] = np.uint8(index[(1 + i)]) morton_offset = encode_single_coord(ind, chunk_bits) output[(index[0], morton_offset)] = x return output<|docstring|>>>> x, y = np.arange(8), np.arange(8) >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 8, 8) For the sake of clarity, let's inspect these values >>> with np.printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}): ... print(inp) [[[0.000 1.000 2.000 3.000 4.000 5.000 6.000 7.000] [1.000 1.414 2.236 3.162 4.123 5.099 6.083 7.071] [2.000 2.236 2.828 3.606 4.472 5.385 6.325 7.280] [3.000 3.162 3.606 4.243 5.000 5.831 6.708 7.616] [4.000 4.123 4.472 5.000 5.657 6.403 7.211 8.062] [5.000 5.099 5.385 5.831 6.403 7.071 7.810 8.602] [6.000 6.083 6.325 6.708 7.211 7.810 8.485 9.220] [7.000 7.071 7.280 7.616 8.062 8.602 9.220 9.899]]] We rearrange them according to the Morton encoding, and then reshape the resulting array into the same dimensions as the chunks. You can see that (for the most part) groups similar frequency ranges close to each other (TODO: perhaps this isn't the best visualization...) >>> with np.printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)}): ... 
print(morton_encode_nb(inp)[0].reshape(8, 8)) [[0.000 1.000 1.000 1.414 2.000 3.000 2.236 3.162] [2.000 2.236 3.000 3.162 2.828 3.606 3.606 4.243] [4.000 5.000 4.123 5.099 6.000 7.000 6.083 7.071] [4.472 5.385 5.000 5.831 6.325 7.280 6.708 7.616] [4.000 4.123 5.000 5.099 4.472 5.000 5.385 5.831] [6.000 6.083 7.000 7.071 6.325 6.708 7.280 7.616] [5.657 6.403 6.403 7.071 7.211 8.062 7.810 8.602] [7.211 7.810 8.062 8.602 8.485 9.220 9.220 9.899]] :param coords: coords is [BS, chunk1, chunk2, chunk3...]. All chunks must have the same size! :return: rearranged array of size [BS, product of chunk sizes]<|endoftext|>
d0391f801e0deb333572ae383dc20fb46156af9baae3779f00a720700f840945
@nb.njit def morton_decode_nb(offsets, output): "\n >>> x, y = np.arange(64), np.arange(64)\n >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')\n >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 64, 64)\n >>> recon = np.zeros_like(inp)\n\n >>> morton_decode_nb(morton_encode_nb(inp), recon)\n >>> (inp - recon).max() < 1e-5\n True\n\n This function is basically the inverse of `morton_encode_nb`, converting\n from 1D offsets to ND coordinates. It stores the results in `output\n\n :param offsets: [BS, product of chunk sizes] encoded offsets\n :param output: output is [BS, chunk1, chunk2, chunk3...]. All chunks must\n have the same size!\n " bs = offsets.shape[0] assert (bs == output.shape[0]) n = (len(output.shape) - 1) assert (n <= 8) chunk_size = output.shape[1] ind = np.zeros(n, dtype=nb.uint8) chunk_bits = log2i((chunk_size - 1)) for (index, _) in np.ndenumerate(output): for i in range(n): ind[i] = np.uint8(index[(1 + i)]) morton_offset = encode_single_coord(ind, chunk_bits) output[index] = offsets[(index[0], morton_offset)]
>>> x, y = np.arange(64), np.arange(64) >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 64, 64) >>> recon = np.zeros_like(inp) >>> morton_decode_nb(morton_encode_nb(inp), recon) >>> (inp - recon).max() < 1e-5 True This function is basically the inverse of `morton_encode_nb`, converting from 1D offsets to ND coordinates. It stores the results in `output :param offsets: [BS, product of chunk sizes] encoded offsets :param output: output is [BS, chunk1, chunk2, chunk3...]. All chunks must have the same size!
morton.py
morton_decode_nb
AnimatedRNG/lsc
0
python
@nb.njit def morton_decode_nb(offsets, output): "\n >>> x, y = np.arange(64), np.arange(64)\n >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')\n >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 64, 64)\n >>> recon = np.zeros_like(inp)\n\n >>> morton_decode_nb(morton_encode_nb(inp), recon)\n >>> (inp - recon).max() < 1e-5\n True\n\n This function is basically the inverse of `morton_encode_nb`, converting\n from 1D offsets to ND coordinates. It stores the results in `output\n\n :param offsets: [BS, product of chunk sizes] encoded offsets\n :param output: output is [BS, chunk1, chunk2, chunk3...]. All chunks must\n have the same size!\n " bs = offsets.shape[0] assert (bs == output.shape[0]) n = (len(output.shape) - 1) assert (n <= 8) chunk_size = output.shape[1] ind = np.zeros(n, dtype=nb.uint8) chunk_bits = log2i((chunk_size - 1)) for (index, _) in np.ndenumerate(output): for i in range(n): ind[i] = np.uint8(index[(1 + i)]) morton_offset = encode_single_coord(ind, chunk_bits) output[index] = offsets[(index[0], morton_offset)]
@nb.njit def morton_decode_nb(offsets, output): "\n >>> x, y = np.arange(64), np.arange(64)\n >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij')\n >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 64, 64)\n >>> recon = np.zeros_like(inp)\n\n >>> morton_decode_nb(morton_encode_nb(inp), recon)\n >>> (inp - recon).max() < 1e-5\n True\n\n This function is basically the inverse of `morton_encode_nb`, converting\n from 1D offsets to ND coordinates. It stores the results in `output\n\n :param offsets: [BS, product of chunk sizes] encoded offsets\n :param output: output is [BS, chunk1, chunk2, chunk3...]. All chunks must\n have the same size!\n " bs = offsets.shape[0] assert (bs == output.shape[0]) n = (len(output.shape) - 1) assert (n <= 8) chunk_size = output.shape[1] ind = np.zeros(n, dtype=nb.uint8) chunk_bits = log2i((chunk_size - 1)) for (index, _) in np.ndenumerate(output): for i in range(n): ind[i] = np.uint8(index[(1 + i)]) morton_offset = encode_single_coord(ind, chunk_bits) output[index] = offsets[(index[0], morton_offset)]<|docstring|>>>> x, y = np.arange(64), np.arange(64) >>> xv, yv = np.meshgrid(x, y, sparse=False, indexing='ij') >>> inp = np.sqrt(xv ** 2 + yv ** 2).reshape(1, 64, 64) >>> recon = np.zeros_like(inp) >>> morton_decode_nb(morton_encode_nb(inp), recon) >>> (inp - recon).max() < 1e-5 True This function is basically the inverse of `morton_encode_nb`, converting from 1D offsets to ND coordinates. It stores the results in `output :param offsets: [BS, product of chunk sizes] encoded offsets :param output: output is [BS, chunk1, chunk2, chunk3...]. All chunks must have the same size!<|endoftext|>
ce75354e5c403f7637dc56d58cfd7cf9cae0aa8f57268ea402b781a5d3308b62
def detect_windows(self, images_windows): '\n Do windowed detection over given images and windows. Windows are\n extracted then warped to the input dimensions of the net.\n\n Parameters\n ----------\n images_windows: (image filename, window list) iterable.\n context_crop: size of context border to crop in pixels.\n\n Returns\n -------\n detections: list of {filename: image filename, window: crop coordinates,\n predictions: prediction vector} dicts.\n ' window_inputs = [] for (image_fname, windows) in images_windows: image = caffe.io.load_image(image_fname).astype(np.float32) for window in windows: window_inputs.append(self.crop(image, window)) in_ = self.inputs[0] caffe_in = np.zeros(((len(window_inputs), window_inputs[0].shape[2]) + self.blobs[in_].data.shape[2:]), dtype=np.float32) for (ix, window_in) in enumerate(window_inputs): caffe_in[ix] = self.transformer.preprocess(in_, window_in) out = self.forward_all(**{in_: caffe_in}) predictions = out[self.outputs[0]] detections = [] ix = 0 for (image_fname, windows) in images_windows: for window in windows: detections.append({'window': window, 'prediction': predictions[ix], 'filename': image_fname}) ix += 1 return detections
Do windowed detection over given images and windows. Windows are extracted then warped to the input dimensions of the net. Parameters ---------- images_windows: (image filename, window list) iterable. context_crop: size of context border to crop in pixels. Returns ------- detections: list of {filename: image filename, window: crop coordinates, predictions: prediction vector} dicts.
python/caffe/detector.py
detect_windows
raytroop/caffe
36,275
python
def detect_windows(self, images_windows): '\n Do windowed detection over given images and windows. Windows are\n extracted then warped to the input dimensions of the net.\n\n Parameters\n ----------\n images_windows: (image filename, window list) iterable.\n context_crop: size of context border to crop in pixels.\n\n Returns\n -------\n detections: list of {filename: image filename, window: crop coordinates,\n predictions: prediction vector} dicts.\n ' window_inputs = [] for (image_fname, windows) in images_windows: image = caffe.io.load_image(image_fname).astype(np.float32) for window in windows: window_inputs.append(self.crop(image, window)) in_ = self.inputs[0] caffe_in = np.zeros(((len(window_inputs), window_inputs[0].shape[2]) + self.blobs[in_].data.shape[2:]), dtype=np.float32) for (ix, window_in) in enumerate(window_inputs): caffe_in[ix] = self.transformer.preprocess(in_, window_in) out = self.forward_all(**{in_: caffe_in}) predictions = out[self.outputs[0]] detections = [] ix = 0 for (image_fname, windows) in images_windows: for window in windows: detections.append({'window': window, 'prediction': predictions[ix], 'filename': image_fname}) ix += 1 return detections
def detect_windows(self, images_windows): '\n Do windowed detection over given images and windows. Windows are\n extracted then warped to the input dimensions of the net.\n\n Parameters\n ----------\n images_windows: (image filename, window list) iterable.\n context_crop: size of context border to crop in pixels.\n\n Returns\n -------\n detections: list of {filename: image filename, window: crop coordinates,\n predictions: prediction vector} dicts.\n ' window_inputs = [] for (image_fname, windows) in images_windows: image = caffe.io.load_image(image_fname).astype(np.float32) for window in windows: window_inputs.append(self.crop(image, window)) in_ = self.inputs[0] caffe_in = np.zeros(((len(window_inputs), window_inputs[0].shape[2]) + self.blobs[in_].data.shape[2:]), dtype=np.float32) for (ix, window_in) in enumerate(window_inputs): caffe_in[ix] = self.transformer.preprocess(in_, window_in) out = self.forward_all(**{in_: caffe_in}) predictions = out[self.outputs[0]] detections = [] ix = 0 for (image_fname, windows) in images_windows: for window in windows: detections.append({'window': window, 'prediction': predictions[ix], 'filename': image_fname}) ix += 1 return detections<|docstring|>Do windowed detection over given images and windows. Windows are extracted then warped to the input dimensions of the net. Parameters ---------- images_windows: (image filename, window list) iterable. context_crop: size of context border to crop in pixels. Returns ------- detections: list of {filename: image filename, window: crop coordinates, predictions: prediction vector} dicts.<|endoftext|>
041edc0924a6af4db8ba04e76bc5a04d463f69d850a31241f9222ef1a6d5886f
def detect_selective_search(self, image_fnames): '\n Do windowed detection over Selective Search proposals by extracting\n the crop and warping to the input dimensions of the net.\n\n Parameters\n ----------\n image_fnames: list\n\n Returns\n -------\n detections: list of {filename: image filename, window: crop coordinates,\n predictions: prediction vector} dicts.\n ' import selective_search_ijcv_with_python as selective_search image_fnames = [os.path.abspath(f) for f in image_fnames] windows_list = selective_search.get_windows(image_fnames, cmd='selective_search_rcnn') return self.detect_windows(zip(image_fnames, windows_list))
Do windowed detection over Selective Search proposals by extracting the crop and warping to the input dimensions of the net. Parameters ---------- image_fnames: list Returns ------- detections: list of {filename: image filename, window: crop coordinates, predictions: prediction vector} dicts.
python/caffe/detector.py
detect_selective_search
raytroop/caffe
36,275
python
def detect_selective_search(self, image_fnames): '\n Do windowed detection over Selective Search proposals by extracting\n the crop and warping to the input dimensions of the net.\n\n Parameters\n ----------\n image_fnames: list\n\n Returns\n -------\n detections: list of {filename: image filename, window: crop coordinates,\n predictions: prediction vector} dicts.\n ' import selective_search_ijcv_with_python as selective_search image_fnames = [os.path.abspath(f) for f in image_fnames] windows_list = selective_search.get_windows(image_fnames, cmd='selective_search_rcnn') return self.detect_windows(zip(image_fnames, windows_list))
def detect_selective_search(self, image_fnames): '\n Do windowed detection over Selective Search proposals by extracting\n the crop and warping to the input dimensions of the net.\n\n Parameters\n ----------\n image_fnames: list\n\n Returns\n -------\n detections: list of {filename: image filename, window: crop coordinates,\n predictions: prediction vector} dicts.\n ' import selective_search_ijcv_with_python as selective_search image_fnames = [os.path.abspath(f) for f in image_fnames] windows_list = selective_search.get_windows(image_fnames, cmd='selective_search_rcnn') return self.detect_windows(zip(image_fnames, windows_list))<|docstring|>Do windowed detection over Selective Search proposals by extracting the crop and warping to the input dimensions of the net. Parameters ---------- image_fnames: list Returns ------- detections: list of {filename: image filename, window: crop coordinates, predictions: prediction vector} dicts.<|endoftext|>
5ac097739c4c85253b500f00ca85b36c4f9cd4e9d58eb6368d2803c0f3376405
def crop(self, im, window): '\n Crop a window from the image for detection. Include surrounding context\n according to the `context_pad` configuration.\n\n Parameters\n ----------\n im: H x W x K image ndarray to crop.\n window: bounding box coordinates as ymin, xmin, ymax, xmax.\n\n Returns\n -------\n crop: cropped window.\n ' crop = im[(window[0]:window[2], window[1]:window[3])] if self.context_pad: box = window.copy() crop_size = self.blobs[self.inputs[0]].width scale = (crop_size / ((1.0 * crop_size) - (self.context_pad * 2))) half_h = (((box[2] - box[0]) + 1) / 2.0) half_w = (((box[3] - box[1]) + 1) / 2.0) center = ((box[0] + half_h), (box[1] + half_w)) scaled_dims = (scale * np.array(((- half_h), (- half_w), half_h, half_w))) box = np.round((np.tile(center, 2) + scaled_dims)) full_h = ((box[2] - box[0]) + 1) full_w = ((box[3] - box[1]) + 1) scale_h = (crop_size / full_h) scale_w = (crop_size / full_w) pad_y = round((max(0, (- box[0])) * scale_h)) pad_x = round((max(0, (- box[1])) * scale_w)) (im_h, im_w) = im.shape[:2] box = np.clip(box, 0.0, [im_h, im_w, im_h, im_w]) clip_h = ((box[2] - box[0]) + 1) clip_w = ((box[3] - box[1]) + 1) assert ((clip_h > 0) and (clip_w > 0)) crop_h = round((clip_h * scale_h)) crop_w = round((clip_w * scale_w)) if ((pad_y + crop_h) > crop_size): crop_h = (crop_size - pad_y) if ((pad_x + crop_w) > crop_size): crop_w = (crop_size - pad_x) context_crop = im[(box[0]:box[2], box[1]:box[3])] context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w)) crop = (np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean) crop[(pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w))] = context_crop return crop
Crop a window from the image for detection. Include surrounding context according to the `context_pad` configuration. Parameters ---------- im: H x W x K image ndarray to crop. window: bounding box coordinates as ymin, xmin, ymax, xmax. Returns ------- crop: cropped window.
python/caffe/detector.py
crop
raytroop/caffe
36,275
python
def crop(self, im, window): '\n Crop a window from the image for detection. Include surrounding context\n according to the `context_pad` configuration.\n\n Parameters\n ----------\n im: H x W x K image ndarray to crop.\n window: bounding box coordinates as ymin, xmin, ymax, xmax.\n\n Returns\n -------\n crop: cropped window.\n ' crop = im[(window[0]:window[2], window[1]:window[3])] if self.context_pad: box = window.copy() crop_size = self.blobs[self.inputs[0]].width scale = (crop_size / ((1.0 * crop_size) - (self.context_pad * 2))) half_h = (((box[2] - box[0]) + 1) / 2.0) half_w = (((box[3] - box[1]) + 1) / 2.0) center = ((box[0] + half_h), (box[1] + half_w)) scaled_dims = (scale * np.array(((- half_h), (- half_w), half_h, half_w))) box = np.round((np.tile(center, 2) + scaled_dims)) full_h = ((box[2] - box[0]) + 1) full_w = ((box[3] - box[1]) + 1) scale_h = (crop_size / full_h) scale_w = (crop_size / full_w) pad_y = round((max(0, (- box[0])) * scale_h)) pad_x = round((max(0, (- box[1])) * scale_w)) (im_h, im_w) = im.shape[:2] box = np.clip(box, 0.0, [im_h, im_w, im_h, im_w]) clip_h = ((box[2] - box[0]) + 1) clip_w = ((box[3] - box[1]) + 1) assert ((clip_h > 0) and (clip_w > 0)) crop_h = round((clip_h * scale_h)) crop_w = round((clip_w * scale_w)) if ((pad_y + crop_h) > crop_size): crop_h = (crop_size - pad_y) if ((pad_x + crop_w) > crop_size): crop_w = (crop_size - pad_x) context_crop = im[(box[0]:box[2], box[1]:box[3])] context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w)) crop = (np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean) crop[(pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w))] = context_crop return crop
def crop(self, im, window): '\n Crop a window from the image for detection. Include surrounding context\n according to the `context_pad` configuration.\n\n Parameters\n ----------\n im: H x W x K image ndarray to crop.\n window: bounding box coordinates as ymin, xmin, ymax, xmax.\n\n Returns\n -------\n crop: cropped window.\n ' crop = im[(window[0]:window[2], window[1]:window[3])] if self.context_pad: box = window.copy() crop_size = self.blobs[self.inputs[0]].width scale = (crop_size / ((1.0 * crop_size) - (self.context_pad * 2))) half_h = (((box[2] - box[0]) + 1) / 2.0) half_w = (((box[3] - box[1]) + 1) / 2.0) center = ((box[0] + half_h), (box[1] + half_w)) scaled_dims = (scale * np.array(((- half_h), (- half_w), half_h, half_w))) box = np.round((np.tile(center, 2) + scaled_dims)) full_h = ((box[2] - box[0]) + 1) full_w = ((box[3] - box[1]) + 1) scale_h = (crop_size / full_h) scale_w = (crop_size / full_w) pad_y = round((max(0, (- box[0])) * scale_h)) pad_x = round((max(0, (- box[1])) * scale_w)) (im_h, im_w) = im.shape[:2] box = np.clip(box, 0.0, [im_h, im_w, im_h, im_w]) clip_h = ((box[2] - box[0]) + 1) clip_w = ((box[3] - box[1]) + 1) assert ((clip_h > 0) and (clip_w > 0)) crop_h = round((clip_h * scale_h)) crop_w = round((clip_w * scale_w)) if ((pad_y + crop_h) > crop_size): crop_h = (crop_size - pad_y) if ((pad_x + crop_w) > crop_size): crop_w = (crop_size - pad_x) context_crop = im[(box[0]:box[2], box[1]:box[3])] context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w)) crop = (np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean) crop[(pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w))] = context_crop return crop<|docstring|>Crop a window from the image for detection. Include surrounding context according to the `context_pad` configuration. Parameters ---------- im: H x W x K image ndarray to crop. window: bounding box coordinates as ymin, xmin, ymax, xmax. Returns ------- crop: cropped window.<|endoftext|>
437017d115d6e4a198f32630da47d0a536bcd713011102ef050a297aee9ebb18
def configure_crop(self, context_pad): '\n Configure crop dimensions and amount of context for cropping.\n If context is included, make the special input mean for context padding.\n\n Parameters\n ----------\n context_pad : amount of context for cropping.\n ' in_ = self.inputs[0] tpose = self.transformer.transpose[in_] inv_tpose = [tpose[t] for t in tpose] self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose] self.context_pad = context_pad if self.context_pad: in_ = self.inputs[0] transpose = self.transformer.transpose.get(in_) channel_order = self.transformer.channel_swap.get(in_) raw_scale = self.transformer.raw_scale.get(in_) mean = self.transformer.mean.get(in_) if (mean is not None): inv_transpose = [transpose[t] for t in transpose] crop_mean = mean.copy().transpose(inv_transpose) if (channel_order is not None): channel_order_inverse = [channel_order.index(i) for i in range(crop_mean.shape[2])] crop_mean = crop_mean[(:, :, channel_order_inverse)] if (raw_scale is not None): crop_mean /= raw_scale self.crop_mean = crop_mean else: self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
Configure crop dimensions and amount of context for cropping. If context is included, make the special input mean for context padding. Parameters ---------- context_pad : amount of context for cropping.
python/caffe/detector.py
configure_crop
raytroop/caffe
36,275
python
def configure_crop(self, context_pad): '\n Configure crop dimensions and amount of context for cropping.\n If context is included, make the special input mean for context padding.\n\n Parameters\n ----------\n context_pad : amount of context for cropping.\n ' in_ = self.inputs[0] tpose = self.transformer.transpose[in_] inv_tpose = [tpose[t] for t in tpose] self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose] self.context_pad = context_pad if self.context_pad: in_ = self.inputs[0] transpose = self.transformer.transpose.get(in_) channel_order = self.transformer.channel_swap.get(in_) raw_scale = self.transformer.raw_scale.get(in_) mean = self.transformer.mean.get(in_) if (mean is not None): inv_transpose = [transpose[t] for t in transpose] crop_mean = mean.copy().transpose(inv_transpose) if (channel_order is not None): channel_order_inverse = [channel_order.index(i) for i in range(crop_mean.shape[2])] crop_mean = crop_mean[(:, :, channel_order_inverse)] if (raw_scale is not None): crop_mean /= raw_scale self.crop_mean = crop_mean else: self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
def configure_crop(self, context_pad): '\n Configure crop dimensions and amount of context for cropping.\n If context is included, make the special input mean for context padding.\n\n Parameters\n ----------\n context_pad : amount of context for cropping.\n ' in_ = self.inputs[0] tpose = self.transformer.transpose[in_] inv_tpose = [tpose[t] for t in tpose] self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose] self.context_pad = context_pad if self.context_pad: in_ = self.inputs[0] transpose = self.transformer.transpose.get(in_) channel_order = self.transformer.channel_swap.get(in_) raw_scale = self.transformer.raw_scale.get(in_) mean = self.transformer.mean.get(in_) if (mean is not None): inv_transpose = [transpose[t] for t in transpose] crop_mean = mean.copy().transpose(inv_transpose) if (channel_order is not None): channel_order_inverse = [channel_order.index(i) for i in range(crop_mean.shape[2])] crop_mean = crop_mean[(:, :, channel_order_inverse)] if (raw_scale is not None): crop_mean /= raw_scale self.crop_mean = crop_mean else: self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)<|docstring|>Configure crop dimensions and amount of context for cropping. If context is included, make the special input mean for context padding. Parameters ---------- context_pad : amount of context for cropping.<|endoftext|>
46ec2f9a685dae1253decf4df5e26d050322aa85bd16cbf66ddb99c27c7b6bae
def lon360to180(lon): '\n\tConverts longitude values in the range [0,360]\n\tto longitude values in the range [-180,+180].\n\t' lon = np.asanyarray(lon) return (((lon + 180.0) % 360.0) - 180.0)
Converts longitude values in the range [0,360] to longitude values in the range [-180,+180].
calc_deriv/201e-calc_vortbdgt_daily.py
lon360to180
apaloczy/AntarcticaVorticityBudget
1
python
def lon360to180(lon): '\n\tConverts longitude values in the range [0,360]\n\tto longitude values in the range [-180,+180].\n\t' lon = np.asanyarray(lon) return (((lon + 180.0) % 360.0) - 180.0)
def lon360to180(lon): '\n\tConverts longitude values in the range [0,360]\n\tto longitude values in the range [-180,+180].\n\t' lon = np.asanyarray(lon) return (((lon + 180.0) % 360.0) - 180.0)<|docstring|>Converts longitude values in the range [0,360] to longitude values in the range [-180,+180].<|endoftext|>
b817905d83e8a82d5d83302fac332f78b077bd5d64f357914ceb2ede15c79a0f
def test_post_now_application_nda_happy_path(self, test_client, db_session, auth_headers): 'Should return a new NoW NDA' mine = MineFactory() APPLICATION_NDA_DATA['minenumber'] = mine.mine_no post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) post_data = json.loads(post_resp.data.decode()) assert (post_resp.status_code == 201), post_resp.response assert (post_data['messageid'] == APPLICATION_NDA_DATA['messageid']) assert (post_data['application_nda_guid'] is not None) assert (post_data['mine_guid'] == str(mine.mine_guid))
Should return a new NoW NDA
services/core-api/tests/now_submissions/resources/test_application_nda_list_resource.py
test_post_now_application_nda_happy_path
bcgov/mds
25
python
def test_post_now_application_nda_happy_path(self, test_client, db_session, auth_headers): mine = MineFactory() APPLICATION_NDA_DATA['minenumber'] = mine.mine_no post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) post_data = json.loads(post_resp.data.decode()) assert (post_resp.status_code == 201), post_resp.response assert (post_data['messageid'] == APPLICATION_NDA_DATA['messageid']) assert (post_data['application_nda_guid'] is not None) assert (post_data['mine_guid'] == str(mine.mine_guid))
def test_post_now_application_nda_happy_path(self, test_client, db_session, auth_headers): mine = MineFactory() APPLICATION_NDA_DATA['minenumber'] = mine.mine_no post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) post_data = json.loads(post_resp.data.decode()) assert (post_resp.status_code == 201), post_resp.response assert (post_data['messageid'] == APPLICATION_NDA_DATA['messageid']) assert (post_data['application_nda_guid'] is not None) assert (post_data['mine_guid'] == str(mine.mine_guid))<|docstring|>Should return a new NoW NDA<|endoftext|>
965c4647735b377c138271416df52f8586dbbe80b5a2df9ac302e80ea6a2e1b4
def test_post_now_application_messageid_in_use(self, test_client, db_session, auth_headers): 'Should return a 400 messageid in use for NDA' mine = MineFactory() application = NOWApplicationNDAFactory(mine=mine) APPLICATION_NDA_DATA['minenumber'] = mine.mine_no APPLICATION_NDA_DATA['messageid'] = application.messageid post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) assert (post_resp.status_code == 400), post_resp.response
Should return a 400 messageid in use for NDA
services/core-api/tests/now_submissions/resources/test_application_nda_list_resource.py
test_post_now_application_messageid_in_use
bcgov/mds
25
python
def test_post_now_application_messageid_in_use(self, test_client, db_session, auth_headers): mine = MineFactory() application = NOWApplicationNDAFactory(mine=mine) APPLICATION_NDA_DATA['minenumber'] = mine.mine_no APPLICATION_NDA_DATA['messageid'] = application.messageid post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) assert (post_resp.status_code == 400), post_resp.response
def test_post_now_application_messageid_in_use(self, test_client, db_session, auth_headers): mine = MineFactory() application = NOWApplicationNDAFactory(mine=mine) APPLICATION_NDA_DATA['minenumber'] = mine.mine_no APPLICATION_NDA_DATA['messageid'] = application.messageid post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) assert (post_resp.status_code == 400), post_resp.response<|docstring|>Should return a 400 messageid in use for NDA<|endoftext|>
73f8caae406c9505f1728f318bb0c74924ce1e00fbe73e1350d38e9a94626086
def test_post_now_application_no_mine_found(self, test_client, db_session, auth_headers): 'Should return a 400 mine not found for NDA' APPLICATION_NDA_DATA['minenumber'] = '1234567' post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) assert (post_resp.status_code == 400), post_resp.response
Should return a 400 mine not found for NDA
services/core-api/tests/now_submissions/resources/test_application_nda_list_resource.py
test_post_now_application_no_mine_found
bcgov/mds
25
python
def test_post_now_application_no_mine_found(self, test_client, db_session, auth_headers): APPLICATION_NDA_DATA['minenumber'] = '1234567' post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) assert (post_resp.status_code == 400), post_resp.response
def test_post_now_application_no_mine_found(self, test_client, db_session, auth_headers): APPLICATION_NDA_DATA['minenumber'] = '1234567' post_resp = test_client.post('/now-submissions/applications-nda', json=APPLICATION_NDA_DATA, headers=auth_headers['nros_vfcbc_auth_header']) assert (post_resp.status_code == 400), post_resp.response<|docstring|>Should return a 400 mine not found for NDA<|endoftext|>
17d509abfd8a4c010cb47859d3c55d7e544f55ccdac5a7a1b03841fb3c5602c5
def minMutation(self, start, end, bank): '\n :type start: str\n :type end: str\n :type bank: List[str]\n :rtype: int\n ' if ((len(start) < 1) or (len(end) < 1) or (len(bank) < 1) or (not (end in bank))): return (- 1) queue = [] queue.append((start, 0)) bankSet = set(bank) while queue: (current_mutation, current_level) = queue.pop(0) if (current_mutation == end): return current_level for index in range(len(current_mutation)): for char in 'ACGT': mutation = ((current_mutation[:index] + char) + current_mutation[(index + 1):]) if (mutation in bankSet): bankSet.remove(mutation) queue.append((mutation, (current_level + 1))) return (- 1)
:type start: str :type end: str :type bank: List[str] :rtype: int
solutions/0433_MinimumGeneticMutation.py
minMutation
alexwawl/leetcode-solutions-javascript-python
11
python
def minMutation(self, start, end, bank): '\n :type start: str\n :type end: str\n :type bank: List[str]\n :rtype: int\n ' if ((len(start) < 1) or (len(end) < 1) or (len(bank) < 1) or (not (end in bank))): return (- 1) queue = [] queue.append((start, 0)) bankSet = set(bank) while queue: (current_mutation, current_level) = queue.pop(0) if (current_mutation == end): return current_level for index in range(len(current_mutation)): for char in 'ACGT': mutation = ((current_mutation[:index] + char) + current_mutation[(index + 1):]) if (mutation in bankSet): bankSet.remove(mutation) queue.append((mutation, (current_level + 1))) return (- 1)
def minMutation(self, start, end, bank): '\n :type start: str\n :type end: str\n :type bank: List[str]\n :rtype: int\n ' if ((len(start) < 1) or (len(end) < 1) or (len(bank) < 1) or (not (end in bank))): return (- 1) queue = [] queue.append((start, 0)) bankSet = set(bank) while queue: (current_mutation, current_level) = queue.pop(0) if (current_mutation == end): return current_level for index in range(len(current_mutation)): for char in 'ACGT': mutation = ((current_mutation[:index] + char) + current_mutation[(index + 1):]) if (mutation in bankSet): bankSet.remove(mutation) queue.append((mutation, (current_level + 1))) return (- 1)<|docstring|>:type start: str :type end: str :type bank: List[str] :rtype: int<|endoftext|>
66fbb5470b02a119e48d610656138fa9cc8b8874ab07b56e304947b65471f09c
def __init__(__self__, *, endpoint: str, name: str): '\n :param str endpoint: Specifies the endpoint of the action.\n :param str name: Specifies the name of the action.\n ' pulumi.set(__self__, 'endpoint', endpoint) pulumi.set(__self__, 'name', name)
:param str endpoint: Specifies the endpoint of the action. :param str name: Specifies the name of the action.
sdk/python/pulumi_azure/core/outputs.py
__init__
suresh198526/pulumi-azure
0
python
def __init__(__self__, *, endpoint: str, name: str): '\n :param str endpoint: Specifies the endpoint of the action.\n :param str name: Specifies the name of the action.\n ' pulumi.set(__self__, 'endpoint', endpoint) pulumi.set(__self__, 'name', name)
def __init__(__self__, *, endpoint: str, name: str): '\n :param str endpoint: Specifies the endpoint of the action.\n :param str name: Specifies the name of the action.\n ' pulumi.set(__self__, 'endpoint', endpoint) pulumi.set(__self__, 'name', name)<|docstring|>:param str endpoint: Specifies the endpoint of the action. :param str name: Specifies the name of the action.<|endoftext|>
5a5a08451321bd197fa93441b4f1c9585a3cb16c8d43c14ad57d79c1cc637afa
@property @pulumi.getter def endpoint(self) -> str: '\n Specifies the endpoint of the action.\n ' return pulumi.get(self, 'endpoint')
Specifies the endpoint of the action.
sdk/python/pulumi_azure/core/outputs.py
endpoint
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def endpoint(self) -> str: '\n \n ' return pulumi.get(self, 'endpoint')
@property @pulumi.getter def endpoint(self) -> str: '\n \n ' return pulumi.get(self, 'endpoint')<|docstring|>Specifies the endpoint of the action.<|endoftext|>
b82bd907534ea5da88886ddb936d3d4816d562083a26e0ea6ef048fbcab3588e
@property @pulumi.getter def name(self) -> str: '\n Specifies the name of the action.\n ' return pulumi.get(self, 'name')
Specifies the name of the action.
sdk/python/pulumi_azure/core/outputs.py
name
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')<|docstring|>Specifies the name of the action.<|endoftext|>
b466b6028bb8715d03acb93b8f0c7947eb5d997b6217517035af4c4399af3ac8
def __init__(__self__, *, endpoint: str, name: str, routing_type: Optional[str]=None): '\n :param str endpoint: Specifies the endpoint of the route definition.\n :param str name: Specifies the name of the route definition.\n :param str routing_type: The routing type that is supported for the resource request. Valid values are `ResourceTypeRoutingProxy` or `ResourceTypeRoutingProxyCache`. This value defaults to `ResourceTypeRoutingProxy`.\n ' pulumi.set(__self__, 'endpoint', endpoint) pulumi.set(__self__, 'name', name) if (routing_type is not None): pulumi.set(__self__, 'routing_type', routing_type)
:param str endpoint: Specifies the endpoint of the route definition. :param str name: Specifies the name of the route definition. :param str routing_type: The routing type that is supported for the resource request. Valid values are `ResourceTypeRoutingProxy` or `ResourceTypeRoutingProxyCache`. This value defaults to `ResourceTypeRoutingProxy`.
sdk/python/pulumi_azure/core/outputs.py
__init__
suresh198526/pulumi-azure
0
python
def __init__(__self__, *, endpoint: str, name: str, routing_type: Optional[str]=None): '\n :param str endpoint: Specifies the endpoint of the route definition.\n :param str name: Specifies the name of the route definition.\n :param str routing_type: The routing type that is supported for the resource request. Valid values are `ResourceTypeRoutingProxy` or `ResourceTypeRoutingProxyCache`. This value defaults to `ResourceTypeRoutingProxy`.\n ' pulumi.set(__self__, 'endpoint', endpoint) pulumi.set(__self__, 'name', name) if (routing_type is not None): pulumi.set(__self__, 'routing_type', routing_type)
def __init__(__self__, *, endpoint: str, name: str, routing_type: Optional[str]=None): '\n :param str endpoint: Specifies the endpoint of the route definition.\n :param str name: Specifies the name of the route definition.\n :param str routing_type: The routing type that is supported for the resource request. Valid values are `ResourceTypeRoutingProxy` or `ResourceTypeRoutingProxyCache`. This value defaults to `ResourceTypeRoutingProxy`.\n ' pulumi.set(__self__, 'endpoint', endpoint) pulumi.set(__self__, 'name', name) if (routing_type is not None): pulumi.set(__self__, 'routing_type', routing_type)<|docstring|>:param str endpoint: Specifies the endpoint of the route definition. :param str name: Specifies the name of the route definition. :param str routing_type: The routing type that is supported for the resource request. Valid values are `ResourceTypeRoutingProxy` or `ResourceTypeRoutingProxyCache`. This value defaults to `ResourceTypeRoutingProxy`.<|endoftext|>
d1c203303027ff73f781fb0b8325df8cae3d8995a3ba4a43ab36de60cee18c8e
@property @pulumi.getter def endpoint(self) -> str: '\n Specifies the endpoint of the route definition.\n ' return pulumi.get(self, 'endpoint')
Specifies the endpoint of the route definition.
sdk/python/pulumi_azure/core/outputs.py
endpoint
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def endpoint(self) -> str: '\n \n ' return pulumi.get(self, 'endpoint')
@property @pulumi.getter def endpoint(self) -> str: '\n \n ' return pulumi.get(self, 'endpoint')<|docstring|>Specifies the endpoint of the route definition.<|endoftext|>
6ec4fb712825d1316db353cbb5b59ad2e4ff9b712a4ca0eaacac8a150c8b666f
@property @pulumi.getter def name(self) -> str: '\n Specifies the name of the route definition.\n ' return pulumi.get(self, 'name')
Specifies the name of the route definition.
sdk/python/pulumi_azure/core/outputs.py
name
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')<|docstring|>Specifies the name of the route definition.<|endoftext|>
94d7185fe5f40f1cdfe53a1c51f9c5539c0b50bf1778bab5b3e0de4e0fa78625
@property @pulumi.getter(name='routingType') def routing_type(self) -> Optional[str]: '\n The routing type that is supported for the resource request. Valid values are `ResourceTypeRoutingProxy` or `ResourceTypeRoutingProxyCache`. This value defaults to `ResourceTypeRoutingProxy`.\n ' return pulumi.get(self, 'routing_type')
The routing type that is supported for the resource request. Valid values are `ResourceTypeRoutingProxy` or `ResourceTypeRoutingProxyCache`. This value defaults to `ResourceTypeRoutingProxy`.
sdk/python/pulumi_azure/core/outputs.py
routing_type
suresh198526/pulumi-azure
0
python
@property @pulumi.getter(name='routingType') def routing_type(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'routing_type')
@property @pulumi.getter(name='routingType') def routing_type(self) -> Optional[str]: '\n \n ' return pulumi.get(self, 'routing_type')<|docstring|>The routing type that is supported for the resource request. Valid values are `ResourceTypeRoutingProxy` or `ResourceTypeRoutingProxyCache`. This value defaults to `ResourceTypeRoutingProxy`.<|endoftext|>
18cfd879e84778ba2c986f4f093f7b49bec1e296747a1819a30d981d2ed97bc3
def __init__(__self__, *, specification: str): '\n :param str specification: The endpoint where the validation specification is located.\n ' pulumi.set(__self__, 'specification', specification)
:param str specification: The endpoint where the validation specification is located.
sdk/python/pulumi_azure/core/outputs.py
__init__
suresh198526/pulumi-azure
0
python
def __init__(__self__, *, specification: str): '\n \n ' pulumi.set(__self__, 'specification', specification)
def __init__(__self__, *, specification: str): '\n \n ' pulumi.set(__self__, 'specification', specification)<|docstring|>:param str specification: The endpoint where the validation specification is located.<|endoftext|>
db11e8718f7a1d1d2839cbaf59153ad2a04895e3a2ae79e13ccebebf95700bc7
@property @pulumi.getter def specification(self) -> str: '\n The endpoint where the validation specification is located.\n ' return pulumi.get(self, 'specification')
The endpoint where the validation specification is located.
sdk/python/pulumi_azure/core/outputs.py
specification
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def specification(self) -> str: '\n \n ' return pulumi.get(self, 'specification')
@property @pulumi.getter def specification(self) -> str: '\n \n ' return pulumi.get(self, 'specification')<|docstring|>The endpoint where the validation specification is located.<|endoftext|>
82c041f5be5d9a830a5e3eae2a9095bbc23958f72d90d1117d2908b067f74d24
def __init__(__self__, *, id: str, location: str, name: str, tags: Mapping[(str, str)], type: str): '\n :param str id: The ID of this Resource.\n :param str location: The Azure Region in which this Resource exists.\n :param str name: The name of the Resource.\n :param Mapping[str, str] tags: A map of tags assigned to this Resource.\n :param str type: The Resource Type of the Resources you want to list (e.g. `Microsoft.Network/virtualNetworks`). A full list of available Resource Types can be found [here](https://docs.microsoft.com/en-us/azure/azure-resource-manager/azure-services-resource-providers).\n ' pulumi.set(__self__, 'id', id) pulumi.set(__self__, 'location', location) pulumi.set(__self__, 'name', name) pulumi.set(__self__, 'tags', tags) pulumi.set(__self__, 'type', type)
:param str id: The ID of this Resource. :param str location: The Azure Region in which this Resource exists. :param str name: The name of the Resource. :param Mapping[str, str] tags: A map of tags assigned to this Resource. :param str type: The Resource Type of the Resources you want to list (e.g. `Microsoft.Network/virtualNetworks`). A full list of available Resource Types can be found [here](https://docs.microsoft.com/en-us/azure/azure-resource-manager/azure-services-resource-providers).
sdk/python/pulumi_azure/core/outputs.py
__init__
suresh198526/pulumi-azure
0
python
def __init__(__self__, *, id: str, location: str, name: str, tags: Mapping[(str, str)], type: str): '\n :param str id: The ID of this Resource.\n :param str location: The Azure Region in which this Resource exists.\n :param str name: The name of the Resource.\n :param Mapping[str, str] tags: A map of tags assigned to this Resource.\n :param str type: The Resource Type of the Resources you want to list (e.g. `Microsoft.Network/virtualNetworks`). A full list of available Resource Types can be found [here](https://docs.microsoft.com/en-us/azure/azure-resource-manager/azure-services-resource-providers).\n ' pulumi.set(__self__, 'id', id) pulumi.set(__self__, 'location', location) pulumi.set(__self__, 'name', name) pulumi.set(__self__, 'tags', tags) pulumi.set(__self__, 'type', type)
def __init__(__self__, *, id: str, location: str, name: str, tags: Mapping[(str, str)], type: str): '\n :param str id: The ID of this Resource.\n :param str location: The Azure Region in which this Resource exists.\n :param str name: The name of the Resource.\n :param Mapping[str, str] tags: A map of tags assigned to this Resource.\n :param str type: The Resource Type of the Resources you want to list (e.g. `Microsoft.Network/virtualNetworks`). A full list of available Resource Types can be found [here](https://docs.microsoft.com/en-us/azure/azure-resource-manager/azure-services-resource-providers).\n ' pulumi.set(__self__, 'id', id) pulumi.set(__self__, 'location', location) pulumi.set(__self__, 'name', name) pulumi.set(__self__, 'tags', tags) pulumi.set(__self__, 'type', type)<|docstring|>:param str id: The ID of this Resource. :param str location: The Azure Region in which this Resource exists. :param str name: The name of the Resource. :param Mapping[str, str] tags: A map of tags assigned to this Resource. :param str type: The Resource Type of the Resources you want to list (e.g. `Microsoft.Network/virtualNetworks`). A full list of available Resource Types can be found [here](https://docs.microsoft.com/en-us/azure/azure-resource-manager/azure-services-resource-providers).<|endoftext|>
6dfb29f8cbddc3b05598b0e1f884b1e8469c08070a4e2b6ec31c0e5f5e0b7372
@property @pulumi.getter def id(self) -> str: '\n The ID of this Resource.\n ' return pulumi.get(self, 'id')
The ID of this Resource.
sdk/python/pulumi_azure/core/outputs.py
id
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def id(self) -> str: '\n \n ' return pulumi.get(self, 'id')
@property @pulumi.getter def id(self) -> str: '\n \n ' return pulumi.get(self, 'id')<|docstring|>The ID of this Resource.<|endoftext|>
1834d17ce1d3a6f83ffcb95ecbcc063494db1f45806c980d1dfdeca998790183
@property @pulumi.getter def location(self) -> str: '\n The Azure Region in which this Resource exists.\n ' return pulumi.get(self, 'location')
The Azure Region in which this Resource exists.
sdk/python/pulumi_azure/core/outputs.py
location
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def location(self) -> str: '\n \n ' return pulumi.get(self, 'location')
@property @pulumi.getter def location(self) -> str: '\n \n ' return pulumi.get(self, 'location')<|docstring|>The Azure Region in which this Resource exists.<|endoftext|>
ae4134ad03102542e3e8617953e789d012fc31a9a10f8f8273c04f8a3bec9985
@property @pulumi.getter def name(self) -> str: '\n The name of the Resource.\n ' return pulumi.get(self, 'name')
The name of the Resource.
sdk/python/pulumi_azure/core/outputs.py
name
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')<|docstring|>The name of the Resource.<|endoftext|>
8c00abb590634528ba691e4e25844180fd0b3a84cbb82283654ba001b9ef1c0e
@property @pulumi.getter def tags(self) -> Mapping[(str, str)]: '\n A map of tags assigned to this Resource.\n ' return pulumi.get(self, 'tags')
A map of tags assigned to this Resource.
sdk/python/pulumi_azure/core/outputs.py
tags
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def tags(self) -> Mapping[(str, str)]: '\n \n ' return pulumi.get(self, 'tags')
@property @pulumi.getter def tags(self) -> Mapping[(str, str)]: '\n \n ' return pulumi.get(self, 'tags')<|docstring|>A map of tags assigned to this Resource.<|endoftext|>
0e7e73d12011bc9cb6972cb017ea8c537e7141d912019a508a688f09fc160f9b
@property @pulumi.getter def type(self) -> str: '\n The Resource Type of the Resources you want to list (e.g. `Microsoft.Network/virtualNetworks`). A full list of available Resource Types can be found [here](https://docs.microsoft.com/en-us/azure/azure-resource-manager/azure-services-resource-providers).\n ' return pulumi.get(self, 'type')
The Resource Type of the Resources you want to list (e.g. `Microsoft.Network/virtualNetworks`). A full list of available Resource Types can be found [here](https://docs.microsoft.com/en-us/azure/azure-resource-manager/azure-services-resource-providers).
sdk/python/pulumi_azure/core/outputs.py
type
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def type(self) -> str: '\n \n ' return pulumi.get(self, 'type')
@property @pulumi.getter def type(self) -> str: '\n \n ' return pulumi.get(self, 'type')<|docstring|>The Resource Type of the Resources you want to list (e.g. `Microsoft.Network/virtualNetworks`). A full list of available Resource Types can be found [here](https://docs.microsoft.com/en-us/azure/azure-resource-manager/azure-services-resource-providers).<|endoftext|>
0ad40ffb6c17ddd502c137f7ec7d505c0b5f5f7953870b8c9abd70d30552bdf6
def __init__(__self__, *, display_name: str, location_placement_id: str, quota_id: str, spending_limit: str, state: str, subscription_id: str, tenant_id: str): '\n :param str display_name: The subscription display name.\n :param str location_placement_id: The subscription location placement ID.\n :param str quota_id: The subscription quota ID.\n :param str spending_limit: The subscription spending limit.\n :param str state: The subscription state. Possible values are Enabled, Warned, PastDue, Disabled, and Deleted.\n :param str subscription_id: The subscription GUID.\n :param str tenant_id: The subscription tenant ID.\n ' pulumi.set(__self__, 'display_name', display_name) pulumi.set(__self__, 'location_placement_id', location_placement_id) pulumi.set(__self__, 'quota_id', quota_id) pulumi.set(__self__, 'spending_limit', spending_limit) pulumi.set(__self__, 'state', state) pulumi.set(__self__, 'subscription_id', subscription_id) pulumi.set(__self__, 'tenant_id', tenant_id)
:param str display_name: The subscription display name. :param str location_placement_id: The subscription location placement ID. :param str quota_id: The subscription quota ID. :param str spending_limit: The subscription spending limit. :param str state: The subscription state. Possible values are Enabled, Warned, PastDue, Disabled, and Deleted. :param str subscription_id: The subscription GUID. :param str tenant_id: The subscription tenant ID.
sdk/python/pulumi_azure/core/outputs.py
__init__
suresh198526/pulumi-azure
0
python
def __init__(__self__, *, display_name: str, location_placement_id: str, quota_id: str, spending_limit: str, state: str, subscription_id: str, tenant_id: str): '\n :param str display_name: The subscription display name.\n :param str location_placement_id: The subscription location placement ID.\n :param str quota_id: The subscription quota ID.\n :param str spending_limit: The subscription spending limit.\n :param str state: The subscription state. Possible values are Enabled, Warned, PastDue, Disabled, and Deleted.\n :param str subscription_id: The subscription GUID.\n :param str tenant_id: The subscription tenant ID.\n ' pulumi.set(__self__, 'display_name', display_name) pulumi.set(__self__, 'location_placement_id', location_placement_id) pulumi.set(__self__, 'quota_id', quota_id) pulumi.set(__self__, 'spending_limit', spending_limit) pulumi.set(__self__, 'state', state) pulumi.set(__self__, 'subscription_id', subscription_id) pulumi.set(__self__, 'tenant_id', tenant_id)
def __init__(__self__, *, display_name: str, location_placement_id: str, quota_id: str, spending_limit: str, state: str, subscription_id: str, tenant_id: str): '\n :param str display_name: The subscription display name.\n :param str location_placement_id: The subscription location placement ID.\n :param str quota_id: The subscription quota ID.\n :param str spending_limit: The subscription spending limit.\n :param str state: The subscription state. Possible values are Enabled, Warned, PastDue, Disabled, and Deleted.\n :param str subscription_id: The subscription GUID.\n :param str tenant_id: The subscription tenant ID.\n ' pulumi.set(__self__, 'display_name', display_name) pulumi.set(__self__, 'location_placement_id', location_placement_id) pulumi.set(__self__, 'quota_id', quota_id) pulumi.set(__self__, 'spending_limit', spending_limit) pulumi.set(__self__, 'state', state) pulumi.set(__self__, 'subscription_id', subscription_id) pulumi.set(__self__, 'tenant_id', tenant_id)<|docstring|>:param str display_name: The subscription display name. :param str location_placement_id: The subscription location placement ID. :param str quota_id: The subscription quota ID. :param str spending_limit: The subscription spending limit. :param str state: The subscription state. Possible values are Enabled, Warned, PastDue, Disabled, and Deleted. :param str subscription_id: The subscription GUID. :param str tenant_id: The subscription tenant ID.<|endoftext|>
6167b089fc26e32a33c4e49b58d082d31fe2ebc0b687d6e9855cd842adf4ad03
@property @pulumi.getter(name='displayName') def display_name(self) -> str: '\n The subscription display name.\n ' return pulumi.get(self, 'display_name')
The subscription display name.
sdk/python/pulumi_azure/core/outputs.py
display_name
suresh198526/pulumi-azure
0
python
@property @pulumi.getter(name='displayName') def display_name(self) -> str: '\n \n ' return pulumi.get(self, 'display_name')
@property @pulumi.getter(name='displayName') def display_name(self) -> str: '\n \n ' return pulumi.get(self, 'display_name')<|docstring|>The subscription display name.<|endoftext|>
890a666c2af921e269e7df1917521c2ea09bafd983df0d88a20cfe9b6aebdbfb
@property @pulumi.getter(name='locationPlacementId') def location_placement_id(self) -> str: '\n The subscription location placement ID.\n ' return pulumi.get(self, 'location_placement_id')
The subscription location placement ID.
sdk/python/pulumi_azure/core/outputs.py
location_placement_id
suresh198526/pulumi-azure
0
python
@property @pulumi.getter(name='locationPlacementId') def location_placement_id(self) -> str: '\n \n ' return pulumi.get(self, 'location_placement_id')
@property @pulumi.getter(name='locationPlacementId') def location_placement_id(self) -> str: '\n \n ' return pulumi.get(self, 'location_placement_id')<|docstring|>The subscription location placement ID.<|endoftext|>
cb7a9fbfe34078649b021cd2a5c2f68265529b204603ffd9f6890b9dc942e129
@property @pulumi.getter(name='quotaId') def quota_id(self) -> str: '\n The subscription quota ID.\n ' return pulumi.get(self, 'quota_id')
The subscription quota ID.
sdk/python/pulumi_azure/core/outputs.py
quota_id
suresh198526/pulumi-azure
0
python
@property @pulumi.getter(name='quotaId') def quota_id(self) -> str: '\n \n ' return pulumi.get(self, 'quota_id')
@property @pulumi.getter(name='quotaId') def quota_id(self) -> str: '\n \n ' return pulumi.get(self, 'quota_id')<|docstring|>The subscription quota ID.<|endoftext|>
0ee8b1fa3641bff0ff52034e073b4fee89ebffabbfaf1feefcedae50973bdfbc
@property @pulumi.getter(name='spendingLimit') def spending_limit(self) -> str: '\n The subscription spending limit.\n ' return pulumi.get(self, 'spending_limit')
The subscription spending limit.
sdk/python/pulumi_azure/core/outputs.py
spending_limit
suresh198526/pulumi-azure
0
python
@property @pulumi.getter(name='spendingLimit') def spending_limit(self) -> str: '\n \n ' return pulumi.get(self, 'spending_limit')
@property @pulumi.getter(name='spendingLimit') def spending_limit(self) -> str: '\n \n ' return pulumi.get(self, 'spending_limit')<|docstring|>The subscription spending limit.<|endoftext|>
4479ab8edb8d8cbfdea39e3206640c5ef0d794cbd9d05ac5f6c340966a3fcd4e
@property @pulumi.getter def state(self) -> str: '\n The subscription state. Possible values are Enabled, Warned, PastDue, Disabled, and Deleted.\n ' return pulumi.get(self, 'state')
The subscription state. Possible values are Enabled, Warned, PastDue, Disabled, and Deleted.
sdk/python/pulumi_azure/core/outputs.py
state
suresh198526/pulumi-azure
0
python
@property @pulumi.getter def state(self) -> str: '\n \n ' return pulumi.get(self, 'state')
@property @pulumi.getter def state(self) -> str: '\n \n ' return pulumi.get(self, 'state')<|docstring|>The subscription state. Possible values are Enabled, Warned, PastDue, Disabled, and Deleted.<|endoftext|>
458075aefcdf493f661e2d1848e2d1483f9ec9ca6f8316dddd656bead9588fc8
@property @pulumi.getter(name='subscriptionId') def subscription_id(self) -> str: '\n The subscription GUID.\n ' return pulumi.get(self, 'subscription_id')
The subscription GUID.
sdk/python/pulumi_azure/core/outputs.py
subscription_id
suresh198526/pulumi-azure
0
python
@property @pulumi.getter(name='subscriptionId') def subscription_id(self) -> str: '\n \n ' return pulumi.get(self, 'subscription_id')
@property @pulumi.getter(name='subscriptionId') def subscription_id(self) -> str: '\n \n ' return pulumi.get(self, 'subscription_id')<|docstring|>The subscription GUID.<|endoftext|>
368362b34c317b35c366779ea4126012de0ac469bf6ee47bb4bc69bd73fe3c8e
@property @pulumi.getter(name='tenantId') def tenant_id(self) -> str: '\n The subscription tenant ID.\n ' return pulumi.get(self, 'tenant_id')
The subscription tenant ID.
sdk/python/pulumi_azure/core/outputs.py
tenant_id
suresh198526/pulumi-azure
0
python
@property @pulumi.getter(name='tenantId') def tenant_id(self) -> str: '\n \n ' return pulumi.get(self, 'tenant_id')
@property @pulumi.getter(name='tenantId') def tenant_id(self) -> str: '\n \n ' return pulumi.get(self, 'tenant_id')<|docstring|>The subscription tenant ID.<|endoftext|>
824f4f35619c6b82972bdb044f69deb9f22862f47dfc841ef116f22437883823
def RoadnetPa(directed: bool=False, verbose: int=2, cache_path: str='graphs/networkrepository', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the roadNet-PA graph.\n\n The graph is automatically retrieved from the NetworkRepository repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of roadNet-PA graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-06 12:17:08.702691\n\t\n\tThe undirected graph roadNet-PA has 1088092 nodes and 1541898 unweighted\n\tedges, of which none are self-loops. The graph is extremely sparse as it\n\thas a density of 0.00000 and has 206 connected components, where the component\n\twith most nodes has 1087562 nodes and the component with the least nodes\n\thas 2 nodes. The graph median node degree is 3, the mean node degree is\n\t2.83, and the node degree mode is 3. The top 5 most central nodes are 859327\n\t(degree 9), 847933 (degree 9), 759554 (degree 9), 674503 (degree 9) and\n\t1046565 (degree 8).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@inproceedings{nr,\n\t title = {The Network Data Repository with Interactive Graph Analytics and Visualization},\n\t author={Ryan A. Rossi and Nesreen K. Ahmed},\n\t booktitle = {AAAI},\n\t url={http://networkrepository.com},\n\t year={2015}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.networkrepository import RoadnetPa\n\t\n\t # Then load the graph\n\t graph = RoadnetPa()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='RoadnetPa', dataset='networkrepository', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
Return new instance of the roadNet-PA graph. The graph is automatically retrieved from the NetworkRepository repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of roadNet-PA graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-06 12:17:08.702691 The undirected graph roadNet-PA has 1088092 nodes and 1541898 unweighted edges, of which none are self-loops. The graph is extremely sparse as it has a density of 0.00000 and has 206 connected components, where the component with most nodes has 1087562 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 3, the mean node degree is 2.83, and the node degree mode is 3. The top 5 most central nodes are 859327 (degree 9), 847933 (degree 9), 759554 (degree 9), 674503 (degree 9) and 1046565 (degree 8). References --------------------- Please cite the following if you use the data: @inproceedings{nr, title = {The Network Data Repository with Interactive Graph Analytics and Visualization}, author={Ryan A. Rossi and Nesreen K. Ahmed}, booktitle = {AAAI}, url={http://networkrepository.com}, year={2015} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. 
code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.networkrepository import RoadnetPa # Then load the graph graph = RoadnetPa() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.
bindings/python/ensmallen_graph/datasets/networkrepository/roadnetpa.py
RoadnetPa
caufieldjh/ensmallen_graph
0
python
def RoadnetPa(directed: bool=False, verbose: int=2, cache_path: str='graphs/networkrepository', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the roadNet-PA graph.\n\n The graph is automatically retrieved from the NetworkRepository repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of roadNet-PA graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-06 12:17:08.702691\n\t\n\tThe undirected graph roadNet-PA has 1088092 nodes and 1541898 unweighted\n\tedges, of which none are self-loops. The graph is extremely sparse as it\n\thas a density of 0.00000 and has 206 connected components, where the component\n\twith most nodes has 1087562 nodes and the component with the least nodes\n\thas 2 nodes. The graph median node degree is 3, the mean node degree is\n\t2.83, and the node degree mode is 3. The top 5 most central nodes are 859327\n\t(degree 9), 847933 (degree 9), 759554 (degree 9), 674503 (degree 9) and\n\t1046565 (degree 8).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@inproceedings{nr,\n\t title = {The Network Data Repository with Interactive Graph Analytics and Visualization},\n\t author={Ryan A. Rossi and Nesreen K. Ahmed},\n\t booktitle = {AAAI},\n\t url={http://networkrepository.com},\n\t year={2015}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.networkrepository import RoadnetPa\n\t\n\t # Then load the graph\n\t graph = RoadnetPa()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='RoadnetPa', dataset='networkrepository', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
def RoadnetPa(directed: bool=False, verbose: int=2, cache_path: str='graphs/networkrepository', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the roadNet-PA graph.\n\n The graph is automatically retrieved from the NetworkRepository repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of roadNet-PA graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-06 12:17:08.702691\n\t\n\tThe undirected graph roadNet-PA has 1088092 nodes and 1541898 unweighted\n\tedges, of which none are self-loops. The graph is extremely sparse as it\n\thas a density of 0.00000 and has 206 connected components, where the component\n\twith most nodes has 1087562 nodes and the component with the least nodes\n\thas 2 nodes. The graph median node degree is 3, the mean node degree is\n\t2.83, and the node degree mode is 3. The top 5 most central nodes are 859327\n\t(degree 9), 847933 (degree 9), 759554 (degree 9), 674503 (degree 9) and\n\t1046565 (degree 8).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@inproceedings{nr,\n\t title = {The Network Data Repository with Interactive Graph Analytics and Visualization},\n\t author={Ryan A. Rossi and Nesreen K. Ahmed},\n\t booktitle = {AAAI},\n\t url={http://networkrepository.com},\n\t year={2015}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. 
code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.networkrepository import RoadnetPa\n\t\n\t # Then load the graph\n\t graph = RoadnetPa()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='RoadnetPa', dataset='networkrepository', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()<|docstring|>Return new instance of the roadNet-PA graph. The graph is automatically retrieved from the NetworkRepository repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of roadNet-PA graph. 
Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-06 12:17:08.702691 The undirected graph roadNet-PA has 1088092 nodes and 1541898 unweighted edges, of which none are self-loops. The graph is extremely sparse as it has a density of 0.00000 and has 206 connected components, where the component with most nodes has 1087562 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 3, the mean node degree is 2.83, and the node degree mode is 3. The top 5 most central nodes are 859327 (degree 9), 847933 (degree 9), 759554 (degree 9), 674503 (degree 9) and 1046565 (degree 8). References --------------------- Please cite the following if you use the data: @inproceedings{nr, title = {The Network Data Repository with Interactive Graph Analytics and Visualization}, author={Ryan A. Rossi and Nesreen K. Ahmed}, booktitle = {AAAI}, url={http://networkrepository.com}, year={2015} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.networkrepository import RoadnetPa # Then load the graph graph = RoadnetPa() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. 
verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.<|endoftext|>
ed1f588a9117a99ce4783e5a70e015a7f89ea47c3d1a09b164428d394024c7d6
def model_resnet50_keras(input_shape: tuple, classes: int, include_top=True, weights='imagenet') -> keras.Model: '\n Keras Applicationsに用意されているResNet50を読み込む。\n\n Deep Residual Learning for Image Recognition\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n https://arxiv.org/abs/1512.03385\n\n Args:\n input_shape tuple:\n 入力の形状を指定する。\n num_classes int:\n 分類するクラス数を指定する。\n\n Returns:\n keras.Model:\n ResNet50を返す。\n ' return keras.applications.resnet50.ResNet50(include_top=include_top, weights=weights, input_tensor=None, input_shape=((224, 224, 3) if (weights == 'imagenet') else input_shape), pooling=None, classes=(classes if (include_top and (not weights)) else 1000))
Keras Applicationsに用意されているResNet50を読み込む。 Deep Residual Learning for Image Recognition Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun https://arxiv.org/abs/1512.03385 Args: input_shape tuple: 入力の形状を指定する。 num_classes int: 分類するクラス数を指定する。 Returns: keras.Model: ResNet50を返す。
models/resnet50.py
model_resnet50_keras
sugaok/my-deep-learning-base
1
python
def model_resnet50_keras(input_shape: tuple, classes: int, include_top=True, weights='imagenet') -> keras.Model: '\n Keras Applicationsに用意されているResNet50を読み込む。\n\n Deep Residual Learning for Image Recognition\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n https://arxiv.org/abs/1512.03385\n\n Args:\n input_shape tuple:\n 入力の形状を指定する。\n num_classes int:\n 分類するクラス数を指定する。\n\n Returns:\n keras.Model:\n ResNet50を返す。\n ' return keras.applications.resnet50.ResNet50(include_top=include_top, weights=weights, input_tensor=None, input_shape=((224, 224, 3) if (weights == 'imagenet') else input_shape), pooling=None, classes=(classes if (include_top and (not weights)) else 1000))
def model_resnet50_keras(input_shape: tuple, classes: int, include_top=True, weights='imagenet') -> keras.Model: '\n Keras Applicationsに用意されているResNet50を読み込む。\n\n Deep Residual Learning for Image Recognition\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n https://arxiv.org/abs/1512.03385\n\n Args:\n input_shape tuple:\n 入力の形状を指定する。\n num_classes int:\n 分類するクラス数を指定する。\n\n Returns:\n keras.Model:\n ResNet50を返す。\n ' return keras.applications.resnet50.ResNet50(include_top=include_top, weights=weights, input_tensor=None, input_shape=((224, 224, 3) if (weights == 'imagenet') else input_shape), pooling=None, classes=(classes if (include_top and (not weights)) else 1000))<|docstring|>Keras Applicationsに用意されているResNet50を読み込む。 Deep Residual Learning for Image Recognition Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun https://arxiv.org/abs/1512.03385 Args: input_shape tuple: 入力の形状を指定する。 num_classes int: 分類するクラス数を指定する。 Returns: keras.Model: ResNet50を返す。<|endoftext|>
d558c7449849fec455c88a665440847b31e17a43f87d60b2e5c5bb7453752001
def model_resnet50(input_shape: tuple, classes: int) -> keras.Model: '\n ResNet50を読み込む。\n\n Deep Residual Learning for Image Recognition\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n https://arxiv.org/abs/1512.03385\n\n Args:\n input_shape tuple:\n 入力の形状を指定する。\n num_classes int:\n 分類するクラス数を指定する。\n\n Returns:\n keras.Model:\n ResNet50を返す。\n ' inputs = keras.layers.Input(shape=input_shape) x = inputs s = max(input_shape[0], input_shape[1]) if (s >= 224): x = keras.layers.Conv2D(64, 7, 2, name='conv1')(x) if (s >= 112): pass x = keras.layers.Conv2D(32, 3, 2)(x) x = keras.layers.BatchNormalization()(x) x = keras.layers.ReLU()(x)
ResNet50を読み込む。 Deep Residual Learning for Image Recognition Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun https://arxiv.org/abs/1512.03385 Args: input_shape tuple: 入力の形状を指定する。 num_classes int: 分類するクラス数を指定する。 Returns: keras.Model: ResNet50を返す。
models/resnet50.py
model_resnet50
sugaok/my-deep-learning-base
1
python
def model_resnet50(input_shape: tuple, classes: int) -> keras.Model: '\n ResNet50を読み込む。\n\n Deep Residual Learning for Image Recognition\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n https://arxiv.org/abs/1512.03385\n\n Args:\n input_shape tuple:\n 入力の形状を指定する。\n num_classes int:\n 分類するクラス数を指定する。\n\n Returns:\n keras.Model:\n ResNet50を返す。\n ' inputs = keras.layers.Input(shape=input_shape) x = inputs s = max(input_shape[0], input_shape[1]) if (s >= 224): x = keras.layers.Conv2D(64, 7, 2, name='conv1')(x) if (s >= 112): pass x = keras.layers.Conv2D(32, 3, 2)(x) x = keras.layers.BatchNormalization()(x) x = keras.layers.ReLU()(x)
def model_resnet50(input_shape: tuple, classes: int) -> keras.Model: '\n ResNet50を読み込む。\n\n Deep Residual Learning for Image Recognition\n Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n https://arxiv.org/abs/1512.03385\n\n Args:\n input_shape tuple:\n 入力の形状を指定する。\n num_classes int:\n 分類するクラス数を指定する。\n\n Returns:\n keras.Model:\n ResNet50を返す。\n ' inputs = keras.layers.Input(shape=input_shape) x = inputs s = max(input_shape[0], input_shape[1]) if (s >= 224): x = keras.layers.Conv2D(64, 7, 2, name='conv1')(x) if (s >= 112): pass x = keras.layers.Conv2D(32, 3, 2)(x) x = keras.layers.BatchNormalization()(x) x = keras.layers.ReLU()(x)<|docstring|>ResNet50を読み込む。 Deep Residual Learning for Image Recognition Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun https://arxiv.org/abs/1512.03385 Args: input_shape tuple: 入力の形状を指定する。 num_classes int: 分類するクラス数を指定する。 Returns: keras.Model: ResNet50を返す。<|endoftext|>
cc09e176516213f63e22aa96c51173f4d0c4d0f2f7ba026ec27ba3e5832cab66
def get_single_run_config(out_dir): 'Load the config file from a specified experiment.\n\n Args:\n out_dir (str): The path to the experiment.\n\n Returns:\n The Namespace object containing argument names and values.\n ' print(('Loading the configuration of run: %s' % out_dir)) if (not os.path.exists(os.path.join(out_dir, 'config.pickle'))): raise NotImplementedError((('The run "%s" does not contain a ' % out_dir) + '"config.pickle" file.')) with open(os.path.join(out_dir, 'config.pickle'), 'rb') as f: config = pickle.load(f) return config
Load the config file from a specified experiment. Args: out_dir (str): The path to the experiment. Returns: The Namespace object containing argument names and values.
hypnettorch/hpsearch/gather_random_seeds.py
get_single_run_config
pennfranc/hypnettorch
31
python
def get_single_run_config(out_dir): 'Load the config file from a specified experiment.\n\n Args:\n out_dir (str): The path to the experiment.\n\n Returns:\n The Namespace object containing argument names and values.\n ' print(('Loading the configuration of run: %s' % out_dir)) if (not os.path.exists(os.path.join(out_dir, 'config.pickle'))): raise NotImplementedError((('The run "%s" does not contain a ' % out_dir) + '"config.pickle" file.')) with open(os.path.join(out_dir, 'config.pickle'), 'rb') as f: config = pickle.load(f) return config
def get_single_run_config(out_dir): 'Load the config file from a specified experiment.\n\n Args:\n out_dir (str): The path to the experiment.\n\n Returns:\n The Namespace object containing argument names and values.\n ' print(('Loading the configuration of run: %s' % out_dir)) if (not os.path.exists(os.path.join(out_dir, 'config.pickle'))): raise NotImplementedError((('The run "%s" does not contain a ' % out_dir) + '"config.pickle" file.')) with open(os.path.join(out_dir, 'config.pickle'), 'rb') as f: config = pickle.load(f) return config<|docstring|>Load the config file from a specified experiment. Args: out_dir (str): The path to the experiment. Returns: The Namespace object containing argument names and values.<|endoftext|>
3647241f3893a7fc6ba9b5c6e9bf0e4bf70b17d7a65f8f9e2a9b4b9973f623ae
def get_best_hpsearch_config(out_dir): 'Load the config file from the best run of a hyperparameter search.\n\n This file loads the results of the hyperparameter search, and select the\n configuration that lead to the best performance score.\n\n Args:\n out_dir (str): The path to the hpsearch result folder.\n\n Returns:\n (tuple): Tuple containing:\n\n - **config**: The config of the best run.\n - **best_out_dir**: The path to the best run.\n ' run_dirs = os.listdir(out_dir) if ('TO_BE_DELETED' in run_dirs): run_dirs.remove('TO_BE_DELETED') run_dirs.extend(os.listdir(os.path.join(out_dir, 'TO_BE_DELETED'))) curr_best_dir = None curr_best_score = None for (i, sim_dir) in enumerate(run_dirs): sim_path = os.path.join(run_dirs, sim_dir) if (not os.path.isdir(sim_path)): continue if (not os.path.exists(os.path.join(sim_path, hpsearch._SUMMARY_FILENAME))): continue try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(sim_path, i) except: continue has_finished = int(performance_dict['finished'][0]) if (not has_finished): continue curr_score = float(performance_dict[hpsearch._PERFORMANCE_KEY][0]) if (curr_best_dir is None): curr_best_dir = sim_path curr_best_score = curr_score elif hpsearch._PERFORMANCE_SORT_ASC: if (curr_score < curr_best_score): curr_best_dir = sim_path curr_best_score = curr_score elif (curr_score > curr_best_score): curr_best_dir = sim_path curr_best_score = curr_score if (curr_best_dir is None): raise RuntimeError('Did not find any finished run!') return (get_single_run_config(curr_best_dir), curr_best_dir)
Load the config file from the best run of a hyperparameter search. This file loads the results of the hyperparameter search, and select the configuration that lead to the best performance score. Args: out_dir (str): The path to the hpsearch result folder. Returns: (tuple): Tuple containing: - **config**: The config of the best run. - **best_out_dir**: The path to the best run.
hypnettorch/hpsearch/gather_random_seeds.py
get_best_hpsearch_config
pennfranc/hypnettorch
31
python
def get_best_hpsearch_config(out_dir): 'Load the config file from the best run of a hyperparameter search.\n\n This file loads the results of the hyperparameter search, and select the\n configuration that lead to the best performance score.\n\n Args:\n out_dir (str): The path to the hpsearch result folder.\n\n Returns:\n (tuple): Tuple containing:\n\n - **config**: The config of the best run.\n - **best_out_dir**: The path to the best run.\n ' run_dirs = os.listdir(out_dir) if ('TO_BE_DELETED' in run_dirs): run_dirs.remove('TO_BE_DELETED') run_dirs.extend(os.listdir(os.path.join(out_dir, 'TO_BE_DELETED'))) curr_best_dir = None curr_best_score = None for (i, sim_dir) in enumerate(run_dirs): sim_path = os.path.join(run_dirs, sim_dir) if (not os.path.isdir(sim_path)): continue if (not os.path.exists(os.path.join(sim_path, hpsearch._SUMMARY_FILENAME))): continue try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(sim_path, i) except: continue has_finished = int(performance_dict['finished'][0]) if (not has_finished): continue curr_score = float(performance_dict[hpsearch._PERFORMANCE_KEY][0]) if (curr_best_dir is None): curr_best_dir = sim_path curr_best_score = curr_score elif hpsearch._PERFORMANCE_SORT_ASC: if (curr_score < curr_best_score): curr_best_dir = sim_path curr_best_score = curr_score elif (curr_score > curr_best_score): curr_best_dir = sim_path curr_best_score = curr_score if (curr_best_dir is None): raise RuntimeError('Did not find any finished run!') return (get_single_run_config(curr_best_dir), curr_best_dir)
def get_best_hpsearch_config(out_dir): 'Load the config file from the best run of a hyperparameter search.\n\n This file loads the results of the hyperparameter search, and select the\n configuration that lead to the best performance score.\n\n Args:\n out_dir (str): The path to the hpsearch result folder.\n\n Returns:\n (tuple): Tuple containing:\n\n - **config**: The config of the best run.\n - **best_out_dir**: The path to the best run.\n ' run_dirs = os.listdir(out_dir) if ('TO_BE_DELETED' in run_dirs): run_dirs.remove('TO_BE_DELETED') run_dirs.extend(os.listdir(os.path.join(out_dir, 'TO_BE_DELETED'))) curr_best_dir = None curr_best_score = None for (i, sim_dir) in enumerate(run_dirs): sim_path = os.path.join(run_dirs, sim_dir) if (not os.path.isdir(sim_path)): continue if (not os.path.exists(os.path.join(sim_path, hpsearch._SUMMARY_FILENAME))): continue try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(sim_path, i) except: continue has_finished = int(performance_dict['finished'][0]) if (not has_finished): continue curr_score = float(performance_dict[hpsearch._PERFORMANCE_KEY][0]) if (curr_best_dir is None): curr_best_dir = sim_path curr_best_score = curr_score elif hpsearch._PERFORMANCE_SORT_ASC: if (curr_score < curr_best_score): curr_best_dir = sim_path curr_best_score = curr_score elif (curr_score > curr_best_score): curr_best_dir = sim_path curr_best_score = curr_score if (curr_best_dir is None): raise RuntimeError('Did not find any finished run!') return (get_single_run_config(curr_best_dir), curr_best_dir)<|docstring|>Load the config file from the best run of a hyperparameter search. This file loads the results of the hyperparameter search, and select the configuration that lead to the best performance score. Args: out_dir (str): The path to the hpsearch result folder. Returns: (tuple): Tuple containing: - **config**: The config of the best run. - **best_out_dir**: The path to the best run.<|endoftext|>
bed1c12c4f6db9806efcb8fd5f621a3693e11b23ce04e289b488c9b3234d5dbc
def build_grid_and_conditions(cmd_args, config, seeds_list): 'Build the hpconfig for the random seed gathering.\n\n Args:\n cmd_args: CLI arguments of this script.\n config: The config to be translated into a search grid.\n seeds_list (list): The random seeds to be gathered.\n\n (tuple): Tuple containing:\n\n - **grid** (dict): The search grid.\n - **conditions** (list): Constraints for the search grid.\n ' grid = {} for (k, v) in vars(config).items(): if isinstance(v, str): v = v.strip('"') v = (('"' + v) + '"') grid[k] = [v] grid['random_seed'] = seeds_list conditions = [] if cmd_args.vary_data_seed: for s in seeds_list: conditions.append(({'random_seed': [s]}, {'data_random_seed': [s]})) return (grid, conditions)
Build the hpconfig for the random seed gathering. Args: cmd_args: CLI arguments of this script. config: The config to be translated into a search grid. seeds_list (list): The random seeds to be gathered. (tuple): Tuple containing: - **grid** (dict): The search grid. - **conditions** (list): Constraints for the search grid.
hypnettorch/hpsearch/gather_random_seeds.py
build_grid_and_conditions
pennfranc/hypnettorch
31
python
def build_grid_and_conditions(cmd_args, config, seeds_list): 'Build the hpconfig for the random seed gathering.\n\n Args:\n cmd_args: CLI arguments of this script.\n config: The config to be translated into a search grid.\n seeds_list (list): The random seeds to be gathered.\n\n (tuple): Tuple containing:\n\n - **grid** (dict): The search grid.\n - **conditions** (list): Constraints for the search grid.\n ' grid = {} for (k, v) in vars(config).items(): if isinstance(v, str): v = v.strip('"') v = (('"' + v) + '"') grid[k] = [v] grid['random_seed'] = seeds_list conditions = [] if cmd_args.vary_data_seed: for s in seeds_list: conditions.append(({'random_seed': [s]}, {'data_random_seed': [s]})) return (grid, conditions)
def build_grid_and_conditions(cmd_args, config, seeds_list): 'Build the hpconfig for the random seed gathering.\n\n Args:\n cmd_args: CLI arguments of this script.\n config: The config to be translated into a search grid.\n seeds_list (list): The random seeds to be gathered.\n\n (tuple): Tuple containing:\n\n - **grid** (dict): The search grid.\n - **conditions** (list): Constraints for the search grid.\n ' grid = {} for (k, v) in vars(config).items(): if isinstance(v, str): v = v.strip('"') v = (('"' + v) + '"') grid[k] = [v] grid['random_seed'] = seeds_list conditions = [] if cmd_args.vary_data_seed: for s in seeds_list: conditions.append(({'random_seed': [s]}, {'data_random_seed': [s]})) return (grid, conditions)<|docstring|>Build the hpconfig for the random seed gathering. Args: cmd_args: CLI arguments of this script. config: The config to be translated into a search grid. seeds_list (list): The random seeds to be gathered. (tuple): Tuple containing: - **grid** (dict): The search grid. - **conditions** (list): Constraints for the search grid.<|endoftext|>
5d5bdd5e2d9be29522c2fb6601bf7436ce2134ca97750a5af7306eb479b388ee
def get_hpsearch_call(cmd_args, num_seeds, grid_config, hpsearch_dir=None): 'Generate the command line for the hpsearch.\n\n Args:\n cmd_args: The command line arguments.\n num_seeds (int): Number of searches.\n grid_config (str): Location of search grid.\n hpsearch_dir (str, optional): Where the hpsearch should write its\n results to.\n\n Returns:\n (str): The command line to be executed.\n\n ' cluster_cmd_prefix = '' cluster_cmd_suffix = '' non_cluster_cmd_suffix = '' if (cmd_args.run_cluster and (cmd_args.scheduler == 'lsf')): cluster_cmd_prefix = (('bsub -n 1 -W %s:00 ' % cmd_args.hps_num_hours) + ('-e random_seeds.err -o random_seeds.out -R "%s" ' % cmd_args.hps_resources.strip('"'))) cluster_cmd_suffix = (((((' --run_cluster ' + ('--scheduler=%s ' % cmd_args.scheduler)) + ('--num_jobs=%s ' % cmd_args.num_jobs)) + ('--num_hours=%s ' % cmd_args.num_hours)) + ('--resources="\\"%s\\"" ' % cmd_args.resources.strip('"'))) + ('--num_searches=%d ' % num_seeds)) elif cmd_args.run_cluster: assert (cmd_args.scheduler == 'slurm') cluster_cmd_suffix = (((((((((' --run_cluster ' + ('--scheduler=%s ' % cmd_args.scheduler)) + ('--num_jobs=%s ' % cmd_args.num_jobs)) + ('--num_hours=%s ' % cmd_args.num_hours)) + ('--slurm_mem=%s ' % cmd_args.slurm_mem)) + ('--slurm_gres=%s ' % cmd_args.slurm_gres)) + ('--slurm_partition=%s ' % cmd_args.slurm_partition)) + ('--slurm_qos=%s ' % cmd_args.slurm_qos)) + ('--slurm_constraint=%s ' % cmd_args.slurm_constraint)) + ('--num_searches=%d ' % num_seeds)) else: non_cluster_cmd_suffix = ((((('--visible_gpus=%s ' % cmd_args.visible_gpus) + ('--allowed_load=%f ' % cmd_args.allowed_load)) + ('--allowed_memory=%f ' % cmd_args.allowed_memory)) + ('--sim_startup_time=%d ' % cmd_args.sim_startup_time)) + ('--max_num_jobs_per_gpu=%d ' % cmd_args.max_num_jobs_per_gpu)) cmd_str = (((cluster_cmd_prefix + ('python3 hpsearch.py --grid_module=%s ' % cmd_args.grid_module)) + ('--grid_config=%s ' % grid_config)) + ('--run_cwd=%s ' % cmd_args.run_cwd)) if 
cmd_args.deterministic_search: cmd_str += '--deterministic_search ' if cmd_args.dont_generate_full_grid: cmd_str += '--dont_generate_full_grid ' if (hpsearch_dir is not None): cmd_str += (('--out_dir=%s --force_out_dir ' % hpsearch_dir) + '--dont_force_new_dir ') cmd_str += (cluster_cmd_suffix + non_cluster_cmd_suffix) return cmd_str
Generate the command line for the hpsearch. Args: cmd_args: The command line arguments. num_seeds (int): Number of searches. grid_config (str): Location of search grid. hpsearch_dir (str, optional): Where the hpsearch should write its results to. Returns: (str): The command line to be executed.
hypnettorch/hpsearch/gather_random_seeds.py
get_hpsearch_call
pennfranc/hypnettorch
31
python
def get_hpsearch_call(cmd_args, num_seeds, grid_config, hpsearch_dir=None): 'Generate the command line for the hpsearch.\n\n Args:\n cmd_args: The command line arguments.\n num_seeds (int): Number of searches.\n grid_config (str): Location of search grid.\n hpsearch_dir (str, optional): Where the hpsearch should write its\n results to.\n\n Returns:\n (str): The command line to be executed.\n\n ' cluster_cmd_prefix = cluster_cmd_suffix = non_cluster_cmd_suffix = if (cmd_args.run_cluster and (cmd_args.scheduler == 'lsf')): cluster_cmd_prefix = (('bsub -n 1 -W %s:00 ' % cmd_args.hps_num_hours) + ('-e random_seeds.err -o random_seeds.out -R "%s" ' % cmd_args.hps_resources.strip('"'))) cluster_cmd_suffix = (((((' --run_cluster ' + ('--scheduler=%s ' % cmd_args.scheduler)) + ('--num_jobs=%s ' % cmd_args.num_jobs)) + ('--num_hours=%s ' % cmd_args.num_hours)) + ('--resources="\\"%s\\ ' % cmd_args.resources.strip('"'))) + ('--num_searches=%d ' % num_seeds)) elif cmd_args.run_cluster: assert (cmd_args.scheduler == 'slurm') cluster_cmd_suffix = (((((((((' --run_cluster ' + ('--scheduler=%s ' % cmd_args.scheduler)) + ('--num_jobs=%s ' % cmd_args.num_jobs)) + ('--num_hours=%s ' % cmd_args.num_hours)) + ('--slurm_mem=%s ' % cmd_args.slurm_mem)) + ('--slurm_gres=%s ' % cmd_args.slurm_gres)) + ('--slurm_partition=%s ' % cmd_args.slurm_partition)) + ('--slurm_qos=%s ' % cmd_args.slurm_qos)) + ('--slurm_constraint=%s ' % cmd_args.slurm_constraint)) + ('--num_searches=%d ' % num_seeds)) else: non_cluster_cmd_suffix = ((((('--visible_gpus=%s ' % cmd_args.visible_gpus) + ('--allowed_load=%f ' % cmd_args.allowed_load)) + ('--allowed_memory=%f ' % cmd_args.allowed_memory)) + ('--sim_startup_time=%d ' % cmd_args.sim_startup_time)) + ('--max_num_jobs_per_gpu=%d ' % cmd_args.max_num_jobs_per_gpu)) cmd_str = (((cluster_cmd_prefix + ('python3 hpsearch.py --grid_module=%s ' % cmd_args.grid_module)) + ('--grid_config=%s ' % grid_config)) + ('--run_cwd=%s ' % cmd_args.run_cwd)) if 
cmd_args.deterministic_search: cmd_str += '--deterministic_search ' if cmd_args.dont_generate_full_grid: cmd_str += '--dont_generate_full_grid ' if (hpsearch_dir is not None): cmd_str += (('--out_dir=%s --force_out_dir ' % hpsearch_dir) + '--dont_force_new_dir ') cmd_str += (cluster_cmd_suffix + non_cluster_cmd_suffix) return cmd_str
def get_hpsearch_call(cmd_args, num_seeds, grid_config, hpsearch_dir=None): 'Generate the command line for the hpsearch.\n\n Args:\n cmd_args: The command line arguments.\n num_seeds (int): Number of searches.\n grid_config (str): Location of search grid.\n hpsearch_dir (str, optional): Where the hpsearch should write its\n results to.\n\n Returns:\n (str): The command line to be executed.\n\n ' cluster_cmd_prefix = cluster_cmd_suffix = non_cluster_cmd_suffix = if (cmd_args.run_cluster and (cmd_args.scheduler == 'lsf')): cluster_cmd_prefix = (('bsub -n 1 -W %s:00 ' % cmd_args.hps_num_hours) + ('-e random_seeds.err -o random_seeds.out -R "%s" ' % cmd_args.hps_resources.strip('"'))) cluster_cmd_suffix = (((((' --run_cluster ' + ('--scheduler=%s ' % cmd_args.scheduler)) + ('--num_jobs=%s ' % cmd_args.num_jobs)) + ('--num_hours=%s ' % cmd_args.num_hours)) + ('--resources="\\"%s\\ ' % cmd_args.resources.strip('"'))) + ('--num_searches=%d ' % num_seeds)) elif cmd_args.run_cluster: assert (cmd_args.scheduler == 'slurm') cluster_cmd_suffix = (((((((((' --run_cluster ' + ('--scheduler=%s ' % cmd_args.scheduler)) + ('--num_jobs=%s ' % cmd_args.num_jobs)) + ('--num_hours=%s ' % cmd_args.num_hours)) + ('--slurm_mem=%s ' % cmd_args.slurm_mem)) + ('--slurm_gres=%s ' % cmd_args.slurm_gres)) + ('--slurm_partition=%s ' % cmd_args.slurm_partition)) + ('--slurm_qos=%s ' % cmd_args.slurm_qos)) + ('--slurm_constraint=%s ' % cmd_args.slurm_constraint)) + ('--num_searches=%d ' % num_seeds)) else: non_cluster_cmd_suffix = ((((('--visible_gpus=%s ' % cmd_args.visible_gpus) + ('--allowed_load=%f ' % cmd_args.allowed_load)) + ('--allowed_memory=%f ' % cmd_args.allowed_memory)) + ('--sim_startup_time=%d ' % cmd_args.sim_startup_time)) + ('--max_num_jobs_per_gpu=%d ' % cmd_args.max_num_jobs_per_gpu)) cmd_str = (((cluster_cmd_prefix + ('python3 hpsearch.py --grid_module=%s ' % cmd_args.grid_module)) + ('--grid_config=%s ' % grid_config)) + ('--run_cwd=%s ' % cmd_args.run_cwd)) if 
cmd_args.deterministic_search: cmd_str += '--deterministic_search ' if cmd_args.dont_generate_full_grid: cmd_str += '--dont_generate_full_grid ' if (hpsearch_dir is not None): cmd_str += (('--out_dir=%s --force_out_dir ' % hpsearch_dir) + '--dont_force_new_dir ') cmd_str += (cluster_cmd_suffix + non_cluster_cmd_suffix) return cmd_str<|docstring|>Generate the command line for the hpsearch. Args: cmd_args: The command line arguments. num_seeds (int): Number of searches. grid_config (str): Location of search grid. hpsearch_dir (str, optional): Where the hpsearch should write its results to. Returns: (str): The command line to be executed.<|endoftext|>
4ee53bf97b210b5ab754d4d724247207d0f3efd40582bd38a560eef54f306aa1
def write_seeds_summary(results_dir, summary_keys, summary_sem, summary_precs, ret_seeds=False, summary_fn=None, seeds_summary_fn='seeds_summary_text.txt'): 'Write the MEAN and STD (resp. SEM) while aggregating all seeds to text\n file.\n\n Args:\n results_dir (str): The results directory.\n summary_keys (list): See argument ``summary_keys`` of function\n :func:`run`.\n summary_sem (bool): See argument ``summary_sem`` of function\n :func:`run`.\n summary_precs (list or int, optional): See argument ``summary_precs`` of\n function :func:`run`.\n summary_fn (str, optional): If given, this will determine\n the name of the summary file within individual runs.\n seeds_summmary_fn (str, optional): The name to give to the summary\n file across all seeds.\n ret_seeds (bool, optional): If activated, the random seeds of all\n considered runs are returned as a list.\n ' random_seeds = [] if (summary_precs is None): summary_precs = 2 if isinstance(summary_precs, int): summary_precs = ([summary_precs] * len(summary_keys)) else: assert (len(summary_keys) == len(summary_precs)) score_dict = {} n_scores = 0 for k in summary_keys: score_dict[k] = [] seed_dirs = [] seed_dir_prefix = {} for (i, sim_dir) in enumerate(os.listdir(results_dir)): sim_path = os.path.join(results_dir, sim_dir) if (not os.path.isdir(sim_path)): continue try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(sim_path, i, summary_fn=summary_fn) except: warn(('Cannot read results from simulation "%s"!' % sim_dir)) continue has_finished = int(performance_dict['finished'][0]) if (not has_finished): warn(('Simulation "%s" did not finish!' 
% sim_dir)) continue n_scores += 1 for k in summary_keys: score_dict[k].append(float(performance_dict[k][0])) if ret_seeds: sim_config = get_single_run_config(sim_path) random_seeds.append(sim_config.random_seed) seed_dirs.append(sim_path) if (sim_dir.count('_') == 2): prefix = sim_dir[:sim_dir.rfind('_')] if (prefix not in seed_dir_prefix.keys()): seed_dir_prefix[prefix] = [sim_path, 1] else: seed_dir_prefix[prefix][1] += 1 else: seed_dir_prefix[sim_dir] = [sim_path, 1] original_seed_path = None nunique = 0 for (k, v) in seed_dir_prefix.items(): if (v[1] == 1): original_seed_path = v[0] nunique += 1 if (nunique > 1): original_seed_path = None if (n_scores == 0): raise RuntimeError('No results found!') score_means = {} score_devs = {} for k in summary_keys: score_means[k] = np.mean(score_dict[k]) score_devs[k] = np.std(score_dict[k]) if summary_sem: score_devs[k] /= np.sqrt(n_scores) filename = os.path.join(results_dir, seeds_summary_fn) with open(filename, 'w') as f: for (i, k) in enumerate(summary_keys): p = summary_precs[i] f.write(((((('%s (mean +/- %s): %.' + str(p)) + 'f +- %.') + str(p)) + 'f\n') % (k, ('sem' if summary_sem else 'std'), score_means[k], score_devs[k]))) f.write(('Number of seeds: %i \n\n' % n_scores)) f.write('Publication tables style: \n') f.write(('%s \n' % summary_keys)) tab_str = '' for (i, k) in enumerate(summary_keys): if (i > 0): tab_str += ' & ' p = summary_precs[i] tab_str += ((((('%.' + str(p)) + 'f $\\pm$ %.') + str(p)) + 'f ') % (score_means[k], score_devs[k])) f.write(('%s \n\n' % tab_str)) return (random_seeds if ret_seeds else None)
Write the MEAN and STD (resp. SEM) while aggregating all seeds to text file. Args: results_dir (str): The results directory. summary_keys (list): See argument ``summary_keys`` of function :func:`run`. summary_sem (bool): See argument ``summary_sem`` of function :func:`run`. summary_precs (list or int, optional): See argument ``summary_precs`` of function :func:`run`. summary_fn (str, optional): If given, this will determine the name of the summary file within individual runs. seeds_summmary_fn (str, optional): The name to give to the summary file across all seeds. ret_seeds (bool, optional): If activated, the random seeds of all considered runs are returned as a list.
hypnettorch/hpsearch/gather_random_seeds.py
write_seeds_summary
pennfranc/hypnettorch
31
python
def write_seeds_summary(results_dir, summary_keys, summary_sem, summary_precs, ret_seeds=False, summary_fn=None, seeds_summary_fn='seeds_summary_text.txt'): 'Write the MEAN and STD (resp. SEM) while aggregating all seeds to text\n file.\n\n Args:\n results_dir (str): The results directory.\n summary_keys (list): See argument ``summary_keys`` of function\n :func:`run`.\n summary_sem (bool): See argument ``summary_sem`` of function\n :func:`run`.\n summary_precs (list or int, optional): See argument ``summary_precs`` of\n function :func:`run`.\n summary_fn (str, optional): If given, this will determine\n the name of the summary file within individual runs.\n seeds_summmary_fn (str, optional): The name to give to the summary\n file across all seeds.\n ret_seeds (bool, optional): If activated, the random seeds of all\n considered runs are returned as a list.\n ' random_seeds = [] if (summary_precs is None): summary_precs = 2 if isinstance(summary_precs, int): summary_precs = ([summary_precs] * len(summary_keys)) else: assert (len(summary_keys) == len(summary_precs)) score_dict = {} n_scores = 0 for k in summary_keys: score_dict[k] = [] seed_dirs = [] seed_dir_prefix = {} for (i, sim_dir) in enumerate(os.listdir(results_dir)): sim_path = os.path.join(results_dir, sim_dir) if (not os.path.isdir(sim_path)): continue try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(sim_path, i, summary_fn=summary_fn) except: warn(('Cannot read results from simulation "%s"!' % sim_dir)) continue has_finished = int(performance_dict['finished'][0]) if (not has_finished): warn(('Simulation "%s" did not finish!' 
% sim_dir)) continue n_scores += 1 for k in summary_keys: score_dict[k].append(float(performance_dict[k][0])) if ret_seeds: sim_config = get_single_run_config(sim_path) random_seeds.append(sim_config.random_seed) seed_dirs.append(sim_path) if (sim_dir.count('_') == 2): prefix = sim_dir[:sim_dir.rfind('_')] if (prefix not in seed_dir_prefix.keys()): seed_dir_prefix[prefix] = [sim_path, 1] else: seed_dir_prefix[prefix][1] += 1 else: seed_dir_prefix[sim_dir] = [sim_path, 1] original_seed_path = None nunique = 0 for (k, v) in seed_dir_prefix.items(): if (v[1] == 1): original_seed_path = v[0] nunique += 1 if (nunique > 1): original_seed_path = None if (n_scores == 0): raise RuntimeError('No results found!') score_means = {} score_devs = {} for k in summary_keys: score_means[k] = np.mean(score_dict[k]) score_devs[k] = np.std(score_dict[k]) if summary_sem: score_devs[k] /= np.sqrt(n_scores) filename = os.path.join(results_dir, seeds_summary_fn) with open(filename, 'w') as f: for (i, k) in enumerate(summary_keys): p = summary_precs[i] f.write(((((('%s (mean +/- %s): %.' + str(p)) + 'f +- %.') + str(p)) + 'f\n') % (k, ('sem' if summary_sem else 'std'), score_means[k], score_devs[k]))) f.write(('Number of seeds: %i \n\n' % n_scores)) f.write('Publication tables style: \n') f.write(('%s \n' % summary_keys)) tab_str = for (i, k) in enumerate(summary_keys): if (i > 0): tab_str += ' & ' p = summary_precs[i] tab_str += ((((('%.' + str(p)) + 'f $\\pm$ %.') + str(p)) + 'f ') % (score_means[k], score_devs[k])) f.write(('%s \n\n' % tab_str)) return (random_seeds if ret_seeds else None)
def write_seeds_summary(results_dir, summary_keys, summary_sem, summary_precs, ret_seeds=False, summary_fn=None, seeds_summary_fn='seeds_summary_text.txt'): 'Write the MEAN and STD (resp. SEM) while aggregating all seeds to text\n file.\n\n Args:\n results_dir (str): The results directory.\n summary_keys (list): See argument ``summary_keys`` of function\n :func:`run`.\n summary_sem (bool): See argument ``summary_sem`` of function\n :func:`run`.\n summary_precs (list or int, optional): See argument ``summary_precs`` of\n function :func:`run`.\n summary_fn (str, optional): If given, this will determine\n the name of the summary file within individual runs.\n seeds_summmary_fn (str, optional): The name to give to the summary\n file across all seeds.\n ret_seeds (bool, optional): If activated, the random seeds of all\n considered runs are returned as a list.\n ' random_seeds = [] if (summary_precs is None): summary_precs = 2 if isinstance(summary_precs, int): summary_precs = ([summary_precs] * len(summary_keys)) else: assert (len(summary_keys) == len(summary_precs)) score_dict = {} n_scores = 0 for k in summary_keys: score_dict[k] = [] seed_dirs = [] seed_dir_prefix = {} for (i, sim_dir) in enumerate(os.listdir(results_dir)): sim_path = os.path.join(results_dir, sim_dir) if (not os.path.isdir(sim_path)): continue try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(sim_path, i, summary_fn=summary_fn) except: warn(('Cannot read results from simulation "%s"!' % sim_dir)) continue has_finished = int(performance_dict['finished'][0]) if (not has_finished): warn(('Simulation "%s" did not finish!' 
% sim_dir)) continue n_scores += 1 for k in summary_keys: score_dict[k].append(float(performance_dict[k][0])) if ret_seeds: sim_config = get_single_run_config(sim_path) random_seeds.append(sim_config.random_seed) seed_dirs.append(sim_path) if (sim_dir.count('_') == 2): prefix = sim_dir[:sim_dir.rfind('_')] if (prefix not in seed_dir_prefix.keys()): seed_dir_prefix[prefix] = [sim_path, 1] else: seed_dir_prefix[prefix][1] += 1 else: seed_dir_prefix[sim_dir] = [sim_path, 1] original_seed_path = None nunique = 0 for (k, v) in seed_dir_prefix.items(): if (v[1] == 1): original_seed_path = v[0] nunique += 1 if (nunique > 1): original_seed_path = None if (n_scores == 0): raise RuntimeError('No results found!') score_means = {} score_devs = {} for k in summary_keys: score_means[k] = np.mean(score_dict[k]) score_devs[k] = np.std(score_dict[k]) if summary_sem: score_devs[k] /= np.sqrt(n_scores) filename = os.path.join(results_dir, seeds_summary_fn) with open(filename, 'w') as f: for (i, k) in enumerate(summary_keys): p = summary_precs[i] f.write(((((('%s (mean +/- %s): %.' + str(p)) + 'f +- %.') + str(p)) + 'f\n') % (k, ('sem' if summary_sem else 'std'), score_means[k], score_devs[k]))) f.write(('Number of seeds: %i \n\n' % n_scores)) f.write('Publication tables style: \n') f.write(('%s \n' % summary_keys)) tab_str = for (i, k) in enumerate(summary_keys): if (i > 0): tab_str += ' & ' p = summary_precs[i] tab_str += ((((('%.' + str(p)) + 'f $\\pm$ %.') + str(p)) + 'f ') % (score_means[k], score_devs[k])) f.write(('%s \n\n' % tab_str)) return (random_seeds if ret_seeds else None)<|docstring|>Write the MEAN and STD (resp. SEM) while aggregating all seeds to text file. Args: results_dir (str): The results directory. summary_keys (list): See argument ``summary_keys`` of function :func:`run`. summary_sem (bool): See argument ``summary_sem`` of function :func:`run`. summary_precs (list or int, optional): See argument ``summary_precs`` of function :func:`run`. 
summary_fn (str, optional): If given, this will determine the name of the summary file within individual runs. seeds_summmary_fn (str, optional): The name to give to the summary file across all seeds. ret_seeds (bool, optional): If activated, the random seeds of all considered runs are returned as a list.<|endoftext|>
f37708db3eaa56455a9d22f49dd274716b720fda2db47b735fe10f9dd363ddde
def run(grid_module=None, results_dir='./out/random_seeds', config=None, ignore_kwds=None, forced_params=None, summary_keys=None, summary_sem=False, summary_precs=None, hpmod_path=None): "Run the script.\n\n Args:\n grid_module (str, optional): Name of the reference module which contains\n the hyperparameter search config that can be modified to gather\n random seeds.\n results_dir (str, optional): The path where the hpsearch should store\n its results.\n config: The Namespace object containing argument names and values.\n If provided, all random seeds will be gathered from zero, with no\n reference run.\n ignore_kwds (list, optional): A list of keywords in the config file\n to exclude from the grid.\n forced_params (dict, optional): Dict of key-value pairs specifying\n hyperparameter values that should be fixed across runs.\n summary_keys (list, optional): If provided, those mean and std of those\n summary keys will be written by function\n :func:`write_seeds_summary`. Otherwise, the performance key defined\n in ``grid_module`` will be used.\n summary_sem (bool): Whether SEM or SD should be calculated in function\n :func:`write_seeds_summary`.\n summary_precs (list or int, optional): The precision with which the\n summary statistics according to ``summary_keys`` should be listed.\n hpmod_path (str, optional): If the hpsearch doesn't reside in the same\n directory as the calling script, then we need to know from where to\n start the hpsearch.\n " if (ignore_kwds is None): ignore_kwds = [] if (forced_params is None): forced_params = {} parser = argparse.ArgumentParser(description='Gathering random seeds for the specified experiment.') parser.add_argument('--seeds_dir', type=str, default='', help=(((((('If provided, all other arguments (except ' + '"grid_module") are ignored! ') + 'This is supposed to be the output folder of a ') + 'random seed gathering experiment. 
If provided, ') + 'the results (for different seeds) within this ') + 'directory are gathered and written to a human-') + 'readible text file.')) parser.add_argument('--run_dir', type=str, default='', help=(((((('The output directory of a simulation or a ' + 'hyperparameter search. For single runs, the configuration will be ') + 'loaded and run with different seeds.') + 'For multiple runs, i.e. results of ') + 'hyperparameter searches, the configuration ') + 'leading to the best performance will be ') + 'selected and run with different seeds.')) parser.add_argument('--config_name', type=str, default='hpsearch_random_seeds', help=(((('A name for this call of gathering random ' + 'seeds. As multiple gatherings might be running ') + 'in parallel, it is important that this name is ') + 'unique name for each experiment. ') + 'Default: %(default)s.')) parser.add_argument('--grid_module', type=str, default=grid_module, help=(('See CLI argument "grid_module" of ' + 'hyperparameter search script "hpsearch". ') + ('Default: %(default)s.' if (grid_module is not None) else ''))) parser.add_argument('--num_seeds', type=int, default=10, help='The number of different random seeds.') parser.add_argument('--seeds_list', type=str, default='', help=('The list of seeds to use. If specified, ' + '"num_seeds" will be ignored.')) parser.add_argument('--vary_data_seed', action='store_true', help=(('If activated, "data_random_seed"s are set ' + 'equal to "random_seed"s. Otherwise only ') + '"random_seed"s are varied.')) parser.add_argument('--start_gathering', action='store_true', help=('If activated, the actual gathering of random ' + 'seeds is started via the "hpsearch.py" script.')) hpgroup = parser.add_argument_group('Hpsearch call options') hpgroup.add_argument('--hps_num_hours', type=int, metavar='N', default=24, help=((('If "run_cluster" is activated, then this ' + 'option determines the maximum number of hours ') + 'the entire search may run on the cluster. 
') + 'Default: %(default)s.')) hpgroup.add_argument('--hps_resources', type=str, default='"rusage[mem=8000]"', help=(((('If "run_cluster" is activated and "scheduler" ' + 'is "lsf", then this option determines the ') + 'resources assigned to the entire ') + 'hyperparameter search (option -R of bsub). ') + 'Default: %(default)s.')) hpgroup.add_argument('--hps_slurm_mem', type=str, default='8G', help=('See option "slum_mem". This argument effects ' + 'hyperparameter search itself. Default: %(default)s.')) rsgroup = parser.add_argument_group('Random seed hpsearch options') hpsearch.hpsearch_cli_arguments(rsgroup, show_out_dir=False, show_grid_module=False) cmd_args = parser.parse_args() grid_module = cmd_args.grid_module if (grid_module is None): raise ValueError('"grid_module" needs to be specified.') grid_module = importlib.import_module(grid_module) hpsearch._read_config(grid_module, require_perf_eval_handle=True) if (summary_keys is None): summary_keys = [hpsearch._PERFORMANCE_KEY] if len(cmd_args.seeds_dir): print('Writing seed summary ...') write_seeds_summary(cmd_args.seeds_dir, summary_keys, summary_sem, summary_precs) exit(0) if (len(cmd_args.seeds_list) > 0): seeds_list = misc.str_to_ints(cmd_args.seeds_list) cmd_args.num_seeds = len(seeds_list) else: seeds_list = list(range(cmd_args.num_seeds)) if ((config is not None) and (cmd_args.run_dir != '')): raise ValueError(('"run_dir" may not be specified if configuration ' + 'is provided directly.')) hpsearch_dir = None if (config is None): if (not os.path.exists(cmd_args.run_dir)): raise_error = True if (cmd_args.run_cwd != ''): tmp_dir = os.path.join(cmd_args.run_cwd, cmd_args.run_dir) if os.path.exists(tmp_dir): cmd_args.run_dir = tmp_dir raise_error = False if raise_error: raise ValueError(('Directory "%s" does not exist!' 
% cmd_args.run_dir)) single_run = False if os.path.exists(os.path.join(cmd_args.run_dir, 'config.pickle')): single_run = True if single_run: config = get_single_run_config(cmd_args.run_dir) run_dir = cmd_args.run_dir else: (config, run_dir) = get_best_hpsearch_config(cmd_args.run_dir) try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(run_dir, (- 1)) has_finished = int(performance_dict['finished'][0]) if (not has_finished): raise Exception() use_run = True except: use_run = False if use_run: run_dir = os.path.normpath(run_dir) if (not os.path.isabs(results_dir)): if os.path.isdir(cmd_args.run_cwd): results_dir = os.path.join(cmd_args.run_cwd, results_dir) results_dir = os.path.abspath(results_dir) hpsearch_dir = os.path.join(results_dir, os.path.basename(run_dir)) if os.path.exists(hpsearch_dir): warn(('Folder "%s" already exists.' % hpsearch_dir)) print('Attempting to aggregate random seed results ...') gathered_seeds = write_seeds_summary(hpsearch_dir, summary_keys, summary_sem, summary_precs, ret_seeds=True) if (len(gathered_seeds) >= len(seeds_list)): print('Already enough seeds have been gathered!') exit(0) for gs in gathered_seeds: if (gs in seeds_list): seeds_list.remove(gs) else: ignored_seed = seeds_list.pop() if (len(cmd_args.seeds_list) > 0): print((('Seed %d is ignored as seed %d already ' % (ignored_seed, gs)) + 'exists.')) else: os.makedirs(hpsearch_dir) shutil.copytree(run_dir, os.path.join(hpsearch_dir, os.path.basename(run_dir))) if (config.random_seed in seeds_list): seeds_list.remove(config.random_seed) else: ignored_seed = seeds_list.pop() if (len(cmd_args.seeds_list) > 0): print(('Seed %d is ignored as seed %d already exists.' % (ignored_seed, config.random_seed))) print(('%d random seeds will be gathered!' 
% len(seeds_list))) if (hpsearch._OUT_ARG not in ignore_kwds): ignore_kwds.append(hpsearch._OUT_ARG) for kwd in ignore_kwds: delattr(config, kwd) if (len(forced_params.keys()) > 0): for (kwd, value) in forced_params.items(): setattr(config, kwd, value) (config_dn, config_bn) = os.path.split(cmd_args.config_name) if (len(config_dn) == 0): config_dn = tempfile.gettempdir() else: config_dn = os.path.abspath(config_dn) config_fn_prefix = os.path.splitext(config_bn)[0] config_name = os.path.join(config_dn, (config_fn_prefix + '.pickle')) if os.path.exists(config_name): if (len(config_dn) > 0): overwrite = input((('The config file "%s" ' % config_name) + 'already exists! Do you want to overwrite the file? [y/n] ')) if (not (overwrite in ['yes', 'y', 'Y'])): exit(1) else: config_name_temp = tempfile.NamedTemporaryFile(prefix=config_fn_prefix, suffix='.pickle') print(('Search grid "%s" already exists, using name "%s" instead!' % (config_name, config_name_temp.name))) config_name = config_name_temp.name config_name_temp.close() (grid, conditions) = build_grid_and_conditions(cmd_args, config, seeds_list) rseed_config = {'grid': grid, 'conditions': conditions} with open(config_name, 'wb') as f: pickle.dump(rseed_config, f) if cmd_args.start_gathering: cmd_str = get_hpsearch_call(cmd_args, len(seeds_list), config_name, hpsearch_dir=hpsearch_dir) print(cmd_str) if (hpmod_path is not None): backup_curr_path = os.getcwd() os.chdir(hpmod_path) if (cmd_args.run_cluster and (cmd_args.scheduler == 'slurm')): job_script_fn = hpsearch._write_slurm_script(Namespace(**{'num_hours': cmd_args.hps_num_hours, 'slurm_mem': cmd_args.hps_slurm_mem, 'slurm_gres': 'gpu:0', 'slurm_partition': cmd_args.slurm_partition, 'slurm_qos': cmd_args.slurm_qos, 'slurm_constraint': cmd_args.slurm_constraint}), cmd_str, 'random_seeds') cmd_str = ('sbatch %s' % job_script_fn) print(('We will execute command "%s".' 
% cmd_str)) print('Starting gathering random seeds...') ret = call(cmd_str, shell=True, executable='/bin/bash') print(('Call finished with return code %d.' % ret)) if (hpmod_path is not None): os.chdir(backup_curr_path) if ((not cmd_args.run_cluster) and (hpsearch_dir is not None)): write_seeds_summary(hpsearch_dir, summary_keys, summary_sem, summary_precs) print('Random seed gathering finished successfully!') exit(0) print((hpsearch_dir is None)) if (hpsearch_dir is not None): print((('IMPORTANT: At least one random seed has already been ' + 'gathered! Please ensure that the hpsearch forces the correct ') + 'output path.')) print('Below is a possible hpsearch call:') call_appendix = '' if (hpsearch_dir is not None): call_appendix = ('--force_out_dir --dont_force_new_dir ' + ('--out_dir=%s' % hpsearch_dir)) print() print(('python3 hpsearch.py --grid_module=%s --grid_config=%s %s' % (cmd_args.grid_module, config_name, call_appendix))) print() if (hpsearch_dir is None): print(('Below is the "grid_module" name and the path to the ' + '"grid_config".')) print(cmd_args.grid_module, config_name) else: print((('Below is the "grid_module" name, the path to the ' + '"grid_config" and the output path that should be used for the ') + 'hpsearch.')) print(cmd_args.grid_module, config_name, hpsearch_dir)
Run the script. Args: grid_module (str, optional): Name of the reference module which contains the hyperparameter search config that can be modified to gather random seeds. results_dir (str, optional): The path where the hpsearch should store its results. config: The Namespace object containing argument names and values. If provided, all random seeds will be gathered from zero, with no reference run. ignore_kwds (list, optional): A list of keywords in the config file to exclude from the grid. forced_params (dict, optional): Dict of key-value pairs specifying hyperparameter values that should be fixed across runs. summary_keys (list, optional): If provided, those mean and std of those summary keys will be written by function :func:`write_seeds_summary`. Otherwise, the performance key defined in ``grid_module`` will be used. summary_sem (bool): Whether SEM or SD should be calculated in function :func:`write_seeds_summary`. summary_precs (list or int, optional): The precision with which the summary statistics according to ``summary_keys`` should be listed. hpmod_path (str, optional): If the hpsearch doesn't reside in the same directory as the calling script, then we need to know from where to start the hpsearch.
hypnettorch/hpsearch/gather_random_seeds.py
run
pennfranc/hypnettorch
31
python
def run(grid_module=None, results_dir='./out/random_seeds', config=None, ignore_kwds=None, forced_params=None, summary_keys=None, summary_sem=False, summary_precs=None, hpmod_path=None): "Run the script.\n\n Args:\n grid_module (str, optional): Name of the reference module which contains\n the hyperparameter search config that can be modified to gather\n random seeds.\n results_dir (str, optional): The path where the hpsearch should store\n its results.\n config: The Namespace object containing argument names and values.\n If provided, all random seeds will be gathered from zero, with no\n reference run.\n ignore_kwds (list, optional): A list of keywords in the config file\n to exclude from the grid.\n forced_params (dict, optional): Dict of key-value pairs specifying\n hyperparameter values that should be fixed across runs.\n summary_keys (list, optional): If provided, those mean and std of those\n summary keys will be written by function\n :func:`write_seeds_summary`. Otherwise, the performance key defined\n in ``grid_module`` will be used.\n summary_sem (bool): Whether SEM or SD should be calculated in function\n :func:`write_seeds_summary`.\n summary_precs (list or int, optional): The precision with which the\n summary statistics according to ``summary_keys`` should be listed.\n hpmod_path (str, optional): If the hpsearch doesn't reside in the same\n directory as the calling script, then we need to know from where to\n start the hpsearch.\n " if (ignore_kwds is None): ignore_kwds = [] if (forced_params is None): forced_params = {} parser = argparse.ArgumentParser(description='Gathering random seeds for the specified experiment.') parser.add_argument('--seeds_dir', type=str, default=, help=(((((('If provided, all other arguments (except ' + '"grid_module") are ignored! ') + 'This is supposed to be the output folder of a ') + 'random seed gathering experiment. 
If provided, ') + 'the results (for different seeds) within this ') + 'directory are gathered and written to a human-') + 'readible text file.')) parser.add_argument('--run_dir', type=str, default=, help=(((((('The output directory of a simulation or a ' + 'hyperparameter search. For single runs, the configuration will be ') + 'loaded and run with different seeds.') + 'For multiple runs, i.e. results of ') + 'hyperparameter searches, the configuration ') + 'leading to the best performance will be ') + 'selected and run with different seeds.')) parser.add_argument('--config_name', type=str, default='hpsearch_random_seeds', help=(((('A name for this call of gathering random ' + 'seeds. As multiple gatherings might be running ') + 'in parallel, it is important that this name is ') + 'unique name for each experiment. ') + 'Default: %(default)s.')) parser.add_argument('--grid_module', type=str, default=grid_module, help=(('See CLI argument "grid_module" of ' + 'hyperparameter search script "hpsearch". ') + ('Default: %(default)s.' if (grid_module is not None) else ))) parser.add_argument('--num_seeds', type=int, default=10, help='The number of different random seeds.') parser.add_argument('--seeds_list', type=str, default=, help=('The list of seeds to use. If specified, ' + '"num_seeds" will be ignored.')) parser.add_argument('--vary_data_seed', action='store_true', help=(('If activated, "data_random_seed"s are set ' + 'equal to "random_seed"s. Otherwise only ') + '"random_seed"s are varied.')) parser.add_argument('--start_gathering', action='store_true', help=('If activated, the actual gathering of random ' + 'seeds is started via the "hpsearch.py" script.')) hpgroup = parser.add_argument_group('Hpsearch call options') hpgroup.add_argument('--hps_num_hours', type=int, metavar='N', default=24, help=((('If "run_cluster" is activated, then this ' + 'option determines the maximum number of hours ') + 'the entire search may run on the cluster. 
') + 'Default: %(default)s.')) hpgroup.add_argument('--hps_resources', type=str, default='"rusage[mem=8000]"', help=(((('If "run_cluster" is activated and "scheduler" ' + 'is "lsf", then this option determines the ') + 'resources assigned to the entire ') + 'hyperparameter search (option -R of bsub). ') + 'Default: %(default)s.')) hpgroup.add_argument('--hps_slurm_mem', type=str, default='8G', help=('See option "slum_mem". This argument effects ' + 'hyperparameter search itself. Default: %(default)s.')) rsgroup = parser.add_argument_group('Random seed hpsearch options') hpsearch.hpsearch_cli_arguments(rsgroup, show_out_dir=False, show_grid_module=False) cmd_args = parser.parse_args() grid_module = cmd_args.grid_module if (grid_module is None): raise ValueError('"grid_module" needs to be specified.') grid_module = importlib.import_module(grid_module) hpsearch._read_config(grid_module, require_perf_eval_handle=True) if (summary_keys is None): summary_keys = [hpsearch._PERFORMANCE_KEY] if len(cmd_args.seeds_dir): print('Writing seed summary ...') write_seeds_summary(cmd_args.seeds_dir, summary_keys, summary_sem, summary_precs) exit(0) if (len(cmd_args.seeds_list) > 0): seeds_list = misc.str_to_ints(cmd_args.seeds_list) cmd_args.num_seeds = len(seeds_list) else: seeds_list = list(range(cmd_args.num_seeds)) if ((config is not None) and (cmd_args.run_dir != )): raise ValueError(('"run_dir" may not be specified if configuration ' + 'is provided directly.')) hpsearch_dir = None if (config is None): if (not os.path.exists(cmd_args.run_dir)): raise_error = True if (cmd_args.run_cwd != ): tmp_dir = os.path.join(cmd_args.run_cwd, cmd_args.run_dir) if os.path.exists(tmp_dir): cmd_args.run_dir = tmp_dir raise_error = False if raise_error: raise ValueError(('Directory "%s" does not exist!' 
% cmd_args.run_dir)) single_run = False if os.path.exists(os.path.join(cmd_args.run_dir, 'config.pickle')): single_run = True if single_run: config = get_single_run_config(cmd_args.run_dir) run_dir = cmd_args.run_dir else: (config, run_dir) = get_best_hpsearch_config(cmd_args.run_dir) try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(run_dir, (- 1)) has_finished = int(performance_dict['finished'][0]) if (not has_finished): raise Exception() use_run = True except: use_run = False if use_run: run_dir = os.path.normpath(run_dir) if (not os.path.isabs(results_dir)): if os.path.isdir(cmd_args.run_cwd): results_dir = os.path.join(cmd_args.run_cwd, results_dir) results_dir = os.path.abspath(results_dir) hpsearch_dir = os.path.join(results_dir, os.path.basename(run_dir)) if os.path.exists(hpsearch_dir): warn(('Folder "%s" already exists.' % hpsearch_dir)) print('Attempting to aggregate random seed results ...') gathered_seeds = write_seeds_summary(hpsearch_dir, summary_keys, summary_sem, summary_precs, ret_seeds=True) if (len(gathered_seeds) >= len(seeds_list)): print('Already enough seeds have been gathered!') exit(0) for gs in gathered_seeds: if (gs in seeds_list): seeds_list.remove(gs) else: ignored_seed = seeds_list.pop() if (len(cmd_args.seeds_list) > 0): print((('Seed %d is ignored as seed %d already ' % (ignored_seed, gs)) + 'exists.')) else: os.makedirs(hpsearch_dir) shutil.copytree(run_dir, os.path.join(hpsearch_dir, os.path.basename(run_dir))) if (config.random_seed in seeds_list): seeds_list.remove(config.random_seed) else: ignored_seed = seeds_list.pop() if (len(cmd_args.seeds_list) > 0): print(('Seed %d is ignored as seed %d already exists.' % (ignored_seed, config.random_seed))) print(('%d random seeds will be gathered!' 
% len(seeds_list))) if (hpsearch._OUT_ARG not in ignore_kwds): ignore_kwds.append(hpsearch._OUT_ARG) for kwd in ignore_kwds: delattr(config, kwd) if (len(forced_params.keys()) > 0): for (kwd, value) in forced_params.items(): setattr(config, kwd, value) (config_dn, config_bn) = os.path.split(cmd_args.config_name) if (len(config_dn) == 0): config_dn = tempfile.gettempdir() else: config_dn = os.path.abspath(config_dn) config_fn_prefix = os.path.splitext(config_bn)[0] config_name = os.path.join(config_dn, (config_fn_prefix + '.pickle')) if os.path.exists(config_name): if (len(config_dn) > 0): overwrite = input((('The config file "%s" ' % config_name) + 'already exists! Do you want to overwrite the file? [y/n] ')) if (not (overwrite in ['yes', 'y', 'Y'])): exit(1) else: config_name_temp = tempfile.NamedTemporaryFile(prefix=config_fn_prefix, suffix='.pickle') print(('Search grid "%s" already exists, using name "%s" instead!' % (config_name, config_name_temp.name))) config_name = config_name_temp.name config_name_temp.close() (grid, conditions) = build_grid_and_conditions(cmd_args, config, seeds_list) rseed_config = {'grid': grid, 'conditions': conditions} with open(config_name, 'wb') as f: pickle.dump(rseed_config, f) if cmd_args.start_gathering: cmd_str = get_hpsearch_call(cmd_args, len(seeds_list), config_name, hpsearch_dir=hpsearch_dir) print(cmd_str) if (hpmod_path is not None): backup_curr_path = os.getcwd() os.chdir(hpmod_path) if (cmd_args.run_cluster and (cmd_args.scheduler == 'slurm')): job_script_fn = hpsearch._write_slurm_script(Namespace(**{'num_hours': cmd_args.hps_num_hours, 'slurm_mem': cmd_args.hps_slurm_mem, 'slurm_gres': 'gpu:0', 'slurm_partition': cmd_args.slurm_partition, 'slurm_qos': cmd_args.slurm_qos, 'slurm_constraint': cmd_args.slurm_constraint}), cmd_str, 'random_seeds') cmd_str = ('sbatch %s' % job_script_fn) print(('We will execute command "%s".' 
% cmd_str)) print('Starting gathering random seeds...') ret = call(cmd_str, shell=True, executable='/bin/bash') print(('Call finished with return code %d.' % ret)) if (hpmod_path is not None): os.chdir(backup_curr_path) if ((not cmd_args.run_cluster) and (hpsearch_dir is not None)): write_seeds_summary(hpsearch_dir, summary_keys, summary_sem, summary_precs) print('Random seed gathering finished successfully!') exit(0) print((hpsearch_dir is None)) if (hpsearch_dir is not None): print((('IMPORTANT: At least one random seed has already been ' + 'gathered! Please ensure that the hpsearch forces the correct ') + 'output path.')) print('Below is a possible hpsearch call:') call_appendix = if (hpsearch_dir is not None): call_appendix = ('--force_out_dir --dont_force_new_dir ' + ('--out_dir=%s' % hpsearch_dir)) print() print(('python3 hpsearch.py --grid_module=%s --grid_config=%s %s' % (cmd_args.grid_module, config_name, call_appendix))) print() if (hpsearch_dir is None): print(('Below is the "grid_module" name and the path to the ' + '"grid_config".')) print(cmd_args.grid_module, config_name) else: print((('Below is the "grid_module" name, the path to the ' + '"grid_config" and the output path that should be used for the ') + 'hpsearch.')) print(cmd_args.grid_module, config_name, hpsearch_dir)
def run(grid_module=None, results_dir='./out/random_seeds', config=None, ignore_kwds=None, forced_params=None, summary_keys=None, summary_sem=False, summary_precs=None, hpmod_path=None): "Run the script.\n\n Args:\n grid_module (str, optional): Name of the reference module which contains\n the hyperparameter search config that can be modified to gather\n random seeds.\n results_dir (str, optional): The path where the hpsearch should store\n its results.\n config: The Namespace object containing argument names and values.\n If provided, all random seeds will be gathered from zero, with no\n reference run.\n ignore_kwds (list, optional): A list of keywords in the config file\n to exclude from the grid.\n forced_params (dict, optional): Dict of key-value pairs specifying\n hyperparameter values that should be fixed across runs.\n summary_keys (list, optional): If provided, those mean and std of those\n summary keys will be written by function\n :func:`write_seeds_summary`. Otherwise, the performance key defined\n in ``grid_module`` will be used.\n summary_sem (bool): Whether SEM or SD should be calculated in function\n :func:`write_seeds_summary`.\n summary_precs (list or int, optional): The precision with which the\n summary statistics according to ``summary_keys`` should be listed.\n hpmod_path (str, optional): If the hpsearch doesn't reside in the same\n directory as the calling script, then we need to know from where to\n start the hpsearch.\n " if (ignore_kwds is None): ignore_kwds = [] if (forced_params is None): forced_params = {} parser = argparse.ArgumentParser(description='Gathering random seeds for the specified experiment.') parser.add_argument('--seeds_dir', type=str, default=, help=(((((('If provided, all other arguments (except ' + '"grid_module") are ignored! ') + 'This is supposed to be the output folder of a ') + 'random seed gathering experiment. 
If provided, ') + 'the results (for different seeds) within this ') + 'directory are gathered and written to a human-') + 'readible text file.')) parser.add_argument('--run_dir', type=str, default=, help=(((((('The output directory of a simulation or a ' + 'hyperparameter search. For single runs, the configuration will be ') + 'loaded and run with different seeds.') + 'For multiple runs, i.e. results of ') + 'hyperparameter searches, the configuration ') + 'leading to the best performance will be ') + 'selected and run with different seeds.')) parser.add_argument('--config_name', type=str, default='hpsearch_random_seeds', help=(((('A name for this call of gathering random ' + 'seeds. As multiple gatherings might be running ') + 'in parallel, it is important that this name is ') + 'unique name for each experiment. ') + 'Default: %(default)s.')) parser.add_argument('--grid_module', type=str, default=grid_module, help=(('See CLI argument "grid_module" of ' + 'hyperparameter search script "hpsearch". ') + ('Default: %(default)s.' if (grid_module is not None) else ))) parser.add_argument('--num_seeds', type=int, default=10, help='The number of different random seeds.') parser.add_argument('--seeds_list', type=str, default=, help=('The list of seeds to use. If specified, ' + '"num_seeds" will be ignored.')) parser.add_argument('--vary_data_seed', action='store_true', help=(('If activated, "data_random_seed"s are set ' + 'equal to "random_seed"s. Otherwise only ') + '"random_seed"s are varied.')) parser.add_argument('--start_gathering', action='store_true', help=('If activated, the actual gathering of random ' + 'seeds is started via the "hpsearch.py" script.')) hpgroup = parser.add_argument_group('Hpsearch call options') hpgroup.add_argument('--hps_num_hours', type=int, metavar='N', default=24, help=((('If "run_cluster" is activated, then this ' + 'option determines the maximum number of hours ') + 'the entire search may run on the cluster. 
') + 'Default: %(default)s.')) hpgroup.add_argument('--hps_resources', type=str, default='"rusage[mem=8000]"', help=(((('If "run_cluster" is activated and "scheduler" ' + 'is "lsf", then this option determines the ') + 'resources assigned to the entire ') + 'hyperparameter search (option -R of bsub). ') + 'Default: %(default)s.')) hpgroup.add_argument('--hps_slurm_mem', type=str, default='8G', help=('See option "slum_mem". This argument effects ' + 'hyperparameter search itself. Default: %(default)s.')) rsgroup = parser.add_argument_group('Random seed hpsearch options') hpsearch.hpsearch_cli_arguments(rsgroup, show_out_dir=False, show_grid_module=False) cmd_args = parser.parse_args() grid_module = cmd_args.grid_module if (grid_module is None): raise ValueError('"grid_module" needs to be specified.') grid_module = importlib.import_module(grid_module) hpsearch._read_config(grid_module, require_perf_eval_handle=True) if (summary_keys is None): summary_keys = [hpsearch._PERFORMANCE_KEY] if len(cmd_args.seeds_dir): print('Writing seed summary ...') write_seeds_summary(cmd_args.seeds_dir, summary_keys, summary_sem, summary_precs) exit(0) if (len(cmd_args.seeds_list) > 0): seeds_list = misc.str_to_ints(cmd_args.seeds_list) cmd_args.num_seeds = len(seeds_list) else: seeds_list = list(range(cmd_args.num_seeds)) if ((config is not None) and (cmd_args.run_dir != )): raise ValueError(('"run_dir" may not be specified if configuration ' + 'is provided directly.')) hpsearch_dir = None if (config is None): if (not os.path.exists(cmd_args.run_dir)): raise_error = True if (cmd_args.run_cwd != ): tmp_dir = os.path.join(cmd_args.run_cwd, cmd_args.run_dir) if os.path.exists(tmp_dir): cmd_args.run_dir = tmp_dir raise_error = False if raise_error: raise ValueError(('Directory "%s" does not exist!' 
% cmd_args.run_dir)) single_run = False if os.path.exists(os.path.join(cmd_args.run_dir, 'config.pickle')): single_run = True if single_run: config = get_single_run_config(cmd_args.run_dir) run_dir = cmd_args.run_dir else: (config, run_dir) = get_best_hpsearch_config(cmd_args.run_dir) try: performance_dict = hpsearch._SUMMARY_PARSER_HANDLE(run_dir, (- 1)) has_finished = int(performance_dict['finished'][0]) if (not has_finished): raise Exception() use_run = True except: use_run = False if use_run: run_dir = os.path.normpath(run_dir) if (not os.path.isabs(results_dir)): if os.path.isdir(cmd_args.run_cwd): results_dir = os.path.join(cmd_args.run_cwd, results_dir) results_dir = os.path.abspath(results_dir) hpsearch_dir = os.path.join(results_dir, os.path.basename(run_dir)) if os.path.exists(hpsearch_dir): warn(('Folder "%s" already exists.' % hpsearch_dir)) print('Attempting to aggregate random seed results ...') gathered_seeds = write_seeds_summary(hpsearch_dir, summary_keys, summary_sem, summary_precs, ret_seeds=True) if (len(gathered_seeds) >= len(seeds_list)): print('Already enough seeds have been gathered!') exit(0) for gs in gathered_seeds: if (gs in seeds_list): seeds_list.remove(gs) else: ignored_seed = seeds_list.pop() if (len(cmd_args.seeds_list) > 0): print((('Seed %d is ignored as seed %d already ' % (ignored_seed, gs)) + 'exists.')) else: os.makedirs(hpsearch_dir) shutil.copytree(run_dir, os.path.join(hpsearch_dir, os.path.basename(run_dir))) if (config.random_seed in seeds_list): seeds_list.remove(config.random_seed) else: ignored_seed = seeds_list.pop() if (len(cmd_args.seeds_list) > 0): print(('Seed %d is ignored as seed %d already exists.' % (ignored_seed, config.random_seed))) print(('%d random seeds will be gathered!' 
% len(seeds_list))) if (hpsearch._OUT_ARG not in ignore_kwds): ignore_kwds.append(hpsearch._OUT_ARG) for kwd in ignore_kwds: delattr(config, kwd) if (len(forced_params.keys()) > 0): for (kwd, value) in forced_params.items(): setattr(config, kwd, value) (config_dn, config_bn) = os.path.split(cmd_args.config_name) if (len(config_dn) == 0): config_dn = tempfile.gettempdir() else: config_dn = os.path.abspath(config_dn) config_fn_prefix = os.path.splitext(config_bn)[0] config_name = os.path.join(config_dn, (config_fn_prefix + '.pickle')) if os.path.exists(config_name): if (len(config_dn) > 0): overwrite = input((('The config file "%s" ' % config_name) + 'already exists! Do you want to overwrite the file? [y/n] ')) if (not (overwrite in ['yes', 'y', 'Y'])): exit(1) else: config_name_temp = tempfile.NamedTemporaryFile(prefix=config_fn_prefix, suffix='.pickle') print(('Search grid "%s" already exists, using name "%s" instead!' % (config_name, config_name_temp.name))) config_name = config_name_temp.name config_name_temp.close() (grid, conditions) = build_grid_and_conditions(cmd_args, config, seeds_list) rseed_config = {'grid': grid, 'conditions': conditions} with open(config_name, 'wb') as f: pickle.dump(rseed_config, f) if cmd_args.start_gathering: cmd_str = get_hpsearch_call(cmd_args, len(seeds_list), config_name, hpsearch_dir=hpsearch_dir) print(cmd_str) if (hpmod_path is not None): backup_curr_path = os.getcwd() os.chdir(hpmod_path) if (cmd_args.run_cluster and (cmd_args.scheduler == 'slurm')): job_script_fn = hpsearch._write_slurm_script(Namespace(**{'num_hours': cmd_args.hps_num_hours, 'slurm_mem': cmd_args.hps_slurm_mem, 'slurm_gres': 'gpu:0', 'slurm_partition': cmd_args.slurm_partition, 'slurm_qos': cmd_args.slurm_qos, 'slurm_constraint': cmd_args.slurm_constraint}), cmd_str, 'random_seeds') cmd_str = ('sbatch %s' % job_script_fn) print(('We will execute command "%s".' 
% cmd_str)) print('Starting gathering random seeds...') ret = call(cmd_str, shell=True, executable='/bin/bash') print(('Call finished with return code %d.' % ret)) if (hpmod_path is not None): os.chdir(backup_curr_path) if ((not cmd_args.run_cluster) and (hpsearch_dir is not None)): write_seeds_summary(hpsearch_dir, summary_keys, summary_sem, summary_precs) print('Random seed gathering finished successfully!') exit(0) print((hpsearch_dir is None)) if (hpsearch_dir is not None): print((('IMPORTANT: At least one random seed has already been ' + 'gathered! Please ensure that the hpsearch forces the correct ') + 'output path.')) print('Below is a possible hpsearch call:') call_appendix = if (hpsearch_dir is not None): call_appendix = ('--force_out_dir --dont_force_new_dir ' + ('--out_dir=%s' % hpsearch_dir)) print() print(('python3 hpsearch.py --grid_module=%s --grid_config=%s %s' % (cmd_args.grid_module, config_name, call_appendix))) print() if (hpsearch_dir is None): print(('Below is the "grid_module" name and the path to the ' + '"grid_config".')) print(cmd_args.grid_module, config_name) else: print((('Below is the "grid_module" name, the path to the ' + '"grid_config" and the output path that should be used for the ') + 'hpsearch.')) print(cmd_args.grid_module, config_name, hpsearch_dir)<|docstring|>Run the script. Args: grid_module (str, optional): Name of the reference module which contains the hyperparameter search config that can be modified to gather random seeds. results_dir (str, optional): The path where the hpsearch should store its results. config: The Namespace object containing argument names and values. If provided, all random seeds will be gathered from zero, with no reference run. ignore_kwds (list, optional): A list of keywords in the config file to exclude from the grid. forced_params (dict, optional): Dict of key-value pairs specifying hyperparameter values that should be fixed across runs. 
summary_keys (list, optional): If provided, those mean and std of those summary keys will be written by function :func:`write_seeds_summary`. Otherwise, the performance key defined in ``grid_module`` will be used. summary_sem (bool): Whether SEM or SD should be calculated in function :func:`write_seeds_summary`. summary_precs (list or int, optional): The precision with which the summary statistics according to ``summary_keys`` should be listed. hpmod_path (str, optional): If the hpsearch doesn't reside in the same directory as the calling script, then we need to know from where to start the hpsearch.<|endoftext|>
f4246c08450ca6fe2daed8261fd4a9d9915e6b94503dd42663449788dbc5970d
def get_domains_to_update_es_filter(): "\n Returns ES filter to filter domains that are never updated or\n domains that haven't been updated since a week or domains that\n have been updated within last week but have new form submissions\n in the last day.\n " last_week = (datetime.utcnow() - timedelta(days=7)) more_than_a_week_ago = filters.date_range('cp_last_updated', lt=last_week) less_than_a_week_ago = filters.date_range('cp_last_updated', gte=last_week) not_updated = filters.missing('cp_last_updated') domains_submitted_today = FormES().submitted(gte=(datetime.utcnow() - timedelta(days=1))).terms_aggregation('domain', 'domain').size(0).run().aggregations.domain.keys return filters.OR(not_updated, more_than_a_week_ago, filters.AND(less_than_a_week_ago, filters.term('name', domains_submitted_today)))
Returns ES filter to filter domains that are never updated or domains that haven't been updated since a week or domains that have been updated within last week but have new form submissions in the last day.
corehq/apps/reports/tasks.py
get_domains_to_update_es_filter
kkrampa/commcare-hq
1
python
def get_domains_to_update_es_filter(): "\n Returns ES filter to filter domains that are never updated or\n domains that haven't been updated since a week or domains that\n have been updated within last week but have new form submissions\n in the last day.\n " last_week = (datetime.utcnow() - timedelta(days=7)) more_than_a_week_ago = filters.date_range('cp_last_updated', lt=last_week) less_than_a_week_ago = filters.date_range('cp_last_updated', gte=last_week) not_updated = filters.missing('cp_last_updated') domains_submitted_today = FormES().submitted(gte=(datetime.utcnow() - timedelta(days=1))).terms_aggregation('domain', 'domain').size(0).run().aggregations.domain.keys return filters.OR(not_updated, more_than_a_week_ago, filters.AND(less_than_a_week_ago, filters.term('name', domains_submitted_today)))
def get_domains_to_update_es_filter(): "\n Returns ES filter to filter domains that are never updated or\n domains that haven't been updated since a week or domains that\n have been updated within last week but have new form submissions\n in the last day.\n " last_week = (datetime.utcnow() - timedelta(days=7)) more_than_a_week_ago = filters.date_range('cp_last_updated', lt=last_week) less_than_a_week_ago = filters.date_range('cp_last_updated', gte=last_week) not_updated = filters.missing('cp_last_updated') domains_submitted_today = FormES().submitted(gte=(datetime.utcnow() - timedelta(days=1))).terms_aggregation('domain', 'domain').size(0).run().aggregations.domain.keys return filters.OR(not_updated, more_than_a_week_ago, filters.AND(less_than_a_week_ago, filters.term('name', domains_submitted_today)))<|docstring|>Returns ES filter to filter domains that are never updated or domains that haven't been updated since a week or domains that have been updated within last week but have new form submissions in the last day.<|endoftext|>
be391f733777f9ae0dd4e4955c5abde3f76bf1feb4babc675fc1c8082ca41bd3
def _get_export_properties(export_id): '\n Return a list of strings corresponding to form questions that are\n included in the export.\n ' properties = set() if export_id: from corehq.apps.export.models import FormExportInstance export = FormExportInstance.get(export_id) for table in export.tables: for column in table.columns: if (column.selected and column.item): path_parts = [n.name for n in column.item.path] path_parts = (path_parts[1:] if (path_parts[0] == 'form') else path_parts) properties.add('-'.join(path_parts)) return properties
Return a list of strings corresponding to form questions that are included in the export.
corehq/apps/reports/tasks.py
_get_export_properties
kkrampa/commcare-hq
1
python
def _get_export_properties(export_id): '\n Return a list of strings corresponding to form questions that are\n included in the export.\n ' properties = set() if export_id: from corehq.apps.export.models import FormExportInstance export = FormExportInstance.get(export_id) for table in export.tables: for column in table.columns: if (column.selected and column.item): path_parts = [n.name for n in column.item.path] path_parts = (path_parts[1:] if (path_parts[0] == 'form') else path_parts) properties.add('-'.join(path_parts)) return properties
def _get_export_properties(export_id): '\n Return a list of strings corresponding to form questions that are\n included in the export.\n ' properties = set() if export_id: from corehq.apps.export.models import FormExportInstance export = FormExportInstance.get(export_id) for table in export.tables: for column in table.columns: if (column.selected and column.item): path_parts = [n.name for n in column.item.path] path_parts = (path_parts[1:] if (path_parts[0] == 'form') else path_parts) properties.add('-'.join(path_parts)) return properties<|docstring|>Return a list of strings corresponding to form questions that are included in the export.<|endoftext|>
ec2fe7c6668e3ffa633f4686b0a9a71cd5b08ab4dda34a3e887416ff15f92d73
def _extract_form_attachment_info(form, properties): '\n This is a helper function for build_form_multimedia_zip.\n Return a dict containing information about the given form and its relevant\n attachments\n ' def find_question_id(form, value): for (k, v) in six.iteritems(form): if isinstance(v, dict): ret = find_question_id(v, value) if ret: return ([k] + ret) elif isinstance(v, list): for repeat in v: ret = find_question_id(repeat, value) if ret: return ([k] + ret) elif (v == value): return [k] return None unknown_number = 0 case_blocks = extract_case_blocks(form.form_data) form_info = {'form': form, 'attachments': [], 'case_ids': {c['@case_id'] for c in case_blocks}, 'username': form.get_data('form/meta/username')} for (attachment_name, attachment) in six.iteritems(form.attachments): if hasattr(attachment, 'content_type'): content_type = attachment.content_type else: content_type = attachment['content_type'] if (content_type == 'text/xml'): continue try: question_id = six.text_type('-'.join(find_question_id(form.form_data, attachment_name))) except TypeError: question_id = ('unknown' + six.text_type(unknown_number)) unknown_number += 1 if ((not properties) or (question_id in properties)): extension = six.text_type(os.path.splitext(attachment_name)[1]) if hasattr(attachment, 'content_length'): size = attachment.content_length elif ('content_length' in attachment): size = attachment['content_length'] else: size = attachment['length'] form_info['attachments'].append({'size': size, 'name': attachment_name, 'question_id': question_id, 'extension': extension, 'timestamp': form.received_on.timetuple()}) return form_info
This is a helper function for build_form_multimedia_zip. Return a dict containing information about the given form and its relevant attachments
corehq/apps/reports/tasks.py
_extract_form_attachment_info
kkrampa/commcare-hq
1
python
def _extract_form_attachment_info(form, properties): '\n This is a helper function for build_form_multimedia_zip.\n Return a dict containing information about the given form and its relevant\n attachments\n ' def find_question_id(form, value): for (k, v) in six.iteritems(form): if isinstance(v, dict): ret = find_question_id(v, value) if ret: return ([k] + ret) elif isinstance(v, list): for repeat in v: ret = find_question_id(repeat, value) if ret: return ([k] + ret) elif (v == value): return [k] return None unknown_number = 0 case_blocks = extract_case_blocks(form.form_data) form_info = {'form': form, 'attachments': [], 'case_ids': {c['@case_id'] for c in case_blocks}, 'username': form.get_data('form/meta/username')} for (attachment_name, attachment) in six.iteritems(form.attachments): if hasattr(attachment, 'content_type'): content_type = attachment.content_type else: content_type = attachment['content_type'] if (content_type == 'text/xml'): continue try: question_id = six.text_type('-'.join(find_question_id(form.form_data, attachment_name))) except TypeError: question_id = ('unknown' + six.text_type(unknown_number)) unknown_number += 1 if ((not properties) or (question_id in properties)): extension = six.text_type(os.path.splitext(attachment_name)[1]) if hasattr(attachment, 'content_length'): size = attachment.content_length elif ('content_length' in attachment): size = attachment['content_length'] else: size = attachment['length'] form_info['attachments'].append({'size': size, 'name': attachment_name, 'question_id': question_id, 'extension': extension, 'timestamp': form.received_on.timetuple()}) return form_info
def _extract_form_attachment_info(form, properties): '\n This is a helper function for build_form_multimedia_zip.\n Return a dict containing information about the given form and its relevant\n attachments\n ' def find_question_id(form, value): for (k, v) in six.iteritems(form): if isinstance(v, dict): ret = find_question_id(v, value) if ret: return ([k] + ret) elif isinstance(v, list): for repeat in v: ret = find_question_id(repeat, value) if ret: return ([k] + ret) elif (v == value): return [k] return None unknown_number = 0 case_blocks = extract_case_blocks(form.form_data) form_info = {'form': form, 'attachments': [], 'case_ids': {c['@case_id'] for c in case_blocks}, 'username': form.get_data('form/meta/username')} for (attachment_name, attachment) in six.iteritems(form.attachments): if hasattr(attachment, 'content_type'): content_type = attachment.content_type else: content_type = attachment['content_type'] if (content_type == 'text/xml'): continue try: question_id = six.text_type('-'.join(find_question_id(form.form_data, attachment_name))) except TypeError: question_id = ('unknown' + six.text_type(unknown_number)) unknown_number += 1 if ((not properties) or (question_id in properties)): extension = six.text_type(os.path.splitext(attachment_name)[1]) if hasattr(attachment, 'content_length'): size = attachment.content_length elif ('content_length' in attachment): size = attachment['content_length'] else: size = attachment['length'] form_info['attachments'].append({'size': size, 'name': attachment_name, 'question_id': question_id, 'extension': extension, 'timestamp': form.received_on.timetuple()}) return form_info<|docstring|>This is a helper function for build_form_multimedia_zip. Return a dict containing information about the given form and its relevant attachments<|endoftext|>
781d7c27232059dae25f9d95c09eb7fc35787735be1f2f950635659a167d65ba
def read_csv(filename): '\n\n Parameters\n ----------\n filename : str\n Path to the CSV file.\n\n Returns\n -------\n df_new : dataframe\n Normalised coordinates of 3D pose.\n\n ' dataframe = pd.read_csv(filename, index_col='Body Part') xmax = (- 10000) ymax = (- 10000) zmax = (- 10000) xmin = 10000 ymin = 10000 zmin = 10000 for key in dataframe.keys(): data = list(dataframe[key][1:]) data = list(map(float, data)) data_num = np.array(data) data_num = data_num[np.where((~ np.isnan(data_num)))] keys = key.split('.') if (len(keys) == 1): key_new = (keys[0], 'x') xmax = max(xmax, np.max(data_num)) xmin = min(xmin, np.min(data_num)) elif ((len(keys) == 2) and (keys[1] == '1')): key_new = (keys[0], 'y') ymax = max(ymax, np.max(data_num)) ymin = min(ymin, np.min(data_num)) elif ((len(keys) == 2) and (keys[1] == '2')): key_new = (keys[0], 'y') zmax = max(zmax, np.max(data_num)) zmin = min(zmin, np.min(data_num)) if (key == 'MidHip'): data_midhip = data_num if (key == 'Neck'): data_neck = data_num xc = ((np.mean(data_neck) + np.mean(data_midhip)) / 2) yc = ((ymax + ymin) / 2) zc = ((zmax + zmin) / 2) width = (2 * max((xc - xmin), (xmax - xc))) height = (ymax - ymin) depth = (zmax - zmin) sq = max(width, height) sq = max(sq, depth) sq = (np.ceil((sq / 100)) * 100) depth = width = height = sq xmin = (xc - (sq / 2)) ymin = (yc - (sq / 2)) zmin = (zc - (sq / 2)) df = dict() for key in dataframe.keys(): data = list(dataframe[key][1:]) data = list(map(float, data)) nan_idx = np.where(np.isnan(data))[0] if (len(nan_idx) == len(data)): data[:] = 0 elif (len(nan_idx) > 0): for jj in nan_idx: if (jj == 0): data[jj] = np.where((~ np.isnan(data)))[0][0] else: data[jj] = data[(jj - 1)] keys = key.split('.') if (len(keys) == 1): key_new = (keys[0], 'x') data = np.round(list(((np.array(data) - xmin) / width)), 5) elif ((len(keys) == 2) and (keys[1] == '1')): key_new = (keys[0], 'y') data = np.round(list(((np.array(data) - ymin) / height)), 5) elif ((len(keys) == 2) and (keys[1] == '2')): 
key_new = (keys[0], 'z') data = np.round(list(((np.array(data) - zmin) / depth)), 5) else: key_new = (keys[0], 'c') data = np.array(data) df[key_new] = data df_new = pd.DataFrame(df) return df_new
Parameters ---------- filename : str Path to the CSV file. Returns ------- df_new : dataframe Normalised coordinates of 3D pose.
utils/create_blank_3d.py
read_csv
alisonrclarke/raga-pose-estimation-1
1
python
def read_csv(filename): '\n\n Parameters\n ----------\n filename : str\n Path to the CSV file.\n\n Returns\n -------\n df_new : dataframe\n Normalised coordinates of 3D pose.\n\n ' dataframe = pd.read_csv(filename, index_col='Body Part') xmax = (- 10000) ymax = (- 10000) zmax = (- 10000) xmin = 10000 ymin = 10000 zmin = 10000 for key in dataframe.keys(): data = list(dataframe[key][1:]) data = list(map(float, data)) data_num = np.array(data) data_num = data_num[np.where((~ np.isnan(data_num)))] keys = key.split('.') if (len(keys) == 1): key_new = (keys[0], 'x') xmax = max(xmax, np.max(data_num)) xmin = min(xmin, np.min(data_num)) elif ((len(keys) == 2) and (keys[1] == '1')): key_new = (keys[0], 'y') ymax = max(ymax, np.max(data_num)) ymin = min(ymin, np.min(data_num)) elif ((len(keys) == 2) and (keys[1] == '2')): key_new = (keys[0], 'y') zmax = max(zmax, np.max(data_num)) zmin = min(zmin, np.min(data_num)) if (key == 'MidHip'): data_midhip = data_num if (key == 'Neck'): data_neck = data_num xc = ((np.mean(data_neck) + np.mean(data_midhip)) / 2) yc = ((ymax + ymin) / 2) zc = ((zmax + zmin) / 2) width = (2 * max((xc - xmin), (xmax - xc))) height = (ymax - ymin) depth = (zmax - zmin) sq = max(width, height) sq = max(sq, depth) sq = (np.ceil((sq / 100)) * 100) depth = width = height = sq xmin = (xc - (sq / 2)) ymin = (yc - (sq / 2)) zmin = (zc - (sq / 2)) df = dict() for key in dataframe.keys(): data = list(dataframe[key][1:]) data = list(map(float, data)) nan_idx = np.where(np.isnan(data))[0] if (len(nan_idx) == len(data)): data[:] = 0 elif (len(nan_idx) > 0): for jj in nan_idx: if (jj == 0): data[jj] = np.where((~ np.isnan(data)))[0][0] else: data[jj] = data[(jj - 1)] keys = key.split('.') if (len(keys) == 1): key_new = (keys[0], 'x') data = np.round(list(((np.array(data) - xmin) / width)), 5) elif ((len(keys) == 2) and (keys[1] == '1')): key_new = (keys[0], 'y') data = np.round(list(((np.array(data) - ymin) / height)), 5) elif ((len(keys) == 2) and (keys[1] == '2')): 
key_new = (keys[0], 'z') data = np.round(list(((np.array(data) - zmin) / depth)), 5) else: key_new = (keys[0], 'c') data = np.array(data) df[key_new] = data df_new = pd.DataFrame(df) return df_new
def read_csv(filename): '\n\n Parameters\n ----------\n filename : str\n Path to the CSV file.\n\n Returns\n -------\n df_new : dataframe\n Normalised coordinates of 3D pose.\n\n ' dataframe = pd.read_csv(filename, index_col='Body Part') xmax = (- 10000) ymax = (- 10000) zmax = (- 10000) xmin = 10000 ymin = 10000 zmin = 10000 for key in dataframe.keys(): data = list(dataframe[key][1:]) data = list(map(float, data)) data_num = np.array(data) data_num = data_num[np.where((~ np.isnan(data_num)))] keys = key.split('.') if (len(keys) == 1): key_new = (keys[0], 'x') xmax = max(xmax, np.max(data_num)) xmin = min(xmin, np.min(data_num)) elif ((len(keys) == 2) and (keys[1] == '1')): key_new = (keys[0], 'y') ymax = max(ymax, np.max(data_num)) ymin = min(ymin, np.min(data_num)) elif ((len(keys) == 2) and (keys[1] == '2')): key_new = (keys[0], 'y') zmax = max(zmax, np.max(data_num)) zmin = min(zmin, np.min(data_num)) if (key == 'MidHip'): data_midhip = data_num if (key == 'Neck'): data_neck = data_num xc = ((np.mean(data_neck) + np.mean(data_midhip)) / 2) yc = ((ymax + ymin) / 2) zc = ((zmax + zmin) / 2) width = (2 * max((xc - xmin), (xmax - xc))) height = (ymax - ymin) depth = (zmax - zmin) sq = max(width, height) sq = max(sq, depth) sq = (np.ceil((sq / 100)) * 100) depth = width = height = sq xmin = (xc - (sq / 2)) ymin = (yc - (sq / 2)) zmin = (zc - (sq / 2)) df = dict() for key in dataframe.keys(): data = list(dataframe[key][1:]) data = list(map(float, data)) nan_idx = np.where(np.isnan(data))[0] if (len(nan_idx) == len(data)): data[:] = 0 elif (len(nan_idx) > 0): for jj in nan_idx: if (jj == 0): data[jj] = np.where((~ np.isnan(data)))[0][0] else: data[jj] = data[(jj - 1)] keys = key.split('.') if (len(keys) == 1): key_new = (keys[0], 'x') data = np.round(list(((np.array(data) - xmin) / width)), 5) elif ((len(keys) == 2) and (keys[1] == '1')): key_new = (keys[0], 'y') data = np.round(list(((np.array(data) - ymin) / height)), 5) elif ((len(keys) == 2) and (keys[1] == '2')): 
key_new = (keys[0], 'z') data = np.round(list(((np.array(data) - zmin) / depth)), 5) else: key_new = (keys[0], 'c') data = np.array(data) df[key_new] = data df_new = pd.DataFrame(df) return df_new<|docstring|>Parameters ---------- filename : str Path to the CSV file. Returns ------- df_new : dataframe Normalised coordinates of 3D pose.<|endoftext|>
cf21f847fd6c089dc03aa470f49e7bc154155d85750c428f31319c061692bb64
def create_3d_video(output_path, df, parts=PARTS, skeleton=SKELETON_EDGES, output=True): '\n\n Parameters\n ----------\n output_path : str\n Path for the created video.\n df : dataframe\n 3D pose\n parts : list, optional\n The name of body parts. The default is PARTS.\n skeleton : narray, optional\n Indicating which two keypoints are connected. The default is SKELETON_EDGES.\n output : bool, optional\n True for storing the video file, otherwise only show the real-time window. The default is True.\n\n Returns\n -------\n None.\n\n ' canvas_3d = np.zeros((360, 640, 3), dtype=np.uint8) canvas_3d_window_name = 'Pose_3D' cv2.namedWindow(canvas_3d_window_name) cv2.setMouseCallback(canvas_3d_window_name, Plotter3d.mouse_callback) plotter = Plotter3d(canvas_3d.shape[:2], parts=parts, skeleton_edges=skeleton) depth = width = height = 200 if output: fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') out = cv2.VideoWriter(output_path, fourcc, 25, (640, 360)) poses_3d_array = np.zeros((len(df), int((len(df.keys()) / 4)), 3)) for (ii, part) in enumerate(plotter.parts): poses_3d_array[(:, ii, 0)] = ((df[(part, 'x')] * width) - 125) poses_3d_array[(:, ii, 1)] = ((df[(part, 'y')] * height) - 125) poses_3d_array[(:, ii, 2)] = (df[(part, 'z')] * depth) for num in range(len(df)): poses_3d = poses_3d_array[num] edges = plotter.skeleton_edges plotter.plot(canvas_3d, poses_3d, edges) if (not output): cv2.imshow(canvas_3d_window_name, canvas_3d) delay = 1 esc_code = 27 p_code = 112 space_code = 32 key = cv2.waitKey(delay) if (key == esc_code): break if (key == p_code): if (delay == 1): delay = 0 else: delay = 1 if (delay == 0): key = 0 while ((key != p_code) and (key != esc_code) and (key != space_code)): plotter.plot(canvas_3d, poses_3d, edges) cv2.imshow(canvas_3d_window_name, canvas_3d) key = cv2.waitKey(33) if (key == esc_code): break else: delay = 1 else: out.write(canvas_3d) if output: out.release()
Parameters ---------- output_path : str Path for the created video. df : dataframe 3D pose parts : list, optional The name of body parts. The default is PARTS. skeleton : narray, optional Indicating which two keypoints are connected. The default is SKELETON_EDGES. output : bool, optional True for storing the video file, otherwise only show the real-time window. The default is True. Returns ------- None.
utils/create_blank_3d.py
create_3d_video
alisonrclarke/raga-pose-estimation-1
1
python
def create_3d_video(output_path, df, parts=PARTS, skeleton=SKELETON_EDGES, output=True): '\n\n Parameters\n ----------\n output_path : str\n Path for the created video.\n df : dataframe\n 3D pose\n parts : list, optional\n The name of body parts. The default is PARTS.\n skeleton : narray, optional\n Indicating which two keypoints are connected. The default is SKELETON_EDGES.\n output : bool, optional\n True for storing the video file, otherwise only show the real-time window. The default is True.\n\n Returns\n -------\n None.\n\n ' canvas_3d = np.zeros((360, 640, 3), dtype=np.uint8) canvas_3d_window_name = 'Pose_3D' cv2.namedWindow(canvas_3d_window_name) cv2.setMouseCallback(canvas_3d_window_name, Plotter3d.mouse_callback) plotter = Plotter3d(canvas_3d.shape[:2], parts=parts, skeleton_edges=skeleton) depth = width = height = 200 if output: fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') out = cv2.VideoWriter(output_path, fourcc, 25, (640, 360)) poses_3d_array = np.zeros((len(df), int((len(df.keys()) / 4)), 3)) for (ii, part) in enumerate(plotter.parts): poses_3d_array[(:, ii, 0)] = ((df[(part, 'x')] * width) - 125) poses_3d_array[(:, ii, 1)] = ((df[(part, 'y')] * height) - 125) poses_3d_array[(:, ii, 2)] = (df[(part, 'z')] * depth) for num in range(len(df)): poses_3d = poses_3d_array[num] edges = plotter.skeleton_edges plotter.plot(canvas_3d, poses_3d, edges) if (not output): cv2.imshow(canvas_3d_window_name, canvas_3d) delay = 1 esc_code = 27 p_code = 112 space_code = 32 key = cv2.waitKey(delay) if (key == esc_code): break if (key == p_code): if (delay == 1): delay = 0 else: delay = 1 if (delay == 0): key = 0 while ((key != p_code) and (key != esc_code) and (key != space_code)): plotter.plot(canvas_3d, poses_3d, edges) cv2.imshow(canvas_3d_window_name, canvas_3d) key = cv2.waitKey(33) if (key == esc_code): break else: delay = 1 else: out.write(canvas_3d) if output: out.release()
def create_3d_video(output_path, df, parts=PARTS, skeleton=SKELETON_EDGES, output=True): '\n\n Parameters\n ----------\n output_path : str\n Path for the created video.\n df : dataframe\n 3D pose\n parts : list, optional\n The name of body parts. The default is PARTS.\n skeleton : narray, optional\n Indicating which two keypoints are connected. The default is SKELETON_EDGES.\n output : bool, optional\n True for storing the video file, otherwise only show the real-time window. The default is True.\n\n Returns\n -------\n None.\n\n ' canvas_3d = np.zeros((360, 640, 3), dtype=np.uint8) canvas_3d_window_name = 'Pose_3D' cv2.namedWindow(canvas_3d_window_name) cv2.setMouseCallback(canvas_3d_window_name, Plotter3d.mouse_callback) plotter = Plotter3d(canvas_3d.shape[:2], parts=parts, skeleton_edges=skeleton) depth = width = height = 200 if output: fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') out = cv2.VideoWriter(output_path, fourcc, 25, (640, 360)) poses_3d_array = np.zeros((len(df), int((len(df.keys()) / 4)), 3)) for (ii, part) in enumerate(plotter.parts): poses_3d_array[(:, ii, 0)] = ((df[(part, 'x')] * width) - 125) poses_3d_array[(:, ii, 1)] = ((df[(part, 'y')] * height) - 125) poses_3d_array[(:, ii, 2)] = (df[(part, 'z')] * depth) for num in range(len(df)): poses_3d = poses_3d_array[num] edges = plotter.skeleton_edges plotter.plot(canvas_3d, poses_3d, edges) if (not output): cv2.imshow(canvas_3d_window_name, canvas_3d) delay = 1 esc_code = 27 p_code = 112 space_code = 32 key = cv2.waitKey(delay) if (key == esc_code): break if (key == p_code): if (delay == 1): delay = 0 else: delay = 1 if (delay == 0): key = 0 while ((key != p_code) and (key != esc_code) and (key != space_code)): plotter.plot(canvas_3d, poses_3d, edges) cv2.imshow(canvas_3d_window_name, canvas_3d) key = cv2.waitKey(33) if (key == esc_code): break else: delay = 1 else: out.write(canvas_3d) if output: out.release()<|docstring|>Parameters ---------- output_path : str Path for the created video. 
df : dataframe 3D pose parts : list, optional The name of body parts. The default is PARTS. skeleton : narray, optional Indicating which two keypoints are connected. The default is SKELETON_EDGES. output : bool, optional True for storing the video file, otherwise only show the real-time window. The default is True. Returns ------- None.<|endoftext|>
be5344ab918a97f12d47f11c8a705e418a5dc1a6aca20cf2eace2bc331b41bb1
def _read_in_raw_data(data_dir: str) -> Tuple[(DataFrame, DataFrame)]: 'Read in the raw water pump features and labels.\n\n Parameters\n ----------\n data_dir : str\n Path of the directory where `water_pump_features.csv` and\n `water_pump_labels.csv` can be found.\n\n Returns\n -------\n Tuple[DataFrame, DataFrame]\n DataFrames of the features and labels respectively.\n ' features_file_name = os.path.join(data_dir, 'water_pump_features.csv') features = pd.read_csv(features_file_name, parse_dates=['date_recorded'], dtype={'region_code': str, 'district_code': str}) labels_file_name = os.path.join(data_dir, 'water_pump_labels.csv') labels = pd.read_csv(labels_file_name) return (features, labels)
Read in the raw water pump features and labels. Parameters ---------- data_dir : str Path of the directory where `water_pump_features.csv` and `water_pump_labels.csv` can be found. Returns ------- Tuple[DataFrame, DataFrame] DataFrames of the features and labels respectively.
src/data/dataset.py
_read_in_raw_data
amritpurshotam/mlops-example
0
python
def _read_in_raw_data(data_dir: str) -> Tuple[(DataFrame, DataFrame)]: 'Read in the raw water pump features and labels.\n\n Parameters\n ----------\n data_dir : str\n Path of the directory where `water_pump_features.csv` and\n `water_pump_labels.csv` can be found.\n\n Returns\n -------\n Tuple[DataFrame, DataFrame]\n DataFrames of the features and labels respectively.\n ' features_file_name = os.path.join(data_dir, 'water_pump_features.csv') features = pd.read_csv(features_file_name, parse_dates=['date_recorded'], dtype={'region_code': str, 'district_code': str}) labels_file_name = os.path.join(data_dir, 'water_pump_labels.csv') labels = pd.read_csv(labels_file_name) return (features, labels)
def _read_in_raw_data(data_dir: str) -> Tuple[(DataFrame, DataFrame)]: 'Read in the raw water pump features and labels.\n\n Parameters\n ----------\n data_dir : str\n Path of the directory where `water_pump_features.csv` and\n `water_pump_labels.csv` can be found.\n\n Returns\n -------\n Tuple[DataFrame, DataFrame]\n DataFrames of the features and labels respectively.\n ' features_file_name = os.path.join(data_dir, 'water_pump_features.csv') features = pd.read_csv(features_file_name, parse_dates=['date_recorded'], dtype={'region_code': str, 'district_code': str}) labels_file_name = os.path.join(data_dir, 'water_pump_labels.csv') labels = pd.read_csv(labels_file_name) return (features, labels)<|docstring|>Read in the raw water pump features and labels. Parameters ---------- data_dir : str Path of the directory where `water_pump_features.csv` and `water_pump_labels.csv` can be found. Returns ------- Tuple[DataFrame, DataFrame] DataFrames of the features and labels respectively.<|endoftext|>
817da33f6dc3a7652e253e2ec0501f838394d2bba1f64cca3509023b3b6dda83
def _align_features_and_labels(features: DataFrame, labels: DataFrame) -> Tuple[(DataFrame, DataFrame)]: "Align the `feature`s and `labels` DataFrames so they're both in the same order\n removing the need to check the `id` columns in each.\n\n Parameters\n ----------\n features : DataFrame\n DataFrame containing the `id` attribute.\n labels : DataFrame\n DataFrame containing the `id` attribute that corresponds to the `id` in\n `features`.\n\n Returns\n -------\n Tuple[DataFrame, DataFrame]\n DataFrames of the features and labels respectively with the `id` column\n in `labels` dropped making it more amenable to pass into classifiers for\n training.\n " aligned = features.merge(labels, on='id', validate='one_to_one') labels = aligned['status_group'] features = aligned.drop(columns=['status_group']) return (features, labels)
Align the `feature`s and `labels` DataFrames so they're both in the same order removing the need to check the `id` columns in each. Parameters ---------- features : DataFrame DataFrame containing the `id` attribute. labels : DataFrame DataFrame containing the `id` attribute that corresponds to the `id` in `features`. Returns ------- Tuple[DataFrame, DataFrame] DataFrames of the features and labels respectively with the `id` column in `labels` dropped making it more amenable to pass into classifiers for training.
src/data/dataset.py
_align_features_and_labels
amritpurshotam/mlops-example
0
python
def _align_features_and_labels(features: DataFrame, labels: DataFrame) -> Tuple[(DataFrame, DataFrame)]: "Align the `feature`s and `labels` DataFrames so they're both in the same order\n removing the need to check the `id` columns in each.\n\n Parameters\n ----------\n features : DataFrame\n DataFrame containing the `id` attribute.\n labels : DataFrame\n DataFrame containing the `id` attribute that corresponds to the `id` in\n `features`.\n\n Returns\n -------\n Tuple[DataFrame, DataFrame]\n DataFrames of the features and labels respectively with the `id` column\n in `labels` dropped making it more amenable to pass into classifiers for\n training.\n " aligned = features.merge(labels, on='id', validate='one_to_one') labels = aligned['status_group'] features = aligned.drop(columns=['status_group']) return (features, labels)
def _align_features_and_labels(features: DataFrame, labels: DataFrame) -> Tuple[(DataFrame, DataFrame)]: "Align the `feature`s and `labels` DataFrames so they're both in the same order\n removing the need to check the `id` columns in each.\n\n Parameters\n ----------\n features : DataFrame\n DataFrame containing the `id` attribute.\n labels : DataFrame\n DataFrame containing the `id` attribute that corresponds to the `id` in\n `features`.\n\n Returns\n -------\n Tuple[DataFrame, DataFrame]\n DataFrames of the features and labels respectively with the `id` column\n in `labels` dropped making it more amenable to pass into classifiers for\n training.\n " aligned = features.merge(labels, on='id', validate='one_to_one') labels = aligned['status_group'] features = aligned.drop(columns=['status_group']) return (features, labels)<|docstring|>Align the `feature`s and `labels` DataFrames so they're both in the same order removing the need to check the `id` columns in each. Parameters ---------- features : DataFrame DataFrame containing the `id` attribute. labels : DataFrame DataFrame containing the `id` attribute that corresponds to the `id` in `features`. Returns ------- Tuple[DataFrame, DataFrame] DataFrames of the features and labels respectively with the `id` column in `labels` dropped making it more amenable to pass into classifiers for training.<|endoftext|>
e9e0a5b65a0eec28e67d45df31da0b9bedb6f2e0f075b87e61d35d301d806e82
def _split(features: DataFrame, labels: DataFrame, random_state: int=42) -> Tuple[(DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame)]:
    """Deterministic random 80/10/10 train/val/test split of the dataset,
    stratified by the labels.

    Parameters
    ----------
    features : DataFrame
        Feature rows to split.
    labels : DataFrame
        Labels aligned row-for-row with ``features``; also used to stratify
        so each split keeps the class proportions.
    random_state : int
        Seed for both splits so the partition is reproducible (default 42).

    Returns
    -------
    Tuple[DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame]
        Six objects: the train, validation and test features followed by the
        train, validation and test labels.
    """
    # First carve off 20% as a holdout, then split that holdout in half into
    # validation and test, re-stratifying to keep the label distribution.
    (train_features, test_features, train_labels, test_labels) = train_test_split(features, labels, test_size=0.2, random_state=random_state, stratify=labels)
    (val_features, test_features, val_labels, test_labels) = train_test_split(test_features, test_labels, test_size=0.5, random_state=random_state, stratify=test_labels)
    return (train_features, val_features, test_features, train_labels, val_labels, test_labels)
Deterministic random 80/10/10 train/val/test split of the dataset, stratified by the labels. Parameters ---------- features : DataFrame labels : DataFrame Returns ------- Tuple[DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame] Six objects: the train, validation and test features followed by the train, validation and test labels.
src/data/dataset.py
_split
amritpurshotam/mlops-example
0
python
def _split(features: DataFrame, labels: DataFrame, random_state: int=42) -> Tuple[(DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame)]: 'Deterministic random 80/10/10 train/val/test split of the dataset stratified by\n the labels.\n\n Parameters\n ----------\n features : DataFrame\n\n labels : DataFrame\n\n Returns\n -------\n Tuple[DataFrame, DataFrame, DataFrame, DataFrame]\n A tuple of four DataFrames corresponding to the training features,\n testing features, training labels, and testing labels respectively.\n ' (train_features, test_features, train_labels, test_labels) = train_test_split(features, labels, test_size=0.2, random_state=random_state, stratify=labels) (val_features, test_features, val_labels, test_labels) = train_test_split(test_features, test_labels, test_size=0.5, random_state=random_state, stratify=test_labels) return (train_features, val_features, test_features, train_labels, val_labels, test_labels)
def _split(features: DataFrame, labels: DataFrame, random_state: int=42) -> Tuple[(DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame)]: 'Deterministic random 80/10/10 train/val/test split of the dataset stratified by\n the labels.\n\n Parameters\n ----------\n features : DataFrame\n\n labels : DataFrame\n\n Returns\n -------\n Tuple[DataFrame, DataFrame, DataFrame, DataFrame]\n A tuple of four DataFrames corresponding to the training features,\n testing features, training labels, and testing labels respectively.\n ' (train_features, test_features, train_labels, test_labels) = train_test_split(features, labels, test_size=0.2, random_state=random_state, stratify=labels) (val_features, test_features, val_labels, test_labels) = train_test_split(test_features, test_labels, test_size=0.5, random_state=random_state, stratify=test_labels) return (train_features, val_features, test_features, train_labels, val_labels, test_labels)<|docstring|>Deterministic random 80/10/10 train/val/test split of the dataset stratified by the labels. Parameters ---------- features : DataFrame labels : DataFrame Returns ------- Tuple[DataFrame, DataFrame, DataFrame, DataFrame] A tuple of four DataFrames corresponding to the training features, testing features, training labels, and testing labels respectively.<|endoftext|>
546ee13fed348901d420beb9db19ca2e0ab84f411543c1c43de567a4606c493c
def load_dataset(data_dir: str) -> Tuple[(DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame)]:
    """Read in the water pump dataset and return a stratified
    train/validation/test split.

    Parameters
    ----------
    data_dir : str
        Path of the directory where `water_pump_features.csv` and
        `water_pump_labels.csv` can be found.

    Returns
    -------
    Tuple[DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame]
        Six objects: the train, validation and test features followed by the
        train, validation and test labels.
        NOTE(review): the label elements may be pandas ``Series`` after
        alignment selects the ``status_group`` column — confirm.
    """
    (features, labels) = _read_in_raw_data(data_dir)
    (features, labels) = _align_features_and_labels(features, labels)
    return _split(features, labels)
Read in the water pump dataset and split it into the train, validation and test sets. Parameters ---------- data_dir : str Path of the directory where `water_pump_features.csv` and `water_pump_labels.csv` can be found. Returns ------- Tuple[DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame] Six objects: the train, validation and test features followed by the train, validation and test labels.
src/data/dataset.py
load_dataset
amritpurshotam/mlops-example
0
python
def load_dataset(data_dir: str) -> Tuple[(DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame)]: 'Read in the water pump dataset and split into the training and test sets.\n\n Parameters\n ----------\n data_dir : str\n Path of the directory where `water_pump_features.csv` and\n `water_pump_labels.csv` can be found.\n\n Returns\n -------\n Tuple[DataFrame, DataFrame, DataFrame, DataFrame]\n A tuple of four DataFrames corresponding to the training features,\n testing features, training labels, and testing labels respectively.\n ' (features, labels) = _read_in_raw_data(data_dir) (features, labels) = _align_features_and_labels(features, labels) return _split(features, labels)
def load_dataset(data_dir: str) -> Tuple[(DataFrame, DataFrame, DataFrame, DataFrame, DataFrame, DataFrame)]: 'Read in the water pump dataset and split into the training and test sets.\n\n Parameters\n ----------\n data_dir : str\n Path of the directory where `water_pump_features.csv` and\n `water_pump_labels.csv` can be found.\n\n Returns\n -------\n Tuple[DataFrame, DataFrame, DataFrame, DataFrame]\n A tuple of four DataFrames corresponding to the training features,\n testing features, training labels, and testing labels respectively.\n ' (features, labels) = _read_in_raw_data(data_dir) (features, labels) = _align_features_and_labels(features, labels) return _split(features, labels)<|docstring|>Read in the water pump dataset and split into the training and test sets. Parameters ---------- data_dir : str Path of the directory where `water_pump_features.csv` and `water_pump_labels.csv` can be found. Returns ------- Tuple[DataFrame, DataFrame, DataFrame, DataFrame] A tuple of four DataFrames corresponding to the training features, testing features, training labels, and testing labels respectively.<|endoftext|>
4da268fdbe9c63b87734d4510b4787fee8435875c315ea31520ad4b9cfd966de
def __init__(self, options): 'Constructor\n\n Args -\n options - The result of OptionParser which contains, as attributes, all the options for the running program.\n ' self.options = options
Constructor Args - options - The result of OptionParser which contains, as attributes, all the options for the running program.
testify/test_reporter.py
__init__
osarood/Testify
1
python
def __init__(self, options): 'Constructor\n\n Args -\n options - The result of OptionParser which contains, as attributes, all the options for the running program.\n ' self.options = options
def __init__(self, options): 'Constructor\n\n Args -\n options - The result of OptionParser which contains, as attributes, all the options for the running program.\n ' self.options = options<|docstring|>Constructor Args - options - The result of OptionParser which contains, as attributes, all the options for the running program.<|endoftext|>
27faa50a741dca4491aaa5fa883d9d9565c5a79135dfa8df1ee00976c6db1385
def test_counts(self, test_case_count, test_method_count): 'Called after discovery finishes. May not be called by all test runners, e.g. TestRunnerClient.' pass
Called after discovery finishes. May not be called by all test runners, e.g. TestRunnerClient.
testify/test_reporter.py
test_counts
osarood/Testify
1
python
def test_counts(self, test_case_count, test_method_count): pass
def test_counts(self, test_case_count, test_method_count): pass<|docstring|>Called after discovery finishes. May not be called by all test runners, e.g. TestRunnerClient.<|endoftext|>
60e39a6b259a988906f68b1de0aff06ef8860a253286e7df625fb31ae9390159
def test_start(self, result): 'Called when a test method is being run. Gets passed a TestResult dict which should not be complete.' pass
Called when a test method is being run. Gets passed a TestResult dict which should not be complete.
testify/test_reporter.py
test_start
osarood/Testify
1
python
def test_start(self, result): pass
def test_start(self, result): pass<|docstring|>Called when a test method is being run. Gets passed a TestResult dict which should not be complete.<|endoftext|>
1e86bd749a2113cb2f9f129271295bd92158abebc81a814f6124a04b1e6842fa
def test_complete(self, result): 'Called when a test method is complete. result is a TestResult dict which should be complete.' pass
Called when a test method is complete. result is a TestResult dict which should be complete.
testify/test_reporter.py
test_complete
osarood/Testify
1
python
def test_complete(self, result): pass
def test_complete(self, result): pass<|docstring|>Called when a test method is complete. result is a TestResult dict which should be complete.<|endoftext|>
598d94cede61f31b4983d1ac402a3dfee04530e3a72f6edeca10169433940ada
def test_discovery_failure(self, exc): 'Called when there was a failure during test discovery. exc is the exception object generated during the error.'
Called when there was a failure during test discovery. exc is the exception object generated during the error.
testify/test_reporter.py
test_discovery_failure
osarood/Testify
1
python
def test_discovery_failure(self, exc):
def test_discovery_failure(self, exc): <|docstring|>Called when there was a failure during test discovery. exc is the exception object generated during the error.<|endoftext|>
ef580bcb794087e4ef1d68c1ead6f4730a47d083e2796d829155d4dbc075f375
def class_setup_start(self, result):
    """No-op hook fired when a class_setup (or the first half of a
    class_setup_teardown) starts.
    """
Called when a class_setup or the first half of a class_setup_teardown starts
testify/test_reporter.py
class_setup_start
osarood/Testify
1
python
def class_setup_start(self, result): pass
def class_setup_start(self, result): pass<|docstring|>Called when a class_setup or the first half of a class_setup_teardown starts<|endoftext|>
be9757e522dad5bc0d97ab659b097f294a945c307631e4cc60f7d7cec2dfd3a1
def class_setup_complete(self, result):
    """No-op hook fired when a class_setup (or the first half of a
    class_setup_teardown) finishes.
    """
Called when a class_setup or the first half of a class_setup_teardown finishes
testify/test_reporter.py
class_setup_complete
osarood/Testify
1
python
def class_setup_complete(self, result): pass
def class_setup_complete(self, result): pass<|docstring|>Called when a class_setup or the first half of a class_setup_teardown finishes<|endoftext|>
b13993cbc2dac87e4a77f8f46bb4904b05ad5c96affe7891df7b71276f1b23fb
def class_teardown_start(self, result):
    """No-op hook fired when a class_teardown (or the second half of a
    class_setup_teardown) starts.
    """
Called when a class_teardown or the second half of a class_setup_teardown starts
testify/test_reporter.py
class_teardown_start
osarood/Testify
1
python
def class_teardown_start(self, result): pass
def class_teardown_start(self, result): pass<|docstring|>Called when a class_teardown or the second half of a class_setup_teardown starts<|endoftext|>
b8769243139eb21f367fcf56641ffa963348c6ee59739328c945126a93dbbcdd
def class_teardown_complete(self, result):
    """No-op hook fired when a class_teardown (or the second half of a
    class_setup_teardown) finishes.
    """
Called when a class_teardown or the second half of a class_setup_teardown finishes
testify/test_reporter.py
class_teardown_complete
osarood/Testify
1
python
def class_teardown_complete(self, result): pass
def class_teardown_complete(self, result): pass<|docstring|>Called when a class_teardown or the second half of a class_setup_teardown finishes<|endoftext|>
405ef5616bc14f45c089fafcb2b260cd0476646f85dfdfb1c91da92a4180c8cb
def test_case_start(self, result): 'Called when a test case is being run. Gets passed the special "run" method as a TestResult.' pass
Called when a test case is being run. Gets passed the special "run" method as a TestResult.
testify/test_reporter.py
test_case_start
osarood/Testify
1
python
def test_case_start(self, result): pass
def test_case_start(self, result): pass<|docstring|>Called when a test case is being run. Gets passed the special "run" method as a TestResult.<|endoftext|>
f1f622bc51f7ca932fff4ae5cf67c5ca3c4c4421d489740345cea21ea2072c9d
def test_case_complete(self, result): 'Called when a test case and all of its fixtures have been run.' pass
Called when a test case and all of its fixtures have been run.
testify/test_reporter.py
test_case_complete
osarood/Testify
1
python
def test_case_complete(self, result): pass
def test_case_complete(self, result): pass<|docstring|>Called when a test case and all of its fixtures have been run.<|endoftext|>
45fe5e328eb28c269bd948f1123006c9409c3bcb722d0a6e188d28dff32a575a
def report(self):
    """Summarize results at the end of the test run.

    Returns
    -------
    bool
        Whether this reporter considers the run successful. The base
        implementation always reports success.
    """
    return True
Called at the end of the test run to report results Should return a bool to indicate if the reporter thinks the test run was successful
testify/test_reporter.py
report
osarood/Testify
1
python
def report(self): 'Called at the end of the test run to report results\n\n Should return a bool to indicate if the reporter thinks the test run was successful\n ' return True
def report(self): 'Called at the end of the test run to report results\n\n Should return a bool to indicate if the reporter thinks the test run was successful\n ' return True<|docstring|>Called at the end of the test run to report results Should return a bool to indicate if the reporter thinks the test run was successful<|endoftext|>
6986547924c4b504463a555bf5f4f919765ba60da31b8ed95b5f52da9401448a
def html_fragment(source):
    """Parse an HTML string representing a single element and return that
    element — i.e. the first top-level node of the parsed document.
    """
    parsed = BeautifulSoup(source, 'html.parser')
    return parsed.contents[0]
Parse an HTML string representing a single element, and return that element
chirun/filter.py
html_fragment
sthagen/chirun-ncl-chirun
5
python
def html_fragment(source): '\n \n ' return BeautifulSoup(source, 'html.parser').contents[0]
def html_fragment(source): '\n \n ' return BeautifulSoup(source, 'html.parser').contents[0]<|docstring|>Parse an HTML string representing a single element, and return that element<|endoftext|>
32691050f1c4a0af72bfb7b250b57755c343758a2dae61d86eb039e362bb2fcf
def fix_local_links(soup, item):
    """Rewrite URLs relative to the top level, i.e. those starting with a /,
    to use the course's root URL or into paths relative to the item.
    """
    # Map of tag name -> attributes on that tag which may hold URLs.
    url_attributes = {
        'a': ['href'],
        'img': ['src'],
        'source': ['src'],
        'section': ['data-background', 'data-background-video'],
    }
    for tag_name, attribute_names in url_attributes.items():
        for element in soup.find_all(tag_name):
            for attribute in attribute_names:
                value = element.get(attribute)
                # Only root-relative URLs (leading '/') are rewritten; the
                # leading slash is stripped before building the relative URL.
                if value and value.startswith('/'):
                    element[attribute] = item.course.make_relative_url(item, value[1:])
Rewrite URLs relative to the top level, i.e. those starting with a /, to use the course's root URL or into paths relative to the item.
chirun/filter.py
fix_local_links
sthagen/chirun-ncl-chirun
5
python
def fix_local_links(soup, item): "\n Rewrite URLs relative to the top level, i.e. those starting with a /,\n to use the course's root URL or into paths relative to the item.\n " tags = {'a': ['href'], 'img': ['src'], 'source': ['src'], 'section': ['data-background', 'data-background-video']} for (tag, attrs) in tags.items(): for el in soup.find_all(tag): for attr in attrs: url = el.get(attr) if (url and (url[0] == '/')): el[attr] = item.course.make_relative_url(item, url[1:])
def fix_local_links(soup, item): "\n Rewrite URLs relative to the top level, i.e. those starting with a /,\n to use the course's root URL or into paths relative to the item.\n " tags = {'a': ['href'], 'img': ['src'], 'source': ['src'], 'section': ['data-background', 'data-background-video']} for (tag, attrs) in tags.items(): for el in soup.find_all(tag): for attr in attrs: url = el.get(attr) if (url and (url[0] == '/')): el[attr] = item.course.make_relative_url(item, url[1:])<|docstring|>Rewrite URLs relative to the top level, i.e. those starting with a /, to use the course's root URL or into paths relative to the item.<|endoftext|>