Dataset schema (string columns report min/max lengths; body_hash reports min/max values):

    body                    string   lengths 26 to 98.2k
    body_hash               int64    -9,222,864,604,528,158,000 to 9,221,803,474B
    docstring               string   lengths 1 to 16.8k
    path                    string   lengths 5 to 230
    name                    string   lengths 1 to 96
    repository_name         string   lengths 7 to 89
    lang                    string   1 class (python)
    body_without_docstring  string   lengths 20 to 98.2k
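For orientation, a minimal sketch of how rows with this schema can be inspected; the sample row below is abridged from the first record, and only the column names come from the schema:

import pandas as pd

# One abridged row matching the schema above.
rows = pd.DataFrame([{
    'body': "def main():\n    'Orchestration function for the CLI.'\n    ...",
    'body_hash': 2238173093987113500,
    'docstring': 'Orchestration function for the CLI.',
    'path': 'deutscheflash.py',
    'name': 'main',
    'repository_name': 'n-Holmes/deutscheflash',
    'lang': 'python',
}])

# Reproduce the per-column length statistics reported in the schema:
print(rows['body'].str.len().agg(['min', 'max']))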
body:

def main():
    """Orchestration function for the CLI."""
    args = _parse_args()
    path = pathlib.Path('lists', args.words)
    try:
        words = _load_words(path)
    except IOError:
        print('Exiting.')
        return
    if args.quiz_length is not None:
        if args.quiz_length == 0:
            print('Starting quiz in endless mode. Answer `quit` to end the quiz.')
            correct, answered = _quiz_endless(words)
        elif args.quiz_length > 0:
            print(f'Starting quiz with length {args.quiz_length}...\n')
            correct, answered, _ = _quiz(words, args.quiz_length)
        else:
            raise ValueError(f'Invalid quiz length: {args.quiz_length}.')
        print(f'\nYou successfully answered {correct} out of {answered} questions!')
    elif args.add_words:
        print('Entering word addition mode...')
        _add_words(words)
    elif args.load_words:
        print(f'Importing word file {args.load_words}...')
        added, reps = _import_words(words, args.load_words)
        print(f'{added} words successfully imported. {reps} duplicates skipped.')
    elif args.reset_scores:
        print('Resetting scores')
        words = WordList()
        words.new()
        _import_words(words, path.with_suffix('.csv'))
    _save_and_exit(words, path)

body_hash: 2,238,173,093,987,113,500
docstring: Orchestration function for the CLI.
path: deutscheflash.py
name: main
repository_name: n-Holmes/deutscheflash
lang: python
body:

def _load_words(path):
    """Encapsulates the loading/newfile creation logic."""
    try:
        words = WordList(path)
        print('Words successfully loaded.')
    except FileNotFoundError:
        print('No word list found with given name.')
        newfile = force_console_input(
            'Would you like to create a new wordlist with the specified name? Y/N: ',
            options=['y', 'yes', 'n', 'no'])
        if newfile[0] == 'y':
            words = WordList()
            language = force_console_input(
                query='Which language should be used?\n',
                onfail='Language not recognised, please try again or check genders.json\n',
                options=get_languages())
            words.new(language=language)
            print(f'New WordList for language {language} successfully created.')
        else:
            raise IOError
    return words

body_hash: -75,499,848,688,464,940
docstring: Encapsulates the loading/newfile creation logic.
path: deutscheflash.py
name: _load_words
repository_name: n-Holmes/deutscheflash
lang: python
body:

def _quiz(wordlist, quiz_length):
    """Runs a command line quiz of the specified length."""
    pd.options.mode.chained_assignment = None
    answered, correct = 0, 0
    for word, gender in wordlist.get_words(quiz_length):
        guess = input(f'What is the gender of {word}? ').lower()
        if guess in ('quit', 'exit'):
            break
        answered += 1
        try:
            guess = wordlist.format_gender(guess)
        except ValueError:
            print('Unrecognised guess, skipping.\n')
            continue
        accurate = gender == guess
        wordlist.update_weight(word, accurate)
        if accurate:
            print('Correct!\n')
            correct += 1
        else:
            print(f'Incorrect! The correct gender is {gender}.\n')
    return correct, answered, answered == quiz_length

body_hash: 4,043,416,046,314,861,000
docstring: Runs a command line quiz of the specified length.
path: deutscheflash.py
name: _quiz
repository_name: n-Holmes/deutscheflash
lang: python
body:

def _quiz_endless(wordlist):
    """Runs quizzes in batches of 20 until quit or exit is answered."""
    correct, answered = 0, 0
    finished = False
    while not finished:
        results = _quiz(wordlist, 20)
        correct += results[0]
        answered += results[1]
        finished = not results[2]
    return correct, answered

body_hash: -2,987,331,019,402,010,000
docstring: Runs quizzes in batches of 20 until quit or exit is answered.
path: deutscheflash.py
name: _quiz_endless
repository_name: n-Holmes/deutscheflash
lang: python
body:

def _add_words(wordlist):
    """CLI for adding words individually to the wordlist."""
    print('Type a word with gender eg `m Mann` or `quit` when finished.')
    while True:
        input_str = input()
        if input_str in ('quit', 'exit'):
            print('Exiting word addition mode...')
            break
        try:
            gender, word = input_str.split()
            wordlist.add(gender, word)
        except ValueError as e:
            print(e)

body_hash: 3,481,984,767,297,867,000
docstring: CLI for adding words individually to the wordlist.
path: deutscheflash.py
name: _add_words
repository_name: n-Holmes/deutscheflash
lang: python
body:

def _import_words(wordlist, import_path):
    """Loads words from a csv file at import_path into `wordlist`."""
    new_words = pd.read_csv(import_path)
    words_added = 0
    repetitions = 0
    for _, row in new_words.iterrows():
        try:
            wordlist.add(row.Gender, row.Word)
            words_added += 1
        except ValueError:
            repetitions += 1
    return words_added, repetitions

body_hash: -6,439,961,186,866,336,000
docstring: Loads words from a csv file at import_path into `wordlist`.
path: deutscheflash.py
name: _import_words
repository_name: n-Holmes/deutscheflash
lang: python
body:

def load(self, path: pathlib.Path):
    """Load stored data."""
    try:
        self.words = pd.read_csv(path.with_suffix('.csv'))
        with path.with_suffix('.json').open() as f:
            self.structure = json.loads(f.read())
        self.words.set_index(self.structure['index'], inplace=True)
    except FileNotFoundError as exception:
        raise FileNotFoundError('No word list found with the specified name.') from exception

body_hash: -6,622,118,397,276,711,000
docstring: Load stored data.
path: deutscheflash.py
name: load
repository_name: n-Holmes/deutscheflash
lang: python
body:

def new(self, language: str = 'german', score_inertia: int = 2):
    """Create a new wordlist.

    Args:
        language (str): The name of a language in the GENDERS dictionary.
        score_inertia (int): Determines how resistant scores are to change.
            Must be a positive integer. Higher values will require more consecutive
            correct answers to reduce the frequency of a specific word.
    """
    gender_options = get_languages()
    try:
        genders = gender_options[language]
    except KeyError as exception:
        raise ValueError(f'Unknown language: {language}') from exception
    columns = ['Word', 'Gender', 'Correct', 'Wrong', 'Weight']
    self.structure = {
        'language': language,
        'genders': genders,
        'aliases': self._get_aliases(genders),
        'default guesses': score_inertia,
        'index': 'Word',
        'column count': 3,
    }
    self.words = pd.DataFrame(columns=columns)
    self.words.set_index(self.structure['index'], inplace=True)

body_hash: 1,183,356,809,231,257,900
docstring: Create a new wordlist. Args: language (str): The name of a language in the GENDERS dictionary. score_inertia (int): Determines how resistant scores are to change. Must be a positive integer. Higher values will require more consecutive correct answers to reduce the frequency of a specific word.
path: deutscheflash.py
name: new
repository_name: n-Holmes/deutscheflash
lang: python
body:

def save(self, path: pathlib.Path):
    """Saves words to a .csv file and structure to a .json."""
    self.words.to_csv(path.with_suffix('.csv'))
    with path.with_suffix('.json').open(mode='w') as f:
        f.write(json.dumps(self.structure))

body_hash: -4,607,551,903,324,488,700
docstring: Saves words to a .csv file and structure to a .json.
path: deutscheflash.py
name: save
repository_name: n-Holmes/deutscheflash
lang: python
body:

def format_gender(self, gender_string: str):
    """Attempts to find a matching gender for gender_string.

    Args:
        gender_string (str): A gender for the word list or an alias of a gender.

    Returns:
        The associated gender.

    Raises:
        ValueError: `gender_string` does not match any gender or alias.
    """
    gender_string = gender_string.lower()
    if gender_string in self.structure['genders']:
        return gender_string
    if gender_string in self.structure['aliases']:
        return self.structure['aliases'][gender_string]
    raise ValueError(f'Unknown gender: {gender_string}')

body_hash: -3,906,294,576,666,651,000
docstring: Attempts to find a matching gender for gender_string. Args: gender_string (str): A gender for the word list or an alias of a gender. Returns: The associated gender. Raises: ValueError: `gender_string` does not match any gender or alias.
path: deutscheflash.py
name: format_gender
repository_name: n-Holmes/deutscheflash
lang: python
body:

def add(self, gender: str, word: str):
    """Add a new word to the list.

    Args:
        gender (str): The gender of the word being added.
        word (str): The word to add.

    Raises:
        ValueError: `gender` does not match the current wordlist or the word is
            already present in the list.
    """
    gender = self.format_gender(gender)
    word = word.capitalize()
    if gender not in self.structure['genders']:
        raise ValueError(f'{gender} is not a valid gender for the current wordlist.')
    if word in self.words.index:
        raise ValueError(f'{word} is already included.')
    n_genders = len(self.structure['genders'])
    row = [
        gender,
        self.structure['default guesses'],
        self.structure['default guesses'] * (n_genders - 1),
        (n_genders - 1) / n_genders,
    ]
    self.words.loc[word] = row

body_hash: 8,154,714,252,581,393,000
docstring: Add a new word to the list. Args: gender (str): The gender of the word being added. word (str): The word to add. Raises: ValueError: `gender` does not match the current wordlist or the word is already present in the list.
path: deutscheflash.py
name: add
repository_name: n-Holmes/deutscheflash
lang: python
body:

def get_words(self, n: int, distribution: str = 'weighted'):
    """Selects and returns a sample of words and their genders.

    Args:
        n (int): The number of results wanted.
        distribution (str): The sampling method to use. Either `uniform` or
            `weighted`.

    Yields:
        A tuple of strings in the format (word, gender).
    """
    if distribution == 'uniform':
        sample = self.words.sample(n=n)
    elif distribution == 'weighted':
        sample = self.words.sample(n=n, weights='Weight')
    else:
        raise ValueError(f'Unknown value for distribution: {distribution}')
    for row in sample.iterrows():
        yield row[0], row[1].Gender

body_hash: 3,524,817,988,150,667,000
docstring: Selects and returns a sample of words and their genders. Args: n (int): The number of results wanted. distribution (str): The sampling method to use. Either `uniform` or `weighted`. Yields: A tuple of strings in the format (word, gender).
path: deutscheflash.py
name: get_words
repository_name: n-Holmes/deutscheflash
lang: python
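As a sketch of the weighted sampling this method relies on (pandas' DataFrame.sample with a weights column; the wordlist contents here are hypothetical):

import pandas as pd

words = pd.DataFrame(
    {'Gender': ['masculine', 'feminine', 'neuter'],
     'Weight': [0.9, 0.5, 0.1]},
    index=['Mann', 'Frau', 'Kind'])
# Rows with a higher Weight are drawn proportionally more often.
for word, data in words.sample(n=2, weights='Weight').iterrows():
    print(word, data.Gender)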
body:

def update_weight(self, word, guess):
    """Update the weighting on a word based on the most recent guess.

    Args:
        word (str): The word to update. Should be in the index of self.words.
        guess (bool): Whether the guess was correct or not.
    """
    row = self.words.loc[word]
    if guess:
        row.Correct += 1
    else:
        row.Wrong += 1
    n_genders = len(self.structure['genders'])
    total = row.Correct + row.Wrong
    if not total % n_genders:
        # Periodically retire old results so recent answers keep moving the weight.
        if row.Correct:
            wrongs_to_throw = min(row.Wrong - 1, n_genders - 1)
            row.Wrong -= wrongs_to_throw
            row.Correct -= n_genders - wrongs_to_throw
        else:
            row.Wrong -= n_genders
    row.Weight = row.Wrong / (row.Correct + row.Wrong)
    self.words.loc[word] = row

body_hash: -5,768,436,137,946,433,000
docstring: Update the weighting on a word based on the most recent guess. Args: word (str): The word to update. Should be in the index of self.words. guess (bool): Whether the guess was correct or not.
path: deutscheflash.py
name: update_weight
repository_name: n-Holmes/deutscheflash
lang: python
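As a worked example of the two methods above, assuming a three-gender list created with the default score_inertia=2: add() seeds a word with Correct=2, Wrong=4, Weight=4/6 ≈ 0.67. A single correct guess then gives Correct=3, Wrong=4, and (since 7 is not a multiple of 3) Weight=4/7 ≈ 0.57, so frequently missed words keep a higher sampling weight in get_words().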
body:

@staticmethod
def _get_aliases(genders: dict):
    """Create a dictionary of aliases and the genders they refer to.

    May have issues if multiple genders have the same article or first letter.
    """
    aliases = {}
    for gender, article in genders.items():
        aliases[gender[0]] = gender
        aliases[article] = gender
    return aliases

body_hash: -1,152,857,438,026,829,700
docstring: Create a dictionary of aliases and the genders they refer to. May have issues if multiple genders have the same article or first letter.
path: deutscheflash.py
name: _get_aliases
repository_name: n-Holmes/deutscheflash
lang: python
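For illustration, with a hypothetical genders mapping of the shape this method expects (the real values live in genders.json):

genders = {'masculine': 'der', 'feminine': 'die', 'neuter': 'das'}
# _get_aliases(genders) maps both first letters and articles back to
# their gender:
# {'m': 'masculine', 'der': 'masculine',
#  'f': 'feminine',  'die': 'feminine',
#  'n': 'neuter',    'das': 'neuter'}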
body:

def check_files(test_dir, expected):
    """
    Walk test_dir.
    Check that all dirs are readable.
    Check that all files are:
     * non-special,
     * readable,
     * have a posix path that ends with one of the expected tuple paths.
    """
    result = []
    locs = []
    if filetype.is_file(test_dir):
        test_dir = fileutils.parent_directory(test_dir)
    test_dir_path = fileutils.as_posixpath(test_dir)
    for top, _, files in os.walk(test_dir):
        for f in files:
            location = os.path.join(top, f)
            locs.append(location)
            path = fileutils.as_posixpath(location)
            path = path.replace(test_dir_path, '').strip('/')
            result.append(path)
    assert sorted(expected) == sorted(result)
    for location in locs:
        assert filetype.is_file(location)
        assert not filetype.is_special(location)
        assert filetype.is_readable(location)

body_hash: -2,608,846,497,619,735,000
docstring: Walk test_dir. Check that all dirs are readable. Check that all files are: * non-special, * readable, * have a posix path that ends with one of the expected tuple paths.
path: tests/extractcode/extractcode_assert_utils.py
name: check_files
repository_name: adityaviki/scancode-toolk
lang: python
body:

def check_no_error(result):
    """
    Check that every ExtractEvent in the `result` list has no error or warning.
    """
    for r in result:
        assert not r.errors
        assert not r.warnings

body_hash: 4,965,643,873,960,140,000
docstring: Check that every ExtractEvent in the `result` list has no error or warning.
path: tests/extractcode/extractcode_assert_utils.py
name: check_no_error
repository_name: adityaviki/scancode-toolk
lang: python
body:

def is_posixpath(location):
    """
    Return True if the `location` path is likely a POSIX-like path using POSIX path
    separators (slash or "/") or has no path separator.

    Return False if the `location` path is likely a Windows-like path using backslash
    as path separators (e.g. "\\").
    """
    has_slashes = '/' in location
    has_backslashes = '\\' in location
    if location:
        drive, _ = ntpath.splitdrive(location)
        if drive:
            return False
    is_posix = True
    if has_backslashes and not has_slashes:
        is_posix = False
    return is_posix

body_hash: 8,070,831,654,675,916,000
docstring: Return True if the `location` path is likely a POSIX-like path using POSIX path separators (slash or "/") or has no path separator. Return False if the `location` path is likely a Windows-like path using backslash as path separators (e.g. "\\").
path: tests/extractcode/extractcode_assert_utils.py
name: is_posixpath
repository_name: adityaviki/scancode-toolk
lang: python
body:

def to_posix(path):
    """
    Return a path using the posix path separator given a path that may contain posix
    or windows separators, converting \\ to /. NB: this path will still be valid in
    the windows explorer (except as a UNC or share name). It will be a valid path
    everywhere in Python. It will not be valid for windows command line operations.
    """
    is_unicode = isinstance(path, compat.unicode)
    ntpath_sep = (is_unicode and u'\\') or '\\'
    posixpath_sep = (is_unicode and u'/') or '/'
    if is_posixpath(path):
        if on_windows:
            return path.replace(ntpath_sep, posixpath_sep)
        else:
            return path
    return path.replace(ntpath_sep, posixpath_sep)

body_hash: 7,799,554,777,917,881,000
docstring: Return a path using the posix path separator given a path that may contain posix or windows separators, converting \\ to /. NB: this path will still be valid in the windows explorer (except as a UNC or share name). It will be a valid path everywhere in Python. It will not be valid for windows command line operations.
path: tests/extractcode/extractcode_assert_utils.py
name: to_posix
repository_name: adityaviki/scancode-toolk
lang: python
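Illustrative behavior of the two path helpers above (a sketch; the last result assumes a POSIX host):

is_posixpath('a/b/c')          # True: only slashes, no drive
is_posixpath('C:\\tmp\\test')  # False: a Windows drive is present
to_posix('C:\\tmp\\test')      # 'C:/tmp/test'
to_posix('a/b/c')              # returned unchanged on a POSIX host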
body:

def assertRaisesInstance(self, excInstance, callableObj, *args, **kwargs):
    """
    This assertion accepts an instance instead of a class for refined
    exception testing.
    """
    kwargs = kwargs or {}
    excClass = excInstance.__class__
    try:
        callableObj(*args, **kwargs)
    except excClass as e:
        assert str(e).startswith(str(excInstance))
    else:
        if hasattr(excClass, '__name__'):
            excName = excClass.__name__
        else:
            excName = str(excClass)
        raise self.failureException('%s not raised' % excName)

body_hash: -8,746,952,931,495,039,000
docstring: This assertion accepts an instance instead of a class for refined exception testing.
path: tests/extractcode/extractcode_assert_utils.py
name: assertRaisesInstance
repository_name: adityaviki/scancode-toolk
lang: python
body:

def check_extract(self, test_function, test_file, expected, expected_warnings=None, check_all=False):
    """
    Run the extraction `test_function` on `test_file` checking that a map of
    expected paths --> size exist in the extracted target directory.
    Does not test the presence of all files unless `check_all` is True.
    """
    from extractcode import archive
    test_file = self.get_test_loc(test_file)
    test_dir = self.get_temp_dir()
    warnings = test_function(test_file, test_dir)
    if expected_warnings is not None:
        assert expected_warnings == warnings
    if check_all:
        len_test_dir = len(test_dir)
        extracted = {path[len_test_dir:]: filetype.get_size(path)
                     for path in fileutils.resource_iter(test_dir, with_dirs=False)}
        expected = {os.path.join(test_dir, exp_path): exp_size
                    for exp_path, exp_size in expected.items()}
        assert sorted(expected.items()) == sorted(extracted.items())
    else:
        for exp_path, exp_size in expected.items():
            exp_loc = os.path.join(test_dir, exp_path)
            msg = ('When extracting: %(test_file)s\n'
                   ' With function: %(test_function)r\n'
                   ' Failed to find expected path: %(exp_loc)s')
            assert os.path.exists(exp_loc), msg % locals()
            if exp_size is not None:
                res_size = os.stat(exp_loc).st_size
                msg = ('When extracting: %(test_file)s\n'
                       ' With function: %(test_function)r\n'
                       ' Failed to assert the correct size %(exp_size)d\n'
                       ' Got instead: %(res_size)d\n'
                       ' for expected path: %(exp_loc)s')
                assert exp_size == res_size, msg % locals()

body_hash: -4,760,245,005,146,296,000
docstring: Run the extraction `test_function` on `test_file` checking that a map of expected paths --> size exist in the extracted target directory. Does not test the presence of all files unless `check_all` is True.
path: tests/extractcode/extractcode_assert_utils.py
name: check_extract
repository_name: adityaviki/scancode-toolk
lang: python
body:

def helperUpdate(self, test_name, hpss_path, zstash_path=ZSTASH_PATH):
    """
    Test `zstash update`.
    """
    self.hpss_path = hpss_path
    use_hpss = self.setupDirs(test_name)
    self.create(use_hpss, zstash_path)
    print_starred('Running update on the newly created directory, nothing should happen')
    self.assertWorkspace()
    os.chdir(self.test_dir)
    cmd = '{}zstash update -v --hpss={}'.format(zstash_path, self.hpss_path)
    output, err = run_cmd(cmd)
    os.chdir(TOP_LEVEL)
    self.check_strings(cmd, output + err, ['Nothing to update'], ['ERROR'])

body_hash: 4,622,368,254,584,358,000
docstring: Test `zstash update`.
path: tests/test_update.py
name: helperUpdate
repository_name: E3SM-Project/zstash
lang: python
body:

def helperUpdateDryRun(self, test_name, hpss_path, zstash_path=ZSTASH_PATH):
    """
    Test `zstash update --dry-run`.
    """
    self.hpss_path = hpss_path
    use_hpss = self.setupDirs(test_name)
    self.create(use_hpss, zstash_path)
    print_starred('Testing update with an actual change')
    self.assertWorkspace()
    if not os.path.exists('{}/dir2'.format(self.test_dir)):
        os.mkdir('{}/dir2'.format(self.test_dir))
    write_file('{}/dir2/file2.txt'.format(self.test_dir), 'file2 stuff')
    write_file('{}/dir/file1.txt'.format(self.test_dir), 'file1 stuff with changes')
    os.chdir(self.test_dir)
    cmd = '{}zstash update --dry-run --hpss={}'.format(zstash_path, self.hpss_path)
    output, err = run_cmd(cmd)
    os.chdir(TOP_LEVEL)
    expected_present = ['List of files to be updated', 'dir/file1.txt', 'dir2/file2.txt']
    expected_absent = ['ERROR', 'file0', 'file_empty', 'empty_dir', 'INFO: Creating new tar archive']
    self.check_strings(cmd, output + err, expected_present, expected_absent)

body_hash: -5,544,989,246,331,524,000
docstring: Test `zstash update --dry-run`.
path: tests/test_update.py
name: helperUpdateDryRun
repository_name: E3SM-Project/zstash
lang: python
body:

def helperUpdateKeep(self, test_name, hpss_path, zstash_path=ZSTASH_PATH):
    """
    Test `zstash update --keep`.
    """
    self.hpss_path = hpss_path
    use_hpss = self.setupDirs(test_name)
    self.create(use_hpss, zstash_path)
    self.add_files(use_hpss, zstash_path, keep=True)
    files = os.listdir('{}/{}'.format(self.test_dir, self.cache))
    if use_hpss:
        expected_files = ['index.db', '000003.tar', '000004.tar', '000001.tar', '000002.tar']
    else:
        expected_files = ['index.db', '000003.tar', '000004.tar', '000000.tar', '000001.tar', '000002.tar']
    if not compare(files, expected_files):
        error_message = 'The zstash cache does not contain expected files.\nIt has: {}'.format(files)
        self.stop(error_message)
    os.chdir(TOP_LEVEL)

body_hash: -7,607,693,718,521,320,000
docstring: Test `zstash update --keep`.
path: tests/test_update.py
name: helperUpdateKeep
repository_name: E3SM-Project/zstash
lang: python
body:

def helperUpdateCache(self, test_name, hpss_path, zstash_path=ZSTASH_PATH):
    """
    Test `zstash update --cache`.
    """
    self.hpss_path = hpss_path
    self.cache = 'my_cache'
    use_hpss = self.setupDirs(test_name)
    self.create(use_hpss, zstash_path, cache=self.cache)
    self.add_files(use_hpss, zstash_path, cache=self.cache)
    files = os.listdir('{}/{}'.format(self.test_dir, self.cache))
    if use_hpss:
        expected_files = ['index.db']
    else:
        expected_files = ['index.db', '000003.tar', '000004.tar', '000000.tar', '000001.tar', '000002.tar']
    if not compare(files, expected_files):
        error_message = 'The zstash cache does not contain expected files.\nIt has: {}'.format(files)
        self.stop(error_message)

body_hash: 3,580,585,394,761,558,500
docstring: Test `zstash update --cache`.
path: tests/test_update.py
name: helperUpdateCache
repository_name: E3SM-Project/zstash
lang: python
body:

def run(self, video_path=0, start_frame=0, conf_thresh=0.6):
    """Runs the test on a video (or webcam).

    # Arguments
        video_path: A file path to a video to be tested on. Can also be a number,
            in which case the webcam with the same number (i.e. 0) is
            used instead.

        start_frame: The number of the first frame of the video to be processed
            by the network.

        conf_thresh: Threshold of confidence. Any boxes with lower confidence
            are not visualized.
    """
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open video file or webcam. If you're trying to "
                      'open a webcam, make sure your video_path is an integer!')
    # Compute the aspect ratio of the input video.
    vidw = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
    vidh = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
    vidar = vidw / vidh
    if start_frame > 0:
        # Seek to the requested start frame (a frame index, per the docstring).
        vid.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    accum_time = 0
    curr_fps = 0
    fps = 'FPS: ??'
    prev_time = timer()
    while True:
        retval, orig_image = vid.read()
        if not retval:
            print('Done!')
            return
        im_size = (self.input_shape[0], self.input_shape[1])
        resized = cv2.resize(orig_image, im_size)
        rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
        to_draw = cv2.resize(resized, (int(self.input_shape[0] * vidar), self.input_shape[1]))
        # Run the SSD model on the frame.
        inputs = [image.img_to_array(rgb)]
        tmp_inp = np.array(inputs)
        x = preprocess_input(tmp_inp)
        y = self.model.predict(x)
        results = self.bbox_util.detection_out(y)
        if len(results) > 0 and len(results[0]) > 0:
            # Each detection row is [label, confidence, xmin, ymin, xmax, ymax].
            det_label = results[0][:, 0]
            det_conf = results[0][:, 1]
            det_xmin = results[0][:, 2]
            det_ymin = results[0][:, 3]
            det_xmax = results[0][:, 4]
            det_ymax = results[0][:, 5]
            top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_thresh]
            top_conf = det_conf[top_indices]
            top_label_indices = det_label[top_indices].tolist()
            top_xmin = det_xmin[top_indices]
            top_ymin = det_ymin[top_indices]
            top_xmax = det_xmax[top_indices]
            top_ymax = det_ymax[top_indices]
            for i in range(top_conf.shape[0]):
                xmin = int(round(top_xmin[i] * to_draw.shape[1]))
                ymin = int(round(top_ymin[i] * to_draw.shape[0]))
                xmax = int(round(top_xmax[i] * to_draw.shape[1]))
                ymax = int(round(top_ymax[i] * to_draw.shape[0]))
                class_num = int(top_label_indices[i])
                cv2.rectangle(to_draw, (xmin, ymin), (xmax, ymax),
                              self.class_colors[class_num], 2)
                text = self.class_names[class_num] + ' ' + ('%.2f' % top_conf[i])
                text_top = (xmin, ymin - 10)
                text_bot = (xmin + 80, ymin + 5)
                text_pos = (xmin + 5, ymin)
                cv2.rectangle(to_draw, text_top, text_bot, self.class_colors[class_num], -1)
                cv2.putText(to_draw, text, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 0), 1)
        # Calculate FPS over a sliding one-second window.
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = 'FPS: ' + str(curr_fps)
            curr_fps = 0
        # Draw the FPS counter in the top-left corner.
        cv2.rectangle(to_draw, (0, 0), (50, 17), (255, 255, 255), -1)
        cv2.putText(to_draw, fps, (3, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 0), 1)
        cv2.imshow('SSD result', to_draw)
        cv2.waitKey(10)

body_hash: -1,364,219,550,252,942,600
docstring: Runs the test on a video (or webcam). # Arguments video_path: A file path to a video to be tested on. Can also be a number, in which case the webcam with the same number (i.e. 0) is used instead. start_frame: The number of the first frame of the video to be processed by the network. conf_thresh: Threshold of confidence. Any boxes with lower confidence are not visualized.
path: testing_utils/videotest.py
name: run
repository_name: hanhejia/SSD
lang: python
body:

def navier_stokes_rk(tableau: ButcherTableau,
                     equation: ExplicitNavierStokesODE,
                     time_step: float) -> TimeStepFn:
    """Create a forward Runge-Kutta time-stepper for incompressible Navier-Stokes.

    This function implements the reference method (equations 16-21), rather than
    the fast projection method, from:
    "Fast-Projection Methods for the Incompressible Navier–Stokes Equations"
    Fluids 2020, 5, 222; doi:10.3390/fluids5040222

    Args:
        tableau: Butcher tableau.
        equation: equation to use.
        time_step: overall time-step size.

    Returns:
        Function that advances one time-step forward.
    """
    dt = time_step
    F = tree_math.pytree_to_vector_fun(equation.explicit_terms)
    P = tree_math.pytree_to_vector_fun(equation.pressure_projection)
    a = tableau.a
    b = tableau.b
    num_steps = len(b)

    @tree_math.vector_to_pytree_fun
    def step_fn(u0):
        u = [None] * num_steps
        k = [None] * num_steps
        u[0] = u0
        k[0] = F(u0)
        for i in range(1, num_steps):
            u_star = u0 + dt * sum(a[i - 1][j] * k[j] for j in range(i) if a[i - 1][j])
            u[i] = P(u_star)
            k[i] = F(u[i])
        u_star = u0 + dt * sum(b[j] * k[j] for j in range(num_steps) if b[j])
        u_final = P(u_star)
        return u_final

    return step_fn

body_hash: 4,482,756,570,041,704,000
docstring: Create a forward Runge-Kutta time-stepper for incompressible Navier-Stokes. This function implements the reference method (equations 16-21), rather than the fast projection method, from: "Fast-Projection Methods for the Incompressible Navier–Stokes Equations" Fluids 2020, 5, 222; doi:10.3390/fluids5040222 Args: tableau: Butcher tableau. equation: equation to use. time_step: overall time-step size. Returns: Function that advances one time-step forward.
path: jax_cfd/base/time_stepping.py
name: navier_stokes_rk
repository_name: google/jax-cfd
lang: python
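For context, tableau.a holds the sub-diagonal rows and tableau.b the final weights of a standard Butcher tableau; a classic fourth-order Runge-Kutta scheme would be wired up roughly as below (the field names come from the signature above, but the constructor call itself is an assumption):

# Classic RK4 coefficients; each row of `a` feeds one intermediate stage.
rk4 = ButcherTableau(
    a=[[1/2],
       [0, 1/2],
       [0, 0, 1]],
    b=[1/6, 1/3, 1/3, 1/6])
step_fn = navier_stokes_rk(rk4, equation, time_step=0.01)
# u_next = step_fn(u)  # advances the state by one projected RK step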
body:

def explicit_terms(self, state):
    """Explicitly evaluate the ODE."""
    raise NotImplementedError

body_hash: -551,977,913,440,895,400
docstring: Explicitly evaluate the ODE.
path: jax_cfd/base/time_stepping.py
name: explicit_terms
repository_name: google/jax-cfd
lang: python
body:

def pressure_projection(self, state):
    """Enforce the incompressibility constraint."""
    raise NotImplementedError

body_hash: 1,062,503,044,627,755,400
docstring: Enforce the incompressibility constraint.
path: jax_cfd/base/time_stepping.py
name: pressure_projection
repository_name: google/jax-cfd
lang: python
body:

def _check_fc_port_and_init(self, wwns, hostid, fabric_map, nsinfos):
    """Check FC port on array and wwn on host is connected to switch.

    If no FC port on array is connected to switch or no ini on host is
    connected to switch, raise an error.
    """
    if not fabric_map:
        msg = _('No FC port on array is connected to switch.')
        LOG.error(msg)
        raise exception.CinderException(msg)
    no_wwn_connected_to_switch = True
    for wwn in wwns:
        formatted_initiator = fczm_utils.get_formatted_wwn(wwn)
        for fabric in fabric_map:
            nsinfo = nsinfos[fabric]
            if formatted_initiator in nsinfo:
                no_wwn_connected_to_switch = False
                self.client.ensure_fc_initiator_added(wwn, hostid)
                break
    if no_wwn_connected_to_switch:
        msg = _('No wwn on host is connected to switch.')
        LOG.error(msg)
        raise exception.CinderException(msg)

body_hash: 7,124,849,976,200,785,000
docstring: Check FC port on array and wwn on host is connected to switch. If no FC port on array is connected to switch or no ini on host is connected to switch, raise an error.
path: Cinder/Mitaka/extend/fc_zone_helper.py
name: _check_fc_port_and_init
repository_name: Huawei/OpenStack_Driver
lang: python
body:

def _get_one_fc_port_for_zone(self, initiator, contr, nsinfos, cfgmap_from_fabrics, fabric_maps):
    """Get one FC port per controller.

    task flow:
    1. Get all the FC port from the array.
    2. Filter out ports belonged to the specific controller
       and the status is connected.
    3. Filter out ports connected to the fabric configured in cinder.conf.
    4. Get active zones set from switch.
    5. Find a port according to three cases.
    """
    LOG.info(_LI('Get in function _get_one_fc_port_for_zone. Initiator: %s'), initiator)
    formatted_initiator = fczm_utils.get_formatted_wwn(initiator)
    fabric_map = fabric_maps[contr]
    if not fabric_map:
        return None, False
    port_zone_number_map = {}
    for fabric in fabric_map:
        LOG.info(_LI('Dealing with fabric: %s'), fabric)
        nsinfo = nsinfos[fabric]
        if formatted_initiator not in nsinfo:
            continue
        final_port_list_per_fabric = fabric_map[fabric]
        cfgmap_from_fabric = cfgmap_from_fabrics[fabric]
        zones_members = cfgmap_from_fabric['zones'].values()
        for port in final_port_list_per_fabric:
            port_zone_number_map[port] = 0
            formatted_port = fczm_utils.get_formatted_wwn(port)
            for zones_member in zones_members:
                if formatted_port in zones_member:
                    if formatted_initiator in zones_member:
                        # An existing zone already holds both the initiator
                        # and this port: reuse it, no new zone needed.
                        return port, False
                    port_zone_number_map[port] += 1
    if port_zone_number_map == {}:
        return None, False
    # Otherwise pick the port that appears in the fewest zones.
    temp_list = sorted(port_zone_number_map.items(), key=lambda d: d[1])
    return temp_list[0][0], True

body_hash: 3,187,681,805,082,868,000
docstring: Get one FC port per controller. task flow: 1. Get all the FC port from the array. 2. Filter out ports belonged to the specific controller and the status is connected. 3. Filter out ports connected to the fabric configured in cinder.conf. 4. Get active zones set from switch. 5. Find a port according to three cases.
path: Cinder/Mitaka/extend/fc_zone_helper.py
name: _get_one_fc_port_for_zone
repository_name: Huawei/OpenStack_Driver
lang: python
body:

def __init__(self, data_dir, log_level=None, scope_host='127.0.0.1', dry_run=False):
    """Setup the basic code to take a single timepoint from a timecourse experiment.

    Parameters:
        data_dir: directory where the data and metadata-files should be read/written.
        io_threads: number of threads to use to save image data out.
        log_level: level from logging library at which to log information to the
            logfile in data_dir. (Subclasses can log information with self.logger)
            If not specified, fall back to the class attribute LOG_LEVEL. This
            allows a subclass to set a default log level, which still can be
            over-ridden from the command line.
        scope_host: IP address to connect to the scope server. If None, run without
            a scope server.
        dry_run: if True, do not write any files (including log files; log entries
            will be printed to the console).
    """
    self.data_dir = pathlib.Path(data_dir).resolve()
    self.experiment_metadata_path = self.data_dir / 'experiment_metadata.json'
    with self.experiment_metadata_path.open('r') as f:
        self.experiment_metadata = json.load(f)
    self.experiment_metadata['node'] = platform.node()
    self.positions = self.experiment_metadata['positions']
    self.skip_positions = set()
    annotations = load_data.read_annotations(self.data_dir)
    for position in self.positions.keys():
        if position in annotations:
            position_annotations, timepoint_annotations = annotations[position]
            if position_annotations.get('exclude'):
                self.skip_positions.add(position)
            else:
                for annotation in timepoint_annotations.values():
                    if annotation.get('stage') == 'dead':
                        self.skip_positions.add(position)
                        break
    if scope_host is not None:
        from .. import scope_client
        self.scope = scope_client.ScopeClient(scope_host)
        if hasattr(self.scope, 'camera'):
            self.scope.camera.return_to_default_state()
    else:
        self.scope = None
    self.write_files = not dry_run
    self.logger = log_util.get_logger(str(data_dir))
    if log_level is None:
        log_level = self.LOG_LEVEL
    elif isinstance(log_level, str):
        log_level = getattr(logging, log_level)
    self.logger.setLevel(log_level)
    if self.write_files:
        self.image_io = threaded_io.ThreadedIO(self.IO_THREADS, self.MAX_IO_JOBS)
        handler = logging.FileHandler(str(self.data_dir / 'acquisitions.log'))
    else:
        self.image_io = DummyIO(self.logger)
        handler = logging.StreamHandler()
    handler.setFormatter(log_util.get_formatter())
    self.logger.addHandler(handler)
    self._job_thread = None

body_hash: 3,253,398,444,944,715,300
docstring: Setup the basic code to take a single timepoint from a timecourse experiment. Parameters: data_dir: directory where the data and metadata-files should be read/written. io_threads: number of threads to use to save image data out. log_level: level from logging library at which to log information to the logfile in data_dir. (Subclasses can log information with self.logger) If not specified, fall back to the class attribute LOG_LEVEL. This allows a subclass to set a default log level, which still can be over-ridden from the command line. scope_host: IP address to connect to the scope server. If None, run without a scope server. dry_run: if True, do not write any files (including log files; log entries will be printed to the console).
path: scope/timecourse/base_handler.py
name: __init__
repository_name: drew-sinha/rpc-scope
lang: python
body:

def add_background_job(self, function, *args, **kws):
    """Add a function with parameters *args and **kws to a queue to be completed
    asynchronously with the rest of the timepoint acquisition. This will be
    run in a background thread, so make sure that the function acts in a
    threadsafe manner. (NB: self.logger *is* thread-safe.)

    All queued functions will be waited for completion before the timepoint
    ends. Any exceptions will be propagated to the foreground after all
    functions queued either finish or raise an exception.
    """
    if self._job_thread is None:
        self._job_thread = futures.ThreadPoolExecutor(max_workers=1)
    self._job_futures.append(self._job_thread.submit(function, *args, **kws))

body_hash: 2,761,595,359,836,609,000
docstring: Add a function with parameters *args and **kws to a queue to be completed asynchronously with the rest of the timepoint acquisition. This will be run in a background thread, so make sure that the function acts in a threadsafe manner. (NB: self.logger *is* thread-safe.) All queued functions will be waited for completion before the timepoint ends. Any exceptions will be propagated to the foreground after all functions queued either finish or raise an exception.
path: scope/timecourse/base_handler.py
name: add_background_job
repository_name: drew-sinha/rpc-scope
lang: python
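A small sketch of queuing work through this helper (save_copy is a hypothetical function; per the docstring, queued jobs must be thread-safe and are joined before the timepoint ends):

import shutil

def save_copy(src, dst):
    # Runs in the handler's single background worker thread.
    shutil.copyfile(src, dst)

# From within a handler method:
# self.add_background_job(save_copy, 'img.png', 'backup/img.png')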
def run_position(self, position_name, position_coords): 'Do everything required for taking a timepoint at a single position\n EXCEPT focusing / image acquisition. This includes moving the stage to\n the right x,y position, loading and saving metadata, and saving image\n data, as generated by acquire_images()' timestamp = time.time() (position_dir, metadata_path, position_metadata) = self._position_metadata(position_name) position_dir.mkdir(exist_ok=True) if (self.scope is not None): with self.debug_timing('Stage positioning'): self.scope.stage.position = position_coords (images, image_names, new_metadata) = self.acquire_images(position_name, position_dir, position_metadata) if (new_metadata is None): new_metadata = {} new_metadata['timestamp'] = timestamp new_metadata['timepoint'] = self.timepoint_prefix position_metadata.append(new_metadata) self.finalize_acquisition(position_name, position_dir, position_metadata) image_paths = [(position_dir / ((self.timepoint_prefix + ' ') + name)) for name in image_names] if self.write_files: self.image_io.write(images, image_paths, self.IMAGE_COMPRESSION) self._write_atomic_json(metadata_path, position_metadata)
-4,009,590,888,470,571,500
Do everything required for taking a timepoint at a single position EXCEPT focusing / image acquisition. This includes moving the stage to the right x,y position, loading and saving metadata, and saving image data, as generated by acquire_images()
scope/timecourse/base_handler.py
run_position
drew-sinha/rpc-scope
python
def run_position(self, position_name, position_coords): 'Do everything required for taking a timepoint at a single position\n EXCEPT focusing / image acquisition. This includes moving the stage to\n the right x,y position, loading and saving metadata, and saving image\n data, as generated by acquire_images()' timestamp = time.time() (position_dir, metadata_path, position_metadata) = self._position_metadata(position_name) position_dir.mkdir(exist_ok=True) if (self.scope is not None): with self.debug_timing('Stage positioning'): self.scope.stage.position = position_coords (images, image_names, new_metadata) = self.acquire_images(position_name, position_dir, position_metadata) if (new_metadata is None): new_metadata = {} new_metadata['timestamp'] = timestamp new_metadata['timepoint'] = self.timepoint_prefix position_metadata.append(new_metadata) self.finalize_acquisition(position_name, position_dir, position_metadata) image_paths = [(position_dir / ((self.timepoint_prefix + ' ') + name)) for name in image_names] if self.write_files: self.image_io.write(images, image_paths, self.IMAGE_COMPRESSION) self._write_atomic_json(metadata_path, position_metadata)
def configure_timepoint(self): "Override this method with global configuration for the image acquisitions\n (e.g. camera configuration). Member variables 'scope', 'experiment_metadata',\n 'timepoint_prefix', and 'positions' may be specifically useful." pass
-2,345,780,675,447,022,000
Override this method with global configuration for the image acquisitions (e.g. camera configuration). Member variables 'scope', 'experiment_metadata', 'timepoint_prefix', and 'positions' may be specifically useful.
scope/timecourse/base_handler.py
configure_timepoint
drew-sinha/rpc-scope
python
def configure_timepoint(self): "Override this method with global configuration for the image acquisitions\n (e.g. camera configuration). Member variables 'scope', 'experiment_metadata',\n 'timepoint_prefix', and 'positions' may be specifically useful." pass
def finalize_timepoint(self): 'Override this method with global finalization after the images have been\n acquired for each position. Useful for altering the self.experiment_metadata\n dictionary before it is saved out.\n ' pass
6,943,338,214,605,275,000
Override this method with global finalization after the images have been acquired for each position. Useful for altering the self.experiment_metadata dictionary before it is saved out.
scope/timecourse/base_handler.py
finalize_timepoint
drew-sinha/rpc-scope
python
def finalize_timepoint(self): 'Override this method with global finalization after the images have been\n acquired for each position. Useful for altering the self.experiment_metadata\n dictionary before it is saved out.\n ' pass
def finalize_acquisition(self, position_name, position_dir, position_metadata): 'Called after acquiring images for a single position.\n\n Parameters:\n position_name: name of the position in the experiment metadata file.\n position_dir: pathlib.Path object representing the directory where\n position-specific data files and outputs are written. Useful for\n reading previous image data.\n position_metadata: list of all the stored position metadata from the\n previous timepoints, in chronological order. This includes data\n from the latest timepoint, accessible as: position_metadata[-1].\n ' pass
5,483,981,047,952,919,000
Called after acquiring images for a single position. Parameters: position_name: name of the position in the experiment metadata file. position_dir: pathlib.Path object representing the directory where position-specific data files and outputs are written. Useful for reading previous image data. position_metadata: list of all the stored position metadata from the previous timepoints, in chronological order. This includes data from the latest timepoint, accessible as: position_metadata[-1].
scope/timecourse/base_handler.py
finalize_acquisition
drew-sinha/rpc-scope
python
def finalize_acquisition(self, position_name, position_dir, position_metadata): 'Called after acquiring images for a single position.\n\n Parameters:\n position_name: name of the position in the experiment metadata file.\n position_dir: pathlib.Path object representing the directory where\n position-specific data files and outputs are written. Useful for\n reading previous image data.\n position_metadata: list of all the stored position metadata from the\n previous timepoints, in chronological order. This includes data\n from the latest timepoint, accessible as: position_metadata[-1].\n ' pass
def cleanup(self): 'Override this method with any global cleanup/finalization tasks\n that may be necessary.' pass
-4,469,802,585,313,322,000
Override this method with any global cleanup/finalization tasks that may be necessary.
scope/timecourse/base_handler.py
cleanup
drew-sinha/rpc-scope
python
def cleanup(self): 'Override this method with any global cleanup/finalization tasks\n that may be necessary.' pass
def get_next_run_time(self): 'Override this method to return when the next timepoint run should be\n scheduled. Returning None means no future runs will be scheduled.' return None
1,995,302,963,786,831,400
Override this method to return when the next timepoint run should be scheduled. Returning None means no future runs will be scheduled.
scope/timecourse/base_handler.py
get_next_run_time
drew-sinha/rpc-scope
python
def get_next_run_time(self): 'Override this method to return when the next timepoint run should be\n scheduled. Returning None means no future runs will be scheduled.' return None
def acquire_images(self, position_name, position_dir, position_metadata): "Override this method in a subclass to define the image-acquisition sequence.\n\n All that most subclasses will need to do is return the following as a tuple:\n (images, image_names, new_metadata), where:\n images is a list of the acquired images\n image_names is a list of the generic names for each of these images\n (not timepoint- or position-specific; e.g. 'GFP.png' or some such)\n new_metadata is a dictionary of timepoint-specific information, such\n as the latest focal plane z-position or similar. This will be\n made available to future acquisition runs via the 'position_metadata'\n argument described below.\n\n The images and metadata will be written out by the superclass, and\n must not be written by the overriding subclass.\n\n Optionally, subclasses may choose to enter 'position_name' into the\n self.skip_positions set to indicate that in the future this position\n should not be acquired. (E.g. the worm is dead.)\n\n Parameters:\n position_name: identifier for this image-acquisition position. Useful\n for adding this position to the skip_positions set.\n position_dir: pathlib.Path object representing the directory where\n position-specific data files and outputs should be written. Useful\n only if additional data needs to be read in or out during\n acquisition. (E.g. a background model or similar.)\n position_metadata: list of all the stored position metadata from the\n previous timepoints, in chronological order. In particular, this\n dictionary is guaranteed to contain 'timestamp' which is the\n time.time() at which that acquisition was started. Other values\n (such as the latest focal plane) stored by previous acquisition\n runs will also be available. The most recent metadata will be in\n position_metadata[-1].\n " raise NotImplementedError()
6,055,405,891,119,240,000
Override this method in a subclass to define the image-acquisition sequence. All that most subclasses will need to do is return the following as a tuple: (images, image_names, new_metadata), where: images is a list of the acquired images image_names is a list of the generic names for each of these images (not timepoint- or position-specific; e.g. 'GFP.png' or some such) new_metadata is a dictionary of timepoint-specific information, such as the latest focal plane z-position or similar. This will be made available to future acquisition runs via the 'position_metadata' argument described below. The images and metadata will be written out by the superclass, and must not be written by the overriding subclass. Optionally, subclasses may choose to enter 'position_name' into the self.skip_positions set to indicate that in the future this position should not be acquired. (E.g. the worm is dead.) Parameters: position_name: identifier for this image-acquisition position. Useful for adding this position to the skip_positions set. position_dir: pathlib.Path object representing the directory where position-specific data files and outputs should be written. Useful only if additional data needs to be read in or out during acquisition. (E.g. a background model or similar.) position_metadata: list of all the stored position metadata from the previous timepoints, in chronological order. In particular, this dictionary is guaranteed to contain 'timestamp' which is the time.time() at which that acquisition was started. Other values (such as the latest focal plane) stored by previous acquisition runs will also be available. The most recent metadata will be in position_metadata[-1].
scope/timecourse/base_handler.py
acquire_images
drew-sinha/rpc-scope
python
def acquire_images(self, position_name, position_dir, position_metadata): "Override this method in a subclass to define the image-acquisition sequence.\n\n All that most subclasses will need to do is return the following as a tuple:\n (images, image_names, new_metadata), where:\n images is a list of the acquired images\n image_names is a list of the generic names for each of these images\n (not timepoint- or position-specific; e.g. 'GFP.png' or some such)\n new_metadata is a dictionary of timepoint-specific information, such\n as the latest focal plane z-position or similar. This will be\n made available to future acquisition runs via the 'position_metadata'\n argument described below.\n\n The images and metadata will be written out by the superclass, and\n must not be written by the overriding subclass.\n\n Optionally, subclasses may choose to enter 'position_name' into the\n self.skip_positions set to indicate that in the future this position\n should not be acquired. (E.g. the worm is dead.)\n\n Parameters:\n position_name: identifier for this image-acquisition position. Useful\n for adding this position to the skip_positions set.\n position_dir: pathlib.Path object representing the directory where\n position-specific data files and outputs should be written. Useful\n only if additional data needs to be read in or out during\n acquisition. (E.g. a background model or similar.)\n position_metadata: list of all the stored position metadata from the\n previous timepoints, in chronological order. In particular, this\n dictionary is guaranteed to contain 'timestamp' which is the\n time.time() at which that acquisition was started. Other values\n (such as the latest focal plane) stored by previous acquisition\n runs will also be available. The most recent metadata will be in\n position_metadata[-1].\n " raise NotImplementedError()
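A minimal sketch of the subclass contract described above. The base-class name TimepointHandler, the camera/stage calls, and looks_dead are assumptions; only the return-tuple shape and the skip_positions usage come from the record.

class BrightfieldHandler(TimepointHandler):  # base-class name assumed
    def acquire_images(self, position_name, position_dir, position_metadata):
        image = self.scope.camera.acquire_image()   # assumed camera API
        fine_z = self.scope.stage.z                 # assumed stage attribute
        if looks_dead(image):                       # hypothetical analysis helper
            # never acquire this position again
            self.skip_positions.add(position_name)
        # (acquired images, generic per-image names, timepoint-specific metadata)
        return [image], ['bf.png'], {'fine_z': fine_z}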
@classmethod def main(cls, timepoint_dir=None, **cls_init_args): "Main method to run a timepoint.\n\n Parse sys.argv to find an (optional) scheduled_start time as a positional\n argument. Any arguments that contain an '=' will be assumed to be\n python variable definitions to pass to the class init method. (Leading\n '-' or '--' will be stripped, and internal '-'s will be converted to '_'.)\n\n e.g. this allows the following usage: ./acquire.py --dry-run=True --log-level=logging.DEBUG\n\n Parameters:\n timepoint_dir: location of timepoint directory. If not specified, default\n to the parent dir of the file that defines the class that this\n method is called on.\n **cls_init_args: dict of arguments to pass to the class init method.\n " if (timepoint_dir is None): timepoint_dir = pathlib.Path(inspect.getfile(cls)).parent scheduled_start = None for arg in sys.argv[1:]: if arg.count('='): while arg.startswith('-'): arg = arg[1:] arg = arg.replace('-', '_') exec(arg, dict(logging=logging), cls_init_args) elif (scheduled_start is None): scheduled_start = float(arg) else: raise ValueError('More than one schedule start time provided') if (scheduled_start is None): scheduled_start = time.time() handler = cls(timepoint_dir, **cls_init_args) next_run_time = handler.run_timepoint(scheduled_start) if next_run_time: print('next run:{}'.format(next_run_time))
-877,967,456,654,929,700
Main method to run a timepoint. Parse sys.argv to find an (optional) scheduled_start time as a positional argument. Any arguments that contain an '=' will be assumed to be python variable definitions to pass to the class init method. (Leading '-' or '--' will be stripped, and internal '-'s will be converted to '_'.) e.g. this allows the following usage: ./acquire.py --dry-run=True --log-level=logging.DEBUG Parameters: timepoint_dir: location of timepoint directory. If not specified, default to the parent dir of the file that defines the class that this method is called on. **cls_init_args: dict of arguments to pass to the class init method.
scope/timecourse/base_handler.py
main
drew-sinha/rpc-scope
python
@classmethod def main(cls, timepoint_dir=None, **cls_init_args): "Main method to run a timepoint.\n\n Parse sys.argv to find an (optional) scheduled_start time as a positional\n argument. Any arguments that contain an '=' will be assumed to be\n python variable definitions to pass to the class init method. (Leading\n '-' or '--' will be stripped, and internal '-'s will be converted to '_'.)\n\n e.g. this allows the following usage: ./acquire.py --dry-run=True --log-level=logging.DEBUG\n\n Parameters:\n timepoint_dir: location of timepoint directory. If not specified, default\n to the parent dir of the file that defines the class that this\n method is called on.\n **cls_init_args: dict of arguments to pass to the class init method.\n " if (timepoint_dir is None): timepoint_dir = pathlib.Path(inspect.getfile(cls)).parent scheduled_start = None for arg in sys.argv[1:]: if arg.count('='): while arg.startswith('-'): arg = arg[1:] arg = arg.replace('-', '_') exec(arg, dict(logging=logging), cls_init_args) elif (scheduled_start is None): scheduled_start = float(arg) else: raise ValueError('More than one schedule start time provided') if (scheduled_start is None): scheduled_start = time.time() handler = cls(timepoint_dir, **cls_init_args) next_run_time = handler.run_timepoint(scheduled_start) if next_run_time: print('next run:{}'.format(next_run_time))
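The intended entry-point pattern, inferred from the docstring above; MyHandler is a hypothetical subclass that overrides acquire_images.

# acquire.py (hypothetical)
if __name__ == '__main__':
    MyHandler.main()

# which then supports, per the docstring:
#   ./acquire.py --dry-run=True --log-level=logging.DEBUG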
def __init__(self, metadata=None, acl=None, local_vars_configuration=None): 'FolderUpdateRequest - a model defined in OpenAPI' if (local_vars_configuration is None): local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._metadata = None self._acl = None self.discriminator = None if (metadata is not None): self.metadata = metadata if (acl is not None): self.acl = acl
3,986,587,360,082,399,000
FolderUpdateRequest - a model defined in OpenAPI
libica/openapi/libgds/models/folder_update_request.py
__init__
umccr-illumina/libica
python
def __init__(self, metadata=None, acl=None, local_vars_configuration=None): if (local_vars_configuration is None): local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._metadata = None self._acl = None self.discriminator = None if (metadata is not None): self.metadata = metadata if (acl is not None): self.acl = acl
@property def metadata(self): 'Gets the metadata of this FolderUpdateRequest. # noqa: E501\n\n Metadata about this folder and its contents # noqa: E501\n\n :return: The metadata of this FolderUpdateRequest. # noqa: E501\n :rtype: object\n ' return self._metadata
8,785,109,363,569,681,000
Gets the metadata of this FolderUpdateRequest. # noqa: E501 Metadata about this folder and its contents # noqa: E501 :return: The metadata of this FolderUpdateRequest. # noqa: E501 :rtype: object
libica/openapi/libgds/models/folder_update_request.py
metadata
umccr-illumina/libica
python
@property def metadata(self): 'Gets the metadata of this FolderUpdateRequest. # noqa: E501\n\n Metadata about this folder and its contents # noqa: E501\n\n :return: The metadata of this FolderUpdateRequest. # noqa: E501\n :rtype: object\n ' return self._metadata
@metadata.setter def metadata(self, metadata): 'Sets the metadata of this FolderUpdateRequest.\n\n Metadata about this folder and its contents # noqa: E501\n\n :param metadata: The metadata of this FolderUpdateRequest. # noqa: E501\n :type: object\n ' self._metadata = metadata
2,897,326,740,404,116,500
Sets the metadata of this FolderUpdateRequest. Metadata about this folder and its contents # noqa: E501 :param metadata: The metadata of this FolderUpdateRequest. # noqa: E501 :type: object
libica/openapi/libgds/models/folder_update_request.py
metadata
umccr-illumina/libica
python
@metadata.setter def metadata(self, metadata): 'Sets the metadata of this FolderUpdateRequest.\n\n Metadata about this folder and its contents # noqa: E501\n\n :param metadata: The metadata of this FolderUpdateRequest. # noqa: E501\n :type: object\n ' self._metadata = metadata
@property def acl(self): 'Gets the acl of this FolderUpdateRequest. # noqa: E501\n\n Optional array to replace the acl on the resource. # noqa: E501\n\n :return: The acl of this FolderUpdateRequest. # noqa: E501\n :rtype: list[str]\n ' return self._acl
2,604,555,036,963,380,700
Gets the acl of this FolderUpdateRequest. # noqa: E501 Optional array to replace the acl on the resource. # noqa: E501 :return: The acl of this FolderUpdateRequest. # noqa: E501 :rtype: list[str]
libica/openapi/libgds/models/folder_update_request.py
acl
umccr-illumina/libica
python
@property def acl(self): 'Gets the acl of this FolderUpdateRequest. # noqa: E501\n\n Optional array to replace the acl on the resource. # noqa: E501\n\n :return: The acl of this FolderUpdateRequest. # noqa: E501\n :rtype: list[str]\n ' return self._acl
@acl.setter def acl(self, acl): 'Sets the acl of this FolderUpdateRequest.\n\n Optional array to replace the acl on the resource. # noqa: E501\n\n :param acl: The acl of this FolderUpdateRequest. # noqa: E501\n :type: list[str]\n ' self._acl = acl
-4,355,485,165,373,844,500
Sets the acl of this FolderUpdateRequest. Optional array to replace the acl on the resource. # noqa: E501 :param acl: The acl of this FolderUpdateRequest. # noqa: E501 :type: list[str]
libica/openapi/libgds/models/folder_update_request.py
acl
umccr-illumina/libica
python
@acl.setter def acl(self, acl): 'Sets the acl of this FolderUpdateRequest.\n\n Optional array to replace the acl on the resource. # noqa: E501\n\n :param acl: The acl of this FolderUpdateRequest. # noqa: E501\n :type: list[str]\n ' self._acl = acl
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
8,442,519,487,048,767,000
Returns the model properties as a dict
libica/openapi/libgds/models/folder_update_request.py
to_dict
umccr-illumina/libica
python
def to_dict(self): result = {} for (attr, _) in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
5,849,158,643,760,736,000
Returns the string representation of the model
libica/openapi/libgds/models/folder_update_request.py
to_str
umccr-illumina/libica
python
def to_str(self): return pprint.pformat(self.to_dict())
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
-8,960,031,694,814,905,000
For `print` and `pprint`
libica/openapi/libgds/models/folder_update_request.py
__repr__
umccr-illumina/libica
python
def __repr__(self): return self.to_str()
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, FolderUpdateRequest)): return False return (self.to_dict() == other.to_dict())
6,448,287,465,076,176,000
Returns true if both objects are equal
libica/openapi/libgds/models/folder_update_request.py
__eq__
umccr-illumina/libica
python
def __eq__(self, other): if (not isinstance(other, FolderUpdateRequest)): return False return (self.to_dict() == other.to_dict())
def __ne__(self, other): 'Returns true if both objects are not equal' if (not isinstance(other, FolderUpdateRequest)): return True return (self.to_dict() != other.to_dict())
-7,576,450,624,861,716,000
Returns true if both objects are not equal
libica/openapi/libgds/models/folder_update_request.py
__ne__
umccr-illumina/libica
python
def __ne__(self, other): if (not isinstance(other, FolderUpdateRequest)): return True return (self.to_dict() != other.to_dict())
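A small usage sketch for the generated model above; the metadata and acl values are illustrative.

from libica.openapi.libgds.models.folder_update_request import FolderUpdateRequest

req = FolderUpdateRequest(metadata={'project': 'demo'}, acl=['cid:example-client-id'])
print(req.to_dict())  # {'metadata': {'project': 'demo'}, 'acl': ['cid:example-client-id']}
req == FolderUpdateRequest(metadata={'project': 'demo'}, acl=['cid:example-client-id'])  # True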
def set_basis_shells(self, basis, element): 'Expands parameters into a basis set' basis[element] = even_temper_expansion(self.shells)
2,004,290,286,107,989,000
Expands parameters into a basis set
basisopt/opt/eventemper.py
set_basis_shells
robashaw/basisopt
python
def set_basis_shells(self, basis, element): basis[element] = even_temper_expansion(self.shells)
def __init__(self, destination, filesToMove=None, filesToRetrieve=None, dumpOnException=True): 'Establish the new and return directories' self.initial = pathTools.armiAbsPath(os.getcwd()) self.destination = None if (destination is not None): self.destination = pathTools.armiAbsPath(destination) self._filesToMove = (filesToMove or []) self._filesToRetrieve = (filesToRetrieve or []) self._dumpOnException = dumpOnException
899,575,214,240,729,200
Establish the new and return directories
armi/utils/directoryChangers.py
__init__
sammiller11235/armi
python
def __init__(self, destination, filesToMove=None, filesToRetrieve=None, dumpOnException=True): self.initial = pathTools.armiAbsPath(os.getcwd()) self.destination = None if (destination is not None): self.destination = pathTools.armiAbsPath(destination) self._filesToMove = (filesToMove or []) self._filesToRetrieve = (filesToRetrieve or []) self._dumpOnException = dumpOnException
def __enter__(self): 'At the inception of a with statement, navigate to a new directory if one is supplied.' runLog.debug('Changing directory to {}'.format(self.destination)) self.moveFiles() self.open() return self
1,383,282,025,872,974,300
At the inception of a with statement, navigate to a new directory if one is supplied.
armi/utils/directoryChangers.py
__enter__
sammiller11235/armi
python
def __enter__(self): runLog.debug('Changing directory to {}'.format(self.destination)) self.moveFiles() self.open() return self
def __exit__(self, exc_type, exc_value, traceback): 'At the termination of a with statement, navigate back to the original directory.' runLog.debug('Returning to directory {}'.format(self.initial)) if ((exc_type is not None) and self._dumpOnException): runLog.info('An exception was raised within a DirectoryChanger. Retrieving entire folder for debugging.') self._retrieveEntireFolder() else: self.retrieveFiles() self.close()
-3,199,904,071,785,294,300
At the termination of a with statement, navigate back to the original directory.
armi/utils/directoryChangers.py
__exit__
sammiller11235/armi
python
def __exit__(self, exc_type, exc_value, traceback): runLog.debug('Returning to directory {}'.format(self.initial)) if ((exc_type is not None) and self._dumpOnException): runLog.info('An exception was raised within a DirectoryChanger. Retrieving entire folder for debugging.') self._retrieveEntireFolder() else: self.retrieveFiles() self.close()
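A usage sketch of the context-manager protocol defined by __enter__/__exit__ above; the paths, file names, and run_case are illustrative.

from armi.utils.directoryChangers import DirectoryChanger

with DirectoryChanger('runs/case1', filesToMove=['input.yaml'], filesToRetrieve=['output.h5']):
    run_case()  # hypothetical work performed inside runs/case1
# on a clean exit the listed outputs are copied back to the starting directory;
# on an exception the entire destination folder is dumped for debugging
# (dumpOnException=True by default)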
def __repr__(self): 'Print the initial and destination paths' return '<{} {} to {}>'.format(self.__class__.__name__, self.initial, self.destination)
-8,354,109,074,681,529,000
Print the initial and destination paths
armi/utils/directoryChangers.py
__repr__
sammiller11235/armi
python
def __repr__(self): return '<{} {} to {}>'.format(self.__class__.__name__, self.initial, self.destination)
def open(self): '\n User requested open, used to stall the close from a with statement.\n\n This method has been made for old uses of :code:`os.chdir()` and is not\n recommended. Please use with statements instead.\n ' if self.destination: _changeDirectory(self.destination)
-3,969,173,263,933,147,000
User requested open, used to stall the close from a with statement. This method has been made for old uses of :code:`os.chdir()` and is not recommended. Please use with statements instead.
armi/utils/directoryChangers.py
open
sammiller11235/armi
python
def open(self): '\n User requested open, used to stall the close from a with statement.\n\n This method has been made for old uses of :code:`os.chdir()` and is not\n recommended. Please use with statements instead.\n ' if self.destination: _changeDirectory(self.destination)
def close(self): 'User requested close.' if (self.initial != os.getcwd()): _changeDirectory(self.initial)
-180,057,568,129,970,720
User requested close.
armi/utils/directoryChangers.py
close
sammiller11235/armi
python
def close(self): if (self.initial != os.getcwd()): _changeDirectory(self.initial)
def retrieveFiles(self): 'Retrieve any desired files.' initialPath = self.destination destinationPath = self.initial fileList = self._filesToRetrieve self._transferFiles(initialPath, destinationPath, fileList)
-7,277,374,237,445,609,000
Retrieve any desired files.
armi/utils/directoryChangers.py
retrieveFiles
sammiller11235/armi
python
def retrieveFiles(self): initialPath = self.destination destinationPath = self.initial fileList = self._filesToRetrieve self._transferFiles(initialPath, destinationPath, fileList)
def _retrieveEntireFolder(self): 'Retrieve all files.' initialPath = self.destination destinationPath = self.initial folderName = os.path.split(self.destination)[1] destinationPath = os.path.join(destinationPath, f'dump-{folderName}') fileList = os.listdir(self.destination) self._transferFiles(initialPath, destinationPath, fileList)
-994,987,351,423,495,400
Retrieve all files.
armi/utils/directoryChangers.py
_retrieveEntireFolder
sammiller11235/armi
python
def _retrieveEntireFolder(self): initialPath = self.destination destinationPath = self.initial folderName = os.path.split(self.destination)[1] destinationPath = os.path.join(destinationPath, f'dump-{folderName}') fileList = os.listdir(self.destination) self._transferFiles(initialPath, destinationPath, fileList)
@staticmethod def _transferFiles(initialPath, destinationPath, fileList): '\n Transfer files into or out of the directory.\n\n .. warning:: On Windows the max number of characters in a path is 260.\n If you exceed this you will see FileNotFound errors here.\n\n ' if (not fileList): return if (not os.path.exists(destinationPath)): os.mkdir(destinationPath) for ff in fileList: if isinstance(ff, tuple): (fromName, destName) = ff else: (fromName, destName) = (ff, ff) fromPath = os.path.join(initialPath, fromName) toPath = os.path.join(destinationPath, destName) runLog.extra('Copying {} to {}'.format(fromPath, toPath)) shutil.copy(fromPath, toPath)
6,407,889,324,405,903,000
Transfer files into or out of the directory. .. warning:: On Windows the max number of characters in a path is 260. If you exceed this you will see FileNotFound errors here.
armi/utils/directoryChangers.py
_transferFiles
sammiller11235/armi
python
@staticmethod def _transferFiles(initialPath, destinationPath, fileList): '\n Transfer files into or out of the directory.\n\n .. warning:: On Windows the max number of characters in a path is 260.\n If you exceed this you will see FileNotFound errors here.\n\n ' if (not fileList): return if (not os.path.exists(destinationPath)): os.mkdir(destinationPath) for ff in fileList: if isinstance(ff, tuple): (fromName, destName) = ff else: (fromName, destName) = (ff, ff) fromPath = os.path.join(initialPath, fromName) toPath = os.path.join(destinationPath, destName) runLog.extra('Copying {} to {}'.format(fromPath, toPath)) shutil.copy(fromPath, toPath)
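Entries in fileList may be plain names or (source, destination) tuples, per the isinstance branch above; a worked sketch with illustrative paths:

# copies src/a.txt -> dst/a.txt and src/b.txt -> dst/report.txt,
# creating dst/ first if it does not exist
DirectoryChanger._transferFiles('src', 'dst', ['a.txt', ('b.txt', 'report.txt')])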
def convert_cerberus_schema_to_pyspark(schema: Mapping[(str, Any)]) -> StructType: '\n Convert a cerberus validation schema to a pyspark schema.\n\n Assumes that schema is not nested.\n The following are required in spark schema:\n * `nullable` is False by default\n * `metadata` is an empty dict by default\n * `name` is the name of the field\n ' fields = [{'metadata': {}, 'name': name, 'nullable': True, **values} for (name, values) in schema.items() if isinstance(values, dict)] return StructType.fromJson({'fields': fields, 'type': 'struct'})
-1,252,817,147,515,248,600
Convert a cerberus validation schema to a pyspark schema. Assumes that schema is not nested. The following are required in spark schema: * `nullable` is False by default * `metadata` is an empty dict by default * `name` is the name of the field
cishouseholds/pyspark_utils.py
convert_cerberus_schema_to_pyspark
ONS-SST/cis_households
python
def convert_cerberus_schema_to_pyspark(schema: Mapping[(str, Any)]) -> StructType: '\n Convert a cerberus validation schema to a pyspark schema.\n\n Assumes that schema is not nested.\n The following are required in spark schema:\n * `nullable` is False by default\n * `metadata` is an empty dict by default\n * `name` is the name of the field\n ' fields = [{'metadata': {}, 'name': name, 'nullable': True, **values} for (name, values) in schema.items() if isinstance(values, dict)] return StructType.fromJson({'fields': fields, 'type': 'struct'})
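A worked sketch of the conversion above. Field names are illustrative; note that the cerberus 'type' values must already be valid Spark JSON type names (e.g. 'string', 'integer') for StructType.fromJson to accept them.

cerberus_schema = {
    'participant_id': {'type': 'string'},
    'visit_number': {'type': 'integer', 'nullable': False},
}
spark_schema = convert_cerberus_schema_to_pyspark(cerberus_schema)
# participant_id becomes a nullable StringType (nullable defaulted to True);
# visit_number becomes a non-nullable IntegerType (explicit override)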
def get_or_create_spark_session() -> SparkSession: '\n Create a spark_session, hiding console progress and enabling HIVE table overwrite.\n Session size is configured via pipeline config.\n ' config = get_config() session_size = config.get('pyspark_session_size', 'm') spark_session = sessions[session_size] return spark_session
5,581,483,572,705,639,000
Create a spark_session, hiding console progress and enabling HIVE table overwrite. Session size is configured via pipeline config.
cishouseholds/pyspark_utils.py
get_or_create_spark_session
ONS-SST/cis_households
python
def get_or_create_spark_session() -> SparkSession: '\n Create a spark_session, hiding console progress and enabling HIVE table overwrite.\n Session size is configured via pipeline config.\n ' config = get_config() session_size = config.get('pyspark_session_size', 'm') spark_session = sessions[session_size] return spark_session
def column_to_list(df: DataFrame, column_name: str): 'Fast collection of all records in a column to a standard list.' return [row[column_name] for row in df.collect()]
-1,705,344,995,723,576,600
Fast collection of all records in a column to a standard list.
cishouseholds/pyspark_utils.py
column_to_list
ONS-SST/cis_households
python
def column_to_list(df: DataFrame, column_name: str): return [row[column_name] for row in df.collect()]
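A usage sketch combining the helpers above:

spark = get_or_create_spark_session()
df = spark.createDataFrame([('a',), ('b',)], ['letter'])
column_to_list(df, 'letter')  # ['a', 'b']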
def __init__(self, storage_name='TUT-urban-acoustic-scenes-2018-development', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-urban-acoustic-scenes-2018-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = {'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen', 'title': 'TUT Urban Acoustic Scenes 2018, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'recording_device_model': 'Zoom F8', 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone', 'licence': 'free non-commercial'} kwargs['crossvalidation_folds'] = 1 kwargs['meta_filename'] = 'meta.csv' filename_base = 'TUT-urban-acoustic-scenes-2018-development' source_url = 'https://zenodo.org/record/1228142/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': ((source_url + filename_base) + '.doc.zip'), 'remote_bytes': 10517, 'remote_md5': '28a4a9c46a6f46709ecc8eece365a3a4', 'filename': (filename_base + '.doc.zip')}, {'content_type': 'meta', 'remote_file': ((source_url + filename_base) + '.meta.zip'), 'remote_bytes': 69272, 'remote_md5': 'e196065ee83c07af03a11a310364377d', 'filename': (filename_base + '.meta.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.1.zip'), 'remote_bytes': 1657811579, 'remote_md5': '62f97087c447e29def8716204469bf89', 'filename': (filename_base + '.audio.1.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.2.zip'), 'remote_bytes': 1783489370, 'remote_md5': '8e569a92025d82bff6b02b956d7c6dc9', 'filename': (filename_base + '.audio.2.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.3.zip'), 'remote_bytes': 1809675304, 'remote_md5': '00d2020582a4535af5e65322fb2bad56', 'filename': (filename_base + '.audio.3.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.4.zip'), 'remote_bytes': 1756582525, 'remote_md5': 'd691eb4271f83ba6ba9a28797accc497', 'filename': (filename_base + '.audio.4.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.5.zip'), 'remote_bytes': 1724002546, 'remote_md5': 'c4d64b5483b60f85e9fe080b3435a6be', 'filename': (filename_base + '.audio.5.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.6.zip'), 'remote_bytes': 1645753049, 'remote_md5': '2f0feee78f216697eb19497714d97642', 'filename': (filename_base + '.audio.6.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.7.zip'), 'remote_bytes': 1671903917, 'remote_md5': '07cfefe80a0731de6819181841239f3a', 'filename': (filename_base + '.audio.7.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.8.zip'), 'remote_bytes': 1673304843, 'remote_md5': '213f3c012859c2e9dcb74aacc8558458', 'filename': (filename_base + '.audio.8.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.9.zip'), 'remote_bytes': 1674839259, 'remote_md5': 'b724442b09abcb3bd095ebff497cef85', 'filename': (filename_base + '.audio.9.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.10.zip'), 'remote_bytes': 1662932947, 'remote_md5': 'a27a32fa52e283ed8013375b8a16f269', 'filename': (filename_base + '.audio.10.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.11.zip'), 'remote_bytes': 1751473843, 'remote_md5': '7073a121e825ffef99832507f30d6644', 'filename': (filename_base + '.audio.11.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.12.zip'), 'remote_bytes': 1742332198, 'remote_md5': '6567aa61db12776568b6267ce122fb18', 'filename': (filename_base + '.audio.12.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.13.zip'), 'remote_bytes': 798990513, 'remote_md5': 'd00eeb2db0e093d8975521323a96c519', 'filename': (filename_base + '.audio.13.zip')}] kwargs['audio_paths'] = ['audio'] super(TUTUrbanAcousticScenes_2018_DevelopmentSet, self).__init__(**kwargs)
-6,900,135,253,286,699,000
Constructor Parameters ---------- storage_name : str Name to be used when storing dataset on disk Default value 'TUT-urban-acoustic-scenes-2018-development' data_path : str Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets') is used. Default value None included_content_types : list of str or str Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code', 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string. Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-urban-acoustic-scenes-2018-development', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-urban-acoustic-scenes-2018-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = {'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen', 'title': 'TUT Urban Acoustic Scenes 2018, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'recording_device_model': 'Zoom F8', 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone', 'licence': 'free non-commercial'} kwargs['crossvalidation_folds'] = 1 kwargs['meta_filename'] = 'meta.csv' filename_base = 'TUT-urban-acoustic-scenes-2018-development' source_url = 'https://zenodo.org/record/1228142/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': ((source_url + filename_base) + '.doc.zip'), 'remote_bytes': 10517, 'remote_md5': '28a4a9c46a6f46709ecc8eece365a3a4', 'filename': (filename_base + '.doc.zip')}, {'content_type': 'meta', 'remote_file': ((source_url + filename_base) + '.meta.zip'), 'remote_bytes': 69272, 'remote_md5': 'e196065ee83c07af03a11a310364377d', 'filename': (filename_base + '.meta.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.1.zip'), 'remote_bytes': 1657811579, 'remote_md5': '62f97087c447e29def8716204469bf89', 'filename': (filename_base + '.audio.1.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.2.zip'), 'remote_bytes': 1783489370, 'remote_md5': '8e569a92025d82bff6b02b956d7c6dc9', 'filename': (filename_base + '.audio.2.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.3.zip'), 'remote_bytes': 1809675304, 'remote_md5': '00d2020582a4535af5e65322fb2bad56', 'filename': (filename_base + '.audio.3.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.4.zip'), 'remote_bytes': 1756582525, 'remote_md5': 'd691eb4271f83ba6ba9a28797accc497', 'filename': (filename_base + '.audio.4.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.5.zip'), 'remote_bytes': 1724002546, 'remote_md5': 'c4d64b5483b60f85e9fe080b3435a6be', 'filename': (filename_base + '.audio.5.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.6.zip'), 'remote_bytes': 1645753049, 'remote_md5': '2f0feee78f216697eb19497714d97642', 'filename': (filename_base + '.audio.6.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.7.zip'), 'remote_bytes': 1671903917, 'remote_md5': '07cfefe80a0731de6819181841239f3a', 'filename': (filename_base + '.audio.7.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.8.zip'), 'remote_bytes': 1673304843, 'remote_md5': '213f3c012859c2e9dcb74aacc8558458', 'filename': (filename_base + '.audio.8.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.9.zip'), 'remote_bytes': 1674839259, 'remote_md5': 'b724442b09abcb3bd095ebff497cef85', 'filename': (filename_base + '.audio.9.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.10.zip'), 'remote_bytes': 1662932947, 'remote_md5': 'a27a32fa52e283ed8013375b8a16f269', 'filename': (filename_base + '.audio.10.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.11.zip'), 'remote_bytes': 1751473843, 'remote_md5': '7073a121e825ffef99832507f30d6644', 'filename': (filename_base + '.audio.11.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.12.zip'), 'remote_bytes': 1742332198, 'remote_md5': '6567aa61db12776568b6267ce122fb18', 'filename': (filename_base + '.audio.12.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.13.zip'), 'remote_bytes': 798990513, 'remote_md5': 'd00eeb2db0e093d8975521323a96c519', 'filename': (filename_base + '.audio.13.zip')}] kwargs['audio_paths'] = ['audio'] super(TUTUrbanAcousticScenes_2018_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs): 'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n ' if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) if (not item.identifier): item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[(- 1)])[0].split('-')[1:(- 2)])
8,631,365,302,105,927,000
Process single meta data item Parameters ---------- item : MetaDataItem Meta data item absolute_path : bool Convert file paths to be absolute Default value True
dcase_util/datasets/tut.py
process_meta_item
ankitshah009/dcase_util
python
def process_meta_item(self, item, absolute_path=True, **kwargs): 'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n ' if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) if (not item.identifier): item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[(- 1)])[0].split('-')[1:(- 2)])
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' if (not self.meta_container.exists()): meta_data = collections.OrderedDict() for fold in self.folds(): fold_data = MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='train', fold=fold)).load() fold_data += MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)).load() for item in fold_data: if (item.filename not in meta_data): self.process_meta_item(item=item, absolute_path=False) meta_data[item.filename] = item MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file) self.load() return self
3,391,747,241,571,636,000
Prepare dataset for the usage. Returns ------- self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' if (not self.meta_container.exists()): meta_data = collections.OrderedDict() for fold in self.folds(): fold_data = MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='train', fold=fold)).load() fold_data += MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)).load() for item in fold_data: if (item.filename not in meta_data): self.process_meta_item(item=item, absolute_path=False) meta_data[item.filename] = item MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file) self.load() return self
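A hedged usage sketch for the dataset class above; it assumes the packages in package_list have already been downloaded and extracted under data_path (the path itself is illustrative):

db = TUTUrbanAcousticScenes_2018_DevelopmentSet(data_path='/data/dcase')
db.prepare()  # on the first run, builds meta.csv from the train/evaluate setup files
# later runs find meta.csv and go straight to self.load()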
def __init__(self, storage_name='TUT-urban-acoustic-scenes-2018-mobile-development', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-urban-acoustic-scenes-2018-mobile-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = {'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen', 'title': 'TUT Urban Acoustic Scenes 2018 Mobile, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'recording_device_model': 'Various', 'microphone_model': 'Various', 'licence': 'free non-commercial'} kwargs['crossvalidation_folds'] = 1 kwargs['meta_filename'] = 'meta.csv' filename_base = 'TUT-urban-acoustic-scenes-2018-mobile-development' source_url = 'https://zenodo.org/record/1228235/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': ((source_url + filename_base) + '.doc.zip'), 'remote_bytes': 12144, 'remote_md5': '5694e9cdffa11cef8ec270673dc19ba0', 'filename': (filename_base + '.doc.zip')}, {'content_type': 'meta', 'remote_file': ((source_url + filename_base) + '.meta.zip'), 'remote_bytes': 88425, 'remote_md5': 'b557b6d5d620aa4f15564ab38f1594d4', 'filename': (filename_base + '.meta.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.1.zip'), 'remote_bytes': 1692337547, 'remote_md5': 'd6f2671af84032b97f393354c124517d', 'filename': (filename_base + '.audio.1.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.2.zip'), 'remote_bytes': 1769203601, 'remote_md5': 'db8b3603af5d4e559869a592930a7620', 'filename': (filename_base + '.audio.2.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.3.zip'), 'remote_bytes': 1674610746, 'remote_md5': '703bf73523a6ad1f40d4923cb8ba3ff0', 'filename': (filename_base + '.audio.3.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.4.zip'), 'remote_bytes': 1634599587, 'remote_md5': '18af04ab5d6f15a72c66f16bfec0ca07', 'filename': (filename_base + '.audio.4.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.5.zip'), 'remote_bytes': 1640894390, 'remote_md5': 'a579efb032f209a7e77fe22e4808e9ca', 'filename': (filename_base + '.audio.5.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.6.zip'), 'remote_bytes': 1693974078, 'remote_md5': 'c2c56691047b3be3d98cb0ffd6858d9f', 'filename': (filename_base + '.audio.6.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.7.zip'), 'remote_bytes': 1165383562, 'remote_md5': 'e182e5300867f4ed4b580389cc5b931e', 'filename': (filename_base + '.audio.7.zip')}] kwargs['audio_paths'] = ['audio'] super(TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet, self).__init__(**kwargs)
1,561,665,692,632,164,600
Constructor Parameters ---------- storage_name : str Name to be used when storing dataset on disk Default value 'TUT-urban-acoustic-scenes-2018-mobile-development' data_path : str Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets') is used. Default value None included_content_types : list of str or str Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code', 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string. Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-urban-acoustic-scenes-2018-mobile-development', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-urban-acoustic-scenes-2018-mobile-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = {'authors': 'Toni Heittola, Annamaria Mesaros, and Tuomas Virtanen', 'title': 'TUT Urban Acoustic Scenes 2018 Mobile, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'recording_device_model': 'Various', 'microphone_model': 'Various', 'licence': 'free non-commercial'} kwargs['crossvalidation_folds'] = 1 kwargs['meta_filename'] = 'meta.csv' filename_base = 'TUT-urban-acoustic-scenes-2018-mobile-development' source_url = 'https://zenodo.org/record/1228235/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': ((source_url + filename_base) + '.doc.zip'), 'remote_bytes': 12144, 'remote_md5': '5694e9cdffa11cef8ec270673dc19ba0', 'filename': (filename_base + '.doc.zip')}, {'content_type': 'meta', 'remote_file': ((source_url + filename_base) + '.meta.zip'), 'remote_bytes': 88425, 'remote_md5': 'b557b6d5d620aa4f15564ab38f1594d4', 'filename': (filename_base + '.meta.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.1.zip'), 'remote_bytes': 1692337547, 'remote_md5': 'd6f2671af84032b97f393354c124517d', 'filename': (filename_base + '.audio.1.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.2.zip'), 'remote_bytes': 1769203601, 'remote_md5': 'db8b3603af5d4e559869a592930a7620', 'filename': (filename_base + '.audio.2.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.3.zip'), 'remote_bytes': 1674610746, 'remote_md5': '703bf73523a6ad1f40d4923cb8ba3ff0', 'filename': (filename_base + '.audio.3.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.4.zip'), 'remote_bytes': 1634599587, 'remote_md5': '18af04ab5d6f15a72c66f16bfec0ca07', 'filename': (filename_base + '.audio.4.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.5.zip'), 'remote_bytes': 1640894390, 'remote_md5': 'a579efb032f209a7e77fe22e4808e9ca', 'filename': (filename_base + '.audio.5.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.6.zip'), 'remote_bytes': 1693974078, 'remote_md5': 'c2c56691047b3be3d98cb0ffd6858d9f', 'filename': (filename_base + '.audio.6.zip')}, {'content_type': 'audio', 'remote_file': ((source_url + filename_base) + '.audio.7.zip'), 'remote_bytes': 1165383562, 'remote_md5': 'e182e5300867f4ed4b580389cc5b931e', 'filename': (filename_base + '.audio.7.zip')}] kwargs['audio_paths'] = ['audio'] super(TUTUrbanAcousticScenes_2018_Mobile_DevelopmentSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs): 'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n ' if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) if (not item.identifier): item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[(- 1)])[0].split('-')[1:(- 2)]) if (not item.source_label): item.source_label = os.path.splitext(os.path.split(item.filename)[(- 1)])[0].split('-')[(- 1)]
-5,714,219,792,530,615,000
Process single meta data item Parameters ---------- item : MetaDataItem Meta data item absolute_path : bool Convert file paths to be absolute Default value True
dcase_util/datasets/tut.py
process_meta_item
ankitshah009/dcase_util
python
def process_meta_item(self, item, absolute_path=True, **kwargs): 'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n ' if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) if (not item.identifier): item.identifier = '-'.join(os.path.splitext(os.path.split(item.filename)[(- 1)])[0].split('-')[1:(- 2)]) if (not item.source_label): item.source_label = os.path.splitext(os.path.split(item.filename)[(- 1)])[0].split('-')[(- 1)]
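A worked example of the filename parsing in process_meta_item above, using a hypothetical basename that follows the scene-city-location-segment-device pattern:

import os

name = os.path.splitext(os.path.split('audio/airport-barcelona-0-0-a.wav')[-1])[0]
identifier = '-'.join(name.split('-')[1:-2])  # 'barcelona-0' (recording-location id)
source_label = name.split('-')[-1]            # 'a' (device id; mobile variant only)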
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' if (not self.meta_container.exists()): meta_data = collections.OrderedDict() for fold in self.folds(): fold_data = MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='train', fold=fold)).load() fold_data += MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)).load() for item in fold_data: if (item.filename not in meta_data): self.process_meta_item(item=item, absolute_path=False) meta_data[item.filename] = item MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file) self.load() return self
3,391,747,241,571,636,000
Prepare dataset for the usage. Returns ------- self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' if (not self.meta_container.exists()): meta_data = collections.OrderedDict() for fold in self.folds(): fold_data = MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='train', fold=fold)).load() fold_data += MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)).load() for item in fold_data: if (item.filename not in meta_data): self.process_meta_item(item=item, absolute_path=False) meta_data[item.filename] = item MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file) self.load() return self
def __init__(self, storage_name='TUT-acoustic-scenes-2017-development', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2017-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = {'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen', 'title': 'TUT Acoustic Scenes 2017, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'recording_device_model': 'Roland Edirol R-09', 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone', 'licence': 'free non-commercial'} kwargs['crossvalidation_folds'] = 4 source_url = 'https://zenodo.org/record/400515/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.doc.zip'), 'remote_bytes': 54796, 'remote_md5': '2065495aaf3f1103e795c9899e2af1df', 'filename': 'TUT-acoustic-scenes-2017-development.doc.zip'}, {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.meta.zip'), 'remote_bytes': 104321, 'remote_md5': '9007fd4772d816590c5db5f5e9568f5d', 'filename': 'TUT-acoustic-scenes-2017-development.meta.zip'}, {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.error.zip'), 'remote_bytes': 1432, 'remote_md5': '802c700b021769e52a2c1e3b9c117a1b', 'filename': 'TUT-acoustic-scenes-2017-development.error.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.1.zip'), 'remote_bytes': 1071445248, 'remote_md5': '251325a9afaaad0326ad1c57f57d514a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.1.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.2.zip'), 'remote_bytes': 1073453613, 'remote_md5': 'c26861e05147dc319b4250eb103d9d99', 'filename': 'TUT-acoustic-scenes-2017-development.audio.2.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.3.zip'), 'remote_bytes': 1073077819, 'remote_md5': 'a4815775f8a5e629179726ee4cd4f55a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.3.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.4.zip'), 'remote_bytes': 1072822038, 'remote_md5': '1732b03afe8c53ef8bba80ba14766e57', 'filename': 'TUT-acoustic-scenes-2017-development.audio.4.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.5.zip'), 'remote_bytes': 1072644652, 'remote_md5': '611be754a0c951185c6ae4b7643c19a0', 'filename': 'TUT-acoustic-scenes-2017-development.audio.5.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.6.zip'), 'remote_bytes': 1072667888, 'remote_md5': '165a201db800d3ea76fce5a9c2bd97d7', 'filename': 'TUT-acoustic-scenes-2017-development.audio.6.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.7.zip'), 'remote_bytes': 1073417661, 'remote_md5': 'c7d79db84264401c0f8680dcc36013ad', 'filename': 'TUT-acoustic-scenes-2017-development.audio.7.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.8.zip'), 'remote_bytes': 1072381222, 'remote_md5': '35043f25123439392338c790494c7a19', 'filename': 'TUT-acoustic-scenes-2017-development.audio.8.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.9.zip'), 'remote_bytes': 1072087738, 'remote_md5': '0805dcf5d8e6871dc9610182b2efb93a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.9.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.10.zip'), 'remote_bytes': 1046262120, 'remote_md5': '5df83a191295a04e290b125c634e13e7', 'filename': 'TUT-acoustic-scenes-2017-development.audio.10.zip'}] kwargs['audio_paths'] = ['audio'] super(TUTAcousticScenes_2017_DevelopmentSet, self).__init__(**kwargs)
-5,582,155,284,692,130,000
Constructor Parameters ---------- storage_name : str Name to be used when storing the dataset on disk Default value 'TUT-acoustic-scenes-2017-development' data_path : str Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets') is used. Default value None included_content_types : list of str or str Indicates which content types should be processed. One or multiple from ['all', 'audio', 'meta', 'code', 'documentation']. If None is given, ['all'] is used. The parameter can also be a comma-separated string. Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-acoustic-scenes-2017-development', data_path=None, included_content_types=None, **kwargs): "\n        Constructor\n\n        Parameters\n        ----------\n\n        storage_name : str\n            Name to be used when storing dataset on disk\n            Default value 'TUT-acoustic-scenes-2017-development'\n\n        data_path : str\n            Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n            is used.\n            Default value None\n\n        included_content_types : list of str or str\n            Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n            'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n            Default value None\n\n        " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = {'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen', 'title': 'TUT Acoustic Scenes 2017, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'recording_device_model': 'Roland Edirol R-09', 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone', 'licence': 'free non-commercial'} kwargs['crossvalidation_folds'] = 4 source_url = 'https://zenodo.org/record/400515/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.doc.zip'), 'remote_bytes': 54796, 'remote_md5': '2065495aaf3f1103e795c9899e2af1df', 'filename': 'TUT-acoustic-scenes-2017-development.doc.zip'}, {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.meta.zip'), 'remote_bytes': 104321, 'remote_md5': '9007fd4772d816590c5db5f5e9568f5d', 'filename': 'TUT-acoustic-scenes-2017-development.meta.zip'}, {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.error.zip'), 'remote_bytes': 1432, 'remote_md5': '802c700b021769e52a2c1e3b9c117a1b', 'filename': 'TUT-acoustic-scenes-2017-development.error.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.1.zip'), 'remote_bytes': 1071445248, 'remote_md5': '251325a9afaaad0326ad1c57f57d514a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.1.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.2.zip'), 'remote_bytes': 1073453613, 'remote_md5': 'c26861e05147dc319b4250eb103d9d99', 'filename': 'TUT-acoustic-scenes-2017-development.audio.2.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.3.zip'), 'remote_bytes': 1073077819, 'remote_md5': 'a4815775f8a5e629179726ee4cd4f55a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.3.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.4.zip'), 'remote_bytes': 1072822038, 'remote_md5': '1732b03afe8c53ef8bba80ba14766e57', 'filename': 'TUT-acoustic-scenes-2017-development.audio.4.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.5.zip'), 'remote_bytes': 1072644652, 'remote_md5': '611be754a0c951185c6ae4b7643c19a0', 'filename': 'TUT-acoustic-scenes-2017-development.audio.5.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.6.zip'), 'remote_bytes': 1072667888, 'remote_md5': '165a201db800d3ea76fce5a9c2bd97d7', 'filename': 'TUT-acoustic-scenes-2017-development.audio.6.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.7.zip'), 'remote_bytes': 1073417661, 'remote_md5': 'c7d79db84264401c0f8680dcc36013ad', 'filename': 'TUT-acoustic-scenes-2017-development.audio.7.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.8.zip'), 'remote_bytes': 1072381222, 'remote_md5': '35043f25123439392338c790494c7a19', 'filename': 'TUT-acoustic-scenes-2017-development.audio.8.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.9.zip'), 'remote_bytes': 1072087738, 'remote_md5': '0805dcf5d8e6871dc9610182b2efb93a', 'filename': 'TUT-acoustic-scenes-2017-development.audio.9.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-development.audio.10.zip'), 'remote_bytes': 1046262120, 'remote_md5': '5df83a191295a04e290b125c634e13e7', 'filename': 'TUT-acoustic-scenes-2017-development.audio.10.zip'}] kwargs['audio_paths'] = ['audio'] super(TUTAcousticScenes_2017_DevelopmentSet, self).__init__(**kwargs)
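A minimal usage sketch for the development-set class defined in the record above. The class and method names come from this file (dcase_util/datasets/tut.py); the initialize() call, the data_path value, and the content-type restriction are assumptions rather than part of the record, so treat this as a sketch of the implied Dataset API, not verified dcase_util usage.

# Sketch: instantiate the development set, fetch only metadata, walk the folds.
from dcase_util.datasets import TUTAcousticScenes_2017_DevelopmentSet

db = TUTAcousticScenes_2017_DevelopmentSet(
    data_path='datasets',              # hypothetical local root
    included_content_types=['meta'],   # skip the ~10 GB of audio packages
)
db.initialize()                        # assumed to download, extract and prepare()
for fold in db.folds():                # crossvalidation_folds = 4 per the record
    print(fold, len(db.train(fold=fold)), len(db.test(fold=fold)))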
def process_meta_item(self, item, absolute_path=True, **kwargs): 'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n ' if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) (raw_path, raw_filename) = os.path.split(item.filename) item.identifier = raw_filename.split('_')[0]
-1,739,020,471,136,129,800
Process single meta data item Parameters ---------- item : MetaDataItem Meta data item absolute_path : bool Convert file paths to be absolute Default value True
dcase_util/datasets/tut.py
process_meta_item
ankitshah009/dcase_util
python
def process_meta_item(self, item, absolute_path=True, **kwargs): 'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n ' if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) (raw_path, raw_filename) = os.path.split(item.filename) item.identifier = raw_filename.split('_')[0]
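The process_meta_item record above derives a recording identifier from the part of the file name before the first underscore. A standalone sketch of that parsing in plain Python; the example path is illustrative, not taken from the dataset:

import os

def identifier_from_filename(filename):
    # Same parsing as process_meta_item: drop the directory part, then keep
    # everything before the first underscore of the bare file name.
    raw_path, raw_filename = os.path.split(filename)
    return raw_filename.split('_')[0]

print(identifier_from_filename('audio/b020_90_100.wav'))  # -> 'b020'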
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' if (not self.meta_container.exists()): meta_data = collections.OrderedDict() for fold in self.folds(): fold_data = MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='train', fold=fold)).load() fold_data += MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)).load() for item in fold_data: if (item.filename not in meta_data): self.process_meta_item(item=item, absolute_path=False) meta_data[item.filename] = item MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file) self.load() return self
3,391,747,241,571,636,000
Prepare the dataset for usage. Returns ------- self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' if (not self.meta_container.exists()): meta_data = collections.OrderedDict() for fold in self.folds(): fold_data = MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='train', fold=fold)).load() fold_data += MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)).load() for item in fold_data: if (item.filename not in meta_data): self.process_meta_item(item=item, absolute_path=False) meta_data[item.filename] = item MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file) self.load() return self
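The prepare() above merges the per-fold train and evaluate lists while keeping only the first occurrence of each filename. The same first-wins de-duplication in plain Python, with dicts standing in for MetaDataItem objects:

import collections

fold_items = [
    {'filename': 'audio/a.wav', 'scene_label': 'beach'},
    {'filename': 'audio/b.wav', 'scene_label': 'bus'},
    {'filename': 'audio/a.wav', 'scene_label': 'beach'},  # duplicate, skipped
]
meta_data = collections.OrderedDict()
for item in fold_items:
    if item['filename'] not in meta_data:
        meta_data[item['filename']] = item
print(list(meta_data.values()))  # two unique items, insertion order preserved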
def __init__(self, storage_name='TUT-acoustic-scenes-2017-evaluation', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2017-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = {'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen', 'title': 'TUT Acoustic Scenes 2017, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'recording_device_model': 'Roland Edirol R-09', 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone', 'licence': 'free non-commercial'} kwargs['crossvalidation_folds'] = None source_url = 'https://zenodo.org/record/1040168/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.doc.zip'), 'remote_bytes': 53687, 'remote_md5': '53709a07416ea3b617c02fcf67dbeb9c', 'filename': 'TUT-acoustic-scenes-2017-evaluation.doc.zip'}, {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.meta.zip'), 'remote_bytes': 4473, 'remote_md5': '200eee9493e8044403e1326e3d05cfde', 'filename': 'TUT-acoustic-scenes-2017-evaluation.meta.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip'), 'remote_bytes': 1071856687, 'remote_md5': '3d6dda4445871e9544e0fefe7d14c7d9', 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip'), 'remote_bytes': 1073362972, 'remote_md5': '4085ef5fa286f2169074993a4e405953', 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip'), 'remote_bytes': 1071521152, 'remote_md5': 'cac432579e7cf2dff0aec7aaed248956', 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip'), 'remote_bytes': 382756463, 'remote_md5': '664bf09c3d24bd26c6b587f1d709de36', 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip'}] kwargs['audio_paths'] = ['audio'] super(TUTAcousticScenes_2017_EvaluationSet, self).__init__(**kwargs)
-9,213,234,814,557,370,000
Constructor Parameters ---------- storage_name : str Name to be used when storing the dataset on disk Default value 'TUT-acoustic-scenes-2017-evaluation' data_path : str Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets') is used. Default value None included_content_types : list of str or str Indicates which content types should be processed. One or multiple from ['all', 'audio', 'meta', 'code', 'documentation']. If None is given, ['all'] is used. The parameter can also be a comma-separated string. Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-acoustic-scenes-2017-evaluation', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2017-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['dataset_group'] = 'scene' kwargs['dataset_meta'] = {'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen', 'title': 'TUT Acoustic Scenes 2017, development dataset', 'url': None, 'audio_source': 'Field recording', 'audio_type': 'Natural', 'recording_device_model': 'Roland Edirol R-09', 'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone', 'licence': 'free non-commercial'} kwargs['crossvalidation_folds'] = None source_url = 'https://zenodo.org/record/1040168/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.doc.zip'), 'remote_bytes': 53687, 'remote_md5': '53709a07416ea3b617c02fcf67dbeb9c', 'filename': 'TUT-acoustic-scenes-2017-evaluation.doc.zip'}, {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.meta.zip'), 'remote_bytes': 4473, 'remote_md5': '200eee9493e8044403e1326e3d05cfde', 'filename': 'TUT-acoustic-scenes-2017-evaluation.meta.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip'), 'remote_bytes': 1071856687, 'remote_md5': '3d6dda4445871e9544e0fefe7d14c7d9', 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.1.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip'), 'remote_bytes': 1073362972, 'remote_md5': '4085ef5fa286f2169074993a4e405953', 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.2.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip'), 'remote_bytes': 1071521152, 'remote_md5': 'cac432579e7cf2dff0aec7aaed248956', 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.3.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip'), 'remote_bytes': 382756463, 'remote_md5': '664bf09c3d24bd26c6b587f1d709de36', 'filename': 'TUT-acoustic-scenes-2017-evaluation.audio.4.zip'}] kwargs['audio_paths'] = ['audio'] super(TUTAcousticScenes_2017_EvaluationSet, self).__init__(**kwargs)
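Every package_list entry above pairs a download with remote_bytes and remote_md5, which is enough to verify a fetched archive. A sketch of that check using only the standard library; the size and checksum are copied from the doc.zip entry in the record, while the local path is hypothetical:

import hashlib
import os

def verify_package(local_file, remote_bytes, remote_md5):
    # Cheap size comparison first, then stream the file through MD5 in 1 MiB chunks.
    if os.path.getsize(local_file) != remote_bytes:
        return False
    md5 = hashlib.md5()
    with open(local_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            md5.update(chunk)
    return md5.hexdigest() == remote_md5

print(verify_package('downloads/TUT-acoustic-scenes-2017-evaluation.doc.zip',
                     remote_bytes=53687,
                     remote_md5='53709a07416ea3b617c02fcf67dbeb9c'))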
def process_meta_item(self, item, absolute_path=True, filename_map=None, **kwargs): 'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n filename_map : OneToOneMappingContainer\n Filename map\n Default value None\n\n ' if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) if (filename_map and (item.filename in filename_map)): filename_mapped = filename_map.map(item.filename) item.identifier = os.path.split(filename_mapped)[1].split('_')[0]
-9,129,877,326,963,806,000
Process single meta data item Parameters ---------- item : MetaDataItem Meta data item absolute_path : bool Convert file paths to be absolute Default value True filename_map : OneToOneMappingContainer Filename map Default value None
dcase_util/datasets/tut.py
process_meta_item
ankitshah009/dcase_util
python
def process_meta_item(self, item, absolute_path=True, filename_map=None, **kwargs): 'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n filename_map : OneToOneMappingContainer\n Filename map\n Default value None\n\n ' if absolute_path: item.filename = self.relative_to_absolute_path(item.filename) else: item.filename = self.absolute_to_relative_path(item.filename) if (filename_map and (item.filename in filename_map)): filename_mapped = filename_map.map(item.filename) item.identifier = os.path.split(filename_mapped)[1].split('_')[0]
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' if (not self.meta_container.exists()): if os.path.isfile(self.evaluation_setup_filename(setup_part='evaluate')): meta_data = collections.OrderedDict() data = MetaDataContainer(filename=os.path.join(self.evaluation_setup_path, 'evaluate.txt')).load() map_filename = os.path.join(self.evaluation_setup_path, 'map.txt') if os.path.exists(map_filename): filename_map = OneToOneMappingContainer(filename=map_filename).load() else: filename_map = {} for item in data: if (item.filename not in meta_data): self.process_meta_item(item=item, absolute_path=False, filename_map=filename_map) meta_data[item.filename] = item MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file) self.load() return self
-447,023,462,507,112,400
Prepare the dataset for usage. Returns ------- self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' if (not self.meta_container.exists()): if os.path.isfile(self.evaluation_setup_filename(setup_part='evaluate')): meta_data = collections.OrderedDict() data = MetaDataContainer(filename=os.path.join(self.evaluation_setup_path, 'evaluate.txt')).load() map_filename = os.path.join(self.evaluation_setup_path, 'map.txt') if os.path.exists(map_filename): filename_map = OneToOneMappingContainer(filename=map_filename).load() else: filename_map = {} for item in data: if (item.filename not in meta_data): self.process_meta_item(item=item, absolute_path=False, filename_map=filename_map) meta_data[item.filename] = item MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file) self.load() return self
def __init__(self, storage_name='TUT-rare-sound-events-2017-development', data_path=None, included_content_types=None, synth_parameters=None, dcase_compatibility=True, **kwargs): "\n        Constructor\n\n        Parameters\n        ----------\n\n        storage_name : str\n            Name to be used when storing dataset on disk\n            Default value 'TUT-rare-sound-events-2017-development'\n\n        data_path : str\n            Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n            is used.\n            Default value None\n\n        included_content_types : list of str or str\n            Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n            'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n            Default value None\n\n        synth_parameters : dict\n            Data synthesis parameters.\n            Default value None\n\n        dcase_compatibility : bool\n            Ensure that dataset is generated same way than in DCASE2017 Challenge setup\n            Default value True\n\n        " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['filelisthash_exclude_dirs'] = kwargs.get('filelisthash_exclude_dirs', [os.path.join('data', 'mixture_data')]) kwargs['dataset_group'] = 'event' kwargs['dataset_meta'] = {'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen', 'title': 'TUT Rare Sound Events 2017, development dataset', 'url': None, 'audio_source': 'Synthetic', 'audio_type': 'Natural', 'recording_device_model': 'Unknown', 'microphone_model': 'Unknown'} kwargs['crossvalidation_folds'] = 1 source_url = 'https://zenodo.org/record/401395/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.doc.zip'), 'remote_bytes': 21042, 'remote_md5': '47c424fe90d2bdc53d9fdd84341c2783', 'filename': 'TUT-rare-sound-events-2017-development.doc.zip'}, {'content_type': 'code', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.code.zip'), 'remote_bytes': 81518, 'remote_md5': '4cacdf0803daf924a60bf9daa573beb7', 'filename': 'TUT-rare-sound-events-2017-development.code.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'), 'remote_bytes': 1072175672, 'remote_md5': '6f1f4156d41b541d1188fcf44c9a8267', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'), 'remote_bytes': 1073378284, 'remote_md5': 'ff5dcbe250e45cc404b7b8a6013002ac', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'), 'remote_bytes': 1069766123, 'remote_md5': 'fb356ae309a40d2f0a38fc1c746835cb', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'), 'remote_bytes': 1070042681, 'remote_md5': '2a68575b2ec7a69e2cc8b16b87fae0c9', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'), 'remote_bytes': 1073380909, 'remote_md5': '84e70d855457a18115108e42ec04501a', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'), 'remote_bytes': 1073021941, 'remote_md5': '048ce898bd434097dd489027f7ba361d', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'), 'remote_bytes': 1069890239, 'remote_md5': '3ef1c89fcfac39918a5edc5abc6ed29b', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'), 'remote_bytes': 180860904, 'remote_md5': '69dcb81e70f4e6605e178693afcd7722', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_events.zip'), 'remote_bytes': 639119477, 'remote_md5': 'dc4b7eb77078b4cf1b670c6362679473', 'filename': 'TUT-rare-sound-events-2017-development.source_data_events.zip'}] kwargs['audio_paths'] = ['audio'] default_synth_parameters = DictContainer({'train': {'seed': 42, 'event_presence_prob': 0.5, 'mixtures_per_class': 500, 'ebr_list': [(- 6), 0, 6]}, 'test': {'seed': 42, 'event_presence_prob': 0.5, 'mixtures_per_class': 500, 'ebr_list': [(- 6), 0, 6]}}) if (synth_parameters is None): synth_parameters = {} synth_parameters = default_synth_parameters.merge(synth_parameters) kwargs['meta_filename'] = (('meta_' + synth_parameters.get_hash_for_path()) + '.txt') self.synth_parameters = synth_parameters self.synth_parameters['train']['param_hash'] = hashlib.md5(yaml.dump({'event_presence_prob': self.synth_parameters['train']['event_presence_prob'], 'mixtures_per_class': self.synth_parameters['train']['mixtures_per_class'], 'ebrs': self.synth_parameters['train']['ebr_list'], 'seed': self.synth_parameters['train']['seed']}).encode('utf-8')).hexdigest() self.synth_parameters['test']['param_hash'] = hashlib.md5(yaml.dump({'event_presence_prob': self.synth_parameters['test']['event_presence_prob'], 'mixtures_per_class': self.synth_parameters['test']['mixtures_per_class'], 'ebrs': self.synth_parameters['test']['ebr_list'], 'seed': self.synth_parameters['test']['seed']}).encode('utf-8')).hexdigest() self.dcase_compatibility = dcase_compatibility super(TUTRareSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs) if (('code' not in self.included_content_types) or ('all' not in self.included_content_types)): self.included_content_types.append('code')
-74,513,701,993,971,360
Constructor Parameters ---------- storage_name : str Name to be used when storing the dataset on disk Default value 'TUT-rare-sound-events-2017-development' data_path : str Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets') is used. Default value None included_content_types : list of str or str Indicates which content types should be processed. One or multiple from ['all', 'audio', 'meta', 'code', 'documentation']. If None is given, ['all'] is used. The parameter can also be a comma-separated string. Default value None synth_parameters : dict Data synthesis parameters. Default value None dcase_compatibility : bool Ensure that the dataset is generated the same way as in the DCASE2017 Challenge setup Default value True
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-rare-sound-events-2017-development', data_path=None, included_content_types=None, synth_parameters=None, dcase_compatibility=True, **kwargs): "\n        Constructor\n\n        Parameters\n        ----------\n\n        storage_name : str\n            Name to be used when storing dataset on disk\n            Default value 'TUT-rare-sound-events-2017-development'\n\n        data_path : str\n            Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n            is used.\n            Default value None\n\n        included_content_types : list of str or str\n            Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n            'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n            Default value None\n\n        synth_parameters : dict\n            Data synthesis parameters.\n            Default value None\n\n        dcase_compatibility : bool\n            Ensure that dataset is generated same way than in DCASE2017 Challenge setup\n            Default value True\n\n        " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['filelisthash_exclude_dirs'] = kwargs.get('filelisthash_exclude_dirs', [os.path.join('data', 'mixture_data')]) kwargs['dataset_group'] = 'event' kwargs['dataset_meta'] = {'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen', 'title': 'TUT Rare Sound Events 2017, development dataset', 'url': None, 'audio_source': 'Synthetic', 'audio_type': 'Natural', 'recording_device_model': 'Unknown', 'microphone_model': 'Unknown'} kwargs['crossvalidation_folds'] = 1 source_url = 'https://zenodo.org/record/401395/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.doc.zip'), 'remote_bytes': 21042, 'remote_md5': '47c424fe90d2bdc53d9fdd84341c2783', 'filename': 'TUT-rare-sound-events-2017-development.doc.zip'}, {'content_type': 'code', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.code.zip'), 'remote_bytes': 81518, 'remote_md5': '4cacdf0803daf924a60bf9daa573beb7', 'filename': 'TUT-rare-sound-events-2017-development.code.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'), 'remote_bytes': 1072175672, 'remote_md5': '6f1f4156d41b541d1188fcf44c9a8267', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.1.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'), 'remote_bytes': 1073378284, 'remote_md5': 'ff5dcbe250e45cc404b7b8a6013002ac', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.2.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'), 'remote_bytes': 1069766123, 'remote_md5': 'fb356ae309a40d2f0a38fc1c746835cb', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.3.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'), 'remote_bytes': 1070042681, 'remote_md5': '2a68575b2ec7a69e2cc8b16b87fae0c9', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.4.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'), 'remote_bytes': 1073380909, 'remote_md5': '84e70d855457a18115108e42ec04501a', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.5.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'), 'remote_bytes': 1073021941, 'remote_md5': '048ce898bd434097dd489027f7ba361d', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.6.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'), 'remote_bytes': 1069890239, 'remote_md5': '3ef1c89fcfac39918a5edc5abc6ed29b', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.7.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'), 'remote_bytes': 180860904, 'remote_md5': '69dcb81e70f4e6605e178693afcd7722', 'filename': 'TUT-rare-sound-events-2017-development.source_data_bgs_and_cvsetup.8.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-development.source_data_events.zip'), 'remote_bytes': 639119477, 'remote_md5': 'dc4b7eb77078b4cf1b670c6362679473', 'filename': 'TUT-rare-sound-events-2017-development.source_data_events.zip'}] kwargs['audio_paths'] = ['audio'] default_synth_parameters = DictContainer({'train': {'seed': 42, 'event_presence_prob': 0.5, 'mixtures_per_class': 500, 'ebr_list': [(- 6), 0, 6]}, 'test': {'seed': 42, 'event_presence_prob': 0.5, 'mixtures_per_class': 500, 'ebr_list': [(- 6), 0, 6]}}) if (synth_parameters is None): synth_parameters = {} synth_parameters = default_synth_parameters.merge(synth_parameters) kwargs['meta_filename'] = (('meta_' + synth_parameters.get_hash_for_path()) + '.txt') self.synth_parameters = synth_parameters self.synth_parameters['train']['param_hash'] = hashlib.md5(yaml.dump({'event_presence_prob': self.synth_parameters['train']['event_presence_prob'], 'mixtures_per_class': self.synth_parameters['train']['mixtures_per_class'], 'ebrs': self.synth_parameters['train']['ebr_list'], 'seed': self.synth_parameters['train']['seed']}).encode('utf-8')).hexdigest() self.synth_parameters['test']['param_hash'] = hashlib.md5(yaml.dump({'event_presence_prob': self.synth_parameters['test']['event_presence_prob'], 'mixtures_per_class': self.synth_parameters['test']['mixtures_per_class'], 'ebrs': self.synth_parameters['test']['ebr_list'], 'seed': self.synth_parameters['test']['seed']}).encode('utf-8')).hexdigest() self.dcase_compatibility = dcase_compatibility super(TUTRareSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs) if (('code' not in self.included_content_types) or ('all' not in self.included_content_types)): self.included_content_types.append('code')
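The constructor above fingerprints its synthesis parameters by MD5-hashing a YAML dump, so any change to the mixing recipe yields a different hash and hence a different mixture directory. The same scheme in isolation, using the default train-subset values from the record; requires PyYAML, and the flat dict here stands in for the per-subset parameters:

import hashlib
import yaml

params = {
    'event_presence_prob': 0.5,   # defaults from the constructor above
    'mixtures_per_class': 500,
    'ebrs': [-6, 0, 6],
    'seed': 42,
}
# yaml.dump sorts keys by default, so the hash is stable regardless of
# insertion order, matching the behavior relied on in the constructor.
param_hash = hashlib.md5(yaml.dump(params).encode('utf-8')).hexdigest()
print(param_hash)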
def event_labels(self, scene_label=None): 'List of unique event labels in the meta data.\n\n Parameters\n ----------\n\n Returns\n -------\n labels : list\n List of event labels in alphabetical order.\n\n ' labels = ['babycry', 'glassbreak', 'gunshot'] labels.sort() return labels
5,440,641,249,336,538,000
List of unique event labels in the meta data. Returns ------- labels : list List of event labels in alphabetical order.
dcase_util/datasets/tut.py
event_labels
ankitshah009/dcase_util
python
def event_labels(self, scene_label=None): 'List of unique event labels in the meta data.\n\n Parameters\n ----------\n\n Returns\n -------\n labels : list\n List of event labels in alphabetical order.\n\n ' labels = ['babycry', 'glassbreak', 'gunshot'] labels.sort() return labels
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder)) return self
4,117,275,585,569,429,500
Prepare the dataset for usage. Returns ------- self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder)) return self
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs): 'List of training items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value "None"\n scene_label : str\n Scene label\n Default value "None"\n event_label : str\n Event label\n Default value "None"\n filename_contains : str:\n String found in filename\n Default value "None"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to training set for given fold.\n\n ' if ((fold is None) or (fold == 0)): fold = 'all_data' data = self.crossvalidation_data['train'][fold] if scene_label: data = data.filter(scene_label=scene_label) if event_label: data = data.filter(event_label=event_label) if filename_contains: data_ = MetaDataContainer() for item in data: if (filename_contains in item.filename): data_.append(item) data = data_ return data
4,145,418,168,248,244,700
List of training items. Parameters ---------- fold : int Fold id; if None, all meta data is returned. Default value None scene_label : str Scene label Default value None event_label : str Event label Default value None filename_contains : str String found in the filename Default value None Returns ------- list : list of dicts List containing all meta data assigned to the training set for the given fold.
dcase_util/datasets/tut.py
train
ankitshah009/dcase_util
python
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs): 'List of training items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value "None"\n scene_label : str\n Scene label\n Default value "None"\n event_label : str\n Event label\n Default value "None"\n filename_contains : str:\n String found in filename\n Default value "None"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to training set for given fold.\n\n ' if ((fold is None) or (fold == 0)): fold = 'all_data' data = self.crossvalidation_data['train'][fold] if scene_label: data = data.filter(scene_label=scene_label) if event_label: data = data.filter(event_label=event_label) if filename_contains: data_ = MetaDataContainer() for item in data: if (filename_contains in item.filename): data_.append(item) data = data_ return data
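train() above stacks three optional filters: exact scene/event label matches via MetaDataContainer.filter and a substring test on the filename. A hedged usage sketch; db is assumed to be an already-initialized TUTRareSoundEvents_2017_DevelopmentSet instance, and the label and substring values are illustrative:

# Keep fold-1 training mixtures of one event class whose names contain 'devtrain'.
babycry_train = db.train(
    fold=1,
    event_label='babycry',
    filename_contains='devtrain',
)
for item in babycry_train[:3]:
    print(item.filename)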
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs): 'List of testing items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value "None"\n scene_label : str\n Scene label\n Default value "None"\n event_label : str\n Event label\n Default value "None"\n filename_contains : str:\n String found in filename\n Default value "None"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to testing set for given fold.\n\n ' if ((fold is None) or (fold == 0)): fold = 'all_data' data = self.crossvalidation_data['test'][fold] if scene_label: data = data.filter(scene_label=scene_label) if event_label: data = data.filter(event_label=event_label) if filename_contains: data_ = MetaDataContainer() for item in data: if (filename_contains in item.filename): data_.append(item) data = data_ return data
-4,721,525,730,540,040,000
List of testing items. Parameters ---------- fold : int Fold id; if None, all meta data is returned. Default value None scene_label : str Scene label Default value None event_label : str Event label Default value None filename_contains : str String found in the filename Default value None Returns ------- list : list of dicts List containing all meta data assigned to the testing set for the given fold.
dcase_util/datasets/tut.py
test
ankitshah009/dcase_util
python
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs): 'List of testing items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value "None"\n scene_label : str\n Scene label\n Default value "None"\n event_label : str\n Event label\n Default value "None"\n filename_contains : str:\n String found in filename\n Default value "None"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to testing set for given fold.\n\n ' if ((fold is None) or (fold == 0)): fold = 'all_data' data = self.crossvalidation_data['test'][fold] if scene_label: data = data.filter(scene_label=scene_label) if event_label: data = data.filter(event_label=event_label) if filename_contains: data_ = MetaDataContainer() for item in data: if (filename_contains in item.filename): data_.append(item) data = data_ return data
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs): 'List of evaluation items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value "None"\n scene_label : str\n Scene label\n Default value "None"\n event_label : str\n Event label\n Default value "None"\n filename_contains : str:\n String found in filename\n Default value "None"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to testing set for given fold.\n\n ' if ((fold is None) or (fold == 0)): fold = 'all_data' data = self.crossvalidation_data['evaluate'][fold] if scene_label: data = data.filter(scene_label=scene_label) if event_label: data = data.filter(event_label=event_label) if filename_contains: data_ = MetaDataContainer() for item in data: if (filename_contains in item.filename): data_.append(item) data = data_ return data
-6,001,605,031,914,855,000
List of evaluation items. Parameters ---------- fold : int Fold id; if None, all meta data is returned. Default value None scene_label : str Scene label Default value None event_label : str Event label Default value None filename_contains : str String found in the filename Default value None Returns ------- list : list of dicts List containing all meta data assigned to the evaluation set for the given fold.
dcase_util/datasets/tut.py
eval
ankitshah009/dcase_util
python
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs): 'List of evaluation items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value "None"\n scene_label : str\n Scene label\n Default value "None"\n event_label : str\n Event label\n Default value "None"\n filename_contains : str:\n String found in filename\n Default value "None"\n\n Returns\n -------\n list : list of dicts\n List containing all meta data assigned to testing set for given fold.\n\n ' if ((fold is None) or (fold == 0)): fold = 'all_data' data = self.crossvalidation_data['evaluate'][fold] if scene_label: data = data.filter(scene_label=scene_label) if event_label: data = data.filter(event_label=event_label) if filename_contains: data_ = MetaDataContainer() for item in data: if (filename_contains in item.filename): data_.append(item) data = data_ return data
def __init__(self, storage_name='TUT-rare-sound-events-2017-evaluation', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-rare-sound-events-2017-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['reference_data_present'] = True kwargs['dataset_group'] = 'event' kwargs['dataset_meta'] = {'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen', 'title': 'TUT Rare Sound Events 2017, evaluation dataset', 'url': None, 'audio_source': 'Synthetic', 'audio_type': 'Natural', 'recording_device_model': 'Unknown', 'microphone_model': 'Unknown'} kwargs['crossvalidation_folds'] = None source_url = 'https://zenodo.org/record/1160455/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.doc.zip'), 'remote_bytes': 11701, 'remote_md5': '36db98a94ce871c6bdc5bd5238383114', 'filename': 'TUT-rare-sound-events-2017-evaluation.doc.zip'}, {'content_type': 'documentation', 'remote_file': (source_url + 'LICENSE.txt'), 'remote_bytes': 0, 'remote_md5': '0707857098fc74d17beb824416fb74b1', 'filename': 'LICENSE.txt'}, {'content_type': 'documentation', 'remote_file': (source_url + 'FREESOUNDCREDITS.txt'), 'remote_bytes': 0, 'remote_md5': '3ecea52bdb0eadd6e1af52a21f735d6d', 'filename': 'FREESOUNDCREDITS.txt'}, {'content_type': ['audio', 'meta'], 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip'), 'remote_bytes': 1071143794, 'remote_md5': 'db4aecd5175dead27ceb2692e7f28bb1', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip'), 'remote_bytes': 1071773516, 'remote_md5': 'e97d5842c46805cdb94e6d4017870cde', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip'), 'remote_bytes': 1073505512, 'remote_md5': '1fe20c762cecd26979e2c5303c8e9f48', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip'), 'remote_bytes': 1071132551, 'remote_md5': '5042cd00aed9af6b37a253e24f88554f', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip'), 'remote_bytes': 308314939, 'remote_md5': '72180597ed5bfaa73491755f74b84738', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip'}] kwargs['audio_paths'] = ['audio'] super(TUTRareSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
-7,324,006,037,048,576,000
Constructor Parameters ---------- storage_name : str Name to be used when storing the dataset on disk Default value 'TUT-rare-sound-events-2017-evaluation' data_path : str Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets') is used. Default value None included_content_types : list of str or str Indicates which content types should be processed. One or multiple from ['all', 'audio', 'meta', 'code', 'documentation']. If None is given, ['all'] is used. The parameter can also be a comma-separated string. Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-rare-sound-events-2017-evaluation', data_path=None, included_content_types=None, **kwargs): "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-rare-sound-events-2017-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n " kwargs['included_content_types'] = included_content_types kwargs['data_path'] = data_path kwargs['storage_name'] = storage_name kwargs['reference_data_present'] = True kwargs['dataset_group'] = 'event' kwargs['dataset_meta'] = {'authors': 'Aleksandr Diment, Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen', 'title': 'TUT Rare Sound Events 2017, evaluation dataset', 'url': None, 'audio_source': 'Synthetic', 'audio_type': 'Natural', 'recording_device_model': 'Unknown', 'microphone_model': 'Unknown'} kwargs['crossvalidation_folds'] = None source_url = 'https://zenodo.org/record/1160455/files/' kwargs['package_list'] = [{'content_type': 'documentation', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.doc.zip'), 'remote_bytes': 11701, 'remote_md5': '36db98a94ce871c6bdc5bd5238383114', 'filename': 'TUT-rare-sound-events-2017-evaluation.doc.zip'}, {'content_type': 'documentation', 'remote_file': (source_url + 'LICENSE.txt'), 'remote_bytes': 0, 'remote_md5': '0707857098fc74d17beb824416fb74b1', 'filename': 'LICENSE.txt'}, {'content_type': 'documentation', 'remote_file': (source_url + 'FREESOUNDCREDITS.txt'), 'remote_bytes': 0, 'remote_md5': '3ecea52bdb0eadd6e1af52a21f735d6d', 'filename': 'FREESOUNDCREDITS.txt'}, {'content_type': ['audio', 'meta'], 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip'), 'remote_bytes': 1071143794, 'remote_md5': 'db4aecd5175dead27ceb2692e7f28bb1', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.1.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip'), 'remote_bytes': 1071773516, 'remote_md5': 'e97d5842c46805cdb94e6d4017870cde', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.2.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip'), 'remote_bytes': 1073505512, 'remote_md5': '1fe20c762cecd26979e2c5303c8e9f48', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.3.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip'), 'remote_bytes': 1071132551, 'remote_md5': '5042cd00aed9af6b37a253e24f88554f', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.4.zip'}, {'content_type': 'audio', 'remote_file': (source_url + 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip'), 'remote_bytes': 308314939, 'remote_md5': '72180597ed5bfaa73491755f74b84738', 'filename': 'TUT-rare-sound-events-2017-evaluation.mixture_data.5.zip'}] kwargs['audio_paths'] = ['audio'] super(TUTRareSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
def event_labels(self, scene_label=None): 'List of unique event labels in the meta data.\n\n Parameters\n ----------\n\n Returns\n -------\n labels : list\n List of event labels in alphabetical order.\n\n ' labels = ['babycry', 'glassbreak', 'gunshot'] labels.sort() return labels
5,440,641,249,336,538,000
List of unique event labels in the meta data. Returns ------- labels : list List of event labels in alphabetical order.
dcase_util/datasets/tut.py
event_labels
ankitshah009/dcase_util
python
def event_labels(self, scene_label=None): 'List of unique event labels in the meta data.\n\n Parameters\n ----------\n\n Returns\n -------\n labels : list\n List of event labels in alphabetical order.\n\n ' labels = ['babycry', 'glassbreak', 'gunshot'] labels.sort() return labels
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' scene_label = 'synthetic' subset_map = {'test': 'evaltest'} param_hash = 'bbb81504db15a03680a0044474633b67' Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder)) if ((not self.meta_container.exists()) and self.reference_data_present): meta_data = MetaDataContainer() for class_label in self.event_labels(): for (subset_label, subset_name_on_disk) in iteritems(subset_map): subset_name_on_disk = subset_map[subset_label] mixture_path = os.path.join('data', 'mixture_data', subset_name_on_disk, param_hash, 'audio') mixture_meta_path = os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'meta') event_list_filename = os.path.join(mixture_meta_path, (((('event_list_' + subset_name_on_disk) + '_') + class_label) + '.csv')) if os.path.isfile(event_list_filename): current_meta = MetaDataContainer(filename=event_list_filename).load(fields=['filename', 'onset', 'offset', 'event_label']) for item in current_meta: item.filename = os.path.join(mixture_path, item.filename) item.scene_label = scene_label meta_data += current_meta meta_data.save(filename=self.meta_file) test_filename = self.evaluation_setup_filename(setup_part='test', fold=None, file_extension='txt') evaluate_filename = self.evaluation_setup_filename(setup_part='evaluate', fold=None, file_extension='txt') evaluation_setup_exists = True if ((not os.path.isfile(test_filename)) or (not os.path.isfile(evaluate_filename))): evaluation_setup_exists = False if (not evaluation_setup_exists): mixture_meta_path_test = os.path.join(self.local_path, 'data', 'mixture_data', subset_map['test'], param_hash, 'meta') mixture_path_test = os.path.join('data', 'mixture_data', subset_map['test'], param_hash, 'audio') test_meta = MetaDataContainer() for class_label in self.event_labels(): event_list_filename = os.path.join(mixture_meta_path_test, (((('event_list_' + subset_map['test']) + '_') + class_label) + '.csv')) current_meta = MetaDataContainer(filename=event_list_filename).load(fields=['filename', 'onset', 'offset', 'event_label']) current_meta_ = MetaDataContainer() for item in current_meta: item.filename = os.path.join(mixture_path_test, item.filename) current_meta_.append(MetaDataItem({'filename': item.filename, 'scene_label': scene_label})) test_meta += current_meta_ test_meta.save(filename=test_filename) eval_meta = MetaDataContainer() for class_label in self.event_labels(): event_list_filename = os.path.join(mixture_meta_path_test, (((('event_list_' + subset_map['test']) + '_') + class_label) + '.csv')) current_meta = MetaDataContainer(filename=event_list_filename).load(fields=['filename', 'onset', 'offset', 'event_label']) for item in current_meta: item.filename = os.path.join(mixture_path_test, item.filename) item.scene_label = scene_label eval_meta += current_meta eval_meta.save(filename=evaluate_filename) self.load() return self
-3,734,793,682,527,956,000
Prepare the dataset for usage. Returns ------- self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self): 'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n ' scene_label = 'synthetic' subset_map = {'test': 'evaltest'} param_hash = 'bbb81504db15a03680a0044474633b67' Path().makedirs(path=os.path.join(self.local_path, self.evaluation_setup_folder)) if ((not self.meta_container.exists()) and self.reference_data_present): meta_data = MetaDataContainer() for class_label in self.event_labels(): for (subset_label, subset_name_on_disk) in iteritems(subset_map): subset_name_on_disk = subset_map[subset_label] mixture_path = os.path.join('data', 'mixture_data', subset_name_on_disk, param_hash, 'audio') mixture_meta_path = os.path.join(self.local_path, 'data', 'mixture_data', subset_name_on_disk, param_hash, 'meta') event_list_filename = os.path.join(mixture_meta_path, (((('event_list_' + subset_name_on_disk) + '_') + class_label) + '.csv')) if os.path.isfile(event_list_filename): current_meta = MetaDataContainer(filename=event_list_filename).load(fields=['filename', 'onset', 'offset', 'event_label']) for item in current_meta: item.filename = os.path.join(mixture_path, item.filename) item.scene_label = scene_label meta_data += current_meta meta_data.save(filename=self.meta_file) test_filename = self.evaluation_setup_filename(setup_part='test', fold=None, file_extension='txt') evaluate_filename = self.evaluation_setup_filename(setup_part='evaluate', fold=None, file_extension='txt') evaluation_setup_exists = True if ((not os.path.isfile(test_filename)) or (not os.path.isfile(evaluate_filename))): evaluation_setup_exists = False if (not evaluation_setup_exists): mixture_meta_path_test = os.path.join(self.local_path, 'data', 'mixture_data', subset_map['test'], param_hash, 'meta') mixture_path_test = os.path.join('data', 'mixture_data', subset_map['test'], param_hash, 'audio') test_meta = MetaDataContainer() for class_label in self.event_labels(): event_list_filename = os.path.join(mixture_meta_path_test, (((('event_list_' + subset_map['test']) + '_') + class_label) + '.csv')) current_meta = MetaDataContainer(filename=event_list_filename).load(fields=['filename', 'onset', 'offset', 'event_label']) current_meta_ = MetaDataContainer() for item in current_meta: item.filename = os.path.join(mixture_path_test, item.filename) current_meta_.append(MetaDataItem({'filename': item.filename, 'scene_label': scene_label})) test_meta += current_meta_ test_meta.save(filename=test_filename) eval_meta = MetaDataContainer() for class_label in self.event_labels(): event_list_filename = os.path.join(mixture_meta_path_test, (((('event_list_' + subset_map['test']) + '_') + class_label) + '.csv')) current_meta = MetaDataContainer(filename=event_list_filename).load(fields=['filename', 'onset', 'offset', 'event_label']) for item in current_meta: item.filename = os.path.join(mixture_path_test, item.filename) item.scene_label = scene_label eval_meta += current_meta eval_meta.save(filename=evaluate_filename) self.load() return self
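prepare() above writes two setup files from the same per-class event lists: test.txt keeps only (filename, scene_label) pairs, while evaluate.txt retains the full onset/offset/event annotations. A plain-Python sketch of that split, with illustrative rows instead of the real CSV content:

scene_label = 'synthetic'
events = [
    {'filename': 'mixture_001.wav', 'onset': 1.2, 'offset': 2.0, 'event_label': 'babycry'},
    {'filename': 'mixture_002.wav', 'onset': 0.4, 'offset': 0.9, 'event_label': 'gunshot'},
]
# Blind test list: file names and scene label only, no ground truth.
test_rows = [{'filename': e['filename'], 'scene_label': scene_label} for e in events]
# Reference list: full annotations plus the scene label.
eval_rows = [dict(e, scene_label=scene_label) for e in events]
print(test_rows[0])
print(eval_rows[0])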
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs): 'List of training items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value None\n\n scene_label : str\n Scene label\n Default value None"\n\n event_label : str\n Event label\n Default value None"\n\n filename_contains : str:\n String found in filename\n Default value None\n\n Returns\n -------\n list\n List containing all meta data assigned to training set for given fold.\n\n ' if ((fold is None) or (fold == 0)): fold = 'all_data' data = self.crossvalidation_data['train'][fold] if scene_label: data = data.filter(scene_label=scene_label) if event_label: data = data.filter(event_label=event_label) if filename_contains: data_ = MetaDataContainer() for item in data: if (filename_contains in item.filename): data_.append(item) data = data_ return data
-8,536,662,320,516,184,000
List of training items. Parameters ---------- fold : int Fold id; if None, all meta data is returned. Default value None scene_label : str Scene label Default value None event_label : str Event label Default value None filename_contains : str String found in the filename Default value None Returns ------- list List containing all meta data assigned to the training set for the given fold.
dcase_util/datasets/tut.py
train
ankitshah009/dcase_util
python
def train(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
    if ((fold is None) or (fold == 0)):
        fold = 'all_data'
    data = self.crossvalidation_data['train'][fold]
    if scene_label:
        data = data.filter(scene_label=scene_label)
    if event_label:
        data = data.filter(event_label=event_label)
    if filename_contains:
        data_ = MetaDataContainer()
        for item in data:
            if (filename_contains in item.filename):
                data_.append(item)
        data = data_
    return data
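A usage sketch for the filtering above, assuming the dataset has already been downloaded and prepared; the label values and the filename pattern are hypothetical examples, and initialize() is assumed from the base dataset API rather than shown in this file:

# db = TUTSoundEvents_2017_DevelopmentSet(data_path='data').initialize()  # assumed setup
train_items = db.train(fold=1)                                   # all training meta data for fold 1
street_items = db.train(fold=1, scene_label='street')            # narrowed by scene label (assumed label)
brake_items = db.train(fold=1, event_label='brakes squeaking')   # narrowed by event label (assumed label)
subset = db.train(fold=1, filename_contains='a001')              # substring match on filename (assumed pattern)
print(len(train_items), len(street_items), len(brake_items), len(subset))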
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
    'List of testing items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value None\n\n scene_label : str\n Scene label\n Default value None\n\n event_label : str\n Event label\n Default value None\n\n filename_contains : str\n String found in filename\n Default value None\n\n Returns\n -------\n list\n List containing all meta data assigned to testing set for given fold.\n\n '
    if ((fold is None) or (fold == 0)):
        fold = 'all_data'
    data = self.crossvalidation_data['test'][fold]
    if scene_label:
        data = data.filter(scene_label=scene_label)
    if event_label:
        data = data.filter(event_label=event_label)
    if filename_contains:
        data_ = MetaDataContainer()
        for item in data:
            if (filename_contains in item.filename):
                data_.append(item)
        data = data_
    return data
3,664,463,538,941,354,000
List of testing items.

Parameters
----------
fold : int
    Fold id, if None all meta data is returned.
    Default value None

scene_label : str
    Scene label
    Default value None

event_label : str
    Event label
    Default value None

filename_contains : str
    String found in filename
    Default value None

Returns
-------
list
    List containing all meta data assigned to testing set for given fold.
dcase_util/datasets/tut.py
test
ankitshah009/dcase_util
python
def test(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
    if ((fold is None) or (fold == 0)):
        fold = 'all_data'
    data = self.crossvalidation_data['test'][fold]
    if scene_label:
        data = data.filter(scene_label=scene_label)
    if event_label:
        data = data.filter(event_label=event_label)
    if filename_contains:
        data_ = MetaDataContainer()
        for item in data:
            if (filename_contains in item.filename):
                data_.append(item)
        data = data_
    return data
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
    'List of evaluation items.\n\n Parameters\n ----------\n fold : int\n Fold id, if None all meta data is returned.\n Default value None\n\n scene_label : str\n Scene label\n Default value None\n\n event_label : str\n Event label\n Default value None\n\n filename_contains : str\n String found in filename\n Default value None\n\n Returns\n -------\n list\n List containing all meta data assigned to evaluation set for given fold.\n\n '
    if ((fold is None) or (fold == 0)):
        fold = 'all_data'
    data = self.crossvalidation_data['evaluate'][fold]
    if scene_label:
        data = data.filter(scene_label=scene_label)
    if event_label:
        data = data.filter(event_label=event_label)
    if filename_contains:
        data_ = MetaDataContainer()
        for item in data:
            if (filename_contains in item.filename):
                data_.append(item)
        data = data_
    return data
-8,291,817,922,858,719,000
List of evaluation items.

Parameters
----------
fold : int
    Fold id, if None all meta data is returned.
    Default value None

scene_label : str
    Scene label
    Default value None

event_label : str
    Event label
    Default value None

filename_contains : str
    String found in filename
    Default value None

Returns
-------
list
    List containing all meta data assigned to evaluation set for given fold.
dcase_util/datasets/tut.py
eval
ankitshah009/dcase_util
python
def eval(self, fold=None, scene_label=None, event_label=None, filename_contains=None, **kwargs):
    if ((fold is None) or (fold == 0)):
        fold = 'all_data'
    data = self.crossvalidation_data['evaluate'][fold]
    if scene_label:
        data = data.filter(scene_label=scene_label)
    if event_label:
        data = data.filter(event_label=event_label)
    if filename_contains:
        data_ = MetaDataContainer()
        for item in data:
            if (filename_contains in item.filename):
                data_.append(item)
        data = data_
    return data
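train(), test(), and eval() above share one access pattern: pick a fold bucket, then apply optional filters. A standalone sketch of that shared logic over plain dictionaries, with an illustrative item schema that is not taken from this dump:

def select_items(items, scene_label=None, event_label=None, filename_contains=None):
    # items: list of dicts with 'filename', 'scene_label', 'event_label' keys (illustrative schema)
    selected = items
    if scene_label:
        selected = [i for i in selected if i.get('scene_label') == scene_label]
    if event_label:
        selected = [i for i in selected if i.get('event_label') == event_label]
    if filename_contains:
        selected = [i for i in selected if filename_contains in i['filename']]
    return selected

items = [
    {'filename': 'audio/street/a001.wav', 'scene_label': 'street', 'event_label': 'car'},
    {'filename': 'audio/street/a002.wav', 'scene_label': 'street', 'event_label': 'people walking'},
]
print(select_items(items, event_label='car'))  # -> first item only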
def __init__(self, storage_name='TUT-sound-events-2017-development', data_path=None, included_content_types=None, **kwargs):
    "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-sound-events-2017-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n "
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'event'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Sound Events 2016, development dataset',
        'url': 'https://zenodo.org/record/45759',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = 4
    source_url = 'https://zenodo.org/record/814831/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-sound-events-2017-development.doc.zip'), 'remote_bytes': 56150, 'remote_md5': 'aa6024e70f5bff3fe15d962b01753e23', 'filename': 'TUT-sound-events-2017-development.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-sound-events-2017-development.meta.zip'), 'remote_bytes': 140684, 'remote_md5': '50e870b3a89ed3452e2a35b508840929', 'filename': 'TUT-sound-events-2017-development.meta.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-sound-events-2017-development.audio.1.zip'), 'remote_bytes': 1062653169, 'remote_md5': '6f1cd31592b8240a14be3ee513db6a23', 'filename': 'TUT-sound-events-2017-development.audio.1.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-sound-events-2017-development.audio.2.zip'), 'remote_bytes': 213232458, 'remote_md5': 'EXAMPLE_KEY', 'filename': 'TUT-sound-events-2017-development.audio.2.zip'}
    ]
    kwargs['audio_paths'] = [os.path.join('audio', 'street')]
    super(TUTSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)
-3,263,301,835,810,722,300
Constructor

Parameters
----------

storage_name : str
    Name to be used when storing dataset on disk
    Default value 'TUT-sound-events-2017-development'

data_path : str
    Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
    is used.
    Default value None

included_content_types : list of str or str
    Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
    'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
    Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-sound-events-2017-development', data_path=None, included_content_types=None, **kwargs):
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'event'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Sound Events 2016, development dataset',
        'url': 'https://zenodo.org/record/45759',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = 4
    source_url = 'https://zenodo.org/record/814831/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-sound-events-2017-development.doc.zip'), 'remote_bytes': 56150, 'remote_md5': 'aa6024e70f5bff3fe15d962b01753e23', 'filename': 'TUT-sound-events-2017-development.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-sound-events-2017-development.meta.zip'), 'remote_bytes': 140684, 'remote_md5': '50e870b3a89ed3452e2a35b508840929', 'filename': 'TUT-sound-events-2017-development.meta.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-sound-events-2017-development.audio.1.zip'), 'remote_bytes': 1062653169, 'remote_md5': '6f1cd31592b8240a14be3ee513db6a23', 'filename': 'TUT-sound-events-2017-development.audio.1.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-sound-events-2017-development.audio.2.zip'), 'remote_bytes': 213232458, 'remote_md5': 'EXAMPLE_KEY', 'filename': 'TUT-sound-events-2017-development.audio.2.zip'}
    ]
    kwargs['audio_paths'] = [os.path.join('audio', 'street')]
    super(TUTSoundEvents_2017_DevelopmentSet, self).__init__(**kwargs)
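A construction sketch for the class above; the data_path value is a placeholder, and initialize() is assumed to follow the usual dcase_util dataset workflow (download the package list, verify, extract, then prepare()) rather than being defined in this file:

from dcase_util.datasets import TUTSoundEvents_2017_DevelopmentSet

db = TUTSoundEvents_2017_DevelopmentSet(
    data_path='datasets',                              # placeholder root directory
    included_content_types=['meta', 'documentation'],  # skip the large audio packages
)
db.initialize()  # assumed to fetch only the requested content types
print(db.scene_labels(), db.event_labels())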
def process_meta_item(self, item, absolute_path=True, **kwargs):
    'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n '
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)
    (raw_path, raw_filename) = os.path.split(item.filename)
    item.identifier = raw_filename.split('_')[0]
-1,739,020,471,136,129,800
Process single meta data item

Parameters
----------
item : MetaDataItem
    Meta data item

absolute_path : bool
    Convert file paths to be absolute
    Default value True
dcase_util/datasets/tut.py
process_meta_item
ankitshah009/dcase_util
python
def process_meta_item(self, item, absolute_path=True, **kwargs):
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)
    (raw_path, raw_filename) = os.path.split(item.filename)
    item.identifier = raw_filename.split('_')[0]
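The identifier logic above keys recordings by the text before the first underscore in the basename. A quick standard-library illustration on a made-up filename:

import os

# Hypothetical filename, shaped like a '<recording-id>_<rest>.wav' pattern.
filename = 'audio/street/b086_segment_10.wav'
raw_path, raw_filename = os.path.split(filename)
identifier = raw_filename.split('_')[0]
print(identifier)  # -> 'b086'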
def prepare(self):
    'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n '
    if (not self.meta_container.exists()):
        meta_data = MetaDataContainer()
        annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
        for annotation_filename in annotation_files:
            data = MetaDataContainer(filename=annotation_filename).load()
            for item in data:
                self.process_meta_item(item=item, absolute_path=False)
            meta_data += data
        meta_data.save(filename=self.meta_file)
        self.load()
    return self
8,341,942,799,083,488,000
Prepare dataset for the usage.

Returns
-------
self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self):
    if (not self.meta_container.exists()):
        meta_data = MetaDataContainer()
        annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
        for annotation_filename in annotation_files:
            data = MetaDataContainer(filename=annotation_filename).load()
            for item in data:
                self.process_meta_item(item=item, absolute_path=False)
            meta_data += data
        meta_data.save(filename=self.meta_file)
        self.load()
    return self
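The aggregation above walks a meta directory for .ann annotation files and concatenates them into one container. A standard-library stand-in for the Path().file_list() call, using a placeholder directory:

import glob
import os

# Placeholder local path; mirrors Path().file_list(path=..., extensions=['ann']).
meta_dir = os.path.join('/tmp/TUT-sound-events-2016-development', 'meta')
annotation_files = sorted(glob.glob(os.path.join(meta_dir, '**', '*.ann'), recursive=True))
for annotation_filename in annotation_files:
    print(annotation_filename)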
def __init__(self, storage_name='TUT-sound-events-2017-evaluation', data_path=None, included_content_types=None, **kwargs):
    "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-sound-events-2017-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n "
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'event'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Sound Events 2016, development dataset',
        'url': 'https://zenodo.org/record/45759',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = None
    source_url = 'https://zenodo.org/record/1040179/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-sound-events-2017-evaluation.doc.zip'), 'remote_bytes': 54606, 'remote_md5': '8bbf41671949edee15d6cdc3f9e726c9', 'filename': 'TUT-sound-events-2017-evaluation.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-sound-events-2017-evaluation.meta.zip'), 'remote_bytes': 762, 'remote_md5': 'a951598abaea87296ca409e30fb0b379', 'filename': 'TUT-sound-events-2017-evaluation.meta.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-sound-events-2017-evaluation.audio.zip'), 'remote_bytes': 388173790, 'remote_md5': '1d3aa81896be0f142130ca9ca7a2b871', 'filename': 'TUT-sound-events-2017-evaluation.audio.zip'}
    ]
    kwargs['audio_paths'] = ['audio']
    super(TUTSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
7,848,956,586,064,885,000
Constructor

Parameters
----------

storage_name : str
    Name to be used when storing dataset on disk
    Default value 'TUT-sound-events-2017-evaluation'

data_path : str
    Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
    is used.
    Default value None

included_content_types : list of str or str
    Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
    'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
    Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-sound-events-2017-evaluation', data_path=None, included_content_types=None, **kwargs):
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'event'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Sound Events 2016, development dataset',
        'url': 'https://zenodo.org/record/45759',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = None
    source_url = 'https://zenodo.org/record/1040179/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-sound-events-2017-evaluation.doc.zip'), 'remote_bytes': 54606, 'remote_md5': '8bbf41671949edee15d6cdc3f9e726c9', 'filename': 'TUT-sound-events-2017-evaluation.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-sound-events-2017-evaluation.meta.zip'), 'remote_bytes': 762, 'remote_md5': 'a951598abaea87296ca409e30fb0b379', 'filename': 'TUT-sound-events-2017-evaluation.meta.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-sound-events-2017-evaluation.audio.zip'), 'remote_bytes': 388173790, 'remote_md5': '1d3aa81896be0f142130ca9ca7a2b871', 'filename': 'TUT-sound-events-2017-evaluation.audio.zip'}
    ]
    kwargs['audio_paths'] = ['audio']
    super(TUTSoundEvents_2017_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
    'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n '
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)
    (raw_path, raw_filename) = os.path.split(item.filename)
    item.identifier = os.path.splitext(raw_filename)[0]
    item.source_label = 'mixture'
-3,948,935,948,919,123,000
Process single meta data item

Parameters
----------
item : MetaDataItem
    Meta data item

absolute_path : bool
    Convert file paths to be absolute
    Default value True
dcase_util/datasets/tut.py
process_meta_item
ankitshah009/dcase_util
python
def process_meta_item(self, item, absolute_path=True, **kwargs):
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)
    (raw_path, raw_filename) = os.path.split(item.filename)
    item.identifier = os.path.splitext(raw_filename)[0]
    item.source_label = 'mixture'
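Here the identifier is the basename with its extension dropped, and every item is tagged as coming from a mixture. A small standard-library illustration with a made-up path:

import os

filename = 'audio/mixture_devtest_babycry_000.wav'  # hypothetical mixture filename
raw_path, raw_filename = os.path.split(filename)
identifier = os.path.splitext(raw_filename)[0]
source_label = 'mixture'
print(identifier, source_label)  # -> mixture_devtest_babycry_000 mixture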
def prepare(self):
    'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n '
    if (not self.meta_container.exists()):
        evaluate_filename = self.evaluation_setup_filename(setup_part='evaluate', scene_label=self.scene_labels()[0])
        eval_file = MetaDataContainer(filename=evaluate_filename)
        if eval_file.exists():
            meta_data = MetaDataContainer()
            eval_file.load()
            for item in eval_file:
                self.process_meta_item(item=item, absolute_path=False)
            meta_data += eval_file
            meta_data.save(filename=self.meta_file)
            self.load()
        elif os.path.isdir(os.path.join(self.local_path, 'meta')):
            annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
            meta_data = MetaDataContainer()
            for annotation_filename in annotation_files:
                data = MetaDataContainer(filename=annotation_filename).load()
                for item in data:
                    self.process_meta_item(item=item, absolute_path=False)
                meta_data += data
            meta_data.save(filename=self.meta_file)
            self.load()
    return self
-757,908,430,912,708,200
Prepare dataset for the usage.

Returns
-------
self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self):
    if (not self.meta_container.exists()):
        evaluate_filename = self.evaluation_setup_filename(setup_part='evaluate', scene_label=self.scene_labels()[0])
        eval_file = MetaDataContainer(filename=evaluate_filename)
        if eval_file.exists():
            meta_data = MetaDataContainer()
            eval_file.load()
            for item in eval_file:
                self.process_meta_item(item=item, absolute_path=False)
            meta_data += eval_file
            meta_data.save(filename=self.meta_file)
            self.load()
        elif os.path.isdir(os.path.join(self.local_path, 'meta')):
            annotation_files = Path().file_list(path=os.path.join(self.local_path, 'meta'), extensions=['ann'])
            meta_data = MetaDataContainer()
            for annotation_filename in annotation_files:
                data = MetaDataContainer(filename=annotation_filename).load()
                for item in data:
                    self.process_meta_item(item=item, absolute_path=False)
                meta_data += data
            meta_data.save(filename=self.meta_file)
            self.load()
    return self
def __init__(self, storage_name='TUT-acoustic-scenes-2016-development', data_path=None, included_content_types=None, **kwargs):
    "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2016-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n "
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'scene'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Acoustic Scenes 2016, development dataset',
        'url': 'https://zenodo.org/record/45739',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = 4
    source_url = 'https://zenodo.org/record/45739/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.doc.zip'), 'remote_bytes': 69671, 'remote_md5': 'f94ad46eb36325d9fbce5d60f7fc9926', 'filename': 'TUT-acoustic-scenes-2016-development.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.meta.zip'), 'remote_bytes': 28815, 'remote_md5': '779b33da2ebbf8bde494b3c981827251', 'filename': 'TUT-acoustic-scenes-2016-development.meta.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.error.zip'), 'remote_bytes': 1283, 'remote_md5': 'a0d3e0d81b0a36ece87d0f3a9124a386', 'filename': 'TUT-acoustic-scenes-2016-development.error.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.1.zip'), 'remote_bytes': 1070981236, 'remote_md5': 'e39546e65f2e72517b6335aaf0c8323d', 'filename': 'TUT-acoustic-scenes-2016-development.audio.1.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.2.zip'), 'remote_bytes': 1067186166, 'remote_md5': 'd36cf3253e2c041f68e937a3fe804807', 'filename': 'TUT-acoustic-scenes-2016-development.audio.2.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.3.zip'), 'remote_bytes': 1073644405, 'remote_md5': '0393a9620ab882b1c26d884eccdcffdd', 'filename': 'TUT-acoustic-scenes-2016-development.audio.3.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.4.zip'), 'remote_bytes': 1072111347, 'remote_md5': 'fb3e4e0cd7ea82120ec07031dee558ce', 'filename': 'TUT-acoustic-scenes-2016-development.audio.4.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.5.zip'), 'remote_bytes': 1069681513, 'remote_md5': 'a19cf600b33c8f88f6ad607bafd74057', 'filename': 'TUT-acoustic-scenes-2016-development.audio.5.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.6.zip'), 'remote_bytes': 1072890150, 'remote_md5': '591aad3219d1155342572cc1f6af5680', 'filename': 'TUT-acoustic-scenes-2016-development.audio.6.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.7.zip'), 'remote_bytes': 1069265197, 'remote_md5': '9e6c1897789e6bce13ac69c6caedb7ab', 'filename': 'TUT-acoustic-scenes-2016-development.audio.7.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.8.zip'), 'remote_bytes': 528461098, 'remote_md5': 'c4718354f48fcc9dfc7305f6cd8325c8', 'filename': 'TUT-acoustic-scenes-2016-development.audio.8.zip'}
    ]
    kwargs['audio_paths'] = ['audio']
    super(TUTAcousticScenes_2016_DevelopmentSet, self).__init__(**kwargs)
-3,304,384,571,127,493,600
Constructor

Parameters
----------

storage_name : str
    Name to be used when storing dataset on disk
    Default value 'TUT-acoustic-scenes-2016-development'

data_path : str
    Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
    is used.
    Default value None

included_content_types : list of str or str
    Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
    'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
    Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-acoustic-scenes-2016-development', data_path=None, included_content_types=None, **kwargs):
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'scene'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Acoustic Scenes 2016, development dataset',
        'url': 'https://zenodo.org/record/45739',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = 4
    source_url = 'https://zenodo.org/record/45739/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.doc.zip'), 'remote_bytes': 69671, 'remote_md5': 'f94ad46eb36325d9fbce5d60f7fc9926', 'filename': 'TUT-acoustic-scenes-2016-development.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.meta.zip'), 'remote_bytes': 28815, 'remote_md5': '779b33da2ebbf8bde494b3c981827251', 'filename': 'TUT-acoustic-scenes-2016-development.meta.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.error.zip'), 'remote_bytes': 1283, 'remote_md5': 'a0d3e0d81b0a36ece87d0f3a9124a386', 'filename': 'TUT-acoustic-scenes-2016-development.error.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.1.zip'), 'remote_bytes': 1070981236, 'remote_md5': 'e39546e65f2e72517b6335aaf0c8323d', 'filename': 'TUT-acoustic-scenes-2016-development.audio.1.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.2.zip'), 'remote_bytes': 1067186166, 'remote_md5': 'd36cf3253e2c041f68e937a3fe804807', 'filename': 'TUT-acoustic-scenes-2016-development.audio.2.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.3.zip'), 'remote_bytes': 1073644405, 'remote_md5': '0393a9620ab882b1c26d884eccdcffdd', 'filename': 'TUT-acoustic-scenes-2016-development.audio.3.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.4.zip'), 'remote_bytes': 1072111347, 'remote_md5': 'fb3e4e0cd7ea82120ec07031dee558ce', 'filename': 'TUT-acoustic-scenes-2016-development.audio.4.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.5.zip'), 'remote_bytes': 1069681513, 'remote_md5': 'a19cf600b33c8f88f6ad607bafd74057', 'filename': 'TUT-acoustic-scenes-2016-development.audio.5.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.6.zip'), 'remote_bytes': 1072890150, 'remote_md5': '591aad3219d1155342572cc1f6af5680', 'filename': 'TUT-acoustic-scenes-2016-development.audio.6.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.7.zip'), 'remote_bytes': 1069265197, 'remote_md5': '9e6c1897789e6bce13ac69c6caedb7ab', 'filename': 'TUT-acoustic-scenes-2016-development.audio.7.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-development.audio.8.zip'), 'remote_bytes': 528461098, 'remote_md5': 'c4718354f48fcc9dfc7305f6cd8325c8', 'filename': 'TUT-acoustic-scenes-2016-development.audio.8.zip'}
    ]
    kwargs['audio_paths'] = ['audio']
    super(TUTAcousticScenes_2016_DevelopmentSet, self).__init__(**kwargs)
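Each package entry above carries remote_bytes and remote_md5 so a download can be validated; the library performs this check internally, but an equivalent verification is a few lines of hashlib. A minimal sketch, with a placeholder local filename and values copied from the first audio package above:

import hashlib
import os

def package_looks_valid(local_file, remote_bytes, remote_md5):
    # Cheap size check first, then an MD5 over the file in 1 MiB chunks.
    if os.path.getsize(local_file) != remote_bytes:
        return False
    digest = hashlib.md5()
    with open(local_file, 'rb') as f:
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            digest.update(chunk)
    return digest.hexdigest() == remote_md5

print(package_looks_valid('TUT-acoustic-scenes-2016-development.audio.1.zip', 1070981236, 'e39546e65f2e72517b6335aaf0c8323d'))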
def prepare(self):
    'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n '
    if (not self.meta_container.exists()):
        meta_data = {}
        for fold in range(1, (self.crossvalidation_folds + 1)):
            fold_data = MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='train', fold=fold)).load()
            fold_data += MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)).load()
            for item in fold_data:
                if (item.filename not in meta_data):
                    self.process_meta_item(item=item, absolute_path=False)
                    meta_data[item.filename] = item
        MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
        self.load()
    return self
5,525,513,403,574,800,000
Prepare dataset for the usage.

Returns
-------
self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self):
    if (not self.meta_container.exists()):
        meta_data = {}
        for fold in range(1, (self.crossvalidation_folds + 1)):
            fold_data = MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='train', fold=fold)).load()
            fold_data += MetaDataContainer(filename=self.evaluation_setup_filename(setup_part='evaluate', fold=fold)).load()
            for item in fold_data:
                if (item.filename not in meta_data):
                    self.process_meta_item(item=item, absolute_path=False)
                    meta_data[item.filename] = item
        MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
        self.load()
    return self
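Two details in the loop above are easy to get wrong: the fold range must include the last fold (folds are numbered 1..crossvalidation_folds, so the upper bound needs the +1), and files appearing in several folds must be recorded only once. A toy illustration:

crossvalidation_folds = 4
print(list(range(1, crossvalidation_folds + 1)))  # -> [1, 2, 3, 4], all folds covered

# Dedup by filename, first occurrence wins, as in the dict-based meta_data above.
fold_items = ['a001.wav', 'a002.wav', 'a001.wav', 'a003.wav']
meta_data = {}
for filename in fold_items:
    if filename not in meta_data:
        meta_data[filename] = {'filename': filename}
print(list(meta_data))  # -> ['a001.wav', 'a002.wav', 'a003.wav']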
def process_meta_item(self, item, absolute_path=True, **kwargs):
    'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n '
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)
    (raw_path, raw_filename) = os.path.split(item.filename)
    item.identifier = raw_filename.split('_')[0]
-1,739,020,471,136,129,800
Process single meta data item

Parameters
----------
item : MetaDataItem
    Meta data item

absolute_path : bool
    Convert file paths to be absolute
    Default value True
dcase_util/datasets/tut.py
process_meta_item
ankitshah009/dcase_util
python
def process_meta_item(self, item, absolute_path=True, **kwargs):
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)
    (raw_path, raw_filename) = os.path.split(item.filename)
    item.identifier = raw_filename.split('_')[0]
def __init__(self, storage_name='TUT-acoustic-scenes-2016-evaluation', data_path=None, included_content_types=None, **kwargs):
    "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-acoustic-scenes-2016-evaluation'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n "
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'scene'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Acoustic Scenes 2016, evaluation dataset',
        'url': 'https://zenodo.org/record/165995',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = None
    source_url = 'https://zenodo.org/record/165995/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.doc.zip'), 'remote_bytes': 69217, 'remote_md5': 'ef315bf912d1124050646888cc3ceba2', 'filename': 'TUT-acoustic-scenes-2016-evaluation.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.meta.zip'), 'remote_bytes': 5962, 'remote_md5': '0d5c131fc3f50c682de62e0e648aceba', 'filename': 'TUT-acoustic-scenes-2016-evaluation.meta.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip'), 'remote_bytes': 1067685684, 'remote_md5': '7c6c2e54b8a9c4c37a803b81446d16fe', 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip'), 'remote_bytes': 1068308900, 'remote_md5': '7930f1dc26707ab3ba9526073af87333', 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip'), 'remote_bytes': 538894804, 'remote_md5': '17187d633d6402aee4b481122a1b28f0', 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip'}
    ]
    kwargs['audio_paths'] = ['audio']
    super(TUTAcousticScenes_2016_EvaluationSet, self).__init__(**kwargs)
8,314,640,026,505,892,000
Constructor

Parameters
----------

storage_name : str
    Name to be used when storing dataset on disk
    Default value 'TUT-acoustic-scenes-2016-evaluation'

data_path : str
    Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
    is used.
    Default value None

included_content_types : list of str or str
    Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
    'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
    Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-acoustic-scenes-2016-evaluation', data_path=None, included_content_types=None, **kwargs):
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'scene'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Acoustic Scenes 2016, evaluation dataset',
        'url': 'https://zenodo.org/record/165995',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = None
    source_url = 'https://zenodo.org/record/165995/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.doc.zip'), 'remote_bytes': 69217, 'remote_md5': 'ef315bf912d1124050646888cc3ceba2', 'filename': 'TUT-acoustic-scenes-2016-evaluation.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.meta.zip'), 'remote_bytes': 5962, 'remote_md5': '0d5c131fc3f50c682de62e0e648aceba', 'filename': 'TUT-acoustic-scenes-2016-evaluation.meta.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip'), 'remote_bytes': 1067685684, 'remote_md5': '7c6c2e54b8a9c4c37a803b81446d16fe', 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.1.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip'), 'remote_bytes': 1068308900, 'remote_md5': '7930f1dc26707ab3ba9526073af87333', 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.2.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip'), 'remote_bytes': 538894804, 'remote_md5': '17187d633d6402aee4b481122a1b28f0', 'filename': 'TUT-acoustic-scenes-2016-evaluation.audio.3.zip'}
    ]
    kwargs['audio_paths'] = ['audio']
    super(TUTAcousticScenes_2016_EvaluationSet, self).__init__(**kwargs)
def process_meta_item(self, item, absolute_path=True, **kwargs):
    'Process single meta data item\n\n Parameters\n ----------\n item : MetaDataItem\n Meta data item\n\n absolute_path : bool\n Convert file paths to be absolute\n Default value True\n\n '
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)
    if (item.filename_original is not None):
        (raw_path, raw_filename) = os.path.split(item.filename_original)
        item.identifier = raw_filename.split('_')[0]
        del item['filename_original']
3,187,019,170,696,663,000
Process single meta data item

Parameters
----------
item : MetaDataItem
    Meta data item

absolute_path : bool
    Convert file paths to be absolute
    Default value True
dcase_util/datasets/tut.py
process_meta_item
ankitshah009/dcase_util
python
def process_meta_item(self, item, absolute_path=True, **kwargs):
    if absolute_path:
        item.filename = self.relative_to_absolute_path(item.filename)
    else:
        item.filename = self.absolute_to_relative_path(item.filename)
    if (item.filename_original is not None):
        (raw_path, raw_filename) = os.path.split(item.filename_original)
        item.identifier = raw_filename.split('_')[0]
        del item['filename_original']
def prepare(self):
    'Prepare dataset for the usage.\n\n Returns\n -------\n self\n\n '
    if (not self.meta_container.exists()):
        evaluate_filename = self.evaluation_setup_filename(setup_part='evaluate')
        eval_file = MetaDataContainer(filename=evaluate_filename)
        if eval_file.exists():
            eval_data = eval_file.load()
            meta_data = {}
            for item in eval_data:
                if (item.filename not in meta_data):
                    self.process_meta_item(item=item, absolute_path=False)
                    meta_data[item.filename] = item
            MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
            self.load()
    return self
3,599,202,904,819,247,600
Prepare dataset for the usage.

Returns
-------
self
dcase_util/datasets/tut.py
prepare
ankitshah009/dcase_util
python
def prepare(self):
    if (not self.meta_container.exists()):
        evaluate_filename = self.evaluation_setup_filename(setup_part='evaluate')
        eval_file = MetaDataContainer(filename=evaluate_filename)
        if eval_file.exists():
            eval_data = eval_file.load()
            meta_data = {}
            for item in eval_data:
                if (item.filename not in meta_data):
                    self.process_meta_item(item=item, absolute_path=False)
                    meta_data[item.filename] = item
            MetaDataContainer(list(meta_data.values())).save(filename=self.meta_file)
            self.load()
    return self
def __init__(self, storage_name='TUT-sound-events-2016-development', data_path=None, included_content_types=None, **kwargs):
    "\n Constructor\n\n Parameters\n ----------\n\n storage_name : str\n Name to be used when storing dataset on disk\n Default value 'TUT-sound-events-2016-development'\n\n data_path : str\n Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')\n is used.\n Default value None\n\n included_content_types : list of str or str\n Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',\n 'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.\n Default value None\n\n "
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'event'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Sound Events 2016, development dataset',
        'url': 'https://zenodo.org/record/45759',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = 4
    source_url = 'https://zenodo.org/record/45759/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-sound-events-2016-development.doc.zip'), 'remote_bytes': 70918, 'remote_md5': '33fd26a895530aef607a07b08704eacd', 'filename': 'TUT-sound-events-2016-development.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-sound-events-2016-development.meta.zip'), 'remote_bytes': 122321, 'remote_md5': '7b29f0e2b82b3f264653cb4fa43da75d', 'filename': 'TUT-sound-events-2016-development.meta.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-sound-events-2016-development.audio.zip'), 'remote_bytes': 1014040667, 'remote_md5': 'a6006efaa85bb69d5064b00c6802a8f8', 'filename': 'TUT-sound-events-2016-development.audio.zip'}
    ]
    kwargs['audio_paths'] = [os.path.join('audio', 'home'), os.path.join('audio', 'residential_area')]
    super(TUTSoundEvents_2016_DevelopmentSet, self).__init__(**kwargs)
-1,871,470,950,716,974,300
Constructor

Parameters
----------

storage_name : str
    Name to be used when storing dataset on disk
    Default value 'TUT-sound-events-2016-development'

data_path : str
    Root path where the dataset is stored. If None, os.path.join(tempfile.gettempdir(), 'dcase_util_datasets')
    is used.
    Default value None

included_content_types : list of str or str
    Indicates what content type should be processed. One or multiple from ['all', 'audio', 'meta', 'code',
    'documentation']. If None given, ['all'] is used. Parameter can be also comma separated string.
    Default value None
dcase_util/datasets/tut.py
__init__
ankitshah009/dcase_util
python
def __init__(self, storage_name='TUT-sound-events-2016-development', data_path=None, included_content_types=None, **kwargs):
    kwargs['included_content_types'] = included_content_types
    kwargs['data_path'] = data_path
    kwargs['storage_name'] = storage_name
    kwargs['dataset_group'] = 'event'
    kwargs['dataset_meta'] = {
        'authors': 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen',
        'title': 'TUT Sound Events 2016, development dataset',
        'url': 'https://zenodo.org/record/45759',
        'audio_source': 'Field recording',
        'audio_type': 'Natural',
        'recording_device_model': 'Roland Edirol R-09',
        'microphone_model': 'Soundman OKM II Klassik/studio A3 electret microphone',
        'licence': 'free non-commercial'
    }
    kwargs['crossvalidation_folds'] = 4
    source_url = 'https://zenodo.org/record/45759/files/'
    kwargs['package_list'] = [
        {'content_type': 'documentation', 'remote_file': (source_url + 'TUT-sound-events-2016-development.doc.zip'), 'remote_bytes': 70918, 'remote_md5': '33fd26a895530aef607a07b08704eacd', 'filename': 'TUT-sound-events-2016-development.doc.zip'},
        {'content_type': 'meta', 'remote_file': (source_url + 'TUT-sound-events-2016-development.meta.zip'), 'remote_bytes': 122321, 'remote_md5': '7b29f0e2b82b3f264653cb4fa43da75d', 'filename': 'TUT-sound-events-2016-development.meta.zip'},
        {'content_type': 'audio', 'remote_file': (source_url + 'TUT-sound-events-2016-development.audio.zip'), 'remote_bytes': 1014040667, 'remote_md5': 'a6006efaa85bb69d5064b00c6802a8f8', 'filename': 'TUT-sound-events-2016-development.audio.zip'}
    ]
    kwargs['audio_paths'] = [os.path.join('audio', 'home'), os.path.join('audio', 'residential_area')]
    super(TUTSoundEvents_2016_DevelopmentSet, self).__init__(**kwargs)
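Taken together, the classes above all follow the same lifecycle: construct with a data root, download and prepare, then read meta data per fold. A hedged end-to-end sketch with a placeholder data root; initialize() and folds() are assumed from the base dataset API rather than shown in this file:

from dcase_util.datasets import TUTSoundEvents_2016_DevelopmentSet

db = TUTSoundEvents_2016_DevelopmentSet(data_path='datasets')  # placeholder root
db.initialize()  # assumed: download packages, verify, extract, and run prepare()

for fold in db.folds():  # assumed helper over crossvalidation_folds
    train_meta = db.train(fold=fold)
    test_meta = db.test(fold=fold)
    print(fold, len(train_meta), len(test_meta))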