repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
ocaballeror/LyricFetch
lyricfetch/run.py
exclude_sources
def exclude_sources(exclude, section=False): """ Returns a narrower list of sources. If the exclude parameter is a list, every one of its items will be removed from the returned list. If it's just a function (or a function's name) and 'section' is set to False (default), a copy of the sources list without this element will be returned. If it's a function (or a function's name) but the section parameter is set to True, the returned list will be a section of the sources list, including everything between 'exclude' and the end of the list. """ newlist = sources.copy() if not isinstance(exclude, list): exclude = [exclude] for source in exclude: if not section: newlist.remove(source) else: pos = newlist.index(source) + 1 if pos == len(sources): return [] newlist = sources[pos:] return newlist
python
def exclude_sources(exclude, section=False):
    """
    Return a narrower copy of the module-level sources list.

    If 'exclude' is a list, each of its items is removed from the returned
    list. If it is a single function (or function name) and 'section' is
    False (default), a copy of the sources list without that element is
    returned. With 'section' set to True, the returned list is the tail of
    the sources list, i.e. everything after 'exclude'.
    """
    result = sources.copy()
    excluded = exclude if isinstance(exclude, list) else [exclude]
    for item in excluded:
        if section:
            cut = result.index(item) + 1
            # Excluding the last source leaves nothing after it.
            if cut == len(sources):
                return []
            result = sources[cut:]
        else:
            result.remove(item)
    return result
[ "def", "exclude_sources", "(", "exclude", ",", "section", "=", "False", ")", ":", "newlist", "=", "sources", ".", "copy", "(", ")", "if", "not", "isinstance", "(", "exclude", ",", "list", ")", ":", "exclude", "=", "[", "exclude", "]", "for", "source", ...
Returns a narrower list of sources. If the exclude parameter is a list, every one of its items will be removed from the returned list. If it's just a function (or a function's name) and 'section' is set to False (default), a copy of the sources list without this element will be returned. If it's a function (or a function's name) but the section parameter is set to True, the returned list will be a section of the sources list, including everything between 'exclude' and the end of the list.
[ "Returns", "a", "narrower", "list", "of", "sources", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L65-L90
train
52,400
ocaballeror/LyricFetch
lyricfetch/run.py
get_lyrics
def get_lyrics(song, l_sources=None): """ Searches for lyrics of a single song and returns a Result object with the various stats collected in the process. The optional parameter 'sources' specifies an alternative list of sources. If not present, the main list will be used. """ if l_sources is None: l_sources = sources if song.lyrics and not CONFIG['overwrite']: logger.debug('%s already has embedded lyrics', song) return None runtimes = {} source = None for l_source in l_sources: start = time.time() try: lyrics = l_source(song) except (HTTPError, HTTPException, URLError, ConnectionError): lyrics = '' runtimes[l_source] = time.time() - start if lyrics != '': source = l_source break if lyrics != '': logger.info('++ %s: Found lyrics for %s\n', source.__name__, song) song.lyrics = lyrics else: logger.info("Couldn't find lyrics for %s\n", song) source = None return Result(song, source, runtimes)
python
def get_lyrics(song, l_sources=None):
    """
    Search for the lyrics of a single song.

    Returns a Result object with the various stats collected in the process,
    or None if the song already has embedded lyrics and overwriting is
    disabled.

    The optional parameter 'l_sources' specifies an alternative list of
    sources. If not present, the main list will be used.
    """
    if l_sources is None:
        l_sources = sources

    if song.lyrics and not CONFIG['overwrite']:
        logger.debug('%s already has embedded lyrics', song)
        return None

    # Fix: initialize before the loop so an empty source list does not
    # raise NameError at the check below.
    lyrics = ''
    runtimes = {}
    source = None
    for l_source in l_sources:
        start = time.time()
        try:
            lyrics = l_source(song)
        except (HTTPError, HTTPException, URLError, ConnectionError):
            # Network errors from one source just mean "not found here".
            lyrics = ''
        runtimes[l_source] = time.time() - start

        if lyrics != '':
            source = l_source
            break

    if lyrics != '':
        logger.info('++ %s: Found lyrics for %s\n', source.__name__, song)
        song.lyrics = lyrics
    else:
        logger.info("Couldn't find lyrics for %s\n", song)
        source = None

    return Result(song, source, runtimes)
[ "def", "get_lyrics", "(", "song", ",", "l_sources", "=", "None", ")", ":", "if", "l_sources", "is", "None", ":", "l_sources", "=", "sources", "if", "song", ".", "lyrics", "and", "not", "CONFIG", "[", "'overwrite'", "]", ":", "logger", ".", "debug", "("...
Searches for lyrics of a single song and returns a Result object with the various stats collected in the process. The optional parameter 'sources' specifies an alternative list of sources. If not present, the main list will be used.
[ "Searches", "for", "lyrics", "of", "a", "single", "song", "and", "returns", "a", "Result", "object", "with", "the", "various", "stats", "collected", "in", "the", "process", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L93-L129
train
52,401
ocaballeror/LyricFetch
lyricfetch/run.py
get_lyrics_threaded
def get_lyrics_threaded(song, l_sources=None): """ Launches a pool of threads to search for the lyrics of a single song. The optional parameter 'sources' specifies an alternative list of sources. If not present, the main list will be used. """ if l_sources is None: l_sources = sources if song.lyrics and not CONFIG['overwrite']: logger.debug('%s already has embedded lyrics', song) return None runtimes = {} queue = Queue() pool = [LyrThread(source, song, queue) for source in l_sources] for thread in pool: thread.start() for _ in range(len(pool)): result = queue.get() runtimes[result['source']] = result['runtime'] if result['lyrics']: break if result['lyrics']: song.lyrics = result['lyrics'] source = result['source'] else: source = None return Result(song, source, runtimes)
python
def get_lyrics_threaded(song, l_sources=None):
    """
    Launch a pool of threads to search for the lyrics of a single song.

    Returns a Result object, or None if the song already has embedded lyrics
    and overwriting is disabled.

    The optional parameter 'l_sources' specifies an alternative list of
    sources. If not present, the main list will be used.
    """
    if l_sources is None:
        l_sources = sources

    if song.lyrics and not CONFIG['overwrite']:
        logger.debug('%s already has embedded lyrics', song)
        return None

    runtimes = {}
    queue = Queue()
    pool = [LyrThread(source, song, queue) for source in l_sources]
    for thread in pool:
        thread.start()

    # Fix: give 'result' a default so an empty source list does not raise
    # NameError at the check after the loop.
    result = {'lyrics': '', 'source': None}
    for _ in range(len(pool)):
        result = queue.get()
        runtimes[result['source']] = result['runtime']
        if result['lyrics']:
            break

    if result['lyrics']:
        song.lyrics = result['lyrics']
        source = result['source']
    else:
        source = None

    return Result(song, source, runtimes)
[ "def", "get_lyrics_threaded", "(", "song", ",", "l_sources", "=", "None", ")", ":", "if", "l_sources", "is", "None", ":", "l_sources", "=", "sources", "if", "song", ".", "lyrics", "and", "not", "CONFIG", "[", "'overwrite'", "]", ":", "logger", ".", "debu...
Launches a pool of threads to search for the lyrics of a single song. The optional parameter 'sources' specifies an alternative list of sources. If not present, the main list will be used.
[ "Launches", "a", "pool", "of", "threads", "to", "search", "for", "the", "lyrics", "of", "a", "single", "song", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L132-L164
train
52,402
ocaballeror/LyricFetch
lyricfetch/run.py
run
def run(songs): """ Calls get_lyrics_threaded for a song or list of songs. """ if not hasattr(songs, '__iter__'): result = get_lyrics_threaded(songs) process_result(result) else: start = time.time() stats = run_mp(songs) end = time.time() if CONFIG['print_stats']: stats.print_stats() total_time = end - start total_time = '%d:%02d:%02d' % (total_time / 3600, (total_time / 3600) / 60, (total_time % 3600) % 60) print(f'Total time: {total_time}')
python
def run(songs): """ Calls get_lyrics_threaded for a song or list of songs. """ if not hasattr(songs, '__iter__'): result = get_lyrics_threaded(songs) process_result(result) else: start = time.time() stats = run_mp(songs) end = time.time() if CONFIG['print_stats']: stats.print_stats() total_time = end - start total_time = '%d:%02d:%02d' % (total_time / 3600, (total_time / 3600) / 60, (total_time % 3600) % 60) print(f'Total time: {total_time}')
[ "def", "run", "(", "songs", ")", ":", "if", "not", "hasattr", "(", "songs", ",", "'__iter__'", ")", ":", "result", "=", "get_lyrics_threaded", "(", "songs", ")", "process_result", "(", "result", ")", "else", ":", "start", "=", "time", ".", "time", "(",...
Calls get_lyrics_threaded for a song or list of songs.
[ "Calls", "get_lyrics_threaded", "for", "a", "song", "or", "list", "of", "songs", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L193-L210
train
52,403
ocaballeror/LyricFetch
lyricfetch/run.py
run_mp
def run_mp(songs): """ Concurrently calls get_lyrics to fetch the lyrics of a large list of songs. """ stats = Stats() if CONFIG['debug']: good = open('found', 'w') bad = open('notfound', 'w') logger.debug('Launching a pool of %d processes\n', CONFIG['jobcount']) chunksize = math.ceil(len(songs) / os.cpu_count()) try: with Pool(CONFIG['jobcount']) as pool: for result in pool.imap_unordered(get_lyrics, songs, chunksize): if result is None: continue for source, runtime in result.runtimes.items(): stats.add_result(source, result.source == source, runtime) found = process_result(result) if CONFIG['debug']: if found: good.write(f'{id_source(source)}: {result.song}\n') good.flush() else: bad.write(str(result.song) + '\n') bad.flush() finally: if CONFIG['debug']: good.close() bad.close() return stats
python
def run_mp(songs): """ Concurrently calls get_lyrics to fetch the lyrics of a large list of songs. """ stats = Stats() if CONFIG['debug']: good = open('found', 'w') bad = open('notfound', 'w') logger.debug('Launching a pool of %d processes\n', CONFIG['jobcount']) chunksize = math.ceil(len(songs) / os.cpu_count()) try: with Pool(CONFIG['jobcount']) as pool: for result in pool.imap_unordered(get_lyrics, songs, chunksize): if result is None: continue for source, runtime in result.runtimes.items(): stats.add_result(source, result.source == source, runtime) found = process_result(result) if CONFIG['debug']: if found: good.write(f'{id_source(source)}: {result.song}\n') good.flush() else: bad.write(str(result.song) + '\n') bad.flush() finally: if CONFIG['debug']: good.close() bad.close() return stats
[ "def", "run_mp", "(", "songs", ")", ":", "stats", "=", "Stats", "(", ")", "if", "CONFIG", "[", "'debug'", "]", ":", "good", "=", "open", "(", "'found'", ",", "'w'", ")", "bad", "=", "open", "(", "'notfound'", ",", "'w'", ")", "logger", ".", "debu...
Concurrently calls get_lyrics to fetch the lyrics of a large list of songs.
[ "Concurrently", "calls", "get_lyrics", "to", "fetch", "the", "lyrics", "of", "a", "large", "list", "of", "songs", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/run.py#L213-L247
train
52,404
malramsay64/experi
src/experi/scheduler.py
parse_setup
def parse_setup(options: Union[List, str]) -> str: """Convert potentially a list of commands into a single string. This creates a single string with newlines between each element of the list so that they will all run after each other in a bash script. """ if isinstance(options, str): return options return "\n".join(options)
python
def parse_setup(options: Union[List, str]) -> str:
    """Collapse a command (or list of commands) into a single string.

    List elements are joined with newlines so they run sequentially in a
    bash script; a plain string is returned unchanged.
    """
    if not isinstance(options, str):
        return "\n".join(options)
    return options
[ "def", "parse_setup", "(", "options", ":", "Union", "[", "List", ",", "str", "]", ")", "->", "str", ":", "if", "isinstance", "(", "options", ",", "str", ")", ":", "return", "options", "return", "\"\\n\"", ".", "join", "(", "options", ")" ]
Convert potentially a list of commands into a single string. This creates a single string with newlines between each element of the list so that they will all run after each other in a bash script.
[ "Convert", "potentially", "a", "list", "of", "commands", "into", "a", "single", "string", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/scheduler.py#L267-L276
train
52,405
malramsay64/experi
src/experi/scheduler.py
create_scheduler_file
def create_scheduler_file(scheduler: str, job: Job) -> str: """Substitute values into a template scheduler file.""" logger.debug("Create Scheduler File Function") if job.scheduler_options is None: scheduler_options: Dict[str, Any] = {} else: scheduler_options = deepcopy(job.scheduler_options) try: setup_string = parse_setup(scheduler_options["setup"]) del scheduler_options["setup"] except KeyError: setup_string = "" # Create header header_string = create_header_string(scheduler, **scheduler_options) header_string += get_array_string(scheduler, len(job)) if scheduler.upper() == "SLURM": workdir = r"$SLURM_SUBMIT_DIR" array_index = r"$SLURM_ARRAY_TASK_ID" elif scheduler.upper() == "PBS": workdir = r"$PBS_O_WORKDIR" array_index = r"$PBS_ARRAY_INDEX" return header_string + SCHEDULER_TEMPLATE.format( workdir=workdir, command_list=job.as_bash_array(), setup=setup_string, array_index=array_index, )
python
def create_scheduler_file(scheduler: str, job: Job) -> str:
    """Substitute values into a template scheduler file.

    :param scheduler: Name of the scheduler ("SLURM" or "PBS",
        case-insensitive).
    :param job: The job whose options and commands populate the template.
    :raises ValueError: If the scheduler is not one of the supported ones.
    """
    logger.debug("Create Scheduler File Function")
    if job.scheduler_options is None:
        scheduler_options: Dict[str, Any] = {}
    else:
        # Copy so popping "setup" below doesn't mutate the job's options.
        scheduler_options = deepcopy(job.scheduler_options)
    try:
        setup_string = parse_setup(scheduler_options["setup"])
        del scheduler_options["setup"]
    except KeyError:
        setup_string = ""

    # Create header
    header_string = create_header_string(scheduler, **scheduler_options)
    header_string += get_array_string(scheduler, len(job))
    if scheduler.upper() == "SLURM":
        workdir = r"$SLURM_SUBMIT_DIR"
        array_index = r"$SLURM_ARRAY_TASK_ID"
    elif scheduler.upper() == "PBS":
        workdir = r"$PBS_O_WORKDIR"
        array_index = r"$PBS_ARRAY_INDEX"
    else:
        # Fix: previously an unknown scheduler fell through to a NameError
        # on workdir/array_index; fail with a clear message instead.
        raise ValueError("Unsupported scheduler: {}".format(scheduler))

    return header_string + SCHEDULER_TEMPLATE.format(
        workdir=workdir,
        command_list=job.as_bash_array(),
        setup=setup_string,
        array_index=array_index,
    )
[ "def", "create_scheduler_file", "(", "scheduler", ":", "str", ",", "job", ":", "Job", ")", "->", "str", ":", "logger", ".", "debug", "(", "\"Create Scheduler File Function\"", ")", "if", "job", ".", "scheduler_options", "is", "None", ":", "scheduler_options", ...
Substitute values into a template scheduler file.
[ "Substitute", "values", "into", "a", "template", "scheduler", "file", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/scheduler.py#L304-L333
train
52,406
thiagopbueno/rddl2tf
rddl2tf/utils.py
range_type_to_dtype
def range_type_to_dtype(range_type: str) -> Optional[tf.DType]: '''Maps RDDL range types to TensorFlow dtypes.''' range2dtype = { 'real': tf.float32, 'int': tf.int32, 'bool': tf.bool } return range2dtype[range_type]
python
def range_type_to_dtype(range_type: str) -> Optional[tf.DType]:
    '''Maps RDDL range types to TensorFlow dtypes.

    Returns None for unknown range types. (Previously an unknown type
    raised KeyError, contradicting the Optional return annotation.)
    '''
    range2dtype = {
        'real': tf.float32,
        'int': tf.int32,
        'bool': tf.bool
    }
    return range2dtype.get(range_type)
[ "def", "range_type_to_dtype", "(", "range_type", ":", "str", ")", "->", "Optional", "[", "tf", ".", "DType", "]", ":", "range2dtype", "=", "{", "'real'", ":", "tf", ".", "float32", ",", "'int'", ":", "tf", ".", "int32", ",", "'bool'", ":", "tf", ".",...
Maps RDDL range types to TensorFlow dtypes.
[ "Maps", "RDDL", "range", "types", "to", "TensorFlow", "dtypes", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/utils.py#L22-L29
train
52,407
thiagopbueno/rddl2tf
rddl2tf/utils.py
python_type_to_dtype
def python_type_to_dtype(python_type: type) -> Optional[tf.DType]: '''Maps python types to TensorFlow dtypes.''' dtype = None if python_type == float: dtype = tf.float32 elif python_type == int: dtype = tf.int32 elif python_type == bool: dtype = tf.bool return dtype
python
def python_type_to_dtype(python_type: type) -> Optional[tf.DType]:
    '''Maps python types to TensorFlow dtypes.

    Returns None for unmapped types.
    '''
    # Identity comparison is the idiomatic way to match exact type objects;
    # behavior is unchanged for float/int/bool (bool is not matched by the
    # int branch under == either).
    if python_type is float:
        return tf.float32
    if python_type is int:
        return tf.int32
    if python_type is bool:
        return tf.bool
    return None
[ "def", "python_type_to_dtype", "(", "python_type", ":", "type", ")", "->", "Optional", "[", "tf", ".", "DType", "]", ":", "dtype", "=", "None", "if", "python_type", "==", "float", ":", "dtype", "=", "tf", ".", "float32", "elif", "python_type", "==", "int...
Maps python types to TensorFlow dtypes.
[ "Maps", "python", "types", "to", "TensorFlow", "dtypes", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/utils.py#L32-L41
train
52,408
RowleyGroup/pyqueue
pyqueue/systems/slurm.py
SlurmPrinter.get_dependency_type
def get_dependency_type(_type): """ Get the dependency type string for SlurmPrinter :rtype: str """ if _type == DependencyTypes.AFTER: return 'after' elif _type == DependencyTypes.AFTER_ANY: return 'afterany' elif _type == DependencyTypes.AFTER_CORR: return 'aftercorr' elif _type == DependencyTypes.AFTER_NOT_OK: return 'afternotok' elif _type == DependencyTypes.AFTER_OK: return 'afterok' else: return None
python
def get_dependency_type(_type):
    """
    Get the dependency type string for SlurmPrinter

    Returns None for unrecognized dependency types.

    :rtype: str
    """
    names = {
        DependencyTypes.AFTER: 'after',
        DependencyTypes.AFTER_ANY: 'afterany',
        DependencyTypes.AFTER_CORR: 'aftercorr',
        DependencyTypes.AFTER_NOT_OK: 'afternotok',
        DependencyTypes.AFTER_OK: 'afterok',
    }
    return names.get(_type)
[ "def", "get_dependency_type", "(", "_type", ")", ":", "if", "_type", "==", "DependencyTypes", ".", "AFTER", ":", "return", "'after'", "elif", "_type", "==", "DependencyTypes", ".", "AFTER_ANY", ":", "return", "'afterany'", "elif", "_type", "==", "DependencyTypes...
Get the dependency type string for SlurmPrinter :rtype: str
[ "Get", "the", "dependency", "type", "string", "for", "SlurmPrinter" ]
24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f
https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/systems/slurm.py#L21-L38
train
52,409
RowleyGroup/pyqueue
pyqueue/systems/slurm.py
SlurmPrinter.generate
def generate(self, job): """ Generates a job submission script from a job object :param job: An instance of JobInterface :type job: pyqueue.job.JobInterface """ options = job.get_options().copy() job_name = options.pop('name', None) job_account = options.pop('account', None) job_walltime = options.pop('walltime', None) job_mem_per_cpu = options.pop('mem_per_cpu', None) job_memory = options.pop('memory', None) job_working_directory = options.pop('working_directory', None) job_error_path = options.pop('error_path', None) job_output_path = options.pop('output_path', None) job_dependency = options.pop('depending', None) job_shell = options.pop('shell', '/bin/bash') job_custom_options = options.pop('__custom__', []) directives_lines = [] if job_name is not None: directives_lines.append('--job-name=%s' % job_name) if job_account is not None: directives_lines.append('--account=%s' % job_account) if job_working_directory is not None: directives_lines.append('--workdir=%s' % job_working_directory) if job_error_path is not None: directives_lines.append('--error=%s' % job_error_path) if job_output_path is not None: directives_lines.append('--output=%s' % job_output_path) if job_walltime is not None: directives_lines.append('--time=%s' % strfdelta(job_walltime, '%H:%M:%S')) if job_mem_per_cpu is not None: directives_lines.append('--mem-per-cpu=%d' % job_mem_per_cpu) if job_memory is not None: directives_lines.append('--mem=%d' % job_memory) if job_dependency is not None: master = job_dependency['job'] dependency_type = SlurmPrinter.get_dependency_type( job_dependency['dependency_type'] ) job_id = master.get_id() if isinstance(master, JobInterface) else master directives_lines.append( '--dependency=%s:%s' % (dependency_type, job_id) ) for custom_option in job_custom_options: directives_lines.append(custom_option) directives = '\n'.join([ '#SBATCH %s' % directive for directive in directives_lines ]) commands = '\n'.join([ '\n'.join(command_container.get_commands()) for 
command_container in job.get_commands() ]) script = '#!%s\n' % job_shell script += SlurmPrinter.get_header() script += directives script += '\n\n' script += commands return script
python
def generate(self, job):
    """
    Generates a job submission script from a job object

    :param job: An instance of JobInterface
    :type job: pyqueue.job.JobInterface
    """
    # Copy so the pops below don't mutate the job's own options dict.
    options = job.get_options().copy()
    job_name = options.pop('name', None)
    job_account = options.pop('account', None)
    job_walltime = options.pop('walltime', None)
    job_mem_per_cpu = options.pop('mem_per_cpu', None)
    job_memory = options.pop('memory', None)
    job_working_directory = options.pop('working_directory', None)
    job_error_path = options.pop('error_path', None)
    job_output_path = options.pop('output_path', None)
    job_dependency = options.pop('depending', None)
    job_shell = options.pop('shell', '/bin/bash')
    job_custom_options = options.pop('__custom__', [])

    # Translate each present option into its sbatch directive.
    directives_lines = []
    if job_name is not None:
        directives_lines.append('--job-name=%s' % job_name)
    if job_account is not None:
        directives_lines.append('--account=%s' % job_account)
    if job_working_directory is not None:
        directives_lines.append('--workdir=%s' % job_working_directory)
    if job_error_path is not None:
        directives_lines.append('--error=%s' % job_error_path)
    if job_output_path is not None:
        directives_lines.append('--output=%s' % job_output_path)
    if job_walltime is not None:
        directives_lines.append('--time=%s' % strfdelta(job_walltime,
                                                        '%H:%M:%S'))
    if job_mem_per_cpu is not None:
        directives_lines.append('--mem-per-cpu=%d' % job_mem_per_cpu)
    if job_memory is not None:
        directives_lines.append('--mem=%d' % job_memory)
    if job_dependency is not None:
        master = job_dependency['job']
        dependency_type = SlurmPrinter.get_dependency_type(
            job_dependency['dependency_type']
        )
        # The dependency may be given as a job object or as a raw job id.
        job_id = master.get_id() if isinstance(master, JobInterface) else master
        directives_lines.append(
            '--dependency=%s:%s' % (dependency_type, job_id)
        )

    # Custom options are passed through verbatim.
    for custom_option in job_custom_options:
        directives_lines.append(custom_option)

    directives = '\n'.join([
        '#SBATCH %s' % directive for directive in directives_lines
    ])

    # Each command container contributes its commands in order.
    commands = '\n'.join([
        '\n'.join(command_container.get_commands())
        for command_container in job.get_commands()
    ])

    # Assemble: shebang, header, directives, blank line, commands.
    script = '#!%s\n' % job_shell
    script += SlurmPrinter.get_header()
    script += directives
    script += '\n\n'
    script += commands
    return script
[ "def", "generate", "(", "self", ",", "job", ")", ":", "options", "=", "job", ".", "get_options", "(", ")", ".", "copy", "(", ")", "job_name", "=", "options", ".", "pop", "(", "'name'", ",", "None", ")", "job_account", "=", "options", ".", "pop", "(...
Generates a job submission script from a job object :param job: An instance of JobInterface :type job: pyqueue.job.JobInterface
[ "Generates", "a", "job", "submission", "script", "from", "a", "job", "object" ]
24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f
https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/systems/slurm.py#L60-L137
train
52,410
ocaballeror/LyricFetch
lyricfetch/song.py
get_info_mpris2
def get_info_mpris2(name): """ Get the current playing song from an mpris2 compliant player. """ # qdbus org.mpris.MediaPlayer2.<name> /org/mpris/MediaPlayer2\ # org.freedesktop.DBus.Properties.Get org.mpris.MediaPlayer2.Player Metadat bus_name = 'org.mpris.MediaPlayer2.' + name path = '/org/mpris/MediaPlayer2' interface = 'org.mpris.MediaPlayer2.Player' address = DBusAddress(path, bus_name=bus_name, interface=interface) msg = Properties(address).get('Metadata') connection = connect_and_authenticate() response = connection.send_and_get_reply(msg) metadata = dict(response[0][1]) keys = ['album', 'title', 'artist', 'albumartist'] info = {} metadata = {k: v for k, v in metadata.items() if 'xesam:' in k} for key, value in metadata.items(): name = key.split(':')[1].lower() value = value[1] if name not in keys or name in info: continue if isinstance(value, list): value = value[0] info[name] = value if 'albumartist' in info: info['artist'] = info['albumartist'] del info['albumartist'] return Song(**info)
python
def get_info_mpris2(name):
    """
    Get the current playing song from an mpris2 compliant player.
    """
    # Equivalent shell query:
    # qdbus org.mpris.MediaPlayer2.<name> /org/mpris/MediaPlayer2\
    # org.freedesktop.DBus.Properties.Get org.mpris.MediaPlayer2.Player Metadat
    address = DBusAddress(
        '/org/mpris/MediaPlayer2',
        bus_name='org.mpris.MediaPlayer2.' + name,
        interface='org.mpris.MediaPlayer2.Player',
    )
    connection = connect_and_authenticate()
    response = connection.send_and_get_reply(Properties(address).get('Metadata'))
    metadata = dict(response[0][1])

    wanted = ['album', 'title', 'artist', 'albumartist']
    info = {}
    for key, value in metadata.items():
        if 'xesam:' not in key:
            continue
        field = key.split(':')[1].lower()
        value = value[1]
        if field not in wanted or field in info:
            continue
        info[field] = value[0] if isinstance(value, list) else value

    # Prefer the album artist over the track artist when present.
    if 'albumartist' in info:
        info['artist'] = info.pop('albumartist')

    return Song(**info)
[ "def", "get_info_mpris2", "(", "name", ")", ":", "# qdbus org.mpris.MediaPlayer2.<name> /org/mpris/MediaPlayer2\\", "# org.freedesktop.DBus.Properties.Get org.mpris.MediaPlayer2.Player Metadat", "bus_name", "=", "'org.mpris.MediaPlayer2.'", "+", "name", "path", "=", "'/org/mpris/MediaP...
Get the current playing song from an mpris2 compliant player.
[ "Get", "the", "current", "playing", "song", "from", "an", "mpris2", "compliant", "player", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L147-L178
train
52,411
ocaballeror/LyricFetch
lyricfetch/song.py
get_current_clementine
def get_current_clementine(): """ Get the current song from clementine. """ # mpris_version 2 try: return get_info_mpris2('clementine') except DBusErrorResponse: bus_name = 'org.mpris.clementine' path = '/Player' interface = 'org.freedesktop.MediaPlayer' return dbus_get_metadata(path, bus_name, interface)
python
def get_current_clementine():
    """
    Get the current song from clementine.
    """
    # Try the MPRIS v2 interface first; fall back to the legacy
    # (mpris_version 1) interface when that fails.
    try:
        return get_info_mpris2('clementine')
    except DBusErrorResponse:
        return dbus_get_metadata(
            '/Player',
            'org.mpris.clementine',
            'org.freedesktop.MediaPlayer',
        )
[ "def", "get_current_clementine", "(", ")", ":", "# mpris_version 2", "try", ":", "return", "get_info_mpris2", "(", "'clementine'", ")", "except", "DBusErrorResponse", ":", "bus_name", "=", "'org.mpris.clementine'", "path", "=", "'/Player'", "interface", "=", "'org.fre...
Get the current song from clementine.
[ "Get", "the", "current", "song", "from", "clementine", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L198-L209
train
52,412
ocaballeror/LyricFetch
lyricfetch/song.py
get_current_cmus
def get_current_cmus(): """ Get the current song from cmus. """ result = subprocess.run('cmus-remote -Q'.split(' '), check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) info = {} for line in result.stdout.decode().split('\n'): line = line.split(' ') if line[0] != 'tag': continue key = line[1] if key in ['album', 'title', 'artist', 'albumartist'] and\ key not in info: info[key] = ' '.join(line[2:]) if 'albumartist' in info: info['artist'] = info['albumartist'] del info['albumartist'] return Song(**info)
python
def get_current_cmus():
    """
    Get the current song from cmus.
    """
    result = subprocess.run('cmus-remote -Q'.split(' '), check=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.DEVNULL)

    wanted = ['album', 'title', 'artist', 'albumartist']
    info = {}
    for raw in result.stdout.decode().split('\n'):
        # cmus output lines look like: "tag <key> <value...>"
        parts = raw.split(' ')
        if parts[0] != 'tag':
            continue
        key = parts[1]
        if key in wanted and key not in info:
            info[key] = ' '.join(parts[2:])

    # Prefer the album artist over the track artist when present.
    if 'albumartist' in info:
        info['artist'] = info.pop('albumartist')

    return Song(**info)
[ "def", "get_current_cmus", "(", ")", ":", "result", "=", "subprocess", ".", "run", "(", "'cmus-remote -Q'", ".", "split", "(", "' '", ")", ",", "check", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", ...
Get the current song from cmus.
[ "Get", "the", "current", "song", "from", "cmus", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L212-L232
train
52,413
ocaballeror/LyricFetch
lyricfetch/song.py
Song.from_filename
def from_filename(cls, filename): """ Class constructor using the path to the corresponding mp3 file. The metadata will be read from this file to create the song object, so it must at least contain valid ID3 tags for artist and title. """ if not filename: logger.error('No filename specified') return None if not os.path.exists(filename): logger.error("Err: File '%s' does not exist", filename) return None if os.path.isdir(filename): logger.error("Err: File '%s' is a directory", filename) return None try: audiofile = eyed3.load(filename) except Exception as error: print(type(error), error) return None # Sometimes eyed3 may return a null object and not raise any exceptions if audiofile is None: return None tags = audiofile.tag album = tags.album title = tags.title lyrics = ''.join([l.text for l in tags.lyrics]) artist = tags.album_artist if not artist: artist = tags.artist song = cls(artist, title, album, lyrics) song.filename = filename return song
python
def from_filename(cls, filename): """ Class constructor using the path to the corresponding mp3 file. The metadata will be read from this file to create the song object, so it must at least contain valid ID3 tags for artist and title. """ if not filename: logger.error('No filename specified') return None if not os.path.exists(filename): logger.error("Err: File '%s' does not exist", filename) return None if os.path.isdir(filename): logger.error("Err: File '%s' is a directory", filename) return None try: audiofile = eyed3.load(filename) except Exception as error: print(type(error), error) return None # Sometimes eyed3 may return a null object and not raise any exceptions if audiofile is None: return None tags = audiofile.tag album = tags.album title = tags.title lyrics = ''.join([l.text for l in tags.lyrics]) artist = tags.album_artist if not artist: artist = tags.artist song = cls(artist, title, album, lyrics) song.filename = filename return song
[ "def", "from_filename", "(", "cls", ",", "filename", ")", ":", "if", "not", "filename", ":", "logger", ".", "error", "(", "'No filename specified'", ")", "return", "None", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "logger"...
Class constructor using the path to the corresponding mp3 file. The metadata will be read from this file to create the song object, so it must at least contain valid ID3 tags for artist and title.
[ "Class", "constructor", "using", "the", "path", "to", "the", "corresponding", "mp3", "file", ".", "The", "metadata", "will", "be", "read", "from", "this", "file", "to", "create", "the", "song", "object", "so", "it", "must", "at", "least", "contain", "valid...
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L65-L103
train
52,414
ocaballeror/LyricFetch
lyricfetch/song.py
Song.fetch_album_name
def fetch_album_name(self): """ Get the name of the album from lastfm. """ response = get_lastfm('track.getInfo', artist=self.artist, track=self.title) if response: try: self.album = response['track']['album']['title'] logger.debug('Found album %s from lastfm', self.album) except Exception: logger.warning('Could not fetch album name for %s', self) else: logger.warning('Could not fetch album name for %s', self)
python
def fetch_album_name(self): """ Get the name of the album from lastfm. """ response = get_lastfm('track.getInfo', artist=self.artist, track=self.title) if response: try: self.album = response['track']['album']['title'] logger.debug('Found album %s from lastfm', self.album) except Exception: logger.warning('Could not fetch album name for %s', self) else: logger.warning('Could not fetch album name for %s', self)
[ "def", "fetch_album_name", "(", "self", ")", ":", "response", "=", "get_lastfm", "(", "'track.getInfo'", ",", "artist", "=", "self", ".", "artist", ",", "track", "=", "self", ".", "title", ")", "if", "response", ":", "try", ":", "self", ".", "album", "...
Get the name of the album from lastfm.
[ "Get", "the", "name", "of", "the", "album", "from", "lastfm", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/song.py#L131-L144
train
52,415
numan/py-analytics
analytics/backends/redis.py
Redis._get_closest_week
def _get_closest_week(self, metric_date): """ Gets the closest monday to the date provided. """ #find the offset to the closest monday days_after_monday = metric_date.isoweekday() - 1 return metric_date - datetime.timedelta(days=days_after_monday)
python
def _get_closest_week(self, metric_date): """ Gets the closest monday to the date provided. """ #find the offset to the closest monday days_after_monday = metric_date.isoweekday() - 1 return metric_date - datetime.timedelta(days=days_after_monday)
[ "def", "_get_closest_week", "(", "self", ",", "metric_date", ")", ":", "#find the offset to the closest monday", "days_after_monday", "=", "metric_date", ".", "isoweekday", "(", ")", "-", "1", "return", "metric_date", "-", "datetime", ".", "timedelta", "(", "days", ...
Gets the closest monday to the date provided.
[ "Gets", "the", "closest", "monday", "to", "the", "date", "provided", "." ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L58-L65
train
52,416
numan/py-analytics
analytics/backends/redis.py
Redis._get_daily_date_range
def _get_daily_date_range(self, metric_date, delta): """ Get the range of months that we need to use as keys to scan redis. """ dates = [metric_date] start_date = metric_date end_date = metric_date + delta while start_date.month < end_date.month or start_date.year < end_date.year: days_in_month = calendar.monthrange(start_date.year, start_date.month)[1] #shift along to the next month as one of the months we will have to see. We don't care that the exact date #is the 1st in each subsequent date range as we only care about the year and the month start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1) dates.append(start_date) return dates
python
def _get_daily_date_range(self, metric_date, delta): """ Get the range of months that we need to use as keys to scan redis. """ dates = [metric_date] start_date = metric_date end_date = metric_date + delta while start_date.month < end_date.month or start_date.year < end_date.year: days_in_month = calendar.monthrange(start_date.year, start_date.month)[1] #shift along to the next month as one of the months we will have to see. We don't care that the exact date #is the 1st in each subsequent date range as we only care about the year and the month start_date = start_date + datetime.timedelta(days=days_in_month - start_date.day + 1) dates.append(start_date) return dates
[ "def", "_get_daily_date_range", "(", "self", ",", "metric_date", ",", "delta", ")", ":", "dates", "=", "[", "metric_date", "]", "start_date", "=", "metric_date", "end_date", "=", "metric_date", "+", "delta", "while", "start_date", ".", "month", "<", "end_date"...
Get the range of months that we need to use as keys to scan redis.
[ "Get", "the", "range", "of", "months", "that", "we", "need", "to", "use", "as", "keys", "to", "scan", "redis", "." ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L97-L112
train
52,417
numan/py-analytics
analytics/backends/redis.py
Redis._get_weekly_date_range
def _get_weekly_date_range(self, metric_date, delta): """ Gets the range of years that we need to use as keys to get metrics from redis. """ dates = [metric_date] end_date = metric_date + delta #Figure out how many years our metric range spans spanning_years = end_date.year - metric_date.year for i in range(spanning_years): #for the weekly keys, we only care about the year dates.append( datetime.date( year=metric_date.year + (i + 1), month=1, day=1)) return dates
python
def _get_weekly_date_range(self, metric_date, delta): """ Gets the range of years that we need to use as keys to get metrics from redis. """ dates = [metric_date] end_date = metric_date + delta #Figure out how many years our metric range spans spanning_years = end_date.year - metric_date.year for i in range(spanning_years): #for the weekly keys, we only care about the year dates.append( datetime.date( year=metric_date.year + (i + 1), month=1, day=1)) return dates
[ "def", "_get_weekly_date_range", "(", "self", ",", "metric_date", ",", "delta", ")", ":", "dates", "=", "[", "metric_date", "]", "end_date", "=", "metric_date", "+", "delta", "#Figure out how many years our metric range spans", "spanning_years", "=", "end_date", ".", ...
Gets the range of years that we need to use as keys to get metrics from redis.
[ "Gets", "the", "range", "of", "years", "that", "we", "need", "to", "use", "as", "keys", "to", "get", "metrics", "from", "redis", "." ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L114-L127
train
52,418
numan/py-analytics
analytics/backends/redis.py
Redis.clear_all
def clear_all(self): """ Deletes all ``sandsnake`` related data from redis. .. warning:: Very expensive and destructive operation. Use with causion """ keys = self._analytics_backend.keys() for key in itertools.chain(*keys): with self._analytics_backend.map() as conn: if key.startswith(self._prefix): conn.delete(key)
python
def clear_all(self): """ Deletes all ``sandsnake`` related data from redis. .. warning:: Very expensive and destructive operation. Use with causion """ keys = self._analytics_backend.keys() for key in itertools.chain(*keys): with self._analytics_backend.map() as conn: if key.startswith(self._prefix): conn.delete(key)
[ "def", "clear_all", "(", "self", ")", ":", "keys", "=", "self", ".", "_analytics_backend", ".", "keys", "(", ")", "for", "key", "in", "itertools", ".", "chain", "(", "*", "keys", ")", ":", "with", "self", ".", "_analytics_backend", ".", "map", "(", "...
Deletes all ``sandsnake`` related data from redis. .. warning:: Very expensive and destructive operation. Use with causion
[ "Deletes", "all", "sandsnake", "related", "data", "from", "redis", "." ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L151-L164
train
52,419
numan/py-analytics
analytics/backends/redis.py
Redis.track_count
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs): """ Tracks a metric just by count. If you track a metric this way, you won't be able to query the metric by day, week or month. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier`` :return: ``True`` if successful ``False`` otherwise """ return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
python
def track_count(self, unique_identifier, metric, inc_amt=1, **kwargs): """ Tracks a metric just by count. If you track a metric this way, you won't be able to query the metric by day, week or month. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier`` :return: ``True`` if successful ``False`` otherwise """ return self._analytics_backend.incr(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric), inc_amt)
[ "def", "track_count", "(", "self", ",", "unique_identifier", ",", "metric", ",", "inc_amt", "=", "1", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_analytics_backend", ".", "incr", "(", "self", ".", "_prefix", "+", "\":\"", "+", "\"analy:%s...
Tracks a metric just by count. If you track a metric this way, you won't be able to query the metric by day, week or month. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier`` :return: ``True`` if successful ``False`` otherwise
[ "Tracks", "a", "metric", "just", "by", "count", ".", "If", "you", "track", "a", "metric", "this", "way", "you", "won", "t", "be", "able", "to", "query", "the", "metric", "by", "day", "week", "or", "month", "." ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L166-L176
train
52,420
numan/py-analytics
analytics/backends/redis.py
Redis.track_metric
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs): """ Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track. This can be a list or a string. :param date: A python date object indicating when this event occured. Defaults to today. :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier`` :return: ``True`` if successful ``False`` otherwise """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier results = [] if date is None: date = datetime.date.today() with self._analytics_backend.map() as conn: for uid in unique_identifier: hash_key_daily = self._get_daily_metric_key(uid, date) closest_monday = self._get_closest_week(date) hash_key_weekly = self._get_weekly_metric_key(uid, date) for single_metric in metric: daily_metric_name = self._get_daily_metric_name(single_metric, date) weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday) monthly_metric_name = self._get_monthly_metric_name(single_metric, date) results.append( [ conn.hincrby(hash_key_daily, daily_metric_name, inc_amt), conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt), conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt), conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt) ] ) return results
python
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs): """ Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track. This can be a list or a string. :param date: A python date object indicating when this event occured. Defaults to today. :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier`` :return: ``True`` if successful ``False`` otherwise """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier results = [] if date is None: date = datetime.date.today() with self._analytics_backend.map() as conn: for uid in unique_identifier: hash_key_daily = self._get_daily_metric_key(uid, date) closest_monday = self._get_closest_week(date) hash_key_weekly = self._get_weekly_metric_key(uid, date) for single_metric in metric: daily_metric_name = self._get_daily_metric_name(single_metric, date) weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday) monthly_metric_name = self._get_monthly_metric_name(single_metric, date) results.append( [ conn.hincrby(hash_key_daily, daily_metric_name, inc_amt), conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt), conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt), conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt) ] ) return results
[ "def", "track_metric", "(", "self", ",", "unique_identifier", ",", "metric", ",", "date", "=", "None", ",", "inc_amt", "=", "1", ",", "*", "*", "kwargs", ")", ":", "metric", "=", "[", "metric", "]", "if", "isinstance", "(", "metric", ",", "basestring",...
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track. This can be a list or a string. :param date: A python date object indicating when this event occured. Defaults to today. :param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier`` :return: ``True`` if successful ``False`` otherwise
[ "Tracks", "a", "metric", "for", "a", "specific", "unique_identifier", "for", "a", "certain", "date", ".", "The", "redis", "backend", "supports", "lists", "for", "both", "unique_identifier", "and", "metric", "allowing", "for", "tracking", "of", "multiple", "metri...
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L178-L216
train
52,421
numan/py-analytics
analytics/backends/redis.py
Redis.get_metric_by_day
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs): """ Returns the ``metric`` for ``unique_identifier`` segmented by day starting from``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of days to retrive starting from ``from_date`` """ conn = kwargs.get("connection", None) date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count()) metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit)) #generate a list of mondays in between the start date and the end date series = list(itertools.islice(date_generator, limit)) metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series] metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \ metric_key_date), metric_keys) for metric_key_date in metric_key_date_range] if conn is not None: results = metric_func(conn) else: with self._analytics_backend.map() as conn: results = metric_func(conn) series, results = self._parse_and_process_metrics(series, results) return series, results
python
def get_metric_by_day(self, unique_identifier, metric, from_date, limit=30, **kwargs): """ Returns the ``metric`` for ``unique_identifier`` segmented by day starting from``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of days to retrive starting from ``from_date`` """ conn = kwargs.get("connection", None) date_generator = (from_date + datetime.timedelta(days=i) for i in itertools.count()) metric_key_date_range = self._get_daily_date_range(from_date, datetime.timedelta(days=limit)) #generate a list of mondays in between the start date and the end date series = list(itertools.islice(date_generator, limit)) metric_keys = [self._get_daily_metric_name(metric, daily_date) for daily_date in series] metric_func = lambda conn: [conn.hmget(self._get_daily_metric_key(unique_identifier, \ metric_key_date), metric_keys) for metric_key_date in metric_key_date_range] if conn is not None: results = metric_func(conn) else: with self._analytics_backend.map() as conn: results = metric_func(conn) series, results = self._parse_and_process_metrics(series, results) return series, results
[ "def", "get_metric_by_day", "(", "self", ",", "unique_identifier", ",", "metric", ",", "from_date", ",", "limit", "=", "30", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "kwargs", ".", "get", "(", "\"connection\"", ",", "None", ")", "date_generator", ...
Returns the ``metric`` for ``unique_identifier`` segmented by day starting from``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of days to retrive starting from ``from_date``
[ "Returns", "the", "metric", "for", "unique_identifier", "segmented", "by", "day", "starting", "from", "from_date" ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L218-L246
train
52,422
numan/py-analytics
analytics/backends/redis.py
Redis.get_metric_by_week
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs): """ Returns the ``metric`` for ``unique_identifier`` segmented by week starting from``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of weeks to retrive starting from ``from_date`` """ conn = kwargs.get("connection", None) closest_monday_from_date = self._get_closest_week(from_date) metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit)) date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7)) #generate a list of mondays in between the start date and the end date series = list(itertools.islice(date_generator, limit)) metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series] metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \ metric_key_date), metric_keys) for metric_key_date in metric_key_date_range] if conn is not None: results = metric_func(conn) else: with self._analytics_backend.map() as conn: results = metric_func(conn) series, results = self._parse_and_process_metrics(series, results) return series, results
python
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs): """ Returns the ``metric`` for ``unique_identifier`` segmented by week starting from``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of weeks to retrive starting from ``from_date`` """ conn = kwargs.get("connection", None) closest_monday_from_date = self._get_closest_week(from_date) metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=limit)) date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7)) #generate a list of mondays in between the start date and the end date series = list(itertools.islice(date_generator, limit)) metric_keys = [self._get_weekly_metric_name(metric, monday_date) for monday_date in series] metric_func = lambda conn: [conn.hmget(self._get_weekly_metric_key(unique_identifier, \ metric_key_date), metric_keys) for metric_key_date in metric_key_date_range] if conn is not None: results = metric_func(conn) else: with self._analytics_backend.map() as conn: results = metric_func(conn) series, results = self._parse_and_process_metrics(series, results) return series, results
[ "def", "get_metric_by_week", "(", "self", ",", "unique_identifier", ",", "metric", ",", "from_date", ",", "limit", "=", "10", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "kwargs", ".", "get", "(", "\"connection\"", ",", "None", ")", "closest_monday_fro...
Returns the ``metric`` for ``unique_identifier`` segmented by week starting from``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of weeks to retrive starting from ``from_date``
[ "Returns", "the", "metric", "for", "unique_identifier", "segmented", "by", "week", "starting", "from", "from_date" ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L248-L278
train
52,423
numan/py-analytics
analytics/backends/redis.py
Redis.get_metric_by_month
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs): """ Returns the ``metric`` for ``unique_identifier`` segmented by month starting from``from_date``. It will retrieve metrics data starting from the 1st of the month specified in ``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of months to retrive starting from ``from_date`` """ conn = kwargs.get("connection", None) first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1) metric_key_date_range = self._get_weekly_date_range( first_of_month, relativedelta(months=limit)) date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count()) #generate a list of first_of_month's in between the start date and the end date series = list(itertools.islice(date_generator, limit)) metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series] metric_func = lambda conn: [conn.hmget( self._get_weekly_metric_key( unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range] if conn is not None: results = metric_func(conn) else: with self._analytics_backend.map() as conn: results = metric_func(conn) series, results = self._parse_and_process_metrics(series, results) return series, results
python
def get_metric_by_month(self, unique_identifier, metric, from_date, limit=10, **kwargs): """ Returns the ``metric`` for ``unique_identifier`` segmented by month starting from``from_date``. It will retrieve metrics data starting from the 1st of the month specified in ``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of months to retrive starting from ``from_date`` """ conn = kwargs.get("connection", None) first_of_month = datetime.date(year=from_date.year, month=from_date.month, day=1) metric_key_date_range = self._get_weekly_date_range( first_of_month, relativedelta(months=limit)) date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count()) #generate a list of first_of_month's in between the start date and the end date series = list(itertools.islice(date_generator, limit)) metric_keys = [self._get_monthly_metric_name(metric, month_date) for month_date in series] metric_func = lambda conn: [conn.hmget( self._get_weekly_metric_key( unique_identifier, metric_key_date), metric_keys) for metric_key_date in metric_key_date_range] if conn is not None: results = metric_func(conn) else: with self._analytics_backend.map() as conn: results = metric_func(conn) series, results = self._parse_and_process_metrics(series, results) return series, results
[ "def", "get_metric_by_month", "(", "self", ",", "unique_identifier", ",", "metric", ",", "from_date", ",", "limit", "=", "10", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "kwargs", ".", "get", "(", "\"connection\"", ",", "None", ")", "first_of_month", ...
Returns the ``metric`` for ``unique_identifier`` segmented by month starting from``from_date``. It will retrieve metrics data starting from the 1st of the month specified in ``from_date`` :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param from_date: A python date object :param limit: The total number of months to retrive starting from ``from_date``
[ "Returns", "the", "metric", "for", "unique_identifier", "segmented", "by", "month", "starting", "from", "from_date", ".", "It", "will", "retrieve", "metrics", "data", "starting", "from", "the", "1st", "of", "the", "month", "specified", "in", "from_date" ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L280-L313
train
52,424
numan/py-analytics
analytics/backends/redis.py
Redis.get_count
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs): """ Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date`` and an ``end_date``, to only get metrics within that time range. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Get the specified metrics after this date :param end_date: Get the sepcified metrics before this date :return: The count for the metric, 0 otherwise """ result = None if start_date and end_date: start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,) start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time()) end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time()) monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date)) #We can sorta optimize this by getting most of the data by month if len(monthly_metrics_dates) >= 3: with self._analytics_backend.map() as conn: monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts( conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date) monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results) starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results) ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results) result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values()) else: diff = end_date - start_date metric_results = 
self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1) result = sum(metric_results[1].values()) else: try: result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,))) except TypeError: result = 0 return result
python
def get_count(self, unique_identifier, metric, start_date=None, end_date=None, **kwargs): """ Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date`` and an ``end_date``, to only get metrics within that time range. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Get the specified metrics after this date :param end_date: Get the sepcified metrics before this date :return: The count for the metric, 0 otherwise """ result = None if start_date and end_date: start_date, end_date = (start_date, end_date,) if start_date < end_date else (end_date, start_date,) start_date = start_date if hasattr(start_date, 'date') else datetime.datetime.combine(start_date, datetime.time()) end_date = end_date if hasattr(end_date, 'date') else datetime.datetime.combine(end_date, datetime.time()) monthly_metrics_dates = list(rrule.rrule(rrule.MONTHLY, dtstart=start_date, bymonthday=1, until=end_date)) #We can sorta optimize this by getting most of the data by month if len(monthly_metrics_dates) >= 3: with self._analytics_backend.map() as conn: monthly_metric_series, monthly_metric_results, starting_metric_series, starting_metric_results, ending_metric_series, ending_metric_results = self._get_counts( conn, metric, unique_identifier, monthly_metrics_dates, start_date, end_date) monthly_metric_series, monthly_metric_results = self._parse_and_process_metrics(monthly_metric_series, monthly_metric_results) starting_metric_series, starting_metric_results = self._parse_and_process_metrics(starting_metric_series, starting_metric_results) ending_metric_series, ending_metric_results = self._parse_and_process_metrics(ending_metric_series, ending_metric_results) result = sum(monthly_metric_results.values()) + sum(starting_metric_results.values()) + sum(ending_metric_results.values()) else: diff = end_date - start_date metric_results = 
self.get_metric_by_day(unique_identifier, metric, start_date, limit=diff.days + 1) result = sum(metric_results[1].values()) else: try: result = int(self._analytics_backend.get(self._prefix + ":" + "analy:%s:count:%s" % (unique_identifier, metric,))) except TypeError: result = 0 return result
[ "def", "get_count", "(", "self", ",", "unique_identifier", ",", "metric", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "None", "if", "start_date", "and", "end_date", ":", "start_date", ",...
Gets the count for the ``metric`` for ``unique_identifier``. You can specify a ``start_date`` and an ``end_date``, to only get metrics within that time range. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Get the specified metrics after this date :param end_date: Get the sepcified metrics before this date :return: The count for the metric, 0 otherwise
[ "Gets", "the", "count", "for", "the", "metric", "for", "unique_identifier", ".", "You", "can", "specify", "a", "start_date", "and", "an", "end_date", "to", "only", "get", "metrics", "within", "that", "time", "range", "." ]
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L346-L389
train
52,425
numan/py-analytics
analytics/backends/redis.py
Redis.set_metric_by_day
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True): """ Sets the count for the ``metric`` for ``unique_identifier``. You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param date: Sets the specified metrics for this date :param count: Sets the sepcified metrics to value of count :param sync_agg: Boolean used to determine if week and month metrics should be updated :param update_counter: Boolean used to determine if overall counter should be updated """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier results = [] with self._analytics_backend.map() as conn: for uid in unique_identifier: hash_key_daily = self._get_daily_metric_key(uid, date) for single_metric in metric: daily_metric_name = self._get_daily_metric_name(single_metric, date) if update_counter: # updates overall counter for metric overall_count = self.get_count(uid, single_metric) day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem() self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count)) results.append([conn.hset(hash_key_daily, daily_metric_name, count)]) if sync_agg: self.sync_agg_metric(unique_identifier, metric, date, date) return results
python
def set_metric_by_day(self, unique_identifier, metric, date, count, sync_agg=True, update_counter=True): """ Sets the count for the ``metric`` for ``unique_identifier``. You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param date: Sets the specified metrics for this date :param count: Sets the sepcified metrics to value of count :param sync_agg: Boolean used to determine if week and month metrics should be updated :param update_counter: Boolean used to determine if overall counter should be updated """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier results = [] with self._analytics_backend.map() as conn: for uid in unique_identifier: hash_key_daily = self._get_daily_metric_key(uid, date) for single_metric in metric: daily_metric_name = self._get_daily_metric_name(single_metric, date) if update_counter: # updates overall counter for metric overall_count = self.get_count(uid, single_metric) day, daily_count = self.get_metric_by_day(uid, single_metric, date, 1)[1].popitem() self._analytics_backend.set(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), overall_count + (count - daily_count)) results.append([conn.hset(hash_key_daily, daily_metric_name, count)]) if sync_agg: self.sync_agg_metric(unique_identifier, metric, date, date) return results
[ "def", "set_metric_by_day", "(", "self", ",", "unique_identifier", ",", "metric", ",", "date", ",", "count", ",", "sync_agg", "=", "True", ",", "update_counter", "=", "True", ")", ":", "metric", "=", "[", "metric", "]", "if", "isinstance", "(", "metric", ...
Sets the count for the ``metric`` for ``unique_identifier``. You must specify a ``date`` for the ``count`` to be set on. Useful for resetting a metric count to 0 or decrementing a metric. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param date: Sets the specified metrics for this date :param count: Sets the sepcified metrics to value of count :param sync_agg: Boolean used to determine if week and month metrics should be updated :param update_counter: Boolean used to determine if overall counter should be updated
[ "Sets", "the", "count", "for", "the", "metric", "for", "unique_identifier", ".", "You", "must", "specify", "a", "date", "for", "the", "count", "to", "be", "set", "on", ".", "Useful", "for", "resetting", "a", "metric", "count", "to", "0", "or", "decrement...
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L413-L448
train
52,426
numan/py-analytics
analytics/backends/redis.py
Redis.sync_agg_metric
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date): """ Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end """ self.sync_week_metric(unique_identifier, metric, start_date, end_date) self.sync_month_metric(unique_identifier, metric, start_date, end_date)
python
def sync_agg_metric(self, unique_identifier, metric, start_date, end_date): """ Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end """ self.sync_week_metric(unique_identifier, metric, start_date, end_date) self.sync_month_metric(unique_identifier, metric, start_date, end_date)
[ "def", "sync_agg_metric", "(", "self", ",", "unique_identifier", ",", "metric", ",", "start_date", ",", "end_date", ")", ":", "self", ".", "sync_week_metric", "(", "unique_identifier", ",", "metric", ",", "start_date", ",", "end_date", ")", "self", ".", "sync_...
Uses the count for each day in the date range to recalculate the counters for the associated weeks and months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end
[ "Uses", "the", "count", "for", "each", "day", "in", "the", "date", "range", "to", "recalculate", "the", "counters", "for", "the", "associated", "weeks", "and", "months", "for", "the", "metric", "for", "unique_identifier", ".", "Useful", "for", "updating", "t...
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L450-L464
train
52,427
numan/py-analytics
analytics/backends/redis.py
Redis.sync_week_metric
def sync_week_metric(self, unique_identifier, metric, start_date, end_date): """ Uses the count for each day in the date range to recalculate the counters for the weeks for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier closest_monday_from_date = self._get_closest_week(start_date) num_weeks = self._num_weeks(start_date, end_date) metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks)) week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7)) #generate a list of mondays in between the start date and the end date weeks_to_update = list(itertools.islice(week_date_generator, num_weeks)) for uid in unique_identifier: for single_metric in metric: for week in weeks_to_update: _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7) week_counter = sum([value for key, value in series_results.items()]) hash_key_weekly = self._get_weekly_metric_key(uid, week) weekly_metric_name = self._get_weekly_metric_name(single_metric, week) with self._analytics_backend.map() as conn: conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
python
def sync_week_metric(self, unique_identifier, metric, start_date, end_date): """ Uses the count for each day in the date range to recalculate the counters for the weeks for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier closest_monday_from_date = self._get_closest_week(start_date) num_weeks = self._num_weeks(start_date, end_date) metric_key_date_range = self._get_weekly_date_range(closest_monday_from_date, datetime.timedelta(weeks=num_weeks)) week_date_generator = (closest_monday_from_date + datetime.timedelta(days=i) for i in itertools.count(step=7)) #generate a list of mondays in between the start date and the end date weeks_to_update = list(itertools.islice(week_date_generator, num_weeks)) for uid in unique_identifier: for single_metric in metric: for week in weeks_to_update: _, series_results = self.get_metric_by_day(uid, single_metric, from_date=week, limit=7) week_counter = sum([value for key, value in series_results.items()]) hash_key_weekly = self._get_weekly_metric_key(uid, week) weekly_metric_name = self._get_weekly_metric_name(single_metric, week) with self._analytics_backend.map() as conn: conn.hset(hash_key_weekly, weekly_metric_name, week_counter)
[ "def", "sync_week_metric", "(", "self", ",", "unique_identifier", ",", "metric", ",", "start_date", ",", "end_date", ")", ":", "metric", "=", "[", "metric", "]", "if", "isinstance", "(", "metric", ",", "basestring", ")", "else", "metric", "unique_identifier", ...
Uses the count for each day in the date range to recalculate the counters for the weeks for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end
[ "Uses", "the", "count", "for", "each", "day", "in", "the", "date", "range", "to", "recalculate", "the", "counters", "for", "the", "weeks", "for", "the", "metric", "for", "unique_identifier", ".", "Useful", "for", "updating", "the", "counters", "for", "week",...
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L466-L498
train
52,428
numan/py-analytics
analytics/backends/redis.py
Redis.sync_month_metric
def sync_month_metric(self, unique_identifier, metric, start_date, end_date): """ Uses the count for each day in the date range to recalculate the counters for the months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier num_months = self._num_months(start_date, end_date) first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1) metric_key_date_range = self._get_weekly_date_range( first_of_month, relativedelta(months=num_months)) month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count()) #generate a list of first_of_month's in between the start date and the end date months_to_update = list(itertools.islice(month_date_generator, num_months)) for uid in unique_identifier: for single_metric in metric: for month in months_to_update: _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1]) month_counter = sum([value for key, value in series_results.items()]) hash_key_monthly = self._get_weekly_metric_key(uid, month) monthly_metric_name = self._get_monthly_metric_name(single_metric, month) with self._analytics_backend.map() as conn: conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
python
def sync_month_metric(self, unique_identifier, metric, start_date, end_date): """ Uses the count for each day in the date range to recalculate the counters for the months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end """ metric = [metric] if isinstance(metric, basestring) else metric unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier num_months = self._num_months(start_date, end_date) first_of_month = datetime.date(year=start_date.year, month=start_date.month, day=1) metric_key_date_range = self._get_weekly_date_range( first_of_month, relativedelta(months=num_months)) month_date_generator = (first_of_month + relativedelta(months=i) for i in itertools.count()) #generate a list of first_of_month's in between the start date and the end date months_to_update = list(itertools.islice(month_date_generator, num_months)) for uid in unique_identifier: for single_metric in metric: for month in months_to_update: _, series_results = self.get_metric_by_day(uid, single_metric, from_date=month, limit=monthrange(month.year, month.month)[1]) month_counter = sum([value for key, value in series_results.items()]) hash_key_monthly = self._get_weekly_metric_key(uid, month) monthly_metric_name = self._get_monthly_metric_name(single_metric, month) with self._analytics_backend.map() as conn: conn.hset(hash_key_monthly, monthly_metric_name, month_counter)
[ "def", "sync_month_metric", "(", "self", ",", "unique_identifier", ",", "metric", ",", "start_date", ",", "end_date", ")", ":", "metric", "=", "[", "metric", "]", "if", "isinstance", "(", "metric", ",", "basestring", ")", "else", "metric", "unique_identifier",...
Uses the count for each day in the date range to recalculate the counters for the months for the ``metric`` for ``unique_identifier``. Useful for updating the counters for week and month after using set_metric_by_day. The redis backend supports lists for both ``unique_identifier`` and ``metric`` allowing for the setting of multiple metrics for multiple unique_identifiers efficiently. Not all backends may support this. :param unique_identifier: Unique string indetifying the object this metric is for :param metric: A unique name for the metric you want to track :param start_date: Date syncing starts :param end_date: Date syncing end
[ "Uses", "the", "count", "for", "each", "day", "in", "the", "date", "range", "to", "recalculate", "the", "counters", "for", "the", "months", "for", "the", "metric", "for", "unique_identifier", ".", "Useful", "for", "updating", "the", "counters", "for", "week"...
abbc814925c6cc200b3329c7de9f1868e1cb8c01
https://github.com/numan/py-analytics/blob/abbc814925c6cc200b3329c7de9f1868e1cb8c01/analytics/backends/redis.py#L500-L532
train
52,429
non-Jedi/gyr
gyr/utils.py
is_full_mxid
def is_full_mxid(user_string): """Returns True if a string is a valid mxid.""" if not user_string[0] == "@": return False parts = user_string[1:].split(":") localpart_chars = ascii_lowercase + digits + "._-=" if not (len(parts) == 2 and all([i in localpart_chars for i in parts[0]])): return False return True
python
def is_full_mxid(user_string): """Returns True if a string is a valid mxid.""" if not user_string[0] == "@": return False parts = user_string[1:].split(":") localpart_chars = ascii_lowercase + digits + "._-=" if not (len(parts) == 2 and all([i in localpart_chars for i in parts[0]])): return False return True
[ "def", "is_full_mxid", "(", "user_string", ")", ":", "if", "not", "user_string", "[", "0", "]", "==", "\"@\"", ":", "return", "False", "parts", "=", "user_string", "[", "1", ":", "]", ".", "split", "(", "\":\"", ")", "localpart_chars", "=", "ascii_lowerc...
Returns True if a string is a valid mxid.
[ "Returns", "True", "if", "a", "string", "is", "a", "valid", "mxid", "." ]
9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/utils.py#L30-L38
train
52,430
non-Jedi/gyr
gyr/utils.py
intent
def intent(method): """Helps object methods handle MatrixRequestError. Args: method(function): Object method to be wrapped Method's object must have _handle_request_exception method that deals with specific status codes and errcodes. """ def wrapper(self, *args, **kwargs): try: return method(self, *args, **kwargs) except exceptions.MatrixError as e: if isinstance(e.original_exception, matrix_client.errors.MatrixRequestError): self._handle_request_exception(e) # May still throw exception for other reasons; not handled return method(self, *args, **kwargs) else: raise e return wrapper
python
def intent(method): """Helps object methods handle MatrixRequestError. Args: method(function): Object method to be wrapped Method's object must have _handle_request_exception method that deals with specific status codes and errcodes. """ def wrapper(self, *args, **kwargs): try: return method(self, *args, **kwargs) except exceptions.MatrixError as e: if isinstance(e.original_exception, matrix_client.errors.MatrixRequestError): self._handle_request_exception(e) # May still throw exception for other reasons; not handled return method(self, *args, **kwargs) else: raise e return wrapper
[ "def", "intent", "(", "method", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "method", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "exceptions", "."...
Helps object methods handle MatrixRequestError. Args: method(function): Object method to be wrapped Method's object must have _handle_request_exception method that deals with specific status codes and errcodes.
[ "Helps", "object", "methods", "handle", "MatrixRequestError", "." ]
9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/utils.py#L46-L68
train
52,431
malramsay64/experi
src/experi/commands.py
Command.get_variables
def get_variables(self) -> Set[str]: """Find all the variables specified in a format string. This returns a list of all the different variables specified in a format string, that is the variables inside the braces. """ variables = set() for cmd in self._cmd: for var in self.__formatter.parse(cmd): logger.debug("Checking variable: %s", var) # creates and requires are special class values if var[1] is not None and var[1] not in ["creates", "requires"]: variables.add(var[1]) return variables
python
def get_variables(self) -> Set[str]: """Find all the variables specified in a format string. This returns a list of all the different variables specified in a format string, that is the variables inside the braces. """ variables = set() for cmd in self._cmd: for var in self.__formatter.parse(cmd): logger.debug("Checking variable: %s", var) # creates and requires are special class values if var[1] is not None and var[1] not in ["creates", "requires"]: variables.add(var[1]) return variables
[ "def", "get_variables", "(", "self", ")", "->", "Set", "[", "str", "]", ":", "variables", "=", "set", "(", ")", "for", "cmd", "in", "self", ".", "_cmd", ":", "for", "var", "in", "self", ".", "__formatter", ".", "parse", "(", "cmd", ")", ":", "log...
Find all the variables specified in a format string. This returns a list of all the different variables specified in a format string, that is the variables inside the braces.
[ "Find", "all", "the", "variables", "specified", "in", "a", "format", "string", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/commands.py#L53-L67
train
52,432
malramsay64/experi
src/experi/commands.py
Job.as_bash_array
def as_bash_array(self) -> str: """Return a representation as a bash array. This creates a string formatted as a bash array containing all the commands in the job. """ return_string = "( \\\n" for command in self: return_string += '"' + str(command) + '" \\\n' return_string += ")" return return_string
python
def as_bash_array(self) -> str: """Return a representation as a bash array. This creates a string formatted as a bash array containing all the commands in the job. """ return_string = "( \\\n" for command in self: return_string += '"' + str(command) + '" \\\n' return_string += ")" return return_string
[ "def", "as_bash_array", "(", "self", ")", "->", "str", ":", "return_string", "=", "\"( \\\\\\n\"", "for", "command", "in", "self", ":", "return_string", "+=", "'\"'", "+", "str", "(", "command", ")", "+", "'\" \\\\\\n'", "return_string", "+=", "\")\"", "retu...
Return a representation as a bash array. This creates a string formatted as a bash array containing all the commands in the job.
[ "Return", "a", "representation", "as", "a", "bash", "array", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/commands.py#L140-L150
train
52,433
malramsay64/experi
src/experi/run.py
combine_dictionaries
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]: """Merge a list of dictionaries into a single dictionary. Where there are collisions the first value in the list will be set as this function is using ChainMap to combine the dicts. """ return dict(ChainMap(*dicts))
python
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]: """Merge a list of dictionaries into a single dictionary. Where there are collisions the first value in the list will be set as this function is using ChainMap to combine the dicts. """ return dict(ChainMap(*dicts))
[ "def", "combine_dictionaries", "(", "dicts", ":", "List", "[", "Dict", "[", "str", ",", "Any", "]", "]", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "return", "dict", "(", "ChainMap", "(", "*", "dicts", ")", ")" ]
Merge a list of dictionaries into a single dictionary. Where there are collisions the first value in the list will be set as this function is using ChainMap to combine the dicts.
[ "Merge", "a", "list", "of", "dictionaries", "into", "a", "single", "dictionary", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L39-L46
train
52,434
malramsay64/experi
src/experi/run.py
iterator_zip
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]: """Apply the zip operator to a set of variables. This uses the python zip iterator to combine multiple lists of variables such that the nth variable in each list is aligned. Args: variables: The variables object parent: Unused """ logger.debug("Yielding from zip iterator") if isinstance(variables, list): for item in variables: yield list(variable_matrix(item, parent, "zip")) else: yield list(variable_matrix(variables, parent, "zip"))
python
def iterator_zip(variables: VarType, parent: str = None) -> Iterable[VarMatrix]: """Apply the zip operator to a set of variables. This uses the python zip iterator to combine multiple lists of variables such that the nth variable in each list is aligned. Args: variables: The variables object parent: Unused """ logger.debug("Yielding from zip iterator") if isinstance(variables, list): for item in variables: yield list(variable_matrix(item, parent, "zip")) else: yield list(variable_matrix(variables, parent, "zip"))
[ "def", "iterator_zip", "(", "variables", ":", "VarType", ",", "parent", ":", "str", "=", "None", ")", "->", "Iterable", "[", "VarMatrix", "]", ":", "logger", ".", "debug", "(", "\"Yielding from zip iterator\"", ")", "if", "isinstance", "(", "variables", ",",...
Apply the zip operator to a set of variables. This uses the python zip iterator to combine multiple lists of variables such that the nth variable in each list is aligned. Args: variables: The variables object parent: Unused
[ "Apply", "the", "zip", "operator", "to", "a", "set", "of", "variables", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L49-L66
train
52,435
malramsay64/experi
src/experi/run.py
iterator_product
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]: """Apply the product operator to a set of variables. This uses the python itertools.product iterator to combine multiple variables such that all possible combinations are generated. This is the default iterator however this is a method of manually specifying the option. Args: variables: The variables object parent: Unused """ logger.debug("Yielding from product iterator") if isinstance(variables, list): raise ValueError( f"Product only takes mappings of values, got {variables} of type {type(variables)}" ) yield list(variable_matrix(variables, parent, "product"))
python
def iterator_product(variables: VarType, parent: str = None) -> Iterable[VarMatrix]: """Apply the product operator to a set of variables. This uses the python itertools.product iterator to combine multiple variables such that all possible combinations are generated. This is the default iterator however this is a method of manually specifying the option. Args: variables: The variables object parent: Unused """ logger.debug("Yielding from product iterator") if isinstance(variables, list): raise ValueError( f"Product only takes mappings of values, got {variables} of type {type(variables)}" ) yield list(variable_matrix(variables, parent, "product"))
[ "def", "iterator_product", "(", "variables", ":", "VarType", ",", "parent", ":", "str", "=", "None", ")", "->", "Iterable", "[", "VarMatrix", "]", ":", "logger", ".", "debug", "(", "\"Yielding from product iterator\"", ")", "if", "isinstance", "(", "variables"...
Apply the product operator to a set of variables. This uses the python itertools.product iterator to combine multiple variables such that all possible combinations are generated. This is the default iterator however this is a method of manually specifying the option. Args: variables: The variables object parent: Unused
[ "Apply", "the", "product", "operator", "to", "a", "set", "of", "variables", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L69-L87
train
52,436
malramsay64/experi
src/experi/run.py
iterator_chain
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]: """This successively appends each element of an array to a single list of values. This takes a list of values and puts all the values generated for each element in the list into a single list of values. It uses the :func:`itertools.chain` function to achieve this. This function is particularly useful for specifying multiple types of simulations with different parameters. Args: variables: The variables object parent: Unused """ logger.debug("Yielding from append iterator") if not isinstance(variables, list): raise ValueError( f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}" ) # Create a single list containing all the values yield list( chain.from_iterable( variable_matrix(item, parent, "product") for item in variables ) )
python
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]: """This successively appends each element of an array to a single list of values. This takes a list of values and puts all the values generated for each element in the list into a single list of values. It uses the :func:`itertools.chain` function to achieve this. This function is particularly useful for specifying multiple types of simulations with different parameters. Args: variables: The variables object parent: Unused """ logger.debug("Yielding from append iterator") if not isinstance(variables, list): raise ValueError( f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}" ) # Create a single list containing all the values yield list( chain.from_iterable( variable_matrix(item, parent, "product") for item in variables ) )
[ "def", "iterator_chain", "(", "variables", ":", "VarType", ",", "parent", ":", "str", "=", "None", ")", "->", "Iterable", "[", "VarMatrix", "]", ":", "logger", ".", "debug", "(", "\"Yielding from append iterator\"", ")", "if", "not", "isinstance", "(", "vari...
This successively appends each element of an array to a single list of values. This takes a list of values and puts all the values generated for each element in the list into a single list of values. It uses the :func:`itertools.chain` function to achieve this. This function is particularly useful for specifying multiple types of simulations with different parameters. Args: variables: The variables object parent: Unused
[ "This", "successively", "appends", "each", "element", "of", "an", "array", "to", "a", "single", "list", "of", "values", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L90-L114
train
52,437
malramsay64/experi
src/experi/run.py
iterator_cycle
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]: """Cycle through a list of values a specified number of times Args: variables: The input variables for the creation of the range parent: The variable for which the values are being generated. Returns: A list of dictionaries mapping the parent to each value. """ if isinstance(variables, dict): if variables.get("times"): times = int(variables["times"]) del variables["times"] yield list(variable_matrix(variables, parent, "product")) * times else: raise ValueError(f"times is a required keyword for the repeat iterator.") else: raise ValueError( f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}" )
python
def iterator_cycle(variables: VarType, parent: str) -> Iterable[VarMatrix]: """Cycle through a list of values a specified number of times Args: variables: The input variables for the creation of the range parent: The variable for which the values are being generated. Returns: A list of dictionaries mapping the parent to each value. """ if isinstance(variables, dict): if variables.get("times"): times = int(variables["times"]) del variables["times"] yield list(variable_matrix(variables, parent, "product")) * times else: raise ValueError(f"times is a required keyword for the repeat iterator.") else: raise ValueError( f"The repeat operator only takes a dict as arguments, got {variables} of type {type(variables)}" )
[ "def", "iterator_cycle", "(", "variables", ":", "VarType", ",", "parent", ":", "str", ")", "->", "Iterable", "[", "VarMatrix", "]", ":", "if", "isinstance", "(", "variables", ",", "dict", ")", ":", "if", "variables", ".", "get", "(", "\"times\"", ")", ...
Cycle through a list of values a specified number of times Args: variables: The input variables for the creation of the range parent: The variable for which the values are being generated. Returns: A list of dictionaries mapping the parent to each value.
[ "Cycle", "through", "a", "list", "of", "values", "a", "specified", "number", "of", "times" ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L149-L171
train
52,438
malramsay64/experi
src/experi/run.py
variable_matrix
def variable_matrix( variables: VarType, parent: str = None, iterator: str = "product" ) -> Iterable[Dict[str, YamlValue]]: """Process the variables into a list of the appropriate combinations. This function performs recursive processing of the input variables, creating an iterator which has all the combinations of variables specified in the input. """ _iters: Dict[str, Callable] = {"product": product, "zip": zip} _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = { "zip": iterator_zip, "product": iterator_product, "arange": iterator_arange, "chain": iterator_chain, "append": iterator_chain, "cycle": iterator_cycle, "repeat": iterator_cycle, } if isinstance(variables, dict): key_vars: List[List[Dict[str, YamlValue]]] = [] # Handling of specialised iterators for key, function in _special_keys.items(): if variables.get(key): item = variables[key] assert item is not None for val in function(item, parent): key_vars.append(val) del variables[key] for key, value in variables.items(): key_vars.append(list(variable_matrix(value, key, iterator))) logger.debug("key vars: %s", key_vars) # Iterate through all possible products generating a dictionary for i in _iters[iterator](*key_vars): logger.debug("dicts: %s", i) yield combine_dictionaries(i) # Iterate through a list of values elif isinstance(variables, list): for item in variables: yield from variable_matrix(item, parent, iterator) # Stopping condition -> we have either a single value from a list # or a value had only one item else: assert parent is not None yield {parent: variables}
python
def variable_matrix( variables: VarType, parent: str = None, iterator: str = "product" ) -> Iterable[Dict[str, YamlValue]]: """Process the variables into a list of the appropriate combinations. This function performs recursive processing of the input variables, creating an iterator which has all the combinations of variables specified in the input. """ _iters: Dict[str, Callable] = {"product": product, "zip": zip} _special_keys: Dict[str, Callable[[VarType, Any], Iterable[VarMatrix]]] = { "zip": iterator_zip, "product": iterator_product, "arange": iterator_arange, "chain": iterator_chain, "append": iterator_chain, "cycle": iterator_cycle, "repeat": iterator_cycle, } if isinstance(variables, dict): key_vars: List[List[Dict[str, YamlValue]]] = [] # Handling of specialised iterators for key, function in _special_keys.items(): if variables.get(key): item = variables[key] assert item is not None for val in function(item, parent): key_vars.append(val) del variables[key] for key, value in variables.items(): key_vars.append(list(variable_matrix(value, key, iterator))) logger.debug("key vars: %s", key_vars) # Iterate through all possible products generating a dictionary for i in _iters[iterator](*key_vars): logger.debug("dicts: %s", i) yield combine_dictionaries(i) # Iterate through a list of values elif isinstance(variables, list): for item in variables: yield from variable_matrix(item, parent, iterator) # Stopping condition -> we have either a single value from a list # or a value had only one item else: assert parent is not None yield {parent: variables}
[ "def", "variable_matrix", "(", "variables", ":", "VarType", ",", "parent", ":", "str", "=", "None", ",", "iterator", ":", "str", "=", "\"product\"", ")", "->", "Iterable", "[", "Dict", "[", "str", ",", "YamlValue", "]", "]", ":", "_iters", ":", "Dict",...
Process the variables into a list of the appropriate combinations. This function performs recursive processing of the input variables, creating an iterator which has all the combinations of variables specified in the input.
[ "Process", "the", "variables", "into", "a", "list", "of", "the", "appropriate", "combinations", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L174-L226
train
52,439
malramsay64/experi
src/experi/run.py
uniqueify
def uniqueify(my_list: Any) -> List[Any]: """Remove duplicate entries in a list retaining order.""" if sys.version_info >= (3, 6): # An implementation specific detail of py3.6 is the retention of order # within a dictionary. In py3.7 this becomes the documented behaviour. return list(dict.fromkeys(my_list)) # Slower method of order preserving unique list in older python versions seen = set() return [x for x in my_list if x not in seen and not seen.add(x)]
python
def uniqueify(my_list: Any) -> List[Any]: """Remove duplicate entries in a list retaining order.""" if sys.version_info >= (3, 6): # An implementation specific detail of py3.6 is the retention of order # within a dictionary. In py3.7 this becomes the documented behaviour. return list(dict.fromkeys(my_list)) # Slower method of order preserving unique list in older python versions seen = set() return [x for x in my_list if x not in seen and not seen.add(x)]
[ "def", "uniqueify", "(", "my_list", ":", "Any", ")", "->", "List", "[", "Any", "]", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "6", ")", ":", "# An implementation specific detail of py3.6 is the retention of order", "# within a dictionary. In py3.7 ...
Remove duplicate entries in a list retaining order.
[ "Remove", "duplicate", "entries", "in", "a", "list", "retaining", "order", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L229-L238
train
52,440
malramsay64/experi
src/experi/run.py
process_command
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]: """Generate all combinations of commands given a variable matrix. Processes the commands to be sequences of strings. """ assert command is not None if isinstance(command, str): command_list = [Command(command, variables=variables) for variables in matrix] elif isinstance(command, list): command_list = [Command(command, variables=variables) for variables in matrix] else: if command.get("command") is not None: cmd = command.get("command") else: cmd = command.get("cmd") creates = str(command.get("creates", "")) requires = str(command.get("requires", "")) assert isinstance(cmd, (list, str)) command_list = [ Command(cmd, variables, creates, requires) for variables in matrix ] return uniqueify(command_list)
python
def process_command(command: CommandInput, matrix: VarMatrix) -> List[Command]: """Generate all combinations of commands given a variable matrix. Processes the commands to be sequences of strings. """ assert command is not None if isinstance(command, str): command_list = [Command(command, variables=variables) for variables in matrix] elif isinstance(command, list): command_list = [Command(command, variables=variables) for variables in matrix] else: if command.get("command") is not None: cmd = command.get("command") else: cmd = command.get("cmd") creates = str(command.get("creates", "")) requires = str(command.get("requires", "")) assert isinstance(cmd, (list, str)) command_list = [ Command(cmd, variables, creates, requires) for variables in matrix ] return uniqueify(command_list)
[ "def", "process_command", "(", "command", ":", "CommandInput", ",", "matrix", ":", "VarMatrix", ")", "->", "List", "[", "Command", "]", ":", "assert", "command", "is", "not", "None", "if", "isinstance", "(", "command", ",", "str", ")", ":", "command_list",...
Generate all combinations of commands given a variable matrix. Processes the commands to be sequences of strings.
[ "Generate", "all", "combinations", "of", "commands", "given", "a", "variable", "matrix", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L263-L286
train
52,441
malramsay64/experi
src/experi/run.py
read_file
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]: """Read and parse yaml file.""" logger.debug("Input file: %s", filename) with open(filename, "r") as stream: structure = yaml.safe_load(stream) return structure
python
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]: """Read and parse yaml file.""" logger.debug("Input file: %s", filename) with open(filename, "r") as stream: structure = yaml.safe_load(stream) return structure
[ "def", "read_file", "(", "filename", ":", "PathLike", "=", "\"experiment.yml\"", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "logger", ".", "debug", "(", "\"Input file: %s\"", ",", "filename", ")", "with", "open", "(", "filename", ",", "\"r\"", ...
Read and parse yaml file.
[ "Read", "and", "parse", "yaml", "file", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L289-L295
train
52,442
malramsay64/experi
src/experi/run.py
run_bash_jobs
def run_bash_jobs( jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False ) -> None: """Submit commands to the bash shell. This function runs the commands iteratively but handles errors in the same way as with the pbs_commands function. A command will run for all combinations of variables in the variable matrix, however if any one of those commands fails then the next command will not run. """ logger.debug("Running commands in bash shell") # iterate through command groups for job in jobs: # Check shell exists if shutil.which(job.shell) is None: raise ProcessLookupError(f"The shell '{job.shell}' was not found.") failed = False for command in job: for cmd in command: logger.info(cmd) if dry_run: print(f"{job.shell} -c '{cmd}'") else: result = subprocess.run( [job.shell, "-c", f"{cmd}"], cwd=str(directory) ) if result.returncode != 0: failed = True logger.error("Command failed: %s", command) break if failed: logger.error("A command failed, not continuing further.") return
python
def run_bash_jobs( jobs: Iterator[Job], directory: PathLike = Path.cwd(), dry_run: bool = False ) -> None: """Submit commands to the bash shell. This function runs the commands iteratively but handles errors in the same way as with the pbs_commands function. A command will run for all combinations of variables in the variable matrix, however if any one of those commands fails then the next command will not run. """ logger.debug("Running commands in bash shell") # iterate through command groups for job in jobs: # Check shell exists if shutil.which(job.shell) is None: raise ProcessLookupError(f"The shell '{job.shell}' was not found.") failed = False for command in job: for cmd in command: logger.info(cmd) if dry_run: print(f"{job.shell} -c '{cmd}'") else: result = subprocess.run( [job.shell, "-c", f"{cmd}"], cwd=str(directory) ) if result.returncode != 0: failed = True logger.error("Command failed: %s", command) break if failed: logger.error("A command failed, not continuing further.") return
[ "def", "run_bash_jobs", "(", "jobs", ":", "Iterator", "[", "Job", "]", ",", "directory", ":", "PathLike", "=", "Path", ".", "cwd", "(", ")", ",", "dry_run", ":", "bool", "=", "False", ")", "->", "None", ":", "logger", ".", "debug", "(", "\"Running co...
Submit commands to the bash shell. This function runs the commands iteratively but handles errors in the same way as with the pbs_commands function. A command will run for all combinations of variables in the variable matrix, however if any one of those commands fails then the next command will not run.
[ "Submit", "commands", "to", "the", "bash", "shell", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L361-L395
train
52,443
malramsay64/experi
src/experi/run.py
run_scheduler_jobs
def run_scheduler_jobs( scheduler: str, jobs: Iterator[Job], directory: PathLike = Path.cwd(), basename: str = "experi", dry_run: bool = False, ) -> None: """Submit a series of commands to a batch scheduler. This takes a list of strings which are the contents of the pbs files, writes the files to disk and submits the job to the scheduler. Files which match the pattern of the resulting files <basename>_<index>.pbs are deleted before writing the new files. To ensure that commands run consecutively the aditional requirement to the run script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components of the experiment to be conducted in a single script. Note: Having this function submit jobs requires that the command `qsub` exists, implying that a job scheduler is installed. """ submit_job = True logger.debug("Creating commands in %s files.", scheduler) # Check scheduler submit command exists if scheduler == "pbs": submit_executable = "qsub" elif scheduler == "slurm": submit_executable = "sbatch" else: raise ValueError("scheduler can only take values ['pbs', 'slurm']") if shutil.which(submit_executable) is None: logger.warning( "The `%s` command is not found." 
"Skipping job submission and just generating files", submit_executable, ) submit_job = False # Ensure directory is a Path directory = Path(directory) # remove existing files for fname in directory.glob(basename + f"*.{scheduler}"): print("Removing {}".format(fname)) os.remove(str(fname)) # Write new files and generate commands prev_jobids: List[str] = [] for index, job in enumerate(jobs): # Generate scheduler file content = create_scheduler_file(scheduler, job) logger.debug("File contents:\n%s", content) # Write file to disk fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler)) with fname.open("w") as dst: dst.write(content) if submit_job or dry_run: # Construct command submit_cmd = [submit_executable] if prev_jobids: # Continue to append all previous jobs to submit_cmd so subsequent jobs die along # with the first. afterok = f"afterok:{':'.join(prev_jobids)}" if scheduler == "pbs": submit_cmd += ["-W", f"depend={afterok}"] elif scheduler == "slurm": submit_cmd += ["--dependency", afterok] # actually run the command logger.info(str(submit_cmd)) try: if dry_run: print(f"{submit_cmd} {fname.name}") prev_jobids.append("dry_run") else: cmd_res = subprocess.check_output( submit_cmd + [fname.name], cwd=str(directory) ) prev_jobids.append(cmd_res.decode().strip()) except subprocess.CalledProcessError: logger.error("Submitting job to the queue failed.") break
python
def run_scheduler_jobs( scheduler: str, jobs: Iterator[Job], directory: PathLike = Path.cwd(), basename: str = "experi", dry_run: bool = False, ) -> None: """Submit a series of commands to a batch scheduler. This takes a list of strings which are the contents of the pbs files, writes the files to disk and submits the job to the scheduler. Files which match the pattern of the resulting files <basename>_<index>.pbs are deleted before writing the new files. To ensure that commands run consecutively the aditional requirement to the run script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components of the experiment to be conducted in a single script. Note: Having this function submit jobs requires that the command `qsub` exists, implying that a job scheduler is installed. """ submit_job = True logger.debug("Creating commands in %s files.", scheduler) # Check scheduler submit command exists if scheduler == "pbs": submit_executable = "qsub" elif scheduler == "slurm": submit_executable = "sbatch" else: raise ValueError("scheduler can only take values ['pbs', 'slurm']") if shutil.which(submit_executable) is None: logger.warning( "The `%s` command is not found." 
"Skipping job submission and just generating files", submit_executable, ) submit_job = False # Ensure directory is a Path directory = Path(directory) # remove existing files for fname in directory.glob(basename + f"*.{scheduler}"): print("Removing {}".format(fname)) os.remove(str(fname)) # Write new files and generate commands prev_jobids: List[str] = [] for index, job in enumerate(jobs): # Generate scheduler file content = create_scheduler_file(scheduler, job) logger.debug("File contents:\n%s", content) # Write file to disk fname = Path(directory / "{}_{:02d}.{}".format(basename, index, scheduler)) with fname.open("w") as dst: dst.write(content) if submit_job or dry_run: # Construct command submit_cmd = [submit_executable] if prev_jobids: # Continue to append all previous jobs to submit_cmd so subsequent jobs die along # with the first. afterok = f"afterok:{':'.join(prev_jobids)}" if scheduler == "pbs": submit_cmd += ["-W", f"depend={afterok}"] elif scheduler == "slurm": submit_cmd += ["--dependency", afterok] # actually run the command logger.info(str(submit_cmd)) try: if dry_run: print(f"{submit_cmd} {fname.name}") prev_jobids.append("dry_run") else: cmd_res = subprocess.check_output( submit_cmd + [fname.name], cwd=str(directory) ) prev_jobids.append(cmd_res.decode().strip()) except subprocess.CalledProcessError: logger.error("Submitting job to the queue failed.") break
[ "def", "run_scheduler_jobs", "(", "scheduler", ":", "str", ",", "jobs", ":", "Iterator", "[", "Job", "]", ",", "directory", ":", "PathLike", "=", "Path", ".", "cwd", "(", ")", ",", "basename", ":", "str", "=", "\"experi\"", ",", "dry_run", ":", "bool",...
Submit a series of commands to a batch scheduler. This takes a list of strings which are the contents of the pbs files, writes the files to disk and submits the job to the scheduler. Files which match the pattern of the resulting files <basename>_<index>.pbs are deleted before writing the new files. To ensure that commands run consecutively the aditional requirement to the run script `-W depend=afterok:<prev_jobid>` is added. This allows for all the components of the experiment to be conducted in a single script. Note: Having this function submit jobs requires that the command `qsub` exists, implying that a job scheduler is installed.
[ "Submit", "a", "series", "of", "commands", "to", "a", "batch", "scheduler", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L398-L483
train
52,444
malramsay64/experi
src/experi/run.py
determine_scheduler
def determine_scheduler( scheduler: Optional[str], experiment_definition: Dict[str, YamlValue] ) -> str: """Determine the scheduler to use to run the jobs.""" # Scheduler value from command line has first priority if scheduler is not None: if scheduler in ["shell", "pbs", "slurm"]: return scheduler raise ValueError( "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']" ) # Next priority goes to the experiment.yml file if experiment_definition.get("pbs"): return "pbs" if experiment_definition.get("slurm"): return "slurm" if experiment_definition.get("shell"): return "shell" # Final priority goes to the auto-discovery if shutil.which("pbs") is not None: return "pbs" if shutil.which("slurm") is not None: return "slurm" # Default if nothing else is found goes to shell return "shell"
python
def determine_scheduler( scheduler: Optional[str], experiment_definition: Dict[str, YamlValue] ) -> str: """Determine the scheduler to use to run the jobs.""" # Scheduler value from command line has first priority if scheduler is not None: if scheduler in ["shell", "pbs", "slurm"]: return scheduler raise ValueError( "Argument scheduler only supports input values of ['shell', 'pbs', 'slurm']" ) # Next priority goes to the experiment.yml file if experiment_definition.get("pbs"): return "pbs" if experiment_definition.get("slurm"): return "slurm" if experiment_definition.get("shell"): return "shell" # Final priority goes to the auto-discovery if shutil.which("pbs") is not None: return "pbs" if shutil.which("slurm") is not None: return "slurm" # Default if nothing else is found goes to shell return "shell"
[ "def", "determine_scheduler", "(", "scheduler", ":", "Optional", "[", "str", "]", ",", "experiment_definition", ":", "Dict", "[", "str", ",", "YamlValue", "]", ")", "->", "str", ":", "# Scheduler value from command line has first priority", "if", "scheduler", "is", ...
Determine the scheduler to use to run the jobs.
[ "Determine", "the", "scheduler", "to", "use", "to", "run", "the", "jobs", "." ]
7159644df0420e4a395c87c0c08e11567f401443
https://github.com/malramsay64/experi/blob/7159644df0420e4a395c87c0c08e11567f401443/src/experi/run.py#L486-L514
train
52,445
alfredodeza/notario
notario/validators/iterables.py
BasicIterableValidator.safe_type
def safe_type(self, data, tree): """ Make sure that the incoming data complies with the class type we are expecting it to be. In this case, classes that inherit from this base class expect data to be of type ``list``. """ if not isinstance(data, list): name = self.__class__.__name__ msg = "did not pass validation against callable: %s" % name reason = 'expected a list but got %s' % safe_repr(data) raise Invalid(self.schema, tree, reason=reason, pair='value', msg=msg)
python
def safe_type(self, data, tree): """ Make sure that the incoming data complies with the class type we are expecting it to be. In this case, classes that inherit from this base class expect data to be of type ``list``. """ if not isinstance(data, list): name = self.__class__.__name__ msg = "did not pass validation against callable: %s" % name reason = 'expected a list but got %s' % safe_repr(data) raise Invalid(self.schema, tree, reason=reason, pair='value', msg=msg)
[ "def", "safe_type", "(", "self", ",", "data", ",", "tree", ")", ":", "if", "not", "isinstance", "(", "data", ",", "list", ")", ":", "name", "=", "self", ".", "__class__", ".", "__name__", "msg", "=", "\"did not pass validation against callable: %s\"", "%", ...
Make sure that the incoming data complies with the class type we are expecting it to be. In this case, classes that inherit from this base class expect data to be of type ``list``.
[ "Make", "sure", "that", "the", "incoming", "data", "complies", "with", "the", "class", "type", "we", "are", "expecting", "it", "to", "be", ".", "In", "this", "case", "classes", "that", "inherit", "from", "this", "base", "class", "expect", "data", "to", "...
d5dc2edfcb75d9291ced3f2551f368c35dd31475
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/validators/iterables.py#L22-L32
train
52,446
aptivate/ckanext-datasetversions
ckanext/datasetversions/helpers.py
get_context
def get_context(context): """An internal context generator. Accepts a CKAN context. CKAN's internals put various things into the context which makes reusing it for multiple API calls inadvisable. This function adds more fine grain control on the context from our plugin logic side. """ new_context = { 'model': context['model'], 'session': context['session'], 'user': context.get('user'), 'ignore_auth': context.get('ignore_auth', False), 'use_cache': context.get('use_cache', False), } if 'validate' in context: new_context['validate'] = context['validate'] return new_context
python
def get_context(context): """An internal context generator. Accepts a CKAN context. CKAN's internals put various things into the context which makes reusing it for multiple API calls inadvisable. This function adds more fine grain control on the context from our plugin logic side. """ new_context = { 'model': context['model'], 'session': context['session'], 'user': context.get('user'), 'ignore_auth': context.get('ignore_auth', False), 'use_cache': context.get('use_cache', False), } if 'validate' in context: new_context['validate'] = context['validate'] return new_context
[ "def", "get_context", "(", "context", ")", ":", "new_context", "=", "{", "'model'", ":", "context", "[", "'model'", "]", ",", "'session'", ":", "context", "[", "'session'", "]", ",", "'user'", ":", "context", ".", "get", "(", "'user'", ")", ",", "'igno...
An internal context generator. Accepts a CKAN context. CKAN's internals put various things into the context which makes reusing it for multiple API calls inadvisable. This function adds more fine grain control on the context from our plugin logic side.
[ "An", "internal", "context", "generator", ".", "Accepts", "a", "CKAN", "context", "." ]
6a82fa5b20e28c705a2c187f4835b31ae928d88a
https://github.com/aptivate/ckanext-datasetversions/blob/6a82fa5b20e28c705a2c187f4835b31ae928d88a/ckanext/datasetversions/helpers.py#L16-L35
train
52,447
alfredodeza/notario
notario/utils.py
re_sort
def re_sort(data): """ A data with keys that are not enumerated sequentially will be re sorted and sequentially ordered. For example:: >>> data = {16: ('1', 'b'), 3: ('1', 'a')} >>> re_sort(data) >>> {0: ('1', 'a'), 1: ('1', 'b')} """ keys = sorted(data.keys()) new_data = {} for number, key in enumerate(keys): new_data[number] = data[key] return new_data
python
def re_sort(data): """ A data with keys that are not enumerated sequentially will be re sorted and sequentially ordered. For example:: >>> data = {16: ('1', 'b'), 3: ('1', 'a')} >>> re_sort(data) >>> {0: ('1', 'a'), 1: ('1', 'b')} """ keys = sorted(data.keys()) new_data = {} for number, key in enumerate(keys): new_data[number] = data[key] return new_data
[ "def", "re_sort", "(", "data", ")", ":", "keys", "=", "sorted", "(", "data", ".", "keys", "(", ")", ")", "new_data", "=", "{", "}", "for", "number", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "new_data", "[", "number", "]", "=", "data"...
A data with keys that are not enumerated sequentially will be re sorted and sequentially ordered. For example:: >>> data = {16: ('1', 'b'), 3: ('1', 'a')} >>> re_sort(data) >>> {0: ('1', 'a'), 1: ('1', 'b')}
[ "A", "data", "with", "keys", "that", "are", "not", "enumerated", "sequentially", "will", "be", "re", "sorted", "and", "sequentially", "ordered", "." ]
d5dc2edfcb75d9291ced3f2551f368c35dd31475
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/utils.py#L39-L54
train
52,448
alfredodeza/notario
notario/utils.py
ensure
def ensure(assertion, message=None): """ Checks an assertion argument for truth-ness. Will return ``True`` or explicitly raise ``AssertionError``. This is to deal with environments using ``python -O` or ``PYTHONOPTIMIZE=``. :param assertion: some value to evaluate for truth-ness :param message: optional message used for raising AssertionError """ message = message or assertion if not assertion: raise AssertionError(message) return True
python
def ensure(assertion, message=None): """ Checks an assertion argument for truth-ness. Will return ``True`` or explicitly raise ``AssertionError``. This is to deal with environments using ``python -O` or ``PYTHONOPTIMIZE=``. :param assertion: some value to evaluate for truth-ness :param message: optional message used for raising AssertionError """ message = message or assertion if not assertion: raise AssertionError(message) return True
[ "def", "ensure", "(", "assertion", ",", "message", "=", "None", ")", ":", "message", "=", "message", "or", "assertion", "if", "not", "assertion", ":", "raise", "AssertionError", "(", "message", ")", "return", "True" ]
Checks an assertion argument for truth-ness. Will return ``True`` or explicitly raise ``AssertionError``. This is to deal with environments using ``python -O` or ``PYTHONOPTIMIZE=``. :param assertion: some value to evaluate for truth-ness :param message: optional message used for raising AssertionError
[ "Checks", "an", "assertion", "argument", "for", "truth", "-", "ness", ".", "Will", "return", "True", "or", "explicitly", "raise", "AssertionError", ".", "This", "is", "to", "deal", "with", "environments", "using", "python", "-", "O", "or", "PYTHONOPTIMIZE", ...
d5dc2edfcb75d9291ced3f2551f368c35dd31475
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/utils.py#L144-L158
train
52,449
thiagopbueno/rddl2tf
rddl2tf/fluentshape.py
TensorFluentShape.fluent_shape
def fluent_shape(self) -> Sequence[int]: '''Returns a copy of the fluent shape, ignoring batch size if in batch mode.''' return tuple(self._shape.as_list()[1:] if self._batch else self._shape.as_list()[:])
python
def fluent_shape(self) -> Sequence[int]: '''Returns a copy of the fluent shape, ignoring batch size if in batch mode.''' return tuple(self._shape.as_list()[1:] if self._batch else self._shape.as_list()[:])
[ "def", "fluent_shape", "(", "self", ")", "->", "Sequence", "[", "int", "]", ":", "return", "tuple", "(", "self", ".", "_shape", ".", "as_list", "(", ")", "[", "1", ":", "]", "if", "self", ".", "_batch", "else", "self", ".", "_shape", ".", "as_list"...
Returns a copy of the fluent shape, ignoring batch size if in batch mode.
[ "Returns", "a", "copy", "of", "the", "fluent", "shape", "ignoring", "batch", "size", "if", "in", "batch", "mode", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluentshape.py#L80-L82
train
52,450
thiagopbueno/rddl2tf
rddl2tf/fluentshape.py
TensorFluentShape.broadcast
def broadcast(cls, shape1: 'TensorFluentShape', shape2: 'TensorFluentShape') -> Tuple[Reshaping, Reshaping]: '''It broadcasts the fluent shapes if any input is in batch mode. It handles input shapes in different modes, expanding its dimensions if necessary. It outputs a tuple with new shapes. If no input shape is in batch mode, return (None, None). If an input shape does not need to be changed, return None. Args: shape1: A fluent's shape. shape2: A fluent's shape. Returns: A pair of new shapes. ''' reshape_1, reshape_2 = None, None if not (shape1._batch or shape2._batch): return reshape_1, reshape_2 size_1, size_2 = shape1.fluent_size, shape2.fluent_size size_diff = abs(size_1 - size_2) if size_diff == 0: return reshape_1, reshape_2 if size_2 > size_1 and not (size_1 == 0 and not shape1._batch): reshape_1 = [1] * size_diff + list(shape1.fluent_shape) if shape1._batch: reshape_1 = [shape1.batch_size] + reshape_1 elif size_1 > size_2 and not (size_2 == 0 and not shape2._batch): reshape_2 = [1] * size_diff + list(shape2.fluent_shape) if shape2._batch: reshape_2 = [shape2.batch_size] + reshape_2 return reshape_1, reshape_2
python
def broadcast(cls, shape1: 'TensorFluentShape', shape2: 'TensorFluentShape') -> Tuple[Reshaping, Reshaping]: '''It broadcasts the fluent shapes if any input is in batch mode. It handles input shapes in different modes, expanding its dimensions if necessary. It outputs a tuple with new shapes. If no input shape is in batch mode, return (None, None). If an input shape does not need to be changed, return None. Args: shape1: A fluent's shape. shape2: A fluent's shape. Returns: A pair of new shapes. ''' reshape_1, reshape_2 = None, None if not (shape1._batch or shape2._batch): return reshape_1, reshape_2 size_1, size_2 = shape1.fluent_size, shape2.fluent_size size_diff = abs(size_1 - size_2) if size_diff == 0: return reshape_1, reshape_2 if size_2 > size_1 and not (size_1 == 0 and not shape1._batch): reshape_1 = [1] * size_diff + list(shape1.fluent_shape) if shape1._batch: reshape_1 = [shape1.batch_size] + reshape_1 elif size_1 > size_2 and not (size_2 == 0 and not shape2._batch): reshape_2 = [1] * size_diff + list(shape2.fluent_shape) if shape2._batch: reshape_2 = [shape2.batch_size] + reshape_2 return reshape_1, reshape_2
[ "def", "broadcast", "(", "cls", ",", "shape1", ":", "'TensorFluentShape'", ",", "shape2", ":", "'TensorFluentShape'", ")", "->", "Tuple", "[", "Reshaping", ",", "Reshaping", "]", ":", "reshape_1", ",", "reshape_2", "=", "None", ",", "None", "if", "not", "(...
It broadcasts the fluent shapes if any input is in batch mode. It handles input shapes in different modes, expanding its dimensions if necessary. It outputs a tuple with new shapes. If no input shape is in batch mode, return (None, None). If an input shape does not need to be changed, return None. Args: shape1: A fluent's shape. shape2: A fluent's shape. Returns: A pair of new shapes.
[ "It", "broadcasts", "the", "fluent", "shapes", "if", "any", "input", "is", "in", "batch", "mode", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/fluentshape.py#L90-L125
train
52,451
inodb/sufam
sufam/mpileup_parser.py
run
def run(bam, chrom, pos1, pos2, reffa, chr_reffa, parameters): """Run mpileup on given chrom and pos""" # check for chr ref is_chr_query = chrom.startswith('chr') if is_chr_query and chr_reffa is None: chr_reffa = reffa # check bam ref type bam_header = subprocess.check_output("samtools view -H {}".format(bam), shell=True) is_chr_bam = bam_header.find('SN:chr') != -1 if is_chr_bam: reffa = chr_reffa if not is_chr_query and is_chr_bam: chrom = 'chr' + chrom if is_chr_query and not is_chr_bam: chrom = re.sub(r'^chr', '', chrom) posmin = min(pos1, pos2) posmax = max(pos1, pos2) cmd = "samtools view -bh {bam} {chrom}:{pos1}-{pos2} " \ "| samtools mpileup {parameters} -f {reffa} -".format(bam=bam, chrom=chrom, pos1=posmin, pos2=posmax, reffa=reffa, parameters=parameters) if pos1 == pos2: cmd += " | awk '$2 == {pos}'".format(pos=pos1) else: cmd += " | tail -n +2 | awk '$2 >= {posmin} && $2 <= {posmax}'".format(posmin=posmin, posmax=posmax) sys.stderr.write("Running:\n{}\n".format(cmd)) child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) stdout, stderr = child.communicate() if child.returncode != 0: if len(stdout) == 0 and stderr is None: warnings.warn("Command:\n{cmd}\n did not exit with zero exit code. " "Possibly no coverage for sample.".format(cmd=cmd)) else: raise(Exception("Command:\n{cmd}\n did not exit with zero exit code. " "Check command.".format(cmd=cmd))) else: return stdout
python
def run(bam, chrom, pos1, pos2, reffa, chr_reffa, parameters): """Run mpileup on given chrom and pos""" # check for chr ref is_chr_query = chrom.startswith('chr') if is_chr_query and chr_reffa is None: chr_reffa = reffa # check bam ref type bam_header = subprocess.check_output("samtools view -H {}".format(bam), shell=True) is_chr_bam = bam_header.find('SN:chr') != -1 if is_chr_bam: reffa = chr_reffa if not is_chr_query and is_chr_bam: chrom = 'chr' + chrom if is_chr_query and not is_chr_bam: chrom = re.sub(r'^chr', '', chrom) posmin = min(pos1, pos2) posmax = max(pos1, pos2) cmd = "samtools view -bh {bam} {chrom}:{pos1}-{pos2} " \ "| samtools mpileup {parameters} -f {reffa} -".format(bam=bam, chrom=chrom, pos1=posmin, pos2=posmax, reffa=reffa, parameters=parameters) if pos1 == pos2: cmd += " | awk '$2 == {pos}'".format(pos=pos1) else: cmd += " | tail -n +2 | awk '$2 >= {posmin} && $2 <= {posmax}'".format(posmin=posmin, posmax=posmax) sys.stderr.write("Running:\n{}\n".format(cmd)) child = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) stdout, stderr = child.communicate() if child.returncode != 0: if len(stdout) == 0 and stderr is None: warnings.warn("Command:\n{cmd}\n did not exit with zero exit code. " "Possibly no coverage for sample.".format(cmd=cmd)) else: raise(Exception("Command:\n{cmd}\n did not exit with zero exit code. " "Check command.".format(cmd=cmd))) else: return stdout
[ "def", "run", "(", "bam", ",", "chrom", ",", "pos1", ",", "pos2", ",", "reffa", ",", "chr_reffa", ",", "parameters", ")", ":", "# check for chr ref", "is_chr_query", "=", "chrom", ".", "startswith", "(", "'chr'", ")", "if", "is_chr_query", "and", "chr_reff...
Run mpileup on given chrom and pos
[ "Run", "mpileup", "on", "given", "chrom", "and", "pos" ]
d4e41c5478ca9ba58be44d95106885c096c90a74
https://github.com/inodb/sufam/blob/d4e41c5478ca9ba58be44d95106885c096c90a74/sufam/mpileup_parser.py#L97-L136
train
52,452
craigahobbs/chisel
src/chisel/util.py
parse_iso8601_date
def parse_iso8601_date(string): """ Parse an ISO 8601 date string """ # Match ISO 8601? match = _RE_ISO8601_DATE.search(string) if not match: raise ValueError('Expected ISO 8601 date') # Extract ISO 8601 components year = int(match.group('year')) month = int(match.group('month')) day = int(match.group('day')) return date(year, month, day)
python
def parse_iso8601_date(string): """ Parse an ISO 8601 date string """ # Match ISO 8601? match = _RE_ISO8601_DATE.search(string) if not match: raise ValueError('Expected ISO 8601 date') # Extract ISO 8601 components year = int(match.group('year')) month = int(match.group('month')) day = int(match.group('day')) return date(year, month, day)
[ "def", "parse_iso8601_date", "(", "string", ")", ":", "# Match ISO 8601?", "match", "=", "_RE_ISO8601_DATE", ".", "search", "(", "string", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "'Expected ISO 8601 date'", ")", "# Extract ISO 8601 components", "...
Parse an ISO 8601 date string
[ "Parse", "an", "ISO", "8601", "date", "string" ]
d306a9eae2ff757647c6ca1c933bc944efa5c326
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/util.py#L106-L121
train
52,453
craigahobbs/chisel
src/chisel/util.py
import_submodules
def import_submodules(package, parent_package=None, exclude_submodules=None): """ Generator which imports all submodules of a module, recursively, including subpackages :param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided :type package: str :param parent_package: parent package name (e.g 'chisel') :type package: str :rtype: iterator of modules """ exclude_submodules_dot = [x + '.' for x in exclude_submodules] if exclude_submodules else exclude_submodules package = importlib.import_module(package, parent_package) for _, name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + '.'): if exclude_submodules and (name in exclude_submodules or any(name.startswith(x) for x in exclude_submodules_dot)): continue yield importlib.import_module(name)
python
def import_submodules(package, parent_package=None, exclude_submodules=None): """ Generator which imports all submodules of a module, recursively, including subpackages :param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided :type package: str :param parent_package: parent package name (e.g 'chisel') :type package: str :rtype: iterator of modules """ exclude_submodules_dot = [x + '.' for x in exclude_submodules] if exclude_submodules else exclude_submodules package = importlib.import_module(package, parent_package) for _, name, _ in pkgutil.walk_packages(package.__path__, package.__name__ + '.'): if exclude_submodules and (name in exclude_submodules or any(name.startswith(x) for x in exclude_submodules_dot)): continue yield importlib.import_module(name)
[ "def", "import_submodules", "(", "package", ",", "parent_package", "=", "None", ",", "exclude_submodules", "=", "None", ")", ":", "exclude_submodules_dot", "=", "[", "x", "+", "'.'", "for", "x", "in", "exclude_submodules", "]", "if", "exclude_submodules", "else"...
Generator which imports all submodules of a module, recursively, including subpackages :param package: package name (e.g 'chisel.util'); may be relative if parent_package is provided :type package: str :param parent_package: parent package name (e.g 'chisel') :type package: str :rtype: iterator of modules
[ "Generator", "which", "imports", "all", "submodules", "of", "a", "module", "recursively", "including", "subpackages" ]
d306a9eae2ff757647c6ca1c933bc944efa5c326
https://github.com/craigahobbs/chisel/blob/d306a9eae2ff757647c6ca1c933bc944efa5c326/src/chisel/util.py#L149-L165
train
52,454
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_initial_state
def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]: '''Returns a tuple of tensors representing the initial state fluents. Args: batch_size (Optional[int]): The batch size. Returns: Sequence[tf.Tensor]: A tuple of tensors. ''' with self.graph.as_default(): with tf.name_scope('initial_state'): self._initialize_initial_state_fluents() if batch_size is None: return self.initial_state_fluents return self._compile_batch_fluents(self.initial_state_fluents, batch_size)
python
def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]: '''Returns a tuple of tensors representing the initial state fluents. Args: batch_size (Optional[int]): The batch size. Returns: Sequence[tf.Tensor]: A tuple of tensors. ''' with self.graph.as_default(): with tf.name_scope('initial_state'): self._initialize_initial_state_fluents() if batch_size is None: return self.initial_state_fluents return self._compile_batch_fluents(self.initial_state_fluents, batch_size)
[ "def", "compile_initial_state", "(", "self", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "Sequence", "[", "tf", ".", "Tensor", "]", ":", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "with", "tf", "....
Returns a tuple of tensors representing the initial state fluents. Args: batch_size (Optional[int]): The batch size. Returns: Sequence[tf.Tensor]: A tuple of tensors.
[ "Returns", "a", "tuple", "of", "tensors", "representing", "the", "initial", "state", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L90-L104
train
52,455
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_default_action
def compile_default_action(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]: '''Returns a tuple of tensors representing the default action fluents. Args: batch_size (int): The batch size. Returns: Sequence[tf.Tensor]: A tuple of tensors. ''' with self.graph.as_default(): with tf.name_scope('default_action'): self._initialize_default_action_fluents() if batch_size is None: return self.default_action_fluents return self._compile_batch_fluents(self.default_action_fluents, batch_size)
python
def compile_default_action(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]: '''Returns a tuple of tensors representing the default action fluents. Args: batch_size (int): The batch size. Returns: Sequence[tf.Tensor]: A tuple of tensors. ''' with self.graph.as_default(): with tf.name_scope('default_action'): self._initialize_default_action_fluents() if batch_size is None: return self.default_action_fluents return self._compile_batch_fluents(self.default_action_fluents, batch_size)
[ "def", "compile_default_action", "(", "self", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "Sequence", "[", "tf", ".", "Tensor", "]", ":", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "with", "tf", "...
Returns a tuple of tensors representing the default action fluents. Args: batch_size (int): The batch size. Returns: Sequence[tf.Tensor]: A tuple of tensors.
[ "Returns", "a", "tuple", "of", "tensors", "representing", "the", "default", "action", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L106-L120
train
52,456
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.cpfs
def cpfs(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], noise: Optional[Noise] = None) -> Tuple[List[TensorFluent], List[TensorFluent]]: '''Compiles the intermediate and next state fluent CPFs given the current `state` and `action`. Args: state (Sequence[tf.Tensor]): A tuple of state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. Returns: Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent representing the intermediate and state CPFs. ''' scope = self.transition_scope(state, action) batch_size = int(state[0].shape[0]) interm_fluents, next_state_fluents = self.compile_cpfs(scope, batch_size, noise) interms = [fluent for _, fluent in interm_fluents] next_state = [fluent for _, fluent in next_state_fluents] return interms, next_state
python
def cpfs(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], noise: Optional[Noise] = None) -> Tuple[List[TensorFluent], List[TensorFluent]]: '''Compiles the intermediate and next state fluent CPFs given the current `state` and `action`. Args: state (Sequence[tf.Tensor]): A tuple of state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. Returns: Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent representing the intermediate and state CPFs. ''' scope = self.transition_scope(state, action) batch_size = int(state[0].shape[0]) interm_fluents, next_state_fluents = self.compile_cpfs(scope, batch_size, noise) interms = [fluent for _, fluent in interm_fluents] next_state = [fluent for _, fluent in next_state_fluents] return interms, next_state
[ "def", "cpfs", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "noise", ":", "Optional", "[", "Noise", "]", "=", "None", ")", "->", "Tuple", "[", "List...
Compiles the intermediate and next state fluent CPFs given the current `state` and `action`. Args: state (Sequence[tf.Tensor]): A tuple of state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. Returns: Tuple[List[TensorFluent], List[TensorFluent]]: A pair of lists of TensorFluent representing the intermediate and state CPFs.
[ "Compiles", "the", "intermediate", "and", "next", "state", "fluent", "CPFs", "given", "the", "current", "state", "and", "action", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L122-L142
train
52,457
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.reward
def reward(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> tf.Tensor: '''Compiles the reward function given the current `state`, `action` and `next_state`. Args: state (Sequence[tf.Tensor]): A tuple of current state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. next_state (Sequence[tf.Tensor]): A tuple of next state tensors. Returns: (:obj:`tf.Tensor`): A tensor representing the reward function. ''' scope = self.reward_scope(state, action, next_state) r = self.compile_reward(scope).tensor with self.graph.as_default(): with tf.name_scope('reward'): return tf.expand_dims(r, -1)
python
def reward(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> tf.Tensor: '''Compiles the reward function given the current `state`, `action` and `next_state`. Args: state (Sequence[tf.Tensor]): A tuple of current state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. next_state (Sequence[tf.Tensor]): A tuple of next state tensors. Returns: (:obj:`tf.Tensor`): A tensor representing the reward function. ''' scope = self.reward_scope(state, action, next_state) r = self.compile_reward(scope).tensor with self.graph.as_default(): with tf.name_scope('reward'): return tf.expand_dims(r, -1)
[ "def", "reward", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "next_state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "tf", ".", "T...
Compiles the reward function given the current `state`, `action` and `next_state`. Args: state (Sequence[tf.Tensor]): A tuple of current state tensors. action (Sequence[tf.Tensor]): A tuple of action tensors. next_state (Sequence[tf.Tensor]): A tuple of next state tensors. Returns: (:obj:`tf.Tensor`): A tensor representing the reward function.
[ "Compiles", "the", "reward", "function", "given", "the", "current", "state", "action", "and", "next_state", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L144-L163
train
52,458
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_cpfs
def compile_cpfs(self, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[Noise] = None) -> Tuple[List[CPFPair], List[CPFPair]]: '''Compiles the intermediate and next state fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: Tuple[List[CPFPair], List[CPFPair]]: A pair of lists of TensorFluent representing the intermediate and state CPFs. ''' interm_fluents = self.compile_intermediate_cpfs(scope, batch_size, noise) scope.update(dict(interm_fluents)) next_state_fluents = self.compile_state_cpfs(scope, batch_size, noise) return interm_fluents, next_state_fluents
python
def compile_cpfs(self, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[Noise] = None) -> Tuple[List[CPFPair], List[CPFPair]]: '''Compiles the intermediate and next state fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: Tuple[List[CPFPair], List[CPFPair]]: A pair of lists of TensorFluent representing the intermediate and state CPFs. ''' interm_fluents = self.compile_intermediate_cpfs(scope, batch_size, noise) scope.update(dict(interm_fluents)) next_state_fluents = self.compile_state_cpfs(scope, batch_size, noise) return interm_fluents, next_state_fluents
[ "def", "compile_cpfs", "(", "self", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "Noise", "]", "=", "None", ")", "->", "Tuple", ...
Compiles the intermediate and next state fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: Tuple[List[CPFPair], List[CPFPair]]: A pair of lists of TensorFluent representing the intermediate and state CPFs.
[ "Compiles", "the", "intermediate", "and", "next", "state", "fluent", "CPFs", "given", "the", "current", "state", "and", "action", "scope", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L165-L182
train
52,459
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_intermediate_cpfs
def compile_intermediate_cpfs(self, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[Noise] = None) -> List[CPFPair]: '''Compiles the intermediate fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. ''' interm_fluents = [] with self.graph.as_default(): with tf.name_scope('intermediate_cpfs'): for cpf in self.rddl.domain.intermediate_cpfs: cpf_noise = noise.get(cpf.name, None) if noise is not None else None name_scope = utils.identifier(cpf.name) with tf.name_scope(name_scope): t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise) interm_fluents.append((cpf.name, t)) scope[cpf.name] = t return interm_fluents
python
def compile_intermediate_cpfs(self, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[Noise] = None) -> List[CPFPair]: '''Compiles the intermediate fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. ''' interm_fluents = [] with self.graph.as_default(): with tf.name_scope('intermediate_cpfs'): for cpf in self.rddl.domain.intermediate_cpfs: cpf_noise = noise.get(cpf.name, None) if noise is not None else None name_scope = utils.identifier(cpf.name) with tf.name_scope(name_scope): t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise) interm_fluents.append((cpf.name, t)) scope[cpf.name] = t return interm_fluents
[ "def", "compile_intermediate_cpfs", "(", "self", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "Noise", "]", "=", "None", ")", "->"...
Compiles the intermediate fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of intermediate fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Compiles", "the", "intermediate", "fluent", "CPFs", "given", "the", "current", "state", "and", "action", "scope", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L184-L212
train
52,460
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_state_cpfs
def compile_state_cpfs(self, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[Noise] = None) -> List[CPFPair]: '''Compiles the next state fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. ''' next_state_fluents = [] with self.graph.as_default(): with tf.name_scope('state_cpfs'): for cpf in self.rddl.domain.state_cpfs: cpf_noise = noise.get(cpf.name, None) if noise is not None else None name_scope = utils.identifier(cpf.name) with tf.name_scope(name_scope): t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise) next_state_fluents.append((cpf.name, t)) key = lambda f: self.rddl.domain.next_state_fluent_ordering.index(f[0]) next_state_fluents = sorted(next_state_fluents, key=key) return next_state_fluents
python
def compile_state_cpfs(self, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[Noise] = None) -> List[CPFPair]: '''Compiles the next state fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`. ''' next_state_fluents = [] with self.graph.as_default(): with tf.name_scope('state_cpfs'): for cpf in self.rddl.domain.state_cpfs: cpf_noise = noise.get(cpf.name, None) if noise is not None else None name_scope = utils.identifier(cpf.name) with tf.name_scope(name_scope): t = self._compile_expression(cpf.expr, scope, batch_size, cpf_noise) next_state_fluents.append((cpf.name, t)) key = lambda f: self.rddl.domain.next_state_fluent_ordering.index(f[0]) next_state_fluents = sorted(next_state_fluents, key=key) return next_state_fluents
[ "def", "compile_state_cpfs", "(", "self", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "Noise", "]", "=", "None", ")", "->", "Li...
Compiles the next state fluent CPFs given the current `state` and `action` scope. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for CPF evaluation. batch_size (Optional[int]): The batch size. Returns: A list of state fluent CPFs compiled to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Compiles", "the", "next", "state", "fluent", "CPFs", "given", "the", "current", "state", "and", "action", "scope", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L214-L244
train
52,461
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_reward
def compile_reward(self, scope: Dict[str, TensorFluent]) -> TensorFluent: '''Compiles the reward function given the fluent `scope`. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation. Returns: A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function. ''' reward_expr = self.rddl.domain.reward with self.graph.as_default(): with tf.name_scope('reward'): return self._compile_expression(reward_expr, scope)
python
def compile_reward(self, scope: Dict[str, TensorFluent]) -> TensorFluent: '''Compiles the reward function given the fluent `scope`. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation. Returns: A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function. ''' reward_expr = self.rddl.domain.reward with self.graph.as_default(): with tf.name_scope('reward'): return self._compile_expression(reward_expr, scope)
[ "def", "compile_reward", "(", "self", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ")", "->", "TensorFluent", ":", "reward_expr", "=", "self", ".", "rddl", ".", "domain", ".", "reward", "with", "self", ".", "graph", ".", "as_default",...
Compiles the reward function given the fluent `scope`. Args: scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): The fluent scope for reward evaluation. Returns: A :obj:`rddl2tf.fluent.TensorFluent` representing the reward function.
[ "Compiles", "the", "reward", "function", "given", "the", "fluent", "scope", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L246-L258
train
52,462
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_state_action_constraints
def compile_state_action_constraints(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> List[TensorFluent]: '''Compiles the state-action constraints given current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = self.transition_scope(state, action) constraints = [] with self.graph.as_default(): with tf.name_scope('state_action_constraints'): for p in self.rddl.domain.constraints: fluent = self._compile_expression(p, scope) constraints.append(fluent) return constraints
python
def compile_state_action_constraints(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> List[TensorFluent]: '''Compiles the state-action constraints given current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = self.transition_scope(state, action) constraints = [] with self.graph.as_default(): with tf.name_scope('state_action_constraints'): for p in self.rddl.domain.constraints: fluent = self._compile_expression(p, scope) constraints.append(fluent) return constraints
[ "def", "compile_state_action_constraints", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "List", "[", "TensorFluent", "]", ":", "scope", "=", "self", ...
Compiles the state-action constraints given current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`.
[ "Compiles", "the", "state", "-", "action", "constraints", "given", "current", "state", "and", "action", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L260-L279
train
52,463
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_action_preconditions
def compile_action_preconditions(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> List[TensorFluent]: '''Compiles the action preconditions given current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = self.action_precondition_scope(state, action) preconds = [] with self.graph.as_default(): with tf.name_scope('action_preconditions'): for p in self.rddl.domain.preconds: fluent = self._compile_expression(p, scope) preconds.append(fluent) return preconds
python
def compile_action_preconditions(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> List[TensorFluent]: '''Compiles the action preconditions given current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = self.action_precondition_scope(state, action) preconds = [] with self.graph.as_default(): with tf.name_scope('action_preconditions'): for p in self.rddl.domain.preconds: fluent = self._compile_expression(p, scope) preconds.append(fluent) return preconds
[ "def", "compile_action_preconditions", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "List", "[", "TensorFluent", "]", ":", "scope", "=", "self", "."...
Compiles the action preconditions given current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`.
[ "Compiles", "the", "action", "preconditions", "given", "current", "state", "and", "action", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L281-L300
train
52,464
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_state_invariants
def compile_state_invariants(self, state: Sequence[tf.Tensor]) -> List[TensorFluent]: '''Compiles the state invarints given current `state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = self.state_invariant_scope(state) invariants = [] with self.graph.as_default(): with tf.name_scope('state_invariants'): for p in self.rddl.domain.invariants: fluent = self._compile_expression(p, scope) invariants.append(fluent) return invariants
python
def compile_state_invariants(self, state: Sequence[tf.Tensor]) -> List[TensorFluent]: '''Compiles the state invarints given current `state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = self.state_invariant_scope(state) invariants = [] with self.graph.as_default(): with tf.name_scope('state_invariants'): for p in self.rddl.domain.invariants: fluent = self._compile_expression(p, scope) invariants.append(fluent) return invariants
[ "def", "compile_state_invariants", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "List", "[", "TensorFluent", "]", ":", "scope", "=", "self", ".", "state_invariant_scope", "(", "state", ")", "invariants", "=", "[", ...
Compiles the state invarints given current `state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A list of :obj:`rddl2tf.fluent.TensorFluent`.
[ "Compiles", "the", "state", "invarints", "given", "current", "state", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L302-L319
train
52,465
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_action_preconditions_checking
def compile_action_preconditions_checking(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> tf.Tensor: '''Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is application in `state`. ''' with self.graph.as_default(): with tf.name_scope('action_preconditions_checking'): preconds = self.compile_action_preconditions(state, action) all_preconds = tf.stack([p.tensor for p in preconds], axis=1) checking = tf.reduce_all(all_preconds, axis=1) return checking
python
def compile_action_preconditions_checking(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> tf.Tensor: '''Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is application in `state`. ''' with self.graph.as_default(): with tf.name_scope('action_preconditions_checking'): preconds = self.compile_action_preconditions(state, action) all_preconds = tf.stack([p.tensor for p in preconds], axis=1) checking = tf.reduce_all(all_preconds, axis=1) return checking
[ "def", "compile_action_preconditions_checking", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "tf", ".", "Tensor", ":", "with", "self", ".", "graph", ...
Combines the action preconditions into an applicability checking op. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A boolean tensor for checking if `action` is application in `state`.
[ "Combines", "the", "action", "preconditions", "into", "an", "applicability", "checking", "op", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L321-L338
train
52,466
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.compile_action_bound_constraints
def compile_action_bound_constraints(self, state: Sequence[tf.Tensor]) -> Dict[str, Bounds]: '''Compiles all actions bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds. ''' scope = self.action_precondition_scope(state) lower_bounds = self.rddl.domain.action_lower_bound_constraints upper_bounds = self.rddl.domain.action_upper_bound_constraints with self.graph.as_default(): with tf.name_scope('action_bound_constraints'): bounds = {} for name in self.rddl.domain.action_fluent_ordering: lower_expr = lower_bounds.get(name) lower = None if lower_expr is not None: with tf.name_scope('lower_bound'): lower = self._compile_expression(lower_expr, scope) upper_expr = upper_bounds.get(name) upper = None if upper_expr is not None: with tf.name_scope('upper_bound'): upper = self._compile_expression(upper_expr, scope) bounds[name] = (lower, upper) return bounds
python
def compile_action_bound_constraints(self, state: Sequence[tf.Tensor]) -> Dict[str, Bounds]: '''Compiles all actions bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds. ''' scope = self.action_precondition_scope(state) lower_bounds = self.rddl.domain.action_lower_bound_constraints upper_bounds = self.rddl.domain.action_upper_bound_constraints with self.graph.as_default(): with tf.name_scope('action_bound_constraints'): bounds = {} for name in self.rddl.domain.action_fluent_ordering: lower_expr = lower_bounds.get(name) lower = None if lower_expr is not None: with tf.name_scope('lower_bound'): lower = self._compile_expression(lower_expr, scope) upper_expr = upper_bounds.get(name) upper = None if upper_expr is not None: with tf.name_scope('upper_bound'): upper = self._compile_expression(upper_expr, scope) bounds[name] = (lower, upper) return bounds
[ "def", "compile_action_bound_constraints", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "Bounds", "]", ":", "scope", "=", "self", ".", "action_precondition_scope", "(", "state", ")", "lower_...
Compiles all actions bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds.
[ "Compiles", "all", "actions", "bounds", "for", "the", "given", "state", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L340-L377
train
52,467
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.non_fluents_scope
def non_fluents_scope(self) -> Dict[str, TensorFluent]: '''Returns a partial scope with non-fluents. Returns: A mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' if self.__dict__.get('non_fluents') is None: self._initialize_non_fluents() return dict(self.non_fluents)
python
def non_fluents_scope(self) -> Dict[str, TensorFluent]: '''Returns a partial scope with non-fluents. Returns: A mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' if self.__dict__.get('non_fluents') is None: self._initialize_non_fluents() return dict(self.non_fluents)
[ "def", "non_fluents_scope", "(", "self", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "if", "self", ".", "__dict__", ".", "get", "(", "'non_fluents'", ")", "is", "None", ":", "self", ".", "_initialize_non_fluents", "(", ")", "return", "d...
Returns a partial scope with non-fluents. Returns: A mapping from non-fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "a", "partial", "scope", "with", "non", "-", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L379-L387
train
52,468
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.state_scope
def state_scope(self, state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current state-fluents. Args: state_fluents (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.state_fluent_ordering, state_fluents))
python
def state_scope(self, state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current state-fluents. Args: state_fluents (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.state_fluent_ordering, state_fluents))
[ "def", "state_scope", "(", "self", ",", "state_fluents", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "return", "dict", "(", "zip", "(", "self", ".", "rddl", ".", "domain", ".", "state_...
Returns a partial scope with current state-fluents. Args: state_fluents (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "a", "partial", "scope", "with", "current", "state", "-", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L389-L398
train
52,469
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.action_scope
def action_scope(self, action_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current action-fluents. Args: action_fluents (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.action_fluent_ordering, action_fluents))
python
def action_scope(self, action_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current action-fluents. Args: action_fluents (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.action_fluent_ordering, action_fluents))
[ "def", "action_scope", "(", "self", ",", "action_fluents", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "return", "dict", "(", "zip", "(", "self", ".", "rddl", ".", "domain", ".", "acti...
Returns a partial scope with current action-fluents. Args: action_fluents (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from action fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "a", "partial", "scope", "with", "current", "action", "-", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L400-L409
train
52,470
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.next_state_scope
def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current next state-fluents. Args: next_state_fluents (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents))
python
def next_state_scope(self, next_state_fluents: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns a partial scope with current next state-fluents. Args: next_state_fluents (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' return dict(zip(self.rddl.domain.next_state_fluent_ordering, next_state_fluents))
[ "def", "next_state_scope", "(", "self", ",", "next_state_fluents", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "return", "dict", "(", "zip", "(", "self", ".", "rddl", ".", "domain", ".",...
Returns a partial scope with current next state-fluents. Args: next_state_fluents (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from next state fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "a", "partial", "scope", "with", "current", "next", "state", "-", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L411-L420
train
52,471
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.transition_scope
def transition_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns the complete transition fluent scope for the current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) scope.update(self.action_scope(action)) return scope
python
def transition_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns the complete transition fluent scope for the current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) scope.update(self.action_scope(action)) return scope
[ "def", "transition_scope", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "TensorFluent", "]", ":", "scope", "=", "{", "}...
Returns the complete transition fluent scope for the current `state` and `action` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "the", "complete", "transition", "fluent", "scope", "for", "the", "current", "state", "and", "action", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L422-L439
train
52,472
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.reward_scope
def reward_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns the complete reward fluent scope for the current `state`, `action` fluents, and `next_state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. next_state (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) scope.update(self.action_scope(action)) scope.update(self.next_state_scope(next_state)) return scope
python
def reward_scope(self, state: Sequence[tf.Tensor], action: Sequence[tf.Tensor], next_state: Sequence[tf.Tensor]) -> Dict[str, TensorFluent]: '''Returns the complete reward fluent scope for the current `state`, `action` fluents, and `next_state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. next_state (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) scope.update(self.action_scope(action)) scope.update(self.next_state_scope(next_state)) return scope
[ "def", "reward_scope", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "action", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ",", "next_state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "...
Returns the complete reward fluent scope for the current `state`, `action` fluents, and `next_state` fluents. Args: state (Sequence[tf.Tensor]): The current state fluents. action (Sequence[tf.Tensor]): The action fluents. next_state (Sequence[tf.Tensor]): The next state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "the", "complete", "reward", "fluent", "scope", "for", "the", "current", "state", "action", "fluents", "and", "next_state", "fluents", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L441-L461
train
52,473
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler.state_invariant_scope
def state_invariant_scope(self, state: Sequence[tf.Tensor]): '''Returns the state invariant fluent scope for the current `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) return scope
python
def state_invariant_scope(self, state: Sequence[tf.Tensor]): '''Returns the state invariant fluent scope for the current `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`. ''' scope = {} scope.update(self.non_fluents_scope()) scope.update(self.state_scope(state)) return scope
[ "def", "state_invariant_scope", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", ":", "scope", "=", "{", "}", "scope", ".", "update", "(", "self", ".", "non_fluents_scope", "(", ")", ")", "scope", ".", "update", "(", "s...
Returns the state invariant fluent scope for the current `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from fluent names to :obj:`rddl2tf.fluent.TensorFluent`.
[ "Returns", "the", "state", "invariant", "fluent", "scope", "for", "the", "current", "state", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L463-L475
train
52,474
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._initialize_pvariables
def _initialize_pvariables(self, pvariables: Dict[str, PVariable], ordering: List[str], initializer: Optional[InitializerList] = None) -> List[Tuple[str, TensorFluent]]: '''Instantiates `pvariables` given an initialization list and returns a list of TensorFluents in the given `ordering`. Returns: List[Tuple[str, TensorFluent]]: A list of pairs of fluent name and fluent tensor. ''' if initializer is not None: init = dict() for ((name, args), value) in initializer: arity = len(args) if args is not None else 0 name = '{}/{}'.format(name, arity) init[name] = init.get(name, []) init[name].append((args, value)) fluents = [] for name in ordering: pvar = pvariables[name] shape = self.rddl._param_types_to_shape(pvar.param_types) dtype = utils.range_type_to_dtype(pvar.range) fluent = np.full(shape, pvar.default) if initializer is not None: for args, val in init.get(name, []): if args is not None: idx = [] for ptype, arg in zip(pvar.param_types, args): idx.append(self.rddl.object_table[ptype]['idx'][arg]) idx = tuple(idx) fluent[idx] = val else: fluent = val with self.graph.as_default(): t = tf.constant(fluent, dtype=dtype, name=utils.identifier(name)) scope = [None] * len(t.shape) fluent = TensorFluent(t, scope, batch=False) fluent_pair = (name, fluent) fluents.append(fluent_pair) return fluents
python
def _initialize_pvariables(self, pvariables: Dict[str, PVariable], ordering: List[str], initializer: Optional[InitializerList] = None) -> List[Tuple[str, TensorFluent]]: '''Instantiates `pvariables` given an initialization list and returns a list of TensorFluents in the given `ordering`. Returns: List[Tuple[str, TensorFluent]]: A list of pairs of fluent name and fluent tensor. ''' if initializer is not None: init = dict() for ((name, args), value) in initializer: arity = len(args) if args is not None else 0 name = '{}/{}'.format(name, arity) init[name] = init.get(name, []) init[name].append((args, value)) fluents = [] for name in ordering: pvar = pvariables[name] shape = self.rddl._param_types_to_shape(pvar.param_types) dtype = utils.range_type_to_dtype(pvar.range) fluent = np.full(shape, pvar.default) if initializer is not None: for args, val in init.get(name, []): if args is not None: idx = [] for ptype, arg in zip(pvar.param_types, args): idx.append(self.rddl.object_table[ptype]['idx'][arg]) idx = tuple(idx) fluent[idx] = val else: fluent = val with self.graph.as_default(): t = tf.constant(fluent, dtype=dtype, name=utils.identifier(name)) scope = [None] * len(t.shape) fluent = TensorFluent(t, scope, batch=False) fluent_pair = (name, fluent) fluents.append(fluent_pair) return fluents
[ "def", "_initialize_pvariables", "(", "self", ",", "pvariables", ":", "Dict", "[", "str", ",", "PVariable", "]", ",", "ordering", ":", "List", "[", "str", "]", ",", "initializer", ":", "Optional", "[", "InitializerList", "]", "=", "None", ")", "->", "Lis...
Instantiates `pvariables` given an initialization list and returns a list of TensorFluents in the given `ordering`. Returns: List[Tuple[str, TensorFluent]]: A list of pairs of fluent name and fluent tensor.
[ "Instantiates", "pvariables", "given", "an", "initialization", "list", "and", "returns", "a", "list", "of", "TensorFluents", "in", "the", "given", "ordering", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L497-L541
train
52,475
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._initialize_non_fluents
def _initialize_non_fluents(self): '''Returns the non-fluents instantiated.''' non_fluents = self.rddl.domain.non_fluents initializer = self.rddl.non_fluents.init_non_fluent self.non_fluents = self._initialize_pvariables( non_fluents, self.rddl.domain.non_fluent_ordering, initializer) return self.non_fluents
python
def _initialize_non_fluents(self): '''Returns the non-fluents instantiated.''' non_fluents = self.rddl.domain.non_fluents initializer = self.rddl.non_fluents.init_non_fluent self.non_fluents = self._initialize_pvariables( non_fluents, self.rddl.domain.non_fluent_ordering, initializer) return self.non_fluents
[ "def", "_initialize_non_fluents", "(", "self", ")", ":", "non_fluents", "=", "self", ".", "rddl", ".", "domain", ".", "non_fluents", "initializer", "=", "self", ".", "rddl", ".", "non_fluents", ".", "init_non_fluent", "self", ".", "non_fluents", "=", "self", ...
Returns the non-fluents instantiated.
[ "Returns", "the", "non", "-", "fluents", "instantiated", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L543-L551
train
52,476
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._initialize_initial_state_fluents
def _initialize_initial_state_fluents(self): '''Returns the initial state-fluents instantiated.''' state_fluents = self.rddl.domain.state_fluents initializer = self.rddl.instance.init_state self.initial_state_fluents = self._initialize_pvariables( state_fluents, self.rddl.domain.state_fluent_ordering, initializer) return self.initial_state_fluents
python
def _initialize_initial_state_fluents(self): '''Returns the initial state-fluents instantiated.''' state_fluents = self.rddl.domain.state_fluents initializer = self.rddl.instance.init_state self.initial_state_fluents = self._initialize_pvariables( state_fluents, self.rddl.domain.state_fluent_ordering, initializer) return self.initial_state_fluents
[ "def", "_initialize_initial_state_fluents", "(", "self", ")", ":", "state_fluents", "=", "self", ".", "rddl", ".", "domain", ".", "state_fluents", "initializer", "=", "self", ".", "rddl", ".", "instance", ".", "init_state", "self", ".", "initial_state_fluents", ...
Returns the initial state-fluents instantiated.
[ "Returns", "the", "initial", "state", "-", "fluents", "instantiated", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L553-L561
train
52,477
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._initialize_default_action_fluents
def _initialize_default_action_fluents(self): '''Returns the default action-fluents instantiated.''' action_fluents = self.rddl.domain.action_fluents self.default_action_fluents = self._initialize_pvariables( action_fluents, self.rddl.domain.action_fluent_ordering) return self.default_action_fluents
python
def _initialize_default_action_fluents(self): '''Returns the default action-fluents instantiated.''' action_fluents = self.rddl.domain.action_fluents self.default_action_fluents = self._initialize_pvariables( action_fluents, self.rddl.domain.action_fluent_ordering) return self.default_action_fluents
[ "def", "_initialize_default_action_fluents", "(", "self", ")", ":", "action_fluents", "=", "self", ".", "rddl", ".", "domain", ".", "action_fluents", "self", ".", "default_action_fluents", "=", "self", ".", "_initialize_pvariables", "(", "action_fluents", ",", "self...
Returns the default action-fluents instantiated.
[ "Returns", "the", "default", "action", "-", "fluents", "instantiated", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L563-L569
train
52,478
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_batch_fluents
def _compile_batch_fluents(self, fluents: List[Tuple[str, TensorFluent]], batch_size: int) -> Sequence[tf.Tensor]: '''Compiles `fluents` into tensors with given `batch_size`. Returns: Sequence[tf.Tensor]: A tuple of tensors with first dimension corresponding to the batch size. ''' batch_fluents = [] with self.graph.as_default(): for name, fluent in fluents: name_scope = utils.identifier(name) with tf.name_scope(name_scope): t = tf.stack([fluent.tensor] * batch_size) batch_fluents.append(t) return tuple(batch_fluents)
python
def _compile_batch_fluents(self, fluents: List[Tuple[str, TensorFluent]], batch_size: int) -> Sequence[tf.Tensor]: '''Compiles `fluents` into tensors with given `batch_size`. Returns: Sequence[tf.Tensor]: A tuple of tensors with first dimension corresponding to the batch size. ''' batch_fluents = [] with self.graph.as_default(): for name, fluent in fluents: name_scope = utils.identifier(name) with tf.name_scope(name_scope): t = tf.stack([fluent.tensor] * batch_size) batch_fluents.append(t) return tuple(batch_fluents)
[ "def", "_compile_batch_fluents", "(", "self", ",", "fluents", ":", "List", "[", "Tuple", "[", "str", ",", "TensorFluent", "]", "]", ",", "batch_size", ":", "int", ")", "->", "Sequence", "[", "tf", ".", "Tensor", "]", ":", "batch_fluents", "=", "[", "]"...
Compiles `fluents` into tensors with given `batch_size`. Returns: Sequence[tf.Tensor]: A tuple of tensors with first dimension corresponding to the batch size.
[ "Compiles", "fluents", "into", "tensors", "with", "given", "batch_size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L571-L587
train
52,479
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_expression
def _compile_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile the expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent. ''' etype2compiler = { 'constant': self._compile_constant_expression, 'pvar': self._compile_pvariable_expression, 'randomvar': self._compile_random_variable_expression, 'arithmetic': self._compile_arithmetic_expression, 'boolean': self._compile_boolean_expression, 'relational': self._compile_relational_expression, 'func': self._compile_function_expression, 'control': self._compile_control_flow_expression, 'aggregation': self._compile_aggregation_expression } etype = expr.etype if etype[0] not in etype2compiler: raise ValueError('Expression type unknown: {}'.format(etype)) with self.graph.as_default(): compiler_fn = etype2compiler[etype[0]] return compiler_fn(expr, scope, batch_size, noise)
python
def _compile_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile the expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent. ''' etype2compiler = { 'constant': self._compile_constant_expression, 'pvar': self._compile_pvariable_expression, 'randomvar': self._compile_random_variable_expression, 'arithmetic': self._compile_arithmetic_expression, 'boolean': self._compile_boolean_expression, 'relational': self._compile_relational_expression, 'func': self._compile_function_expression, 'control': self._compile_control_flow_expression, 'aggregation': self._compile_aggregation_expression } etype = expr.etype if etype[0] not in etype2compiler: raise ValueError('Expression type unknown: {}'.format(etype)) with self.graph.as_default(): compiler_fn = etype2compiler[etype[0]] return compiler_fn(expr, scope, batch_size, noise)
[ "def", "_compile_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "List", "[...
Compile the expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled TensorFluent.
[ "Compile", "the", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L589-L623
train
52,480
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_constant_expression
def _compile_constant_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a constant expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args dtype = utils.python_type_to_dtype(etype[1]) fluent = TensorFluent.constant(args, dtype=dtype) return fluent
python
def _compile_constant_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a constant expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args dtype = utils.python_type_to_dtype(etype[1]) fluent = TensorFluent.constant(args, dtype=dtype) return fluent
[ "def", "_compile_constant_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "Li...
Compile a constant expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL constant expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "constant", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L625-L645
train
52,481
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_pvariable_expression
def _compile_pvariable_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a pvariable expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args name = expr._pvar_to_name(args) if name not in scope: raise ValueError('Variable {} not in scope.'.format(name)) fluent = scope[name] scope = args[1] if args[1] is not None else [] if isinstance(fluent, TensorFluent): fluent = TensorFluent(fluent.tensor, scope, batch=fluent.batch) elif isinstance(fluent, tf.Tensor): fluent = TensorFluent(fluent, scope, batch=self.batch_mode) else: raise ValueError('Variable in scope must be TensorFluent-like: {}'.format(fluent)) return fluent
python
def _compile_pvariable_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a pvariable expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args name = expr._pvar_to_name(args) if name not in scope: raise ValueError('Variable {} not in scope.'.format(name)) fluent = scope[name] scope = args[1] if args[1] is not None else [] if isinstance(fluent, TensorFluent): fluent = TensorFluent(fluent.tensor, scope, batch=fluent.batch) elif isinstance(fluent, tf.Tensor): fluent = TensorFluent(fluent, scope, batch=self.batch_mode) else: raise ValueError('Variable in scope must be TensorFluent-like: {}'.format(fluent)) return fluent
[ "def", "_compile_pvariable_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "L...
Compile a pvariable expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL pvariable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "pvariable", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L647-L676
train
52,482
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_random_variable_expression
def _compile_random_variable_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a random variable expression `expr` into a TensorFluent in the given `scope` with optional batch size. If `reparam` tensor is given, then it conditionally stops gradient backpropagation at the batch level where `reparam` is False. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL random variable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if etype[1] == 'KronDelta': sample = self._compile_expression(args[0], scope, batch_size, noise) elif etype[1] == 'Bernoulli': mean = self._compile_expression(args[0], scope, batch_size, noise) dist, sample = TensorFluent.Bernoulli(mean, batch_size) elif etype[1] == 'Uniform': low = self._compile_expression(args[0], scope, batch_size, noise) high = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Uniform(low, high, batch_size) elif etype[1] == 'Normal': if noise is None: mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Normal(mean, variance, batch_size) else: xi = noise.pop() xi = TensorFluent(xi, scope=[], batch=True) mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) sample = mean + TensorFluent.sqrt(variance) * xi elif etype[1] == 'Laplace': mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Laplace(mean, variance, batch_size) elif etype[1] == 'Gamma': shape = 
self._compile_expression(args[0], scope, batch_size, noise) scale = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Gamma(shape, scale, batch_size) elif etype[1] == 'Exponential': mean = self._compile_expression(args[0], scope, batch_size, noise) dist, sample = TensorFluent.Exponential(mean, batch_size) else: raise ValueError('Invalid random variable expression:\n{}.'.format(expr)) return sample
python
def _compile_random_variable_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a random variable expression `expr` into a TensorFluent in the given `scope` with optional batch size. If `reparam` tensor is given, then it conditionally stops gradient backpropagation at the batch level where `reparam` is False. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL random variable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if etype[1] == 'KronDelta': sample = self._compile_expression(args[0], scope, batch_size, noise) elif etype[1] == 'Bernoulli': mean = self._compile_expression(args[0], scope, batch_size, noise) dist, sample = TensorFluent.Bernoulli(mean, batch_size) elif etype[1] == 'Uniform': low = self._compile_expression(args[0], scope, batch_size, noise) high = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Uniform(low, high, batch_size) elif etype[1] == 'Normal': if noise is None: mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Normal(mean, variance, batch_size) else: xi = noise.pop() xi = TensorFluent(xi, scope=[], batch=True) mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) sample = mean + TensorFluent.sqrt(variance) * xi elif etype[1] == 'Laplace': mean = self._compile_expression(args[0], scope, batch_size, noise) variance = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Laplace(mean, variance, batch_size) elif etype[1] == 'Gamma': shape = 
self._compile_expression(args[0], scope, batch_size, noise) scale = self._compile_expression(args[1], scope, batch_size, noise) dist, sample = TensorFluent.Gamma(shape, scale, batch_size) elif etype[1] == 'Exponential': mean = self._compile_expression(args[0], scope, batch_size, noise) dist, sample = TensorFluent.Exponential(mean, batch_size) else: raise ValueError('Invalid random variable expression:\n{}.'.format(expr)) return sample
[ "def", "_compile_random_variable_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "["...
Compile a random variable expression `expr` into a TensorFluent in the given `scope` with optional batch size. If `reparam` tensor is given, then it conditionally stops gradient backpropagation at the batch level where `reparam` is False. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL random variable expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "random", "variable", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L678-L734
train
52,483
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_arithmetic_expression
def _compile_arithmetic_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile an arithmetic expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if len(args) == 1: etype2op = { '+': lambda x: x, '-': lambda x: -x } if etype[1] not in etype2op: raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr)) op = etype2op[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) fluent = op(x) else: etype2op = { '+': lambda x, y: x + y, '-': lambda x, y: x - y, '*': lambda x, y: x * y, '/': lambda x, y: x / y, } if etype[1] not in etype2op: raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr)) op = etype2op[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) y = self._compile_expression(args[1], scope, batch_size, noise) fluent = op(x, y) return fluent
python
def _compile_arithmetic_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile an arithmetic expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if len(args) == 1: etype2op = { '+': lambda x: x, '-': lambda x: -x } if etype[1] not in etype2op: raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr)) op = etype2op[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) fluent = op(x) else: etype2op = { '+': lambda x, y: x + y, '-': lambda x, y: x - y, '*': lambda x, y: x * y, '/': lambda x, y: x / y, } if etype[1] not in etype2op: raise ValueError('Invalid binary arithmetic expression:\n{}'.format(expr)) op = etype2op[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) y = self._compile_expression(args[1], scope, batch_size, noise) fluent = op(x, y) return fluent
[ "def", "_compile_arithmetic_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "...
Compile an arithmetic expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL arithmetic expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "an", "arithmetic", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L736-L784
train
52,484
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_function_expression
def _compile_function_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a function expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if len(args) == 1: etype2func = { 'abs': TensorFluent.abs, 'exp': TensorFluent.exp, 'log': TensorFluent.log, 'sqrt': TensorFluent.sqrt, 'cos': TensorFluent.cos, 'sin': TensorFluent.sin, 'tan': TensorFluent.tan, 'acos': TensorFluent.acos, 'arccos': TensorFluent.acos, 'asin': TensorFluent.asin, 'arcsin': TensorFluent.asin, 'atan': TensorFluent.atan, 'arctan': TensorFluent.atan, 'round': TensorFluent.round, 'ceil': TensorFluent.ceil, 'floor': TensorFluent.floor } if etype[1] not in etype2func: raise ValueError('Invalid unary function expression:\n{}'.format(expr)) op = etype2func[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) fluent = op(x) else: etype2func = { 'pow': TensorFluent.pow, 'max': TensorFluent.max, 'min': TensorFluent.min } if etype[1] not in etype2func: raise ValueError('Invalid binary function expression:\n{}'.format(expr)) op = etype2func[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) y = self._compile_expression(args[1], scope, batch_size, noise) fluent = op(x, y) return fluent
python
def _compile_function_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a function expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if len(args) == 1: etype2func = { 'abs': TensorFluent.abs, 'exp': TensorFluent.exp, 'log': TensorFluent.log, 'sqrt': TensorFluent.sqrt, 'cos': TensorFluent.cos, 'sin': TensorFluent.sin, 'tan': TensorFluent.tan, 'acos': TensorFluent.acos, 'arccos': TensorFluent.acos, 'asin': TensorFluent.asin, 'arcsin': TensorFluent.asin, 'atan': TensorFluent.atan, 'arctan': TensorFluent.atan, 'round': TensorFluent.round, 'ceil': TensorFluent.ceil, 'floor': TensorFluent.floor } if etype[1] not in etype2func: raise ValueError('Invalid unary function expression:\n{}'.format(expr)) op = etype2func[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) fluent = op(x) else: etype2func = { 'pow': TensorFluent.pow, 'max': TensorFluent.max, 'min': TensorFluent.min } if etype[1] not in etype2func: raise ValueError('Invalid binary function expression:\n{}'.format(expr)) op = etype2func[etype[1]] x = self._compile_expression(args[0], scope, batch_size, noise) y = self._compile_expression(args[1], scope, batch_size, noise) fluent = op(x, y) return fluent
[ "def", "_compile_function_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", "Li...
Compile a function expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL function expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "function", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L874-L936
train
52,485
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_control_flow_expression
def _compile_control_flow_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a control flow expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if etype[1] == 'if': condition = self._compile_expression(args[0], scope, batch_size, noise) true_case = self._compile_expression(args[1], scope, batch_size, noise) false_case = self._compile_expression(args[2], scope, batch_size, noise) fluent = TensorFluent.if_then_else(condition, true_case, false_case) else: raise ValueError('Invalid control flow expression:\n{}'.format(expr)) return fluent
python
def _compile_control_flow_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile a control flow expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args if etype[1] == 'if': condition = self._compile_expression(args[0], scope, batch_size, noise) true_case = self._compile_expression(args[1], scope, batch_size, noise) false_case = self._compile_expression(args[2], scope, batch_size, noise) fluent = TensorFluent.if_then_else(condition, true_case, false_case) else: raise ValueError('Invalid control flow expression:\n{}'.format(expr)) return fluent
[ "def", "_compile_control_flow_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", ...
Compile a control flow expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL control flow expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "a", "control", "flow", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L938-L963
train
52,486
thiagopbueno/rddl2tf
rddl2tf/compiler.py
Compiler._compile_aggregation_expression
def _compile_aggregation_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile an aggregation expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args typed_var_list = args[:-1] vars_list = [var for _, (var, _) in typed_var_list] expr = args[-1] x = self._compile_expression(expr, scope) etype2aggr = { 'sum': x.sum, 'prod': x.prod, 'avg': x.avg, 'maximum': x.maximum, 'minimum': x.minimum, 'exists': x.exists, 'forall': x.forall } if etype[1] not in etype2aggr: raise ValueError('Invalid aggregation expression {}.'.format(expr)) aggr = etype2aggr[etype[1]] fluent = aggr(vars_list=vars_list) return fluent
python
def _compile_aggregation_expression(self, expr: Expression, scope: Dict[str, TensorFluent], batch_size: Optional[int] = None, noise: Optional[List[tf.Tensor]] = None) -> TensorFluent: '''Compile an aggregation expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent. ''' etype = expr.etype args = expr.args typed_var_list = args[:-1] vars_list = [var for _, (var, _) in typed_var_list] expr = args[-1] x = self._compile_expression(expr, scope) etype2aggr = { 'sum': x.sum, 'prod': x.prod, 'avg': x.avg, 'maximum': x.maximum, 'minimum': x.minimum, 'exists': x.exists, 'forall': x.forall } if etype[1] not in etype2aggr: raise ValueError('Invalid aggregation expression {}.'.format(expr)) aggr = etype2aggr[etype[1]] fluent = aggr(vars_list=vars_list) return fluent
[ "def", "_compile_aggregation_expression", "(", "self", ",", "expr", ":", "Expression", ",", "scope", ":", "Dict", "[", "str", ",", "TensorFluent", "]", ",", "batch_size", ":", "Optional", "[", "int", "]", "=", "None", ",", "noise", ":", "Optional", "[", ...
Compile an aggregation expression `expr` into a TensorFluent in the given `scope` with optional batch size. Args: expr (:obj:`rddl2tf.expr.Expression`): A RDDL aggregation expression. scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope. batch_size (Optional[size]): The batch size. Returns: :obj:`rddl2tf.fluent.TensorFluent`: The compiled expression as a TensorFluent.
[ "Compile", "an", "aggregation", "expression", "expr", "into", "a", "TensorFluent", "in", "the", "given", "scope", "with", "optional", "batch", "size", "." ]
f7c03d3a74d2663807c1e23e04eeed2e85166b71
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L965-L1006
train
52,487
yymao/easyquery
easyquery.py
Query.variable_names
def variable_names(self): """ Get all variable names required for this query """ if self._variable_names is None: if self._operator is None: if self._operands is None: self._variable_names = tuple() else: self._variable_names = self._get_variable_names(self._operands) elif self._operator == 'NOT': self._variable_names = self._operands.variable_names else: v = list() for op in self._operands: v.extend(op.variable_names) self._variable_names = tuple(set(v)) return self._variable_names
python
def variable_names(self): """ Get all variable names required for this query """ if self._variable_names is None: if self._operator is None: if self._operands is None: self._variable_names = tuple() else: self._variable_names = self._get_variable_names(self._operands) elif self._operator == 'NOT': self._variable_names = self._operands.variable_names else: v = list() for op in self._operands: v.extend(op.variable_names) self._variable_names = tuple(set(v)) return self._variable_names
[ "def", "variable_names", "(", "self", ")", ":", "if", "self", ".", "_variable_names", "is", "None", ":", "if", "self", ".", "_operator", "is", "None", ":", "if", "self", ".", "_operands", "is", "None", ":", "self", ".", "_variable_names", "=", "tuple", ...
Get all variable names required for this query
[ "Get", "all", "variable", "names", "required", "for", "this", "query" ]
cd94c100e26f59042cd9ffb26d0a7b61cdcd457d
https://github.com/yymao/easyquery/blob/cd94c100e26f59042cd9ffb26d0a7b61cdcd457d/easyquery.py#L313-L334
train
52,488
non-Jedi/gyr
gyr/matrix_objects.py
Event.user
def user(self): """Creates a User object when requested.""" try: return self._user except AttributeError: self._user = MatrixUser(self.mxid, self.Api(identity=self.mxid)) return self._user
python
def user(self): """Creates a User object when requested.""" try: return self._user except AttributeError: self._user = MatrixUser(self.mxid, self.Api(identity=self.mxid)) return self._user
[ "def", "user", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_user", "except", "AttributeError", ":", "self", ".", "_user", "=", "MatrixUser", "(", "self", ".", "mxid", ",", "self", ".", "Api", "(", "identity", "=", "self", ".", "mxid", ...
Creates a User object when requested.
[ "Creates", "a", "User", "object", "when", "requested", "." ]
9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/matrix_objects.py#L49-L55
train
52,489
non-Jedi/gyr
gyr/matrix_objects.py
Event.room
def room(self): """Creates a Room object when requested.""" try: return self._room except AttributeError: room_id = self.json["room_id"] self._room = MatrixRoom(room_id, self.Api) return self._room
python
def room(self): """Creates a Room object when requested.""" try: return self._room except AttributeError: room_id = self.json["room_id"] self._room = MatrixRoom(room_id, self.Api) return self._room
[ "def", "room", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_room", "except", "AttributeError", ":", "room_id", "=", "self", ".", "json", "[", "\"room_id\"", "]", "self", ".", "_room", "=", "MatrixRoom", "(", "room_id", ",", "self", ".", ...
Creates a Room object when requested.
[ "Creates", "a", "Room", "object", "when", "requested", "." ]
9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/matrix_objects.py#L58-L65
train
52,490
non-Jedi/gyr
gyr/matrix_objects.py
MatrixUser.join
def join(self, room_str): """Joins room id or alias even if it must first be created.""" response = self.user_api.join_room(room_str) return self._mkroom(response["room_id"])
python
def join(self, room_str): """Joins room id or alias even if it must first be created.""" response = self.user_api.join_room(room_str) return self._mkroom(response["room_id"])
[ "def", "join", "(", "self", ",", "room_str", ")", ":", "response", "=", "self", ".", "user_api", ".", "join_room", "(", "room_str", ")", "return", "self", ".", "_mkroom", "(", "response", "[", "\"room_id\"", "]", ")" ]
Joins room id or alias even if it must first be created.
[ "Joins", "room", "id", "or", "alias", "even", "if", "it", "must", "first", "be", "created", "." ]
9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e
https://github.com/non-Jedi/gyr/blob/9f7bfe033b9d3bbfd3a9e8aea02e35526b53125e/gyr/matrix_objects.py#L234-L237
train
52,491
alfredodeza/notario
notario/decorators.py
optional
def optional(_object): """ This decorator has a double functionality, it can wrap validators and make them optional or it can wrap keys and make that entry optional. **Optional Validator:** Allows to have validators work only when there is a value that contains some data, otherwise it will just not pass the information to the actual validator and will not fail as a result. As any normal decorator, it can be used corectly with the decorator syntax or in the actual schema. This is how it would look in a schema:: ('key', optional(my_validator)) Where ``my_validator`` can be any validator that accepts a single argument. In case a class based validator is being used (like the ``recursive`` or ``iterables`` then it would look like:: ('key', optional(class_validator(('key', 'value')))) Of course, the schema should vary depending on your needs, it is just the way of constructing the validator call that should be important. **Optional Keys:** Sometimes a given data structure may present optional entries. For example this data:: data = {'required': 1, 'optional': 2} To represent this, you will need to declare the `optional` key in the schema but by wrapping the key with this decorator you will basically tell the validation engine that if that key is present it should be validated, otherwise, it should be skipped. This is how the schema would look:: schema = (('required', 1), (optional('optional'), 1)) The above schema would allow data that is missing the ``optional`` key. The data below would pass validation without any issues:: data = {'required': 1} """ if is_callable(_object): validator = _object @wraps(validator) def decorated(value): if value: return validator(value) return return decorated else: def optional(*args): return _object optional.is_optional = True optional._object = _object return optional
python
def optional(_object): """ This decorator has a double functionality, it can wrap validators and make them optional or it can wrap keys and make that entry optional. **Optional Validator:** Allows to have validators work only when there is a value that contains some data, otherwise it will just not pass the information to the actual validator and will not fail as a result. As any normal decorator, it can be used corectly with the decorator syntax or in the actual schema. This is how it would look in a schema:: ('key', optional(my_validator)) Where ``my_validator`` can be any validator that accepts a single argument. In case a class based validator is being used (like the ``recursive`` or ``iterables`` then it would look like:: ('key', optional(class_validator(('key', 'value')))) Of course, the schema should vary depending on your needs, it is just the way of constructing the validator call that should be important. **Optional Keys:** Sometimes a given data structure may present optional entries. For example this data:: data = {'required': 1, 'optional': 2} To represent this, you will need to declare the `optional` key in the schema but by wrapping the key with this decorator you will basically tell the validation engine that if that key is present it should be validated, otherwise, it should be skipped. This is how the schema would look:: schema = (('required', 1), (optional('optional'), 1)) The above schema would allow data that is missing the ``optional`` key. The data below would pass validation without any issues:: data = {'required': 1} """ if is_callable(_object): validator = _object @wraps(validator) def decorated(value): if value: return validator(value) return return decorated else: def optional(*args): return _object optional.is_optional = True optional._object = _object return optional
[ "def", "optional", "(", "_object", ")", ":", "if", "is_callable", "(", "_object", ")", ":", "validator", "=", "_object", "@", "wraps", "(", "validator", ")", "def", "decorated", "(", "value", ")", ":", "if", "value", ":", "return", "validator", "(", "v...
This decorator has a double functionality, it can wrap validators and make them optional or it can wrap keys and make that entry optional. **Optional Validator:** Allows to have validators work only when there is a value that contains some data, otherwise it will just not pass the information to the actual validator and will not fail as a result. As any normal decorator, it can be used corectly with the decorator syntax or in the actual schema. This is how it would look in a schema:: ('key', optional(my_validator)) Where ``my_validator`` can be any validator that accepts a single argument. In case a class based validator is being used (like the ``recursive`` or ``iterables`` then it would look like:: ('key', optional(class_validator(('key', 'value')))) Of course, the schema should vary depending on your needs, it is just the way of constructing the validator call that should be important. **Optional Keys:** Sometimes a given data structure may present optional entries. For example this data:: data = {'required': 1, 'optional': 2} To represent this, you will need to declare the `optional` key in the schema but by wrapping the key with this decorator you will basically tell the validation engine that if that key is present it should be validated, otherwise, it should be skipped. This is how the schema would look:: schema = (('required', 1), (optional('optional'), 1)) The above schema would allow data that is missing the ``optional`` key. The data below would pass validation without any issues:: data = {'required': 1}
[ "This", "decorator", "has", "a", "double", "functionality", "it", "can", "wrap", "validators", "and", "make", "them", "optional", "or", "it", "can", "wrap", "keys", "and", "make", "that", "entry", "optional", "." ]
d5dc2edfcb75d9291ced3f2551f368c35dd31475
https://github.com/alfredodeza/notario/blob/d5dc2edfcb75d9291ced3f2551f368c35dd31475/notario/decorators.py#L95-L156
train
52,492
RowleyGroup/pyqueue
pyqueue/job.py
Job.set_walltime
def set_walltime(self, walltime): """ Setting a walltime for the job >>> job.set_walltime(datetime.timedelta(hours=2, minutes=30)) :param walltime: Walltime of the job (an instance of timedelta) :returns: self :rtype: self """ if not isinstance(walltime, timedelta): raise TypeError( 'walltime must be an instance of datetime.timedelta. %s given' % type(walltime) ) self._options['walltime'] = walltime return self
python
def set_walltime(self, walltime): """ Setting a walltime for the job >>> job.set_walltime(datetime.timedelta(hours=2, minutes=30)) :param walltime: Walltime of the job (an instance of timedelta) :returns: self :rtype: self """ if not isinstance(walltime, timedelta): raise TypeError( 'walltime must be an instance of datetime.timedelta. %s given' % type(walltime) ) self._options['walltime'] = walltime return self
[ "def", "set_walltime", "(", "self", ",", "walltime", ")", ":", "if", "not", "isinstance", "(", "walltime", ",", "timedelta", ")", ":", "raise", "TypeError", "(", "'walltime must be an instance of datetime.timedelta. %s given'", "%", "type", "(", "walltime", ")", "...
Setting a walltime for the job >>> job.set_walltime(datetime.timedelta(hours=2, minutes=30)) :param walltime: Walltime of the job (an instance of timedelta) :returns: self :rtype: self
[ "Setting", "a", "walltime", "for", "the", "job" ]
24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f
https://github.com/RowleyGroup/pyqueue/blob/24de6e1b06b9626ed94d0d5a859bc71bd3afbb4f/pyqueue/job.py#L155-L172
train
52,493
ocaballeror/LyricFetch
lyricfetch/scraping.py
get_url
def get_url(url, parser='html'): """ Requests the specified url and returns a BeautifulSoup object with its contents. """ url = request.quote(url, safe=':/?=&') logger.debug('URL: %s', url) req = request.Request(url, headers={'User-Agent': 'foobar'}) try: response = request.urlopen(req) except HTTPError: raise except (ssl.SSLError, URLError): # Some websites (like metal-archives) use older TLS versions and can # make the ssl module trow a VERSION_TOO_LOW error. Here we try to use # the older TLSv1 to see if we can fix that context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) response = request.urlopen(req, context=context) response = response.read() if parser == 'html': return BeautifulSoup(response, 'html.parser', from_encoding='utf-8') elif parser == 'json': return json.loads(response) elif parser == 'raw': return response.decode() raise ValueError('Unrecognized parser')
python
def get_url(url, parser='html'): """ Requests the specified url and returns a BeautifulSoup object with its contents. """ url = request.quote(url, safe=':/?=&') logger.debug('URL: %s', url) req = request.Request(url, headers={'User-Agent': 'foobar'}) try: response = request.urlopen(req) except HTTPError: raise except (ssl.SSLError, URLError): # Some websites (like metal-archives) use older TLS versions and can # make the ssl module trow a VERSION_TOO_LOW error. Here we try to use # the older TLSv1 to see if we can fix that context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) response = request.urlopen(req, context=context) response = response.read() if parser == 'html': return BeautifulSoup(response, 'html.parser', from_encoding='utf-8') elif parser == 'json': return json.loads(response) elif parser == 'raw': return response.decode() raise ValueError('Unrecognized parser')
[ "def", "get_url", "(", "url", ",", "parser", "=", "'html'", ")", ":", "url", "=", "request", ".", "quote", "(", "url", ",", "safe", "=", "':/?=&'", ")", "logger", ".", "debug", "(", "'URL: %s'", ",", "url", ")", "req", "=", "request", ".", "Request...
Requests the specified url and returns a BeautifulSoup object with its contents.
[ "Requests", "the", "specified", "url", "and", "returns", "a", "BeautifulSoup", "object", "with", "its", "contents", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L17-L43
train
52,494
ocaballeror/LyricFetch
lyricfetch/scraping.py
get_lastfm
def get_lastfm(method, lastfm_key='', **kwargs): """ Request the specified method from the lastfm api. """ if not lastfm_key: if 'lastfm_key' not in CONFIG or not CONFIG['lastfm_key']: logger.warning('No lastfm key configured') return '' else: lastfm_key = CONFIG['lastfm_key'] url = 'http://ws.audioscrobbler.com/2.0/?method={}&api_key={}&format=json' url = url.format(method, lastfm_key) for key in kwargs: url += '&{}={}'.format(key, kwargs[key]) response = get_url(url, parser='json') if 'error' in response: logger.error('Error number %d in lastfm query: %s', response['error'], response['message']) return '' return response
python
def get_lastfm(method, lastfm_key='', **kwargs): """ Request the specified method from the lastfm api. """ if not lastfm_key: if 'lastfm_key' not in CONFIG or not CONFIG['lastfm_key']: logger.warning('No lastfm key configured') return '' else: lastfm_key = CONFIG['lastfm_key'] url = 'http://ws.audioscrobbler.com/2.0/?method={}&api_key={}&format=json' url = url.format(method, lastfm_key) for key in kwargs: url += '&{}={}'.format(key, kwargs[key]) response = get_url(url, parser='json') if 'error' in response: logger.error('Error number %d in lastfm query: %s', response['error'], response['message']) return '' return response
[ "def", "get_lastfm", "(", "method", ",", "lastfm_key", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "not", "lastfm_key", ":", "if", "'lastfm_key'", "not", "in", "CONFIG", "or", "not", "CONFIG", "[", "'lastfm_key'", "]", ":", "logger", ".", "warn...
Request the specified method from the lastfm api.
[ "Request", "the", "specified", "method", "from", "the", "lastfm", "api", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L46-L68
train
52,495
ocaballeror/LyricFetch
lyricfetch/scraping.py
metrolyrics
def metrolyrics(song): """ Returns the lyrics found in metrolyrics for the specified mp3 file or an empty string if not found. """ translate = {URLESCAPE: '', ' ': '-'} title = song.title.lower() title = normalize(title, translate) title = re.sub(r'\-{2,}', '-', title) artist = song.artist.lower() artist = normalize(artist, translate) artist = re.sub(r'\-{2,}', '-', artist) url = 'http://www.metrolyrics.com/{}-lyrics-{}.html'.format(title, artist) soup = get_url(url) body = soup.find(id='lyrics-body-text') if body is None: return '' text = '' verses = body.find_all('p') for verse in verses: text += verse.get_text().strip() text += '\n\n' return text.strip()
python
def metrolyrics(song): """ Returns the lyrics found in metrolyrics for the specified mp3 file or an empty string if not found. """ translate = {URLESCAPE: '', ' ': '-'} title = song.title.lower() title = normalize(title, translate) title = re.sub(r'\-{2,}', '-', title) artist = song.artist.lower() artist = normalize(artist, translate) artist = re.sub(r'\-{2,}', '-', artist) url = 'http://www.metrolyrics.com/{}-lyrics-{}.html'.format(title, artist) soup = get_url(url) body = soup.find(id='lyrics-body-text') if body is None: return '' text = '' verses = body.find_all('p') for verse in verses: text += verse.get_text().strip() text += '\n\n' return text.strip()
[ "def", "metrolyrics", "(", "song", ")", ":", "translate", "=", "{", "URLESCAPE", ":", "''", ",", "' '", ":", "'-'", "}", "title", "=", "song", ".", "title", ".", "lower", "(", ")", "title", "=", "normalize", "(", "title", ",", "translate", ")", "ti...
Returns the lyrics found in metrolyrics for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "metrolyrics", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L106-L131
train
52,496
ocaballeror/LyricFetch
lyricfetch/scraping.py
darklyrics
def darklyrics(song): """ Returns the lyrics found in darklyrics for the specified mp3 file or an empty string if not found. """ # Darklyrics relies on the album name if not hasattr(song, 'album') or not song.album: song.fetch_album_name() if not hasattr(song, 'album') or not song.album: # If we don't have the name of the album, there's nothing we can do # on darklyrics return '' artist = song.artist.lower() artist = normalize(artist, URLESCAPES, '') album = song.album.lower() album = normalize(album, URLESCAPES, '') title = song.title url = 'http://www.darklyrics.com/lyrics/{}/{}.html'.format(artist, album) soup = get_url(url) text = '' for header in soup.find_all('h3'): song = str(header.get_text()) next_sibling = header.next_sibling if song.lower().find(title.lower()) != -1: while next_sibling is not None and\ (next_sibling.name is None or next_sibling.name != 'h3'): if next_sibling.name is None: text += str(next_sibling) next_sibling = next_sibling.next_sibling return text.strip()
python
def darklyrics(song): """ Returns the lyrics found in darklyrics for the specified mp3 file or an empty string if not found. """ # Darklyrics relies on the album name if not hasattr(song, 'album') or not song.album: song.fetch_album_name() if not hasattr(song, 'album') or not song.album: # If we don't have the name of the album, there's nothing we can do # on darklyrics return '' artist = song.artist.lower() artist = normalize(artist, URLESCAPES, '') album = song.album.lower() album = normalize(album, URLESCAPES, '') title = song.title url = 'http://www.darklyrics.com/lyrics/{}/{}.html'.format(artist, album) soup = get_url(url) text = '' for header in soup.find_all('h3'): song = str(header.get_text()) next_sibling = header.next_sibling if song.lower().find(title.lower()) != -1: while next_sibling is not None and\ (next_sibling.name is None or next_sibling.name != 'h3'): if next_sibling.name is None: text += str(next_sibling) next_sibling = next_sibling.next_sibling return text.strip()
[ "def", "darklyrics", "(", "song", ")", ":", "# Darklyrics relies on the album name", "if", "not", "hasattr", "(", "song", ",", "'album'", ")", "or", "not", "song", ".", "album", ":", "song", ".", "fetch_album_name", "(", ")", "if", "not", "hasattr", "(", "...
Returns the lyrics found in darklyrics for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "darklyrics", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L134-L167
train
52,497
ocaballeror/LyricFetch
lyricfetch/scraping.py
azlyrics
def azlyrics(song): """ Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found. """ artist = song.artist.lower() if artist[0:2] == 'a ': artist = artist[2:] artist = normalize(artist, URLESCAPES, '') title = song.title.lower() title = normalize(title, URLESCAPES, '') url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title) soup = get_url(url) body = soup.find_all('div', class_='')[-1] return body.get_text().strip()
python
def azlyrics(song): """ Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found. """ artist = song.artist.lower() if artist[0:2] == 'a ': artist = artist[2:] artist = normalize(artist, URLESCAPES, '') title = song.title.lower() title = normalize(title, URLESCAPES, '') url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist, title) soup = get_url(url) body = soup.find_all('div', class_='')[-1] return body.get_text().strip()
[ "def", "azlyrics", "(", "song", ")", ":", "artist", "=", "song", ".", "artist", ".", "lower", "(", ")", "if", "artist", "[", "0", ":", "2", "]", "==", "'a '", ":", "artist", "=", "artist", "[", "2", ":", "]", "artist", "=", "normalize", "(", "a...
Returns the lyrics found in azlyrics for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "azlyrics", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L170-L185
train
52,498
ocaballeror/LyricFetch
lyricfetch/scraping.py
metalarchives
def metalarchives(song): """ Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found. """ artist = normalize(song.artist) title = normalize(song.title) url = 'https://www.metal-archives.com/search/ajax-advanced/searching/songs' url += f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1' soup = get_url(url, parser='json') if not soup: return '' song_id_re = re.compile(r'lyricsLink_([0-9]*)') ids = set(re.search(song_id_re, a) for sub in soup['aaData'] for a in sub) if not ids: return '' if None in ids: ids.remove(None) ids = map(lambda a: a.group(1), ids) for song_id in ids: url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}' lyrics = get_url(url.format(song_id), parser='html') lyrics = lyrics.get_text().strip() if not re.search('lyrics not available', lyrics): return lyrics return ''
python
def metalarchives(song): """ Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found. """ artist = normalize(song.artist) title = normalize(song.title) url = 'https://www.metal-archives.com/search/ajax-advanced/searching/songs' url += f'/?songTitle={title}&bandName={artist}&ExactBandMatch=1' soup = get_url(url, parser='json') if not soup: return '' song_id_re = re.compile(r'lyricsLink_([0-9]*)') ids = set(re.search(song_id_re, a) for sub in soup['aaData'] for a in sub) if not ids: return '' if None in ids: ids.remove(None) ids = map(lambda a: a.group(1), ids) for song_id in ids: url = 'https://www.metal-archives.com/release/ajax-view-lyrics/id/{}' lyrics = get_url(url.format(song_id), parser='html') lyrics = lyrics.get_text().strip() if not re.search('lyrics not available', lyrics): return lyrics return ''
[ "def", "metalarchives", "(", "song", ")", ":", "artist", "=", "normalize", "(", "song", ".", "artist", ")", "title", "=", "normalize", "(", "song", ".", "title", ")", "url", "=", "'https://www.metal-archives.com/search/ajax-advanced/searching/songs'", "url", "+=",...
Returns the lyrics found in MetalArchives for the specified mp3 file or an empty string if not found.
[ "Returns", "the", "lyrics", "found", "in", "MetalArchives", "for", "the", "specified", "mp3", "file", "or", "an", "empty", "string", "if", "not", "found", "." ]
86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb
https://github.com/ocaballeror/LyricFetch/blob/86e62fb39c4c413ad7e1acf5bf0d28c9ed7c8fcb/lyricfetch/scraping.py#L215-L244
train
52,499