repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
cloudboss/friend
friend/utils.py
retry_bool
def retry_bool(callback, times=3, cap=120000): """ Retry a callback function if it returns False. :param function callback: The function to call :keyword int times: Number of times to retry on initial failure :keyword int cap: Maximum wait time in milliseconds :returns: The return value of the callback :rtype: bool """ for attempt in range(times + 1): if attempt > 0: time.sleep(retry_wait_time(attempt, cap) / 1000.0) ret = callback() if ret or attempt == times: break return ret
python
def retry_bool(callback, times=3, cap=120000): """ Retry a callback function if it returns False. :param function callback: The function to call :keyword int times: Number of times to retry on initial failure :keyword int cap: Maximum wait time in milliseconds :returns: The return value of the callback :rtype: bool """ for attempt in range(times + 1): if attempt > 0: time.sleep(retry_wait_time(attempt, cap) / 1000.0) ret = callback() if ret or attempt == times: break return ret
[ "def", "retry_bool", "(", "callback", ",", "times", "=", "3", ",", "cap", "=", "120000", ")", ":", "for", "attempt", "in", "range", "(", "times", "+", "1", ")", ":", "if", "attempt", ">", "0", ":", "time", ".", "sleep", "(", "retry_wait_time", "(",...
Retry a callback function if it returns False. :param function callback: The function to call :keyword int times: Number of times to retry on initial failure :keyword int cap: Maximum wait time in milliseconds :returns: The return value of the callback :rtype: bool
[ "Retry", "a", "callback", "function", "if", "it", "returns", "False", "." ]
train
https://github.com/cloudboss/friend/blob/3357e6ec849552e3ae9ed28017ff0926e4006e4e/friend/utils.py#L96-L112
cloudboss/friend
friend/utils.py
retryable
def retryable(retryer=retry_ex, times=3, cap=120000): """ A decorator to make a function retry. By default the retry occurs when an exception is thrown, but this may be changed by modifying the ``retryer`` argument. See also :py:func:`retry_ex` and :py:func:`retry_bool`. By default :py:func:`retry_ex` is used as the retry function. Note that the decorator must be called even if not given keyword arguments. :param function retryer: A function to handle retries :param int times: Number of times to retry on initial failure :param int cap: Maximum wait time in milliseconds :Example: :: @retryable() def can_fail(): .... @retryable(retryer=retry_bool, times=10) def can_fail_bool(): .... """ def _retryable(func): @f.wraps(func) def wrapper(*args, **kwargs): return retryer(lambda: func(*args, **kwargs), times, cap) return wrapper return _retryable
python
def retryable(retryer=retry_ex, times=3, cap=120000): """ A decorator to make a function retry. By default the retry occurs when an exception is thrown, but this may be changed by modifying the ``retryer`` argument. See also :py:func:`retry_ex` and :py:func:`retry_bool`. By default :py:func:`retry_ex` is used as the retry function. Note that the decorator must be called even if not given keyword arguments. :param function retryer: A function to handle retries :param int times: Number of times to retry on initial failure :param int cap: Maximum wait time in milliseconds :Example: :: @retryable() def can_fail(): .... @retryable(retryer=retry_bool, times=10) def can_fail_bool(): .... """ def _retryable(func): @f.wraps(func) def wrapper(*args, **kwargs): return retryer(lambda: func(*args, **kwargs), times, cap) return wrapper return _retryable
[ "def", "retryable", "(", "retryer", "=", "retry_ex", ",", "times", "=", "3", ",", "cap", "=", "120000", ")", ":", "def", "_retryable", "(", "func", ")", ":", "@", "f", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", ...
A decorator to make a function retry. By default the retry occurs when an exception is thrown, but this may be changed by modifying the ``retryer`` argument. See also :py:func:`retry_ex` and :py:func:`retry_bool`. By default :py:func:`retry_ex` is used as the retry function. Note that the decorator must be called even if not given keyword arguments. :param function retryer: A function to handle retries :param int times: Number of times to retry on initial failure :param int cap: Maximum wait time in milliseconds :Example: :: @retryable() def can_fail(): .... @retryable(retryer=retry_bool, times=10) def can_fail_bool(): ....
[ "A", "decorator", "to", "make", "a", "function", "retry", ".", "By", "default", "the", "retry", "occurs", "when", "an", "exception", "is", "thrown", "but", "this", "may", "be", "changed", "by", "modifying", "the", "retryer", "argument", "." ]
train
https://github.com/cloudboss/friend/blob/3357e6ec849552e3ae9ed28017ff0926e4006e4e/friend/utils.py#L115-L148
cloudboss/friend
friend/utils.py
ensure_environment
def ensure_environment(variables): """ Check os.environ to ensure that a given collection of variables has been set. :param variables: A collection of environment variable names :returns: os.environ :raises IncompleteEnvironment: if any variables are not set, with the exception's ``variables`` attribute populated with the missing variables """ missing = [v for v in variables if v not in os.environ] if missing: formatted = ', '.join(missing) message = 'Environment variables not set: {}'.format(formatted) raise IncompleteEnvironment(message, missing) return os.environ
python
def ensure_environment(variables): """ Check os.environ to ensure that a given collection of variables has been set. :param variables: A collection of environment variable names :returns: os.environ :raises IncompleteEnvironment: if any variables are not set, with the exception's ``variables`` attribute populated with the missing variables """ missing = [v for v in variables if v not in os.environ] if missing: formatted = ', '.join(missing) message = 'Environment variables not set: {}'.format(formatted) raise IncompleteEnvironment(message, missing) return os.environ
[ "def", "ensure_environment", "(", "variables", ")", ":", "missing", "=", "[", "v", "for", "v", "in", "variables", "if", "v", "not", "in", "os", ".", "environ", "]", "if", "missing", ":", "formatted", "=", "', '", ".", "join", "(", "missing", ")", "me...
Check os.environ to ensure that a given collection of variables has been set. :param variables: A collection of environment variable names :returns: os.environ :raises IncompleteEnvironment: if any variables are not set, with the exception's ``variables`` attribute populated with the missing variables
[ "Check", "os", ".", "environ", "to", "ensure", "that", "a", "given", "collection", "of", "variables", "has", "been", "set", "." ]
train
https://github.com/cloudboss/friend/blob/3357e6ec849552e3ae9ed28017ff0926e4006e4e/friend/utils.py#L151-L167
BD2KOnFHIR/i2b2model
i2b2model/sqlsupport/i2b2tables.py
change_column_length
def change_column_length(table: Table, column: Column, length: int, engine: Engine) -> None: """ Change the column length in the supplied table """ if column.type.length < length: print("Changing length of {} from {} to {}".format(column, column.type.length, length)) column.type.length = length column_name = column.name column_type = column.type.compile(engine.dialect) engine.execute('ALTER TABLE {table} ALTER COLUMN {column_name} TYPE {column_type}'.format(**locals()))
python
def change_column_length(table: Table, column: Column, length: int, engine: Engine) -> None: """ Change the column length in the supplied table """ if column.type.length < length: print("Changing length of {} from {} to {}".format(column, column.type.length, length)) column.type.length = length column_name = column.name column_type = column.type.compile(engine.dialect) engine.execute('ALTER TABLE {table} ALTER COLUMN {column_name} TYPE {column_type}'.format(**locals()))
[ "def", "change_column_length", "(", "table", ":", "Table", ",", "column", ":", "Column", ",", "length", ":", "int", ",", "engine", ":", "Engine", ")", "->", "None", ":", "if", "column", ".", "type", ".", "length", "<", "length", ":", "print", "(", "\...
Change the column length in the supplied table
[ "Change", "the", "column", "length", "in", "the", "supplied", "table" ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/sqlsupport/i2b2tables.py#L72-L80
BD2KOnFHIR/i2b2model
i2b2model/sqlsupport/i2b2tables.py
I2B2Tables._db_urls
def _db_urls(opts: Namespace) -> Tuple[str, str]: """ Return the crc and ontology db urls :param opts: options :return: Tuple w/ crc and ontology url """ return opts.crcdb.replace("//", "//{crcuser}:{crcpassword}@".format(**opts.__dict__)),\ opts.ontodb.replace("//", "//{ontouser}:{ontopassword}@".format(**opts.__dict__))
python
def _db_urls(opts: Namespace) -> Tuple[str, str]: """ Return the crc and ontology db urls :param opts: options :return: Tuple w/ crc and ontology url """ return opts.crcdb.replace("//", "//{crcuser}:{crcpassword}@".format(**opts.__dict__)),\ opts.ontodb.replace("//", "//{ontouser}:{ontopassword}@".format(**opts.__dict__))
[ "def", "_db_urls", "(", "opts", ":", "Namespace", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "return", "opts", ".", "crcdb", ".", "replace", "(", "\"//\"", ",", "\"//{crcuser}:{crcpassword}@\"", ".", "format", "(", "*", "*", "opts", ".", "__...
Return the crc and ontology db urls :param opts: options :return: Tuple w/ crc and ontology url
[ "Return", "the", "crc", "and", "ontology", "db", "urls", ":", "param", "opts", ":", "options", ":", "return", ":", "Tuple", "w", "/", "crc", "and", "ontology", "url" ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/sqlsupport/i2b2tables.py#L55-L62
BD2KOnFHIR/i2b2model
i2b2model/sqlsupport/i2b2tables.py
I2B2Tables._tables
def _tables(self) -> List[Tuple[str, str]]: """ Return a list of all known tables and and its full URI :return: table name and full URI """ return [(k.rsplit('.', 1)[1] if '.' in k else k, k) for k in self._ont_tables.keys()]
python
def _tables(self) -> List[Tuple[str, str]]: """ Return a list of all known tables and and its full URI :return: table name and full URI """ return [(k.rsplit('.', 1)[1] if '.' in k else k, k) for k in self._ont_tables.keys()]
[ "def", "_tables", "(", "self", ")", "->", "List", "[", "Tuple", "[", "str", ",", "str", "]", "]", ":", "return", "[", "(", "k", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "1", "]", "if", "'.'", "in", "k", "else", "k", ",", "k", ")", "...
Return a list of all known tables and and its full URI :return: table name and full URI
[ "Return", "a", "list", "of", "all", "known", "tables", "and", "and", "its", "full", "URI", ":", "return", ":", "table", "name", "and", "full", "URI" ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/sqlsupport/i2b2tables.py#L64-L69
adammhaile/gitdata
gitdata/assertion.py
isdir
def isdir(path, message): """ Raise an exception if the given directory does not exist. :param path: The path to a directory to be tested :param message: A custom message to report in the exception :raises: FileNotFoundError """ if not os.path.isdir(path): raise FileNotFoundError( errno.ENOENT, "{}: {}".format(message, os.strerror(errno.ENOENT)), path)
python
def isdir(path, message): """ Raise an exception if the given directory does not exist. :param path: The path to a directory to be tested :param message: A custom message to report in the exception :raises: FileNotFoundError """ if not os.path.isdir(path): raise FileNotFoundError( errno.ENOENT, "{}: {}".format(message, os.strerror(errno.ENOENT)), path)
[ "def", "isdir", "(", "path", ",", "message", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "raise", "FileNotFoundError", "(", "errno", ".", "ENOENT", ",", "\"{}: {}\"", ".", "format", "(", "message", ",", "os", ".", ...
Raise an exception if the given directory does not exist. :param path: The path to a directory to be tested :param message: A custom message to report in the exception :raises: FileNotFoundError
[ "Raise", "an", "exception", "if", "the", "given", "directory", "does", "not", "exist", "." ]
train
https://github.com/adammhaile/gitdata/blob/93112899737d63855655d438e3027192abd76a37/gitdata/assertion.py#L24-L36
cohorte/cohorte-herald
python/herald/rshell.py
_HeraldOutputStream.flush
def flush(self): """ Sends buffered data to the target """ # Flush buffer line = self._buffer.getvalue() self._buffer = StringIO() # Send the message content = {"session_id": self._session, "text": line} self._herald.fire(self._peer, beans.Message(MSG_CLIENT_PRINT, content))
python
def flush(self): """ Sends buffered data to the target """ # Flush buffer line = self._buffer.getvalue() self._buffer = StringIO() # Send the message content = {"session_id": self._session, "text": line} self._herald.fire(self._peer, beans.Message(MSG_CLIENT_PRINT, content))
[ "def", "flush", "(", "self", ")", ":", "# Flush buffer", "line", "=", "self", ".", "_buffer", ".", "getvalue", "(", ")", "self", ".", "_buffer", "=", "StringIO", "(", ")", "# Send the message", "content", "=", "{", "\"session_id\"", ":", "self", ".", "_s...
Sends buffered data to the target
[ "Sends", "buffered", "data", "to", "the", "target" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/rshell.py#L337-L347
cohorte/cohorte-herald
python/herald/rshell.py
_HeraldInputStream.readline
def readline(self): """ Waits for a line from the Herald client """ content = {"session_id": self._session} prompt_msg = self._herald.send( self._peer, beans.Message(MSG_CLIENT_PROMPT, content)) if prompt_msg.subject == MSG_SERVER_CLOSE: # Client closed its shell raise EOFError return prompt_msg.content
python
def readline(self): """ Waits for a line from the Herald client """ content = {"session_id": self._session} prompt_msg = self._herald.send( self._peer, beans.Message(MSG_CLIENT_PROMPT, content)) if prompt_msg.subject == MSG_SERVER_CLOSE: # Client closed its shell raise EOFError return prompt_msg.content
[ "def", "readline", "(", "self", ")", ":", "content", "=", "{", "\"session_id\"", ":", "self", ".", "_session", "}", "prompt_msg", "=", "self", ".", "_herald", ".", "send", "(", "self", ".", "_peer", ",", "beans", ".", "Message", "(", "MSG_CLIENT_PROMPT",...
Waits for a line from the Herald client
[ "Waits", "for", "a", "line", "from", "the", "Herald", "client" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/rshell.py#L366-L377
chrlie/frogsay
src/frogsay/speech.py
make_frog_fresco
def make_frog_fresco(text, width, padding=8): """\ Formats your lovely text into a speech bubble spouted by this adorable little frog. """ stem = r' /' frog = r""" {text} {stem} @..@ (----) ( >__< ) ^^ ~~ ^^""" offset = len(stem) - 1 formatted_indent = ' ' * offset formatted_text = textwrap.fill(text, width=width-padding, initial_indent=formatted_indent, subsequent_indent=formatted_indent) return frog.format(stem=stem, text=formatted_text)
python
def make_frog_fresco(text, width, padding=8): """\ Formats your lovely text into a speech bubble spouted by this adorable little frog. """ stem = r' /' frog = r""" {text} {stem} @..@ (----) ( >__< ) ^^ ~~ ^^""" offset = len(stem) - 1 formatted_indent = ' ' * offset formatted_text = textwrap.fill(text, width=width-padding, initial_indent=formatted_indent, subsequent_indent=formatted_indent) return frog.format(stem=stem, text=formatted_text)
[ "def", "make_frog_fresco", "(", "text", ",", "width", ",", "padding", "=", "8", ")", ":", "stem", "=", "r' /'", "frog", "=", "r\"\"\"\n{text}\n{stem}\n @..@\n (----)\n( >__< )\n^^ ~~ ^^\"\"\"", "offset", "=", "len", "(", "stem", ")", "-", "1", "formatted_i...
\ Formats your lovely text into a speech bubble spouted by this adorable little frog.
[ "\\", "Formats", "your", "lovely", "text", "into", "a", "speech", "bubble", "spouted", "by", "this", "adorable", "little", "frog", "." ]
train
https://github.com/chrlie/frogsay/blob/1c21e1401dc24719732218af830d34b842ab10b9/src/frogsay/speech.py#L5-L24
azraq27/neural
neural/freesurfer.py
mgz_to_nifti
def mgz_to_nifti(filename,prefix=None,gzip=True): '''Convert ``filename`` to a NIFTI file using ``mri_convert``''' setup_freesurfer() if prefix==None: prefix = nl.prefix(filename) + '.nii' if gzip and not prefix.endswith('.gz'): prefix += '.gz' nl.run([os.path.join(freesurfer_home,'bin','mri_convert'),filename,prefix],products=prefix)
python
def mgz_to_nifti(filename,prefix=None,gzip=True): '''Convert ``filename`` to a NIFTI file using ``mri_convert``''' setup_freesurfer() if prefix==None: prefix = nl.prefix(filename) + '.nii' if gzip and not prefix.endswith('.gz'): prefix += '.gz' nl.run([os.path.join(freesurfer_home,'bin','mri_convert'),filename,prefix],products=prefix)
[ "def", "mgz_to_nifti", "(", "filename", ",", "prefix", "=", "None", ",", "gzip", "=", "True", ")", ":", "setup_freesurfer", "(", ")", "if", "prefix", "==", "None", ":", "prefix", "=", "nl", ".", "prefix", "(", "filename", ")", "+", "'.nii'", "if", "g...
Convert ``filename`` to a NIFTI file using ``mri_convert``
[ "Convert", "filename", "to", "a", "NIFTI", "file", "using", "mri_convert" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/freesurfer.py#L34-L41
azraq27/neural
neural/freesurfer.py
guess_home
def guess_home(): '''If ``freesurfer_home`` is not set, try to make an intelligent guess at it''' global freesurfer_home if freesurfer_home != None: return True # if we already have it in the path, use that fv = nl.which('freeview') if fv: freesurfer_home = parpar_dir(os.path.realpath(fv)) return True for guess_dir in guess_locations: if os.path.exists(guess_dir): freesurfer_home = guess_dir return True return False
python
def guess_home(): '''If ``freesurfer_home`` is not set, try to make an intelligent guess at it''' global freesurfer_home if freesurfer_home != None: return True # if we already have it in the path, use that fv = nl.which('freeview') if fv: freesurfer_home = parpar_dir(os.path.realpath(fv)) return True for guess_dir in guess_locations: if os.path.exists(guess_dir): freesurfer_home = guess_dir return True return False
[ "def", "guess_home", "(", ")", ":", "global", "freesurfer_home", "if", "freesurfer_home", "!=", "None", ":", "return", "True", "# if we already have it in the path, use that", "fv", "=", "nl", ".", "which", "(", "'freeview'", ")", "if", "fv", ":", "freesurfer_home...
If ``freesurfer_home`` is not set, try to make an intelligent guess at it
[ "If", "freesurfer_home", "is", "not", "set", "try", "to", "make", "an", "intelligent", "guess", "at", "it" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/freesurfer.py#L43-L57
azraq27/neural
neural/freesurfer.py
setup_freesurfer
def setup_freesurfer(): '''Setup the freesurfer environment variables''' guess_home() os.environ['FREESURFER_HOME'] = freesurfer_home os.environ['SUBJECTS_DIR'] = subjects_dir # Run the setup script and collect the output: o = subprocess.check_output(['bash','-c','source %s/SetUpFreeSurfer.sh && env' % freesurfer_home]) env = [(a.partition('=')[0],a.partition('=')[2]) for a in o.split('\n') if len(a.strip())>0] for e in env: os.environ[e[0]] = e[1] environ_setup = True
python
def setup_freesurfer(): '''Setup the freesurfer environment variables''' guess_home() os.environ['FREESURFER_HOME'] = freesurfer_home os.environ['SUBJECTS_DIR'] = subjects_dir # Run the setup script and collect the output: o = subprocess.check_output(['bash','-c','source %s/SetUpFreeSurfer.sh && env' % freesurfer_home]) env = [(a.partition('=')[0],a.partition('=')[2]) for a in o.split('\n') if len(a.strip())>0] for e in env: os.environ[e[0]] = e[1] environ_setup = True
[ "def", "setup_freesurfer", "(", ")", ":", "guess_home", "(", ")", "os", ".", "environ", "[", "'FREESURFER_HOME'", "]", "=", "freesurfer_home", "os", ".", "environ", "[", "'SUBJECTS_DIR'", "]", "=", "subjects_dir", "# Run the setup script and collect the output:", "o...
Setup the freesurfer environment variables
[ "Setup", "the", "freesurfer", "environment", "variables" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/freesurfer.py#L60-L70
azraq27/neural
neural/freesurfer.py
recon_all
def recon_all(subj_id,anatomies): '''Run the ``recon_all`` script''' if not environ_setup: setup_freesurfer() if isinstance(anatomies,basestring): anatomies = [anatomies] nl.run([os.path.join(freesurfer_home,'bin','recon-all'),'-all','-subjid',subj_id] + [['-i',anat] for anat in anatomies])
python
def recon_all(subj_id,anatomies): '''Run the ``recon_all`` script''' if not environ_setup: setup_freesurfer() if isinstance(anatomies,basestring): anatomies = [anatomies] nl.run([os.path.join(freesurfer_home,'bin','recon-all'),'-all','-subjid',subj_id] + [['-i',anat] for anat in anatomies])
[ "def", "recon_all", "(", "subj_id", ",", "anatomies", ")", ":", "if", "not", "environ_setup", ":", "setup_freesurfer", "(", ")", "if", "isinstance", "(", "anatomies", ",", "basestring", ")", ":", "anatomies", "=", "[", "anatomies", "]", "nl", ".", "run", ...
Run the ``recon_all`` script
[ "Run", "the", "recon_all", "script" ]
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/freesurfer.py#L72-L78
mmerickel/subparse
src/subparse/__init__.py
parse_docstring
def parse_docstring(docstring): """ Parse a PEP-257 docstring. SHORT -> blank line -> LONG """ short_desc = long_desc = '' if docstring: docstring = trim(docstring.lstrip('\n')) lines = docstring.split('\n\n', 1) short_desc = lines[0].strip().replace('\n', ' ') if len(lines) > 1: long_desc = lines[1].strip() return short_desc, long_desc
python
def parse_docstring(docstring): """ Parse a PEP-257 docstring. SHORT -> blank line -> LONG """ short_desc = long_desc = '' if docstring: docstring = trim(docstring.lstrip('\n')) lines = docstring.split('\n\n', 1) short_desc = lines[0].strip().replace('\n', ' ') if len(lines) > 1: long_desc = lines[1].strip() return short_desc, long_desc
[ "def", "parse_docstring", "(", "docstring", ")", ":", "short_desc", "=", "long_desc", "=", "''", "if", "docstring", ":", "docstring", "=", "trim", "(", "docstring", ".", "lstrip", "(", "'\\n'", ")", ")", "lines", "=", "docstring", ".", "split", "(", "'\\...
Parse a PEP-257 docstring. SHORT -> blank line -> LONG
[ "Parse", "a", "PEP", "-", "257", "docstring", "." ]
train
https://github.com/mmerickel/subparse/blob/a4bb94b709c0776ccf81f6dcb47922fa5910c19f/src/subparse/__init__.py#L256-L271
mmerickel/subparse
src/subparse/__init__.py
CLI.add_command
def add_command(self, factory, main, name=None, context_kwargs=None): """ Attach a command directly to the :class:`CLI` object. """ if name is None: name = factory.__name__.replace('_', '-') if context_kwargs is None: context_kwargs = {} short_desc, long_desc = parse_docstring(factory.__doc__) if long_desc: long_desc = short_desc + '\n\n' + long_desc # determine the absolute import string if relative if isinstance(main, str) and ( main.startswith('.') or main.startswith(':') ): module = __import__(factory.__module__, None, None, ['__doc__']) package = package_for_module(module) if main in ['.', ':']: main = package.__name__ else: main = package.__name__ + main self.commands[name] = CommandMeta( factory=factory, main=main, name=name, help=short_desc, description=long_desc, context_kwargs=context_kwargs, )
python
def add_command(self, factory, main, name=None, context_kwargs=None): """ Attach a command directly to the :class:`CLI` object. """ if name is None: name = factory.__name__.replace('_', '-') if context_kwargs is None: context_kwargs = {} short_desc, long_desc = parse_docstring(factory.__doc__) if long_desc: long_desc = short_desc + '\n\n' + long_desc # determine the absolute import string if relative if isinstance(main, str) and ( main.startswith('.') or main.startswith(':') ): module = __import__(factory.__module__, None, None, ['__doc__']) package = package_for_module(module) if main in ['.', ':']: main = package.__name__ else: main = package.__name__ + main self.commands[name] = CommandMeta( factory=factory, main=main, name=name, help=short_desc, description=long_desc, context_kwargs=context_kwargs, )
[ "def", "add_command", "(", "self", ",", "factory", ",", "main", ",", "name", "=", "None", ",", "context_kwargs", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "factory", ".", "__name__", ".", "replace", "(", "'_'", ",", "'-'", ...
Attach a command directly to the :class:`CLI` object.
[ "Attach", "a", "command", "directly", "to", "the", ":", "class", ":", "CLI", "object", "." ]
train
https://github.com/mmerickel/subparse/blob/a4bb94b709c0776ccf81f6dcb47922fa5910c19f/src/subparse/__init__.py#L68-L101
mmerickel/subparse
src/subparse/__init__.py
CLI.command
def command(self, *args, **kwargs): """ Attach a command to the current :class:`CLI` object. The function should accept an instance of an :class:`argparse.ArgumentParser` and use it to define extra arguments and options. These options will only affect the specified command. """ def wrapper(func): self.add_command(func, *args, **kwargs) return func return wrapper
python
def command(self, *args, **kwargs): """ Attach a command to the current :class:`CLI` object. The function should accept an instance of an :class:`argparse.ArgumentParser` and use it to define extra arguments and options. These options will only affect the specified command. """ def wrapper(func): self.add_command(func, *args, **kwargs) return func return wrapper
[ "def", "command", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "wrapper", "(", "func", ")", ":", "self", ".", "add_command", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "func", "return", "wra...
Attach a command to the current :class:`CLI` object. The function should accept an instance of an :class:`argparse.ArgumentParser` and use it to define extra arguments and options. These options will only affect the specified command.
[ "Attach", "a", "command", "to", "the", "current", ":", "class", ":", "CLI", "object", "." ]
train
https://github.com/mmerickel/subparse/blob/a4bb94b709c0776ccf81f6dcb47922fa5910c19f/src/subparse/__init__.py#L103-L118
mmerickel/subparse
src/subparse/__init__.py
CLI.load_commands
def load_commands(self, obj): """ Load commands defined on an arbitrary object. All functions decorated with the :func:`subparse.command` decorator attached the specified object will be loaded. The object may be a dictionary, an arbitrary python object, or a dotted path. The dotted path may be absolute, or relative to the current package by specifying a leading '.' (e.g. ``'.commands'``). """ if isinstance(obj, str): if obj.startswith('.') or obj.startswith(':'): package = caller_package() if obj in ['.', ':']: obj = package.__name__ else: obj = package.__name__ + obj obj = pkg_resources.EntryPoint.parse('x=%s' % obj).resolve() command.discover_and_call(obj, self.command)
python
def load_commands(self, obj): """ Load commands defined on an arbitrary object. All functions decorated with the :func:`subparse.command` decorator attached the specified object will be loaded. The object may be a dictionary, an arbitrary python object, or a dotted path. The dotted path may be absolute, or relative to the current package by specifying a leading '.' (e.g. ``'.commands'``). """ if isinstance(obj, str): if obj.startswith('.') or obj.startswith(':'): package = caller_package() if obj in ['.', ':']: obj = package.__name__ else: obj = package.__name__ + obj obj = pkg_resources.EntryPoint.parse('x=%s' % obj).resolve() command.discover_and_call(obj, self.command)
[ "def", "load_commands", "(", "self", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "str", ")", ":", "if", "obj", ".", "startswith", "(", "'.'", ")", "or", "obj", ".", "startswith", "(", "':'", ")", ":", "package", "=", "caller_package", ...
Load commands defined on an arbitrary object. All functions decorated with the :func:`subparse.command` decorator attached the specified object will be loaded. The object may be a dictionary, an arbitrary python object, or a dotted path. The dotted path may be absolute, or relative to the current package by specifying a leading '.' (e.g. ``'.commands'``).
[ "Load", "commands", "defined", "on", "an", "arbitrary", "object", "." ]
train
https://github.com/mmerickel/subparse/blob/a4bb94b709c0776ccf81f6dcb47922fa5910c19f/src/subparse/__init__.py#L120-L140
mmerickel/subparse
src/subparse/__init__.py
CLI.load_commands_from_entry_point
def load_commands_from_entry_point(self, specifier): """ Load commands defined within a pkg_resources entry point. Each entry will be a module that should be searched for functions decorated with the :func:`subparse.command` decorator. This operation is not recursive. """ for ep in pkg_resources.iter_entry_points(specifier): module = ep.load() command.discover_and_call(module, self.command)
python
def load_commands_from_entry_point(self, specifier): """ Load commands defined within a pkg_resources entry point. Each entry will be a module that should be searched for functions decorated with the :func:`subparse.command` decorator. This operation is not recursive. """ for ep in pkg_resources.iter_entry_points(specifier): module = ep.load() command.discover_and_call(module, self.command)
[ "def", "load_commands_from_entry_point", "(", "self", ",", "specifier", ")", ":", "for", "ep", "in", "pkg_resources", ".", "iter_entry_points", "(", "specifier", ")", ":", "module", "=", "ep", ".", "load", "(", ")", "command", ".", "discover_and_call", "(", ...
Load commands defined within a pkg_resources entry point. Each entry will be a module that should be searched for functions decorated with the :func:`subparse.command` decorator. This operation is not recursive.
[ "Load", "commands", "defined", "within", "a", "pkg_resources", "entry", "point", "." ]
train
https://github.com/mmerickel/subparse/blob/a4bb94b709c0776ccf81f6dcb47922fa5910c19f/src/subparse/__init__.py#L142-L153
mmerickel/subparse
src/subparse/__init__.py
CLI.run
def run(self, argv=None): """ Run the command-line application. This will dispatch to the specified function or raise a ``SystemExit`` and output the appropriate usage information if there is an error parsing the arguments. The default ``argv`` is equivalent to ``sys.argv[1:]``. """ if argv is None: # pragma: no cover argv = sys.argv[1:] argv = [str(v) for v in argv] meta, args = parse_args(self, argv) context_factory = contextmanager(make_generator(self.context_factory)) with context_factory(self, args, **meta.context_kwargs) as context: main = load_main(meta) return main(context, args) or 0
python
def run(self, argv=None): """ Run the command-line application. This will dispatch to the specified function or raise a ``SystemExit`` and output the appropriate usage information if there is an error parsing the arguments. The default ``argv`` is equivalent to ``sys.argv[1:]``. """ if argv is None: # pragma: no cover argv = sys.argv[1:] argv = [str(v) for v in argv] meta, args = parse_args(self, argv) context_factory = contextmanager(make_generator(self.context_factory)) with context_factory(self, args, **meta.context_kwargs) as context: main = load_main(meta) return main(context, args) or 0
[ "def", "run", "(", "self", ",", "argv", "=", "None", ")", ":", "if", "argv", "is", "None", ":", "# pragma: no cover", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "argv", "=", "[", "str", "(", "v", ")", "for", "v", "in", "argv", "]", ...
Run the command-line application. This will dispatch to the specified function or raise a ``SystemExit`` and output the appropriate usage information if there is an error parsing the arguments. The default ``argv`` is equivalent to ``sys.argv[1:]``.
[ "Run", "the", "command", "-", "line", "application", "." ]
train
https://github.com/mmerickel/subparse/blob/a4bb94b709c0776ccf81f6dcb47922fa5910c19f/src/subparse/__init__.py#L155-L173
cohorte/cohorte-herald
python/run_xmpp.py
main
def main(xmpp_server, xmpp_port, peer_name, node_name, app_id, xmpp_jid=None, xmpp_password=None): """ Runs the framework :param xmpp_server: Address of the XMPP server :param xmpp_port: Port of the XMPP server :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID :param xmpp_jid: XMPP JID, None for Anonymous login :param xmpp_password: XMPP account password """ # Create the framework framework = pelix.framework.create_framework( ('pelix.ipopo.core', 'pelix.ipopo.waiting', 'pelix.shell.core', 'pelix.shell.ipopo', 'pelix.shell.console', # Herald core 'herald.core', 'herald.directory', 'herald.shell', # Herald XMPP 'herald.transports.xmpp.directory', 'herald.transports.xmpp.transport', # RPC 'pelix.remote.dispatcher', 'pelix.remote.registry', 'herald.remote.discovery', 'herald.remote.herald_xmlrpc',), {herald.FWPROP_NODE_UID: node_name, herald.FWPROP_NODE_NAME: node_name, herald.FWPROP_PEER_NAME: peer_name, herald.FWPROP_APPLICATION_ID: app_id}) context = framework.get_bundle_context() # Start everything framework.start() # Instantiate components with use_waiting_list(context) as ipopo: # ... XMPP Transport ipopo.add(herald.transports.xmpp.FACTORY_TRANSPORT, "herald-xmpp-transport", {herald.transports.xmpp.PROP_XMPP_SERVER: xmpp_server, herald.transports.xmpp.PROP_XMPP_PORT: xmpp_port, herald.transports.xmpp.PROP_XMPP_JID: xmpp_jid, herald.transports.xmpp.PROP_XMPP_PASSWORD: xmpp_password}) # Start the framework and wait for it to stop framework.wait_for_stop()
python
def main(xmpp_server, xmpp_port, peer_name, node_name, app_id, xmpp_jid=None, xmpp_password=None): """ Runs the framework :param xmpp_server: Address of the XMPP server :param xmpp_port: Port of the XMPP server :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID :param xmpp_jid: XMPP JID, None for Anonymous login :param xmpp_password: XMPP account password """ # Create the framework framework = pelix.framework.create_framework( ('pelix.ipopo.core', 'pelix.ipopo.waiting', 'pelix.shell.core', 'pelix.shell.ipopo', 'pelix.shell.console', # Herald core 'herald.core', 'herald.directory', 'herald.shell', # Herald XMPP 'herald.transports.xmpp.directory', 'herald.transports.xmpp.transport', # RPC 'pelix.remote.dispatcher', 'pelix.remote.registry', 'herald.remote.discovery', 'herald.remote.herald_xmlrpc',), {herald.FWPROP_NODE_UID: node_name, herald.FWPROP_NODE_NAME: node_name, herald.FWPROP_PEER_NAME: peer_name, herald.FWPROP_APPLICATION_ID: app_id}) context = framework.get_bundle_context() # Start everything framework.start() # Instantiate components with use_waiting_list(context) as ipopo: # ... XMPP Transport ipopo.add(herald.transports.xmpp.FACTORY_TRANSPORT, "herald-xmpp-transport", {herald.transports.xmpp.PROP_XMPP_SERVER: xmpp_server, herald.transports.xmpp.PROP_XMPP_PORT: xmpp_port, herald.transports.xmpp.PROP_XMPP_JID: xmpp_jid, herald.transports.xmpp.PROP_XMPP_PASSWORD: xmpp_password}) # Start the framework and wait for it to stop framework.wait_for_stop()
[ "def", "main", "(", "xmpp_server", ",", "xmpp_port", ",", "peer_name", ",", "node_name", ",", "app_id", ",", "xmpp_jid", "=", "None", ",", "xmpp_password", "=", "None", ")", ":", "# Create the framework", "framework", "=", "pelix", ".", "framework", ".", "cr...
Runs the framework :param xmpp_server: Address of the XMPP server :param xmpp_port: Port of the XMPP server :param peer_name: Name of the peer :param node_name: Name (also, UID) of the node hosting the peer :param app_id: Application ID :param xmpp_jid: XMPP JID, None for Anonymous login :param xmpp_password: XMPP account password
[ "Runs", "the", "framework" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/run_xmpp.py#L49-L104
JIC-CSB/jicbioimage.transform
jicbioimage/transform/__init__.py
remove_small_objects
def remove_small_objects(image, min_size=50, connectivity=1): """Remove small objects from an boolean image. :param image: boolean numpy array or :class:`jicbioimage.core.image.Image` :returns: boolean :class:`jicbioimage.core.image.Image` """ return skimage.morphology.remove_small_objects(image, min_size=min_size, connectivity=connectivity)
python
def remove_small_objects(image, min_size=50, connectivity=1): """Remove small objects from an boolean image. :param image: boolean numpy array or :class:`jicbioimage.core.image.Image` :returns: boolean :class:`jicbioimage.core.image.Image` """ return skimage.morphology.remove_small_objects(image, min_size=min_size, connectivity=connectivity)
[ "def", "remove_small_objects", "(", "image", ",", "min_size", "=", "50", ",", "connectivity", "=", "1", ")", ":", "return", "skimage", ".", "morphology", ".", "remove_small_objects", "(", "image", ",", "min_size", "=", "min_size", ",", "connectivity", "=", "...
Remove small objects from an boolean image. :param image: boolean numpy array or :class:`jicbioimage.core.image.Image` :returns: boolean :class:`jicbioimage.core.image.Image`
[ "Remove", "small", "objects", "from", "an", "boolean", "image", "." ]
train
https://github.com/JIC-CSB/jicbioimage.transform/blob/494c282d964c3a9b54c2a1b3730f5625ea2a494b/jicbioimage/transform/__init__.py#L94-L102
JIC-CSB/jicbioimage.transform
jicbioimage/transform/__init__.py
invert
def invert(image): """Return an inverted image of the same dtype. Assumes the full range of the input dtype is in use and that no negative values are present in the input image. :param image: :class:`jicbioimage.core.image.Image` :returns: inverted image of the same dtype as the input """ if image.dtype == bool: return np.logical_not(image) maximum = np.iinfo(image.dtype).max maximum_array = np.ones(image.shape, dtype=image.dtype) * maximum return maximum_array - image
python
def invert(image): """Return an inverted image of the same dtype. Assumes the full range of the input dtype is in use and that no negative values are present in the input image. :param image: :class:`jicbioimage.core.image.Image` :returns: inverted image of the same dtype as the input """ if image.dtype == bool: return np.logical_not(image) maximum = np.iinfo(image.dtype).max maximum_array = np.ones(image.shape, dtype=image.dtype) * maximum return maximum_array - image
[ "def", "invert", "(", "image", ")", ":", "if", "image", ".", "dtype", "==", "bool", ":", "return", "np", ".", "logical_not", "(", "image", ")", "maximum", "=", "np", ".", "iinfo", "(", "image", ".", "dtype", ")", ".", "max", "maximum_array", "=", "...
Return an inverted image of the same dtype. Assumes the full range of the input dtype is in use and that no negative values are present in the input image. :param image: :class:`jicbioimage.core.image.Image` :returns: inverted image of the same dtype as the input
[ "Return", "an", "inverted", "image", "of", "the", "same", "dtype", "." ]
train
https://github.com/JIC-CSB/jicbioimage.transform/blob/494c282d964c3a9b54c2a1b3730f5625ea2a494b/jicbioimage/transform/__init__.py#L106-L119
TheOstrichIO/ostrichlib
ostrich/utils/proc.py
run
def run(*popenargs, **kwargs): """Run command with arguments and return a `CompletedProcess` instance. The returned instance will have attributes args, returncode, stdout and stderr. By default, stdout and stderr are not captured, and those attributes will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them. If `check` is True and the exit code was non-zero, it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute, and output & stderr attributes if those streams were captured. If `timeout` is given, and the process takes too long, a TimeoutExpired exception will be raised, if timeout is supported in the underlying Popen implementation (e.g. Python >= 3.2, or an available subprocess32 package). There is an optional argument `input`, allowing you to pass a string to the subprocess's stdin. If you use this argument you may not also use the Popen constructor's `stdin` argument, as it will be used internally. The other arguments are the same as for the Popen constructor. If universal_newlines=True is passed, the `input` argument must be a string and stdout/stderr in the returned object will be strings rather than bytes. """ stdin = kwargs.pop('input', None) timeout = kwargs.pop('timeout', None) check = kwargs.pop('check', False) if stdin is not None: if 'stdin' in kwargs: raise ValueError('stdin and input arguments may not both be used.') kwargs['stdin'] = PIPE process = Popen(*popenargs, **kwargs) try: if __timeout__: stdout, stderr = process.communicate(stdin, timeout=timeout) else: stdout, stderr = process.communicate(stdin) except TimeoutExpired: # this will never happen if __timeout__ is False process.kill() stdout, stderr = process.communicate() # pylint: disable=no-member raise _TimeoutExpired(process.args, timeout, output=stdout, stderr=stderr) except: process.kill() process.wait() raise retcode = process.poll() if check and retcode: raise CalledProcessError(retcode, popenargs, output=stdout, stderr=stderr) return CompletedProcess(popenargs, retcode, stdout, stderr)
python
def run(*popenargs, **kwargs): """Run command with arguments and return a `CompletedProcess` instance. The returned instance will have attributes args, returncode, stdout and stderr. By default, stdout and stderr are not captured, and those attributes will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them. If `check` is True and the exit code was non-zero, it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute, and output & stderr attributes if those streams were captured. If `timeout` is given, and the process takes too long, a TimeoutExpired exception will be raised, if timeout is supported in the underlying Popen implementation (e.g. Python >= 3.2, or an available subprocess32 package). There is an optional argument `input`, allowing you to pass a string to the subprocess's stdin. If you use this argument you may not also use the Popen constructor's `stdin` argument, as it will be used internally. The other arguments are the same as for the Popen constructor. If universal_newlines=True is passed, the `input` argument must be a string and stdout/stderr in the returned object will be strings rather than bytes. """ stdin = kwargs.pop('input', None) timeout = kwargs.pop('timeout', None) check = kwargs.pop('check', False) if stdin is not None: if 'stdin' in kwargs: raise ValueError('stdin and input arguments may not both be used.') kwargs['stdin'] = PIPE process = Popen(*popenargs, **kwargs) try: if __timeout__: stdout, stderr = process.communicate(stdin, timeout=timeout) else: stdout, stderr = process.communicate(stdin) except TimeoutExpired: # this will never happen if __timeout__ is False process.kill() stdout, stderr = process.communicate() # pylint: disable=no-member raise _TimeoutExpired(process.args, timeout, output=stdout, stderr=stderr) except: process.kill() process.wait() raise retcode = process.poll() if check and retcode: raise CalledProcessError(retcode, popenargs, output=stdout, stderr=stderr) return CompletedProcess(popenargs, retcode, stdout, stderr)
[ "def", "run", "(", "*", "popenargs", ",", "*", "*", "kwargs", ")", ":", "stdin", "=", "kwargs", ".", "pop", "(", "'input'", ",", "None", ")", "timeout", "=", "kwargs", ".", "pop", "(", "'timeout'", ",", "None", ")", "check", "=", "kwargs", ".", "...
Run command with arguments and return a `CompletedProcess` instance. The returned instance will have attributes args, returncode, stdout and stderr. By default, stdout and stderr are not captured, and those attributes will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them. If `check` is True and the exit code was non-zero, it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute, and output & stderr attributes if those streams were captured. If `timeout` is given, and the process takes too long, a TimeoutExpired exception will be raised, if timeout is supported in the underlying Popen implementation (e.g. Python >= 3.2, or an available subprocess32 package). There is an optional argument `input`, allowing you to pass a string to the subprocess's stdin. If you use this argument you may not also use the Popen constructor's `stdin` argument, as it will be used internally. The other arguments are the same as for the Popen constructor. If universal_newlines=True is passed, the `input` argument must be a string and stdout/stderr in the returned object will be strings rather than bytes.
[ "Run", "command", "with", "arguments", "and", "return", "a", "CompletedProcess", "instance", "." ]
train
https://github.com/TheOstrichIO/ostrichlib/blob/ed97634ccbfb8b5042e61fbd0ac9a27aef281bcb/ostrich/utils/proc.py#L129-L187
TheOstrichIO/ostrichlib
ostrich/utils/proc.py
CompletedProcess.check_returncode
def check_returncode(self): """Raise CalledProcessError if the exit code is non-zero.""" if self.returncode: raise CalledProcessError(self.returncode, self.args, self.stdout, self.stderr)
python
def check_returncode(self): """Raise CalledProcessError if the exit code is non-zero.""" if self.returncode: raise CalledProcessError(self.returncode, self.args, self.stdout, self.stderr)
[ "def", "check_returncode", "(", "self", ")", ":", "if", "self", ".", "returncode", ":", "raise", "CalledProcessError", "(", "self", ".", "returncode", ",", "self", ".", "args", ",", "self", ".", "stdout", ",", "self", ".", "stderr", ")" ]
Raise CalledProcessError if the exit code is non-zero.
[ "Raise", "CalledProcessError", "if", "the", "exit", "code", "is", "non", "-", "zero", "." ]
train
https://github.com/TheOstrichIO/ostrichlib/blob/ed97634ccbfb8b5042e61fbd0ac9a27aef281bcb/ostrich/utils/proc.py#L122-L126
timothydmorton/simpledist
simpledist/distributions.py
double_lorgauss
def double_lorgauss(x,p): """Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian. Parameters ---------- x : float or array-like Value(s) at which to evaluate distribution p : array-like Input parameters: mu (mode of distribution), sig1 (LH Gaussian width), sig2 (RH Gaussian width), gam1 (LH Lorentzian width), gam2 (RH Lorentzian width), G1 (LH Gaussian "strength"), G2 (RH Gaussian "strength"). Returns ------- values : float or array-like Double LorGauss distribution evaluated at input(s). If single value provided, single value returned. """ mu,sig1,sig2,gam1,gam2,G1,G2 = p gam1 = float(gam1) gam2 = float(gam2) G1 = abs(G1) G2 = abs(G2) sig1 = abs(sig1) sig2 = abs(sig2) gam1 = abs(gam1) gab2 = abs(gam2) L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) - (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) + (gam2/gam1)*(4-G1-G2)) L1 = 4 - G1 - G2 - L2 #print G1,G2,L1,L2 y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\ L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2) y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\ L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2) lo = (x < mu) hi = (x >= mu) return y1*lo + y2*hi
python
def double_lorgauss(x,p): """Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian. Parameters ---------- x : float or array-like Value(s) at which to evaluate distribution p : array-like Input parameters: mu (mode of distribution), sig1 (LH Gaussian width), sig2 (RH Gaussian width), gam1 (LH Lorentzian width), gam2 (RH Lorentzian width), G1 (LH Gaussian "strength"), G2 (RH Gaussian "strength"). Returns ------- values : float or array-like Double LorGauss distribution evaluated at input(s). If single value provided, single value returned. """ mu,sig1,sig2,gam1,gam2,G1,G2 = p gam1 = float(gam1) gam2 = float(gam2) G1 = abs(G1) G2 = abs(G2) sig1 = abs(sig1) sig2 = abs(sig2) gam1 = abs(gam1) gab2 = abs(gam2) L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) - (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) + (gam2/gam1)*(4-G1-G2)) L1 = 4 - G1 - G2 - L2 #print G1,G2,L1,L2 y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\ L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2) y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\ L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2) lo = (x < mu) hi = (x >= mu) return y1*lo + y2*hi
[ "def", "double_lorgauss", "(", "x", ",", "p", ")", ":", "mu", ",", "sig1", ",", "sig2", ",", "gam1", ",", "gam2", ",", "G1", ",", "G2", "=", "p", "gam1", "=", "float", "(", "gam1", ")", "gam2", "=", "float", "(", "gam2", ")", "G1", "=", "abs"...
Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian. Parameters ---------- x : float or array-like Value(s) at which to evaluate distribution p : array-like Input parameters: mu (mode of distribution), sig1 (LH Gaussian width), sig2 (RH Gaussian width), gam1 (LH Lorentzian width), gam2 (RH Lorentzian width), G1 (LH Gaussian "strength"), G2 (RH Gaussian "strength"). Returns ------- values : float or array-like Double LorGauss distribution evaluated at input(s). If single value provided, single value returned.
[ "Evaluates", "a", "normalized", "distribution", "that", "is", "a", "mixture", "of", "a", "double", "-", "sided", "Gaussian", "and", "Double", "-", "sided", "Lorentzian", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L615-L664
timothydmorton/simpledist
simpledist/distributions.py
fit_double_lorgauss
def fit_double_lorgauss(bins,h,Ntry=5): """Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram. Uses a grid of starting guesses to try to avoid local minima. Parameters ---------- bins, h : array-like Bins and heights of a histogram, as returned by, e.g., `np.histogram`. Ntry : int, optional Spacing of grid for starting guesses. Will try `Ntry**2` different initial values of the "Gaussian strength" parameters `G1` and `G2`. Returns ------- parameters : tuple Parameters of best-fit "double LorGauss" distribution. Raises ------ ImportError If the lmfit module is not available. """ try: from lmfit import minimize, Parameters, Parameter, report_fit except ImportError: raise ImportError('you need lmfit to use this function.') #make sure histogram is normalized h /= np.trapz(h,bins) #zero-pad the ends of the distribution to keep fits positive N = len(bins) dbin = (bins[1:]-bins[:-1]).mean() newbins = np.concatenate((np.linspace(bins.min() - N/10*dbin,bins.min(),N/10), bins, np.linspace(bins.max(),bins.max() + N/10*dbin,N/10))) newh = np.concatenate((np.zeros(N/10),h,np.zeros(N/10))) mu0 = bins[np.argmax(newh)] sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))]) def set_params(G1,G2): params = Parameters() params.add('mu',value=mu0) params.add('sig1',value=sig0) params.add('sig2',value=sig0) params.add('gam1',value=sig0/10) params.add('gam2',value=sig0/10) params.add('G1',value=G1) params.add('G2',value=G2) return params sum_devsq_best = np.inf outkeep = None for G1 in np.linspace(0.1,1.9,Ntry): for G2 in np.linspace(0.1,1.9,Ntry): params = set_params(G1,G2) def residual(ps): pars = (params['mu'].value, params['sig1'].value, params['sig2'].value, params['gam1'].value, params['gam2'].value, params['G1'].value, params['G2'].value) hmodel = double_lorgauss(newbins,pars) return newh-hmodel out = minimize(residual,params) pars = (out.params['mu'].value,out.params['sig1'].value, out.params['sig2'].value,out.params['gam1'].value, out.params['gam2'].value,out.params['G1'].value, out.params['G2'].value) sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum() #print 'devs = %.1f; initial guesses for G1, G2; %.1f, %.1f' % (sum_devsq,G1, G2) if sum_devsq < sum_devsq_best: sum_devsq_best = sum_devsq outkeep = out return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value), abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value), abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value), abs(outkeep.params['G2'].value))
python
def fit_double_lorgauss(bins,h,Ntry=5): """Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram. Uses a grid of starting guesses to try to avoid local minima. Parameters ---------- bins, h : array-like Bins and heights of a histogram, as returned by, e.g., `np.histogram`. Ntry : int, optional Spacing of grid for starting guesses. Will try `Ntry**2` different initial values of the "Gaussian strength" parameters `G1` and `G2`. Returns ------- parameters : tuple Parameters of best-fit "double LorGauss" distribution. Raises ------ ImportError If the lmfit module is not available. """ try: from lmfit import minimize, Parameters, Parameter, report_fit except ImportError: raise ImportError('you need lmfit to use this function.') #make sure histogram is normalized h /= np.trapz(h,bins) #zero-pad the ends of the distribution to keep fits positive N = len(bins) dbin = (bins[1:]-bins[:-1]).mean() newbins = np.concatenate((np.linspace(bins.min() - N/10*dbin,bins.min(),N/10), bins, np.linspace(bins.max(),bins.max() + N/10*dbin,N/10))) newh = np.concatenate((np.zeros(N/10),h,np.zeros(N/10))) mu0 = bins[np.argmax(newh)] sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))]) def set_params(G1,G2): params = Parameters() params.add('mu',value=mu0) params.add('sig1',value=sig0) params.add('sig2',value=sig0) params.add('gam1',value=sig0/10) params.add('gam2',value=sig0/10) params.add('G1',value=G1) params.add('G2',value=G2) return params sum_devsq_best = np.inf outkeep = None for G1 in np.linspace(0.1,1.9,Ntry): for G2 in np.linspace(0.1,1.9,Ntry): params = set_params(G1,G2) def residual(ps): pars = (params['mu'].value, params['sig1'].value, params['sig2'].value, params['gam1'].value, params['gam2'].value, params['G1'].value, params['G2'].value) hmodel = double_lorgauss(newbins,pars) return newh-hmodel out = minimize(residual,params) pars = (out.params['mu'].value,out.params['sig1'].value, out.params['sig2'].value,out.params['gam1'].value, out.params['gam2'].value,out.params['G1'].value, out.params['G2'].value) sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum() #print 'devs = %.1f; initial guesses for G1, G2; %.1f, %.1f' % (sum_devsq,G1, G2) if sum_devsq < sum_devsq_best: sum_devsq_best = sum_devsq outkeep = out return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value), abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value), abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value), abs(outkeep.params['G2'].value))
[ "def", "fit_double_lorgauss", "(", "bins", ",", "h", ",", "Ntry", "=", "5", ")", ":", "try", ":", "from", "lmfit", "import", "minimize", ",", "Parameters", ",", "Parameter", ",", "report_fit", "except", "ImportError", ":", "raise", "ImportError", "(", "'yo...
Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram. Uses a grid of starting guesses to try to avoid local minima. Parameters ---------- bins, h : array-like Bins and heights of a histogram, as returned by, e.g., `np.histogram`. Ntry : int, optional Spacing of grid for starting guesses. Will try `Ntry**2` different initial values of the "Gaussian strength" parameters `G1` and `G2`. Returns ------- parameters : tuple Parameters of best-fit "double LorGauss" distribution. Raises ------ ImportError If the lmfit module is not available.
[ "Uses", "lmfit", "to", "fit", "a", "Double", "LorGauss", "distribution", "to", "a", "provided", "histogram", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L666-L754
timothydmorton/simpledist
simpledist/distributions.py
doublegauss
def doublegauss(x,p): """Evaluates normalized two-sided Gaussian distribution Parameters ---------- x : float or array-like Value(s) at which to evaluate distribution p : array-like Parameters of distribution: (mu: mode of distribution, sig1: LH width, sig2: RH width) Returns ------- value : float or array-like Distribution evaluated at input value(s). If single value provided, single value returned. """ mu,sig1,sig2 = p x = np.atleast_1d(x) A = 1./(np.sqrt(2*np.pi)*(sig1+sig2)/2.) ylo = A*np.exp(-(x-mu)**2/(2*sig1**2)) yhi = A*np.exp(-(x-mu)**2/(2*sig2**2)) y = x*0 wlo = np.where(x < mu) whi = np.where(x >= mu) y[wlo] = ylo[wlo] y[whi] = yhi[whi] if np.size(x)==1: return y[0] else: return y
python
def doublegauss(x,p): """Evaluates normalized two-sided Gaussian distribution Parameters ---------- x : float or array-like Value(s) at which to evaluate distribution p : array-like Parameters of distribution: (mu: mode of distribution, sig1: LH width, sig2: RH width) Returns ------- value : float or array-like Distribution evaluated at input value(s). If single value provided, single value returned. """ mu,sig1,sig2 = p x = np.atleast_1d(x) A = 1./(np.sqrt(2*np.pi)*(sig1+sig2)/2.) ylo = A*np.exp(-(x-mu)**2/(2*sig1**2)) yhi = A*np.exp(-(x-mu)**2/(2*sig2**2)) y = x*0 wlo = np.where(x < mu) whi = np.where(x >= mu) y[wlo] = ylo[wlo] y[whi] = yhi[whi] if np.size(x)==1: return y[0] else: return y
[ "def", "doublegauss", "(", "x", ",", "p", ")", ":", "mu", ",", "sig1", ",", "sig2", "=", "p", "x", "=", "np", ".", "atleast_1d", "(", "x", ")", "A", "=", "1.", "/", "(", "np", ".", "sqrt", "(", "2", "*", "np", ".", "pi", ")", "*", "(", ...
Evaluates normalized two-sided Gaussian distribution Parameters ---------- x : float or array-like Value(s) at which to evaluate distribution p : array-like Parameters of distribution: (mu: mode of distribution, sig1: LH width, sig2: RH width) Returns ------- value : float or array-like Distribution evaluated at input value(s). If single value provided, single value returned.
[ "Evaluates", "normalized", "two", "-", "sided", "Gaussian", "distribution" ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L792-L824
timothydmorton/simpledist
simpledist/distributions.py
doublegauss_cdf
def doublegauss_cdf(x,p): """Cumulative distribution function for two-sided Gaussian Parameters ---------- x : float Input values at which to calculate CDF. p : array-like Parameters of distribution: (mu: mode of distribution, sig1: LH width, sig2: RH width) """ x = np.atleast_1d(x) mu,sig1,sig2 = p sig1 = np.absolute(sig1) sig2 = np.absolute(sig2) ylo = float(sig1)/(sig1 + sig2)*(1 + erf((x-mu)/np.sqrt(2*sig1**2))) yhi = float(sig1)/(sig1 + sig2) + float(sig2)/(sig1+sig2)*(erf((x-mu)/np.sqrt(2*sig2**2))) lo = x < mu hi = x >= mu return ylo*lo + yhi*hi
python
def doublegauss_cdf(x,p): """Cumulative distribution function for two-sided Gaussian Parameters ---------- x : float Input values at which to calculate CDF. p : array-like Parameters of distribution: (mu: mode of distribution, sig1: LH width, sig2: RH width) """ x = np.atleast_1d(x) mu,sig1,sig2 = p sig1 = np.absolute(sig1) sig2 = np.absolute(sig2) ylo = float(sig1)/(sig1 + sig2)*(1 + erf((x-mu)/np.sqrt(2*sig1**2))) yhi = float(sig1)/(sig1 + sig2) + float(sig2)/(sig1+sig2)*(erf((x-mu)/np.sqrt(2*sig2**2))) lo = x < mu hi = x >= mu return ylo*lo + yhi*hi
[ "def", "doublegauss_cdf", "(", "x", ",", "p", ")", ":", "x", "=", "np", ".", "atleast_1d", "(", "x", ")", "mu", ",", "sig1", ",", "sig2", "=", "p", "sig1", "=", "np", ".", "absolute", "(", "sig1", ")", "sig2", "=", "np", ".", "absolute", "(", ...
Cumulative distribution function for two-sided Gaussian Parameters ---------- x : float Input values at which to calculate CDF. p : array-like Parameters of distribution: (mu: mode of distribution, sig1: LH width, sig2: RH width)
[ "Cumulative", "distribution", "function", "for", "two", "-", "sided", "Gaussian" ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L826-L847
timothydmorton/simpledist
simpledist/distributions.py
fit_doublegauss_samples
def fit_doublegauss_samples(samples,**kwargs): """Fits a two-sided Gaussian to a set of samples. Calculates 0.16, 0.5, and 0.84 quantiles and passes these to `fit_doublegauss` for fitting. Parameters ---------- samples : array-like Samples to which to fit the Gaussian. kwargs Keyword arguments passed to `fit_doublegauss`. """ sorted_samples = np.sort(samples) N = len(samples) med = sorted_samples[N/2] siglo = med - sorted_samples[int(0.16*N)] sighi = sorted_samples[int(0.84*N)] - med return fit_doublegauss(med,siglo,sighi,median=True,**kwargs)
python
def fit_doublegauss_samples(samples,**kwargs): """Fits a two-sided Gaussian to a set of samples. Calculates 0.16, 0.5, and 0.84 quantiles and passes these to `fit_doublegauss` for fitting. Parameters ---------- samples : array-like Samples to which to fit the Gaussian. kwargs Keyword arguments passed to `fit_doublegauss`. """ sorted_samples = np.sort(samples) N = len(samples) med = sorted_samples[N/2] siglo = med - sorted_samples[int(0.16*N)] sighi = sorted_samples[int(0.84*N)] - med return fit_doublegauss(med,siglo,sighi,median=True,**kwargs)
[ "def", "fit_doublegauss_samples", "(", "samples", ",", "*", "*", "kwargs", ")", ":", "sorted_samples", "=", "np", ".", "sort", "(", "samples", ")", "N", "=", "len", "(", "samples", ")", "med", "=", "sorted_samples", "[", "N", "/", "2", "]", "siglo", ...
Fits a two-sided Gaussian to a set of samples. Calculates 0.16, 0.5, and 0.84 quantiles and passes these to `fit_doublegauss` for fitting. Parameters ---------- samples : array-like Samples to which to fit the Gaussian. kwargs Keyword arguments passed to `fit_doublegauss`.
[ "Fits", "a", "two", "-", "sided", "Gaussian", "to", "a", "set", "of", "samples", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L849-L868
timothydmorton/simpledist
simpledist/distributions.py
fit_doublegauss
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True): """Fits a two-sided Gaussian distribution to match a given confidence interval. The center of the distribution may be either the median or the mode. Parameters ---------- med : float The center of the distribution to which to fit. Default this will be the mode unless the `median` keyword is set to True. siglo : float Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this is the "lower error bar." sighi : float Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this is the "upper error bar." interval : float, optional The confidence interval enclosed by the provided error bars. Default is 0.683 (1-sigma). p0 : array-like, optional Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`). median : bool, optional Whether to treat the `med` parameter as the median or mode (default will be mode). return_distribution: bool, optional If `True`, then function will return a `DoubleGauss_Distribution` object. Otherwise, will return just the parameters. """ if median: q1 = 0.5 - (interval/2) q2 = 0.5 + (interval/2) targetvals = np.array([med-siglo,med,med+sighi]) qvals = np.array([q1,0.5,q2]) def objfn(pars): logging.debug('{}'.format(pars)) logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals)) return doublegauss_cdf(targetvals,pars) - qvals if p0 is None: p0 = [med,siglo,sighi] pfit,success = leastsq(objfn,p0) else: q1 = 0.5 - (interval/2) q2 = 0.5 + (interval/2) targetvals = np.array([med-siglo,med+sighi]) qvals = np.array([q1,q2]) def objfn(pars): params = (med,pars[0],pars[1]) return doublegauss_cdf(targetvals,params) - qvals if p0 is None: p0 = [siglo,sighi] pfit,success = leastsq(objfn,p0) pfit = (med,pfit[0],pfit[1]) if return_distribution: dist = DoubleGauss_Distribution(*pfit) return dist else: return pfit
python
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True): """Fits a two-sided Gaussian distribution to match a given confidence interval. The center of the distribution may be either the median or the mode. Parameters ---------- med : float The center of the distribution to which to fit. Default this will be the mode unless the `median` keyword is set to True. siglo : float Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this is the "lower error bar." sighi : float Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this is the "upper error bar." interval : float, optional The confidence interval enclosed by the provided error bars. Default is 0.683 (1-sigma). p0 : array-like, optional Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`). median : bool, optional Whether to treat the `med` parameter as the median or mode (default will be mode). return_distribution: bool, optional If `True`, then function will return a `DoubleGauss_Distribution` object. Otherwise, will return just the parameters. """ if median: q1 = 0.5 - (interval/2) q2 = 0.5 + (interval/2) targetvals = np.array([med-siglo,med,med+sighi]) qvals = np.array([q1,0.5,q2]) def objfn(pars): logging.debug('{}'.format(pars)) logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals)) return doublegauss_cdf(targetvals,pars) - qvals if p0 is None: p0 = [med,siglo,sighi] pfit,success = leastsq(objfn,p0) else: q1 = 0.5 - (interval/2) q2 = 0.5 + (interval/2) targetvals = np.array([med-siglo,med+sighi]) qvals = np.array([q1,q2]) def objfn(pars): params = (med,pars[0],pars[1]) return doublegauss_cdf(targetvals,params) - qvals if p0 is None: p0 = [siglo,sighi] pfit,success = leastsq(objfn,p0) pfit = (med,pfit[0],pfit[1]) if return_distribution: dist = DoubleGauss_Distribution(*pfit) return dist else: return pfit
[ "def", "fit_doublegauss", "(", "med", ",", "siglo", ",", "sighi", ",", "interval", "=", "0.683", ",", "p0", "=", "None", ",", "median", "=", "False", ",", "return_distribution", "=", "True", ")", ":", "if", "median", ":", "q1", "=", "0.5", "-", "(", ...
Fits a two-sided Gaussian distribution to match a given confidence interval. The center of the distribution may be either the median or the mode. Parameters ---------- med : float The center of the distribution to which to fit. Default this will be the mode unless the `median` keyword is set to True. siglo : float Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this is the "lower error bar." sighi : float Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this is the "upper error bar." interval : float, optional The confidence interval enclosed by the provided error bars. Default is 0.683 (1-sigma). p0 : array-like, optional Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`). median : bool, optional Whether to treat the `med` parameter as the median or mode (default will be mode). return_distribution: bool, optional If `True`, then function will return a `DoubleGauss_Distribution` object. Otherwise, will return just the parameters.
[ "Fits", "a", "two", "-", "sided", "Gaussian", "distribution", "to", "match", "a", "given", "confidence", "interval", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L871-L937
timothydmorton/simpledist
simpledist/distributions.py
Distribution.pctile
def pctile(self,pct,res=1000): """Returns the desired percentile of the distribution. Will only work if properly normalized. Designed to mimic the `ppf` method of the `scipy.stats` random variate objects. Works by gridding the CDF at a given resolution and matching the nearest point. NB, this is of course not as precise as an analytic ppf. Parameters ---------- pct : float Percentile between 0 and 1. res : int, optional The resolution at which to grid the CDF to find the percentile. Returns ------- percentile : float """ grid = np.linspace(self.minval,self.maxval,res) return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
python
def pctile(self,pct,res=1000): """Returns the desired percentile of the distribution. Will only work if properly normalized. Designed to mimic the `ppf` method of the `scipy.stats` random variate objects. Works by gridding the CDF at a given resolution and matching the nearest point. NB, this is of course not as precise as an analytic ppf. Parameters ---------- pct : float Percentile between 0 and 1. res : int, optional The resolution at which to grid the CDF to find the percentile. Returns ------- percentile : float """ grid = np.linspace(self.minval,self.maxval,res) return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
[ "def", "pctile", "(", "self", ",", "pct", ",", "res", "=", "1000", ")", ":", "grid", "=", "np", ".", "linspace", "(", "self", ".", "minval", ",", "self", ".", "maxval", ",", "res", ")", "return", "grid", "[", "np", ".", "argmin", "(", "np", "."...
Returns the desired percentile of the distribution. Will only work if properly normalized. Designed to mimic the `ppf` method of the `scipy.stats` random variate objects. Works by gridding the CDF at a given resolution and matching the nearest point. NB, this is of course not as precise as an analytic ppf. Parameters ---------- pct : float Percentile between 0 and 1. res : int, optional The resolution at which to grid the CDF to find the percentile. Returns ------- percentile : float
[ "Returns", "the", "desired", "percentile", "of", "the", "distribution", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L136-L158
timothydmorton/simpledist
simpledist/distributions.py
Distribution.save_hdf
def save_hdf(self,filename,path='',res=1000,logspace=False): """Saves distribution to an HDF5 file. Saves a pandas `dataframe` object containing tabulated pdf and cdf values at a specfied resolution. After saving to a particular path, a distribution may be regenerated using the `Distribution_FromH5` subclass. Parameters ---------- filename : string File in which to save the distribution. Should end in .h5. path : string, optional Path in which to save the distribution within the .h5 file. By default this is an empty string, which will lead to saving the `fns` dataframe at the root level of the file. res : int, optional Resolution at which to grid the distribution for saving. logspace : bool, optional Sets whether the tabulated function should be gridded with log or linear spacing. Default will be logspace=False, corresponding to linear gridding. """ if logspace: vals = np.logspace(np.log10(self.minval), np.log10(self.maxval), res) else: vals = np.linspace(self.minval,self.maxval,res) d = {'vals':vals, 'pdf':self(vals), 'cdf':self.cdf(vals)} df = pd.DataFrame(d) df.to_hdf(filename,path+'/fns') if hasattr(self,'samples'): s = pd.Series(self.samples) s.to_hdf(filename,path+'/samples') store = pd.HDFStore(filename) attrs = store.get_storer('{}/fns'.format(path)).attrs attrs.keywords = self.keywords attrs.disttype = type(self) store.close()
python
def save_hdf(self,filename,path='',res=1000,logspace=False): """Saves distribution to an HDF5 file. Saves a pandas `dataframe` object containing tabulated pdf and cdf values at a specfied resolution. After saving to a particular path, a distribution may be regenerated using the `Distribution_FromH5` subclass. Parameters ---------- filename : string File in which to save the distribution. Should end in .h5. path : string, optional Path in which to save the distribution within the .h5 file. By default this is an empty string, which will lead to saving the `fns` dataframe at the root level of the file. res : int, optional Resolution at which to grid the distribution for saving. logspace : bool, optional Sets whether the tabulated function should be gridded with log or linear spacing. Default will be logspace=False, corresponding to linear gridding. """ if logspace: vals = np.logspace(np.log10(self.minval), np.log10(self.maxval), res) else: vals = np.linspace(self.minval,self.maxval,res) d = {'vals':vals, 'pdf':self(vals), 'cdf':self.cdf(vals)} df = pd.DataFrame(d) df.to_hdf(filename,path+'/fns') if hasattr(self,'samples'): s = pd.Series(self.samples) s.to_hdf(filename,path+'/samples') store = pd.HDFStore(filename) attrs = store.get_storer('{}/fns'.format(path)).attrs attrs.keywords = self.keywords attrs.disttype = type(self) store.close()
[ "def", "save_hdf", "(", "self", ",", "filename", ",", "path", "=", "''", ",", "res", "=", "1000", ",", "logspace", "=", "False", ")", ":", "if", "logspace", ":", "vals", "=", "np", ".", "logspace", "(", "np", ".", "log10", "(", "self", ".", "minv...
Saves distribution to an HDF5 file. Saves a pandas `dataframe` object containing tabulated pdf and cdf values at a specfied resolution. After saving to a particular path, a distribution may be regenerated using the `Distribution_FromH5` subclass. Parameters ---------- filename : string File in which to save the distribution. Should end in .h5. path : string, optional Path in which to save the distribution within the .h5 file. By default this is an empty string, which will lead to saving the `fns` dataframe at the root level of the file. res : int, optional Resolution at which to grid the distribution for saving. logspace : bool, optional Sets whether the tabulated function should be gridded with log or linear spacing. Default will be logspace=False, corresponding to linear gridding.
[ "Saves", "distribution", "to", "an", "HDF5", "file", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L162-L206
timothydmorton/simpledist
simpledist/distributions.py
Distribution.plot
def plot(self,minval=None,maxval=None,fig=None,log=False, npts=500,**kwargs): """ Plots distribution. Parameters ---------- minval : float,optional minimum value to plot. Required if minval of Distribution is `-np.inf`. maxval : float, optional maximum value to plot. Required if maxval of Distribution is `np.inf`. fig : None or int, optional Parameter to pass to `setfig`. If `None`, then a new figure is created; if a non-zero integer, the plot will go to that figure (clearing everything first), if zero, then will overplot on current axes. log : bool, optional If `True`, the x-spacing of the points to plot will be logarithmic. npoints : int, optional Number of points to plot. kwargs Keyword arguments are passed to plt.plot Raises ------ ValueError If finite lower and upper bounds are not provided. """ if minval is None: minval = self.minval if maxval is None: maxval = self.maxval if maxval==np.inf or minval==-np.inf: raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)') if log: xs = np.logspace(np.log10(minval),np.log10(maxval),npts) else: xs = np.linspace(minval,maxval,npts) setfig(fig) plt.plot(xs,self(xs),**kwargs) plt.xlabel(self.name) plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
python
def plot(self,minval=None,maxval=None,fig=None,log=False, npts=500,**kwargs): """ Plots distribution. Parameters ---------- minval : float,optional minimum value to plot. Required if minval of Distribution is `-np.inf`. maxval : float, optional maximum value to plot. Required if maxval of Distribution is `np.inf`. fig : None or int, optional Parameter to pass to `setfig`. If `None`, then a new figure is created; if a non-zero integer, the plot will go to that figure (clearing everything first), if zero, then will overplot on current axes. log : bool, optional If `True`, the x-spacing of the points to plot will be logarithmic. npoints : int, optional Number of points to plot. kwargs Keyword arguments are passed to plt.plot Raises ------ ValueError If finite lower and upper bounds are not provided. """ if minval is None: minval = self.minval if maxval is None: maxval = self.maxval if maxval==np.inf or minval==-np.inf: raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)') if log: xs = np.logspace(np.log10(minval),np.log10(maxval),npts) else: xs = np.linspace(minval,maxval,npts) setfig(fig) plt.plot(xs,self(xs),**kwargs) plt.xlabel(self.name) plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
[ "def", "plot", "(", "self", ",", "minval", "=", "None", ",", "maxval", "=", "None", ",", "fig", "=", "None", ",", "log", "=", "False", ",", "npts", "=", "500", ",", "*", "*", "kwargs", ")", ":", "if", "minval", "is", "None", ":", "minval", "=",...
Plots distribution. Parameters ---------- minval : float,optional minimum value to plot. Required if minval of Distribution is `-np.inf`. maxval : float, optional maximum value to plot. Required if maxval of Distribution is `np.inf`. fig : None or int, optional Parameter to pass to `setfig`. If `None`, then a new figure is created; if a non-zero integer, the plot will go to that figure (clearing everything first), if zero, then will overplot on current axes. log : bool, optional If `True`, the x-spacing of the points to plot will be logarithmic. npoints : int, optional Number of points to plot. kwargs Keyword arguments are passed to plt.plot Raises ------ ValueError If finite lower and upper bounds are not provided.
[ "Plots", "distribution", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L244-L294
timothydmorton/simpledist
simpledist/distributions.py
Distribution.resample
def resample(self,N,minval=None,maxval=None,log=False,res=1e4): """Returns random samples generated according to the distribution Mirrors basic functionality of `rvs` method for `scipy.stats` random variates. Implemented by mapping uniform numbers onto the inverse CDF using a closest-matching grid approach. Parameters ---------- N : int Number of samples to return minval,maxval : float, optional Minimum/maximum values to resample. Should both usually just be `None`, which will default to `self.minval`/`self.maxval`. log : bool, optional Whether grid should be log- or linear-spaced. res : int, optional Resolution of CDF grid used. Returns ------- values : ndarray N samples. Raises ------ ValueError If maxval/minval are +/- infinity, this doesn't work because of the grid-based approach. """ N = int(N) if minval is None: if hasattr(self,'minval_cdf'): minval = self.minval_cdf else: minval = self.minval if maxval is None: if hasattr(self,'maxval_cdf'): maxval = self.maxval_cdf else: maxval = self.maxval if maxval==np.inf or minval==-np.inf: raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)') u = rand.random(size=N) if log: vals = np.logspace(log10(minval),log10(maxval),res) else: vals = np.linspace(minval,maxval,res) #sometimes cdf is flat. so ys will need to be uniqued ys,yinds = np.unique(self.cdf(vals), return_index=True) vals = vals[yinds] inds = np.digitize(u,ys) return vals[inds]
python
def resample(self,N,minval=None,maxval=None,log=False,res=1e4): """Returns random samples generated according to the distribution Mirrors basic functionality of `rvs` method for `scipy.stats` random variates. Implemented by mapping uniform numbers onto the inverse CDF using a closest-matching grid approach. Parameters ---------- N : int Number of samples to return minval,maxval : float, optional Minimum/maximum values to resample. Should both usually just be `None`, which will default to `self.minval`/`self.maxval`. log : bool, optional Whether grid should be log- or linear-spaced. res : int, optional Resolution of CDF grid used. Returns ------- values : ndarray N samples. Raises ------ ValueError If maxval/minval are +/- infinity, this doesn't work because of the grid-based approach. """ N = int(N) if minval is None: if hasattr(self,'minval_cdf'): minval = self.minval_cdf else: minval = self.minval if maxval is None: if hasattr(self,'maxval_cdf'): maxval = self.maxval_cdf else: maxval = self.maxval if maxval==np.inf or minval==-np.inf: raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)') u = rand.random(size=N) if log: vals = np.logspace(log10(minval),log10(maxval),res) else: vals = np.linspace(minval,maxval,res) #sometimes cdf is flat. so ys will need to be uniqued ys,yinds = np.unique(self.cdf(vals), return_index=True) vals = vals[yinds] inds = np.digitize(u,ys) return vals[inds]
[ "def", "resample", "(", "self", ",", "N", ",", "minval", "=", "None", ",", "maxval", "=", "None", ",", "log", "=", "False", ",", "res", "=", "1e4", ")", ":", "N", "=", "int", "(", "N", ")", "if", "minval", "is", "None", ":", "if", "hasattr", ...
Returns random samples generated according to the distribution Mirrors basic functionality of `rvs` method for `scipy.stats` random variates. Implemented by mapping uniform numbers onto the inverse CDF using a closest-matching grid approach. Parameters ---------- N : int Number of samples to return minval,maxval : float, optional Minimum/maximum values to resample. Should both usually just be `None`, which will default to `self.minval`/`self.maxval`. log : bool, optional Whether grid should be log- or linear-spaced. res : int, optional Resolution of CDF grid used. Returns ------- values : ndarray N samples. Raises ------ ValueError If maxval/minval are +/- infinity, this doesn't work because of the grid-based approach.
[ "Returns", "random", "samples", "generated", "according", "to", "the", "distribution" ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L296-L357
timothydmorton/simpledist
simpledist/distributions.py
Hist_Distribution.plothist
def plothist(self,fig=None,**kwargs): """Plots a histogram of samples using provided bins. Parameters ---------- fig : None or int Parameter passed to `setfig`. kwargs Keyword arguments passed to `plt.hist`. """ setfig(fig) plt.hist(self.samples,bins=self.bins,**kwargs)
python
def plothist(self,fig=None,**kwargs): """Plots a histogram of samples using provided bins. Parameters ---------- fig : None or int Parameter passed to `setfig`. kwargs Keyword arguments passed to `plt.hist`. """ setfig(fig) plt.hist(self.samples,bins=self.bins,**kwargs)
[ "def", "plothist", "(", "self", ",", "fig", "=", "None", ",", "*", "*", "kwargs", ")", ":", "setfig", "(", "fig", ")", "plt", ".", "hist", "(", "self", ".", "samples", ",", "bins", "=", "self", ".", "bins", ",", "*", "*", "kwargs", ")" ]
Plots a histogram of samples using provided bins. Parameters ---------- fig : None or int Parameter passed to `setfig`. kwargs Keyword arguments passed to `plt.hist`.
[ "Plots", "a", "histogram", "of", "samples", "using", "provided", "bins", ".", "Parameters", "----------", "fig", ":", "None", "or", "int", "Parameter", "passed", "to", "setfig", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L548-L560
timothydmorton/simpledist
simpledist/distributions.py
Hist_Distribution.resample
def resample(self,N): """Returns a bootstrap resampling of provided samples. Parameters ---------- N : int Number of samples. """ inds = rand.randint(len(self.samples),size=N) return self.samples[inds]
python
def resample(self,N): """Returns a bootstrap resampling of provided samples. Parameters ---------- N : int Number of samples. """ inds = rand.randint(len(self.samples),size=N) return self.samples[inds]
[ "def", "resample", "(", "self", ",", "N", ")", ":", "inds", "=", "rand", ".", "randint", "(", "len", "(", "self", ".", "samples", ")", ",", "size", "=", "N", ")", "return", "self", ".", "samples", "[", "inds", "]" ]
Returns a bootstrap resampling of provided samples. Parameters ---------- N : int Number of samples.
[ "Returns", "a", "bootstrap", "resampling", "of", "provided", "samples", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L562-L571
timothydmorton/simpledist
simpledist/distributions.py
Box_Distribution.resample
def resample(self,N): """Returns a random sampling. """ return rand.random(size=N)*(self.maxval - self.minval) + self.minval
python
def resample(self,N): """Returns a random sampling. """ return rand.random(size=N)*(self.maxval - self.minval) + self.minval
[ "def", "resample", "(", "self", ",", "N", ")", ":", "return", "rand", ".", "random", "(", "size", "=", "N", ")", "*", "(", "self", ".", "maxval", "-", "self", ".", "minval", ")", "+", "self", ".", "minval" ]
Returns a random sampling.
[ "Returns", "a", "random", "sampling", "." ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L606-L609
timothydmorton/simpledist
simpledist/distributions.py
DoubleGauss_Distribution.resample
def resample(self,N,**kwargs): """Random resampling of the doublegauss distribution """ lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo) hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi) u = rand.random(size=N) hi = (u < float(self.sighi)/(self.sighi + self.siglo)) lo = (u >= float(self.sighi)/(self.sighi + self.siglo)) vals = np.zeros(N) vals[hi] = hivals[hi] vals[lo] = lovals[lo] return vals
python
def resample(self,N,**kwargs): """Random resampling of the doublegauss distribution """ lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo) hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi) u = rand.random(size=N) hi = (u < float(self.sighi)/(self.sighi + self.siglo)) lo = (u >= float(self.sighi)/(self.sighi + self.siglo)) vals = np.zeros(N) vals[hi] = hivals[hi] vals[lo] = lovals[lo] return vals
[ "def", "resample", "(", "self", ",", "N", ",", "*", "*", "kwargs", ")", ":", "lovals", "=", "self", ".", "mu", "-", "np", ".", "absolute", "(", "rand", ".", "normal", "(", "size", "=", "N", ")", "*", "self", ".", "siglo", ")", "hivals", "=", ...
Random resampling of the doublegauss distribution
[ "Random", "resampling", "of", "the", "doublegauss", "distribution" ]
train
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L982-L995
klahnakoski/mo-times
mo_times/vendor/dateutil/tz.py
tzname_in_python2
def tzname_in_python2(myfunc): """Change unicode output into bytestrings in Python 2 tzname() API changed in Python 3. It used to return bytes, but was changed to unicode strings """ def inner_func(*args, **kwargs): if PY3: return myfunc(*args, **kwargs) else: return myfunc(*args, **kwargs).encode() return inner_func
python
def tzname_in_python2(myfunc): """Change unicode output into bytestrings in Python 2 tzname() API changed in Python 3. It used to return bytes, but was changed to unicode strings """ def inner_func(*args, **kwargs): if PY3: return myfunc(*args, **kwargs) else: return myfunc(*args, **kwargs).encode() return inner_func
[ "def", "tzname_in_python2", "(", "myfunc", ")", ":", "def", "inner_func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "PY3", ":", "return", "myfunc", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "myfunc", "(...
Change unicode output into bytestrings in Python 2 tzname() API changed in Python 3. It used to return bytes, but was changed to unicode strings
[ "Change", "unicode", "output", "into", "bytestrings", "in", "Python", "2" ]
train
https://github.com/klahnakoski/mo-times/blob/e64a720b9796e076adeb0d5773ec6915ca045b9d/mo_times/vendor/dateutil/tz.py#L29-L40
maaku/lookahead
lookahead.py
lookahead
def lookahead(iterable, *args, **kwargs): """Constructs a generator over the iterable yielding look-ahead (and/or “look-behind”) tuples. One tuple will be generated for each element value accessible from the iterator, containing that element and the number of elements specified just prior to and immediately after. When no such element exists, the None is used instead. If one were to think of iterator as a list, this is would be similar to appending [None]*lookahead and prepending [None]*lookbehind, then iterating over and returning a sliding window of length lookbehind+1+ lookahead (except of course that instead of generating the such a list, this implementation generates (and caches) lookahead values only as needed). lookahead() may be called with 1, 2, or 3 parameters: lookahead(iterable) Defaults to lookahead=1, lookbehind=0 lookahead(iterable, lookahead) Defaults to lookbehind=0 lookahead(iterable, lookbehind, lookahead) Notice that lookahead is now the 3rd parameter! Example semantics: lookahead(iterable): yield (item, next) lookahead(iterable, 2): yield (item, next, next+1) lookahead(iterable, 1, 2): yield (prev, item, next, next+1) lookahead(iterable, p, n): yeild (prev, ..., prev+p-1, item, next, ..., next+n-1) """ # Deal with our funny parameter handling (2 optional positional # parameters, with the *2nd* optional parameter taking precendence # if only one is specified): if len(args) == 0: num_prev, num_next = 0, 1 elif len(args) == 1: num_prev, num_next = 0, args[0] elif len(args) == 2: num_prev, num_next = args[0], args[1] else: raise TypeError("%s() takes 1, 2, or 3 arguments (%d given)" % (lookahead.__name__, len(args))) # Construct an iterator over iterable (has no effect if it is # already iterable): iterator = iter(iterable) # Set the lookbehind positions to None and generate the first element. # This will immediately raise StopIteration in the trivial case: lst = [None]*num_prev + [iterator.next()] # Prime the needed lookahead values: for x in xrange(num_next): try: lst.append(iterator.next()) except StopIteration: lst.append(None) num_next -= 1 # Yield the current tuple, then shift the list and generate a new item: for item in iterator: yield tuple(lst) lst = lst[1:] + [item] # Yield the last full tuple, then continue with None for each lookahead # position: for x in xrange(num_next+1): yield tuple(lst) lst = lst[1:] + [None] # Done! raise StopIteration
python
def lookahead(iterable, *args, **kwargs): """Constructs a generator over the iterable yielding look-ahead (and/or “look-behind”) tuples. One tuple will be generated for each element value accessible from the iterator, containing that element and the number of elements specified just prior to and immediately after. When no such element exists, the None is used instead. If one were to think of iterator as a list, this is would be similar to appending [None]*lookahead and prepending [None]*lookbehind, then iterating over and returning a sliding window of length lookbehind+1+ lookahead (except of course that instead of generating the such a list, this implementation generates (and caches) lookahead values only as needed). lookahead() may be called with 1, 2, or 3 parameters: lookahead(iterable) Defaults to lookahead=1, lookbehind=0 lookahead(iterable, lookahead) Defaults to lookbehind=0 lookahead(iterable, lookbehind, lookahead) Notice that lookahead is now the 3rd parameter! Example semantics: lookahead(iterable): yield (item, next) lookahead(iterable, 2): yield (item, next, next+1) lookahead(iterable, 1, 2): yield (prev, item, next, next+1) lookahead(iterable, p, n): yeild (prev, ..., prev+p-1, item, next, ..., next+n-1) """ # Deal with our funny parameter handling (2 optional positional # parameters, with the *2nd* optional parameter taking precendence # if only one is specified): if len(args) == 0: num_prev, num_next = 0, 1 elif len(args) == 1: num_prev, num_next = 0, args[0] elif len(args) == 2: num_prev, num_next = args[0], args[1] else: raise TypeError("%s() takes 1, 2, or 3 arguments (%d given)" % (lookahead.__name__, len(args))) # Construct an iterator over iterable (has no effect if it is # already iterable): iterator = iter(iterable) # Set the lookbehind positions to None and generate the first element. # This will immediately raise StopIteration in the trivial case: lst = [None]*num_prev + [iterator.next()] # Prime the needed lookahead values: for x in xrange(num_next): try: lst.append(iterator.next()) except StopIteration: lst.append(None) num_next -= 1 # Yield the current tuple, then shift the list and generate a new item: for item in iterator: yield tuple(lst) lst = lst[1:] + [item] # Yield the last full tuple, then continue with None for each lookahead # position: for x in xrange(num_next+1): yield tuple(lst) lst = lst[1:] + [None] # Done! raise StopIteration
[ "def", "lookahead", "(", "iterable", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# Deal with our funny parameter handling (2 optional positional", "# parameters, with the *2nd* optional parameter taking precendence", "# if only one is specified):", "if", "len", "(", "...
Constructs a generator over the iterable yielding look-ahead (and/or “look-behind”) tuples. One tuple will be generated for each element value accessible from the iterator, containing that element and the number of elements specified just prior to and immediately after. When no such element exists, the None is used instead. If one were to think of iterator as a list, this is would be similar to appending [None]*lookahead and prepending [None]*lookbehind, then iterating over and returning a sliding window of length lookbehind+1+ lookahead (except of course that instead of generating the such a list, this implementation generates (and caches) lookahead values only as needed). lookahead() may be called with 1, 2, or 3 parameters: lookahead(iterable) Defaults to lookahead=1, lookbehind=0 lookahead(iterable, lookahead) Defaults to lookbehind=0 lookahead(iterable, lookbehind, lookahead) Notice that lookahead is now the 3rd parameter! Example semantics: lookahead(iterable): yield (item, next) lookahead(iterable, 2): yield (item, next, next+1) lookahead(iterable, 1, 2): yield (prev, item, next, next+1) lookahead(iterable, p, n): yeild (prev, ..., prev+p-1, item, next, ..., next+n-1)
[ "Constructs", "a", "generator", "over", "the", "iterable", "yielding", "look", "-", "ahead", "(", "and", "/", "or", "“look", "-", "behind”", ")", "tuples", ".", "One", "tuple", "will", "be", "generated", "for", "each", "element", "value", "accessible", "fr...
train
https://github.com/maaku/lookahead/blob/63983c2b1dbbcb36e0b777e587185836a03bd592/lookahead.py#L21-L92
jreinhardt/constraining-order
src/constrainingorder/solver.py
ac3
def ac3(space): """ AC-3 algorithm. This reduces the domains of the variables by propagating constraints to ensure arc consistency. :param Space space: The space to reduce """ #determine arcs arcs = {} for name in space.variables: arcs[name] = set([]) for const in space.constraints: for vname1,vname2 in product(const.vnames,const.vnames): if vname1 != vname2: #this is pessimistic, we assume that each constraint #pairwisely couples all variables it affects arcs[vname1].add(vname2) #enforce node consistency for vname in space.variables: for const in space.constraints: _unary(space,const,vname) #assemble work list worklist = set([]) for v1 in space.variables: for v2 in space.variables: for const in space.constraints: if _binary(space,const,v1,v2): for name in arcs[v1]: worklist.add((v1,name)) #work through work list while worklist: v1,v2 = worklist.pop() for const in space.constraints: if _binary(space,const,v1,v2): for vname in arcs[v1]: worklist.add((v1,vname))
python
def ac3(space): """ AC-3 algorithm. This reduces the domains of the variables by propagating constraints to ensure arc consistency. :param Space space: The space to reduce """ #determine arcs arcs = {} for name in space.variables: arcs[name] = set([]) for const in space.constraints: for vname1,vname2 in product(const.vnames,const.vnames): if vname1 != vname2: #this is pessimistic, we assume that each constraint #pairwisely couples all variables it affects arcs[vname1].add(vname2) #enforce node consistency for vname in space.variables: for const in space.constraints: _unary(space,const,vname) #assemble work list worklist = set([]) for v1 in space.variables: for v2 in space.variables: for const in space.constraints: if _binary(space,const,v1,v2): for name in arcs[v1]: worklist.add((v1,name)) #work through work list while worklist: v1,v2 = worklist.pop() for const in space.constraints: if _binary(space,const,v1,v2): for vname in arcs[v1]: worklist.add((v1,vname))
[ "def", "ac3", "(", "space", ")", ":", "#determine arcs", "arcs", "=", "{", "}", "for", "name", "in", "space", ".", "variables", ":", "arcs", "[", "name", "]", "=", "set", "(", "[", "]", ")", "for", "const", "in", "space", ".", "constraints", ":", ...
AC-3 algorithm. This reduces the domains of the variables by propagating constraints to ensure arc consistency. :param Space space: The space to reduce
[ "AC", "-", "3", "algorithm", ".", "This", "reduces", "the", "domains", "of", "the", "variables", "by", "propagating", "constraints", "to", "ensure", "arc", "consistency", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/solver.py#L32-L70
jreinhardt/constraining-order
src/constrainingorder/solver.py
_unary
def _unary(space,const,name): """ Reduce the domain of variable name to be node-consistent with this constraint, i.e. remove those values for the variable that are not consistent with the constraint. returns True if the domain of name was modified """ if not name in const.vnames: return False if space.variables[name].discrete: values = const.domains[name] else: values = const.domains[name] space.domains[name] = space.domains[name].intersection(values) return True
python
def _unary(space,const,name): """ Reduce the domain of variable name to be node-consistent with this constraint, i.e. remove those values for the variable that are not consistent with the constraint. returns True if the domain of name was modified """ if not name in const.vnames: return False if space.variables[name].discrete: values = const.domains[name] else: values = const.domains[name] space.domains[name] = space.domains[name].intersection(values) return True
[ "def", "_unary", "(", "space", ",", "const", ",", "name", ")", ":", "if", "not", "name", "in", "const", ".", "vnames", ":", "return", "False", "if", "space", ".", "variables", "[", "name", "]", ".", "discrete", ":", "values", "=", "const", ".", "do...
Reduce the domain of variable name to be node-consistent with this constraint, i.e. remove those values for the variable that are not consistent with the constraint. returns True if the domain of name was modified
[ "Reduce", "the", "domain", "of", "variable", "name", "to", "be", "node", "-", "consistent", "with", "this", "constraint", "i", ".", "e", ".", "remove", "those", "values", "for", "the", "variable", "that", "are", "not", "consistent", "with", "the", "constra...
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/solver.py#L72-L88
jreinhardt/constraining-order
src/constrainingorder/solver.py
_binary
def _binary(space,const,name1,name2): """ reduce the domain of variable name1 to be two-consistent (arc-consistent) with this constraint, i.e. remove those values for the variable name1, for which no values for name2 exist such that this pair is consistent with the constraint returns True if the domain of name1 was modified """ if not (name1 in const.vnames and name2 in const.vnames): return False remove = set([]) for v1 in space.domains[name1].iter_members(): for v2 in space.domains[name2].iter_members(): if const.consistent({name1 : v1, name2 : v2}): break else: remove.add(v1) if len(remove) > 0: if space.variables[name1].discrete: remove = DiscreteSet(remove) else: remove = IntervalSet.from_values(remove) space.domains[name1] = space.domains[name1].difference(remove) return True else: return False
python
def _binary(space,const,name1,name2): """ reduce the domain of variable name1 to be two-consistent (arc-consistent) with this constraint, i.e. remove those values for the variable name1, for which no values for name2 exist such that this pair is consistent with the constraint returns True if the domain of name1 was modified """ if not (name1 in const.vnames and name2 in const.vnames): return False remove = set([]) for v1 in space.domains[name1].iter_members(): for v2 in space.domains[name2].iter_members(): if const.consistent({name1 : v1, name2 : v2}): break else: remove.add(v1) if len(remove) > 0: if space.variables[name1].discrete: remove = DiscreteSet(remove) else: remove = IntervalSet.from_values(remove) space.domains[name1] = space.domains[name1].difference(remove) return True else: return False
[ "def", "_binary", "(", "space", ",", "const", ",", "name1", ",", "name2", ")", ":", "if", "not", "(", "name1", "in", "const", ".", "vnames", "and", "name2", "in", "const", ".", "vnames", ")", ":", "return", "False", "remove", "=", "set", "(", "[", ...
reduce the domain of variable name1 to be two-consistent (arc-consistent) with this constraint, i.e. remove those values for the variable name1, for which no values for name2 exist such that this pair is consistent with the constraint returns True if the domain of name1 was modified
[ "reduce", "the", "domain", "of", "variable", "name1", "to", "be", "two", "-", "consistent", "(", "arc", "-", "consistent", ")", "with", "this", "constraint", "i", ".", "e", ".", "remove", "those", "values", "for", "the", "variable", "name1", "for", "whic...
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/solver.py#L90-L118
jreinhardt/constraining-order
src/constrainingorder/solver.py
solve
def solve(space,method='backtrack',ordering=None): """ Generator for all solutions. :param str method: the solution method to employ :param ordering: an optional parameter ordering :type ordering: sequence of parameter names Methods: :"backtrack": simple chronological backtracking :"ac-lookahead": full lookahead """ if ordering is None: ordering = list(space.variables.keys()) if not space.is_discrete(): raise ValueError("Can not backtrack on non-discrete space") if method=='backtrack': for label in _backtrack(space,{},ordering): yield label elif method=='ac-lookahead': for label in _lookahead(space,{},ordering): yield label else: raise ValueError("Unknown solution method: %s" % method)
python
def solve(space,method='backtrack',ordering=None): """ Generator for all solutions. :param str method: the solution method to employ :param ordering: an optional parameter ordering :type ordering: sequence of parameter names Methods: :"backtrack": simple chronological backtracking :"ac-lookahead": full lookahead """ if ordering is None: ordering = list(space.variables.keys()) if not space.is_discrete(): raise ValueError("Can not backtrack on non-discrete space") if method=='backtrack': for label in _backtrack(space,{},ordering): yield label elif method=='ac-lookahead': for label in _lookahead(space,{},ordering): yield label else: raise ValueError("Unknown solution method: %s" % method)
[ "def", "solve", "(", "space", ",", "method", "=", "'backtrack'", ",", "ordering", "=", "None", ")", ":", "if", "ordering", "is", "None", ":", "ordering", "=", "list", "(", "space", ".", "variables", ".", "keys", "(", ")", ")", "if", "not", "space", ...
Generator for all solutions. :param str method: the solution method to employ :param ordering: an optional parameter ordering :type ordering: sequence of parameter names Methods: :"backtrack": simple chronological backtracking :"ac-lookahead": full lookahead
[ "Generator", "for", "all", "solutions", "." ]
train
https://github.com/jreinhardt/constraining-order/blob/04d00e4cad0fa9bedf15f2e89b8fd667c0495edc/src/constrainingorder/solver.py#L120-L146
henzk/ape
ape/main.py
get_task_parser
def get_task_parser(task): """ Construct an ArgumentParser for the given task. This function returns a tuple (parser, proxy_args). If task accepts varargs only, proxy_args is True. If task accepts only positional and explicit keyword args, proxy args is False. """ args, varargs, keywords, defaults = inspect.getargspec(task) defaults = defaults or [] parser = argparse.ArgumentParser( prog='ape ' + task.__name__, add_help=False, description=task.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter ) posargslen = len(args) - len(defaults) if varargs is None and keywords is None: for idx, arg in enumerate(args): if idx < posargslen: parser.add_argument(arg) else: default = defaults[idx - posargslen] parser.add_argument('--' + arg, default=default) return parser, False elif not args and varargs and not keywords and not defaults: return parser, True else: raise InvalidTask(ERRMSG_UNSUPPORTED_SIG % task.__name__)
python
def get_task_parser(task): """ Construct an ArgumentParser for the given task. This function returns a tuple (parser, proxy_args). If task accepts varargs only, proxy_args is True. If task accepts only positional and explicit keyword args, proxy args is False. """ args, varargs, keywords, defaults = inspect.getargspec(task) defaults = defaults or [] parser = argparse.ArgumentParser( prog='ape ' + task.__name__, add_help=False, description=task.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter ) posargslen = len(args) - len(defaults) if varargs is None and keywords is None: for idx, arg in enumerate(args): if idx < posargslen: parser.add_argument(arg) else: default = defaults[idx - posargslen] parser.add_argument('--' + arg, default=default) return parser, False elif not args and varargs and not keywords and not defaults: return parser, True else: raise InvalidTask(ERRMSG_UNSUPPORTED_SIG % task.__name__)
[ "def", "get_task_parser", "(", "task", ")", ":", "args", ",", "varargs", ",", "keywords", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "task", ")", "defaults", "=", "defaults", "or", "[", "]", "parser", "=", "argparse", ".", "ArgumentParser", ...
Construct an ArgumentParser for the given task. This function returns a tuple (parser, proxy_args). If task accepts varargs only, proxy_args is True. If task accepts only positional and explicit keyword args, proxy args is False.
[ "Construct", "an", "ArgumentParser", "for", "the", "given", "task", "." ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/main.py#L21-L50
henzk/ape
ape/main.py
invoke_task
def invoke_task(task, args): """ Parse args and invoke task function. :param task: task function to invoke :param args: arguments to the task (list of str) :return: result of task function :rtype: object """ parser, proxy_args = get_task_parser(task) if proxy_args: return task(*args) else: pargs = parser.parse_args(args) return task(**vars(pargs))
python
def invoke_task(task, args): """ Parse args and invoke task function. :param task: task function to invoke :param args: arguments to the task (list of str) :return: result of task function :rtype: object """ parser, proxy_args = get_task_parser(task) if proxy_args: return task(*args) else: pargs = parser.parse_args(args) return task(**vars(pargs))
[ "def", "invoke_task", "(", "task", ",", "args", ")", ":", "parser", ",", "proxy_args", "=", "get_task_parser", "(", "task", ")", "if", "proxy_args", ":", "return", "task", "(", "*", "args", ")", "else", ":", "pargs", "=", "parser", ".", "parse_args", "...
Parse args and invoke task function. :param task: task function to invoke :param args: arguments to the task (list of str) :return: result of task function :rtype: object
[ "Parse", "args", "and", "invoke", "task", "function", "." ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/main.py#L53-L67
henzk/ape
ape/main.py
get_task_module
def get_task_module(feature): """ Return imported task module of feature. This function first tries to import the feature and raises FeatureNotFound if that is not possible. Thereafter, it looks for a submodules called ``apetasks`` and ``tasks`` in that order. If such a submodule exists, it is imported and returned. :param feature: name of feature to fet task module for. :raises: FeatureNotFound if feature_module could not be imported. :return: imported module containing the ape tasks of feature or None, if module cannot be imported. """ try: importlib.import_module(feature) except ImportError: raise FeatureNotFound(feature) tasks_module = None # ape tasks may be located in a module called apetasks # or (if no apetasks module exists) in a module called tasks try: tasks_module = importlib.import_module(feature + '.apetasks') except ImportError: # No apetasks module in feature ... try tasks pass try: tasks_module = importlib.import_module(feature + '.tasks') except ImportError: # No tasks module in feature ... skip it pass return tasks_module
python
def get_task_module(feature): """ Return imported task module of feature. This function first tries to import the feature and raises FeatureNotFound if that is not possible. Thereafter, it looks for a submodules called ``apetasks`` and ``tasks`` in that order. If such a submodule exists, it is imported and returned. :param feature: name of feature to fet task module for. :raises: FeatureNotFound if feature_module could not be imported. :return: imported module containing the ape tasks of feature or None, if module cannot be imported. """ try: importlib.import_module(feature) except ImportError: raise FeatureNotFound(feature) tasks_module = None # ape tasks may be located in a module called apetasks # or (if no apetasks module exists) in a module called tasks try: tasks_module = importlib.import_module(feature + '.apetasks') except ImportError: # No apetasks module in feature ... try tasks pass try: tasks_module = importlib.import_module(feature + '.tasks') except ImportError: # No tasks module in feature ... skip it pass return tasks_module
[ "def", "get_task_module", "(", "feature", ")", ":", "try", ":", "importlib", ".", "import_module", "(", "feature", ")", "except", "ImportError", ":", "raise", "FeatureNotFound", "(", "feature", ")", "tasks_module", "=", "None", "# ape tasks may be located in a modul...
Return imported task module of feature. This function first tries to import the feature and raises FeatureNotFound if that is not possible. Thereafter, it looks for a submodules called ``apetasks`` and ``tasks`` in that order. If such a submodule exists, it is imported and returned. :param feature: name of feature to fet task module for. :raises: FeatureNotFound if feature_module could not be imported. :return: imported module containing the ape tasks of feature or None, if module cannot be imported.
[ "Return", "imported", "task", "module", "of", "feature", "." ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/main.py#L70-L105
henzk/ape
ape/main.py
run
def run(args, features=None): """ Run an ape task. Composes task modules out of the selected features and calls the task with arguments. :param args: list comprised of task name followed by arguments :param features: list of features to compose before invoking the task """ features = features or [] for feature in features: tasks_module = get_task_module(feature) if tasks_module: tasks.superimpose(tasks_module) if len(args) < 2 or (len(args) == 2 and args[1] == 'help'): tasks.help() else: taskname = args[1] try: task = tasks.get_task(taskname, include_helpers=False) except TaskNotFound: print('Task "%s" not found! Use "ape help" to get usage information.' % taskname) else: remaining_args = args[2:] if len(args) > 2 else [] invoke_task(task, remaining_args)
python
def run(args, features=None): """ Run an ape task. Composes task modules out of the selected features and calls the task with arguments. :param args: list comprised of task name followed by arguments :param features: list of features to compose before invoking the task """ features = features or [] for feature in features: tasks_module = get_task_module(feature) if tasks_module: tasks.superimpose(tasks_module) if len(args) < 2 or (len(args) == 2 and args[1] == 'help'): tasks.help() else: taskname = args[1] try: task = tasks.get_task(taskname, include_helpers=False) except TaskNotFound: print('Task "%s" not found! Use "ape help" to get usage information.' % taskname) else: remaining_args = args[2:] if len(args) > 2 else [] invoke_task(task, remaining_args)
[ "def", "run", "(", "args", ",", "features", "=", "None", ")", ":", "features", "=", "features", "or", "[", "]", "for", "feature", "in", "features", ":", "tasks_module", "=", "get_task_module", "(", "feature", ")", "if", "tasks_module", ":", "tasks", ".",...
Run an ape task. Composes task modules out of the selected features and calls the task with arguments. :param args: list comprised of task name followed by arguments :param features: list of features to compose before invoking the task
[ "Run", "an", "ape", "task", "." ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/main.py#L108-L134
henzk/ape
ape/main.py
main
def main(): """ Entry point when used via command line. Features are given using the environment variable ``PRODUCT_EQUATION``. If it is not set, ``PRODUCT_EQUATION_FILENAME`` is tried: if it points to an existing equation file that selection is used. (if ``APE_PREPEND_FEATURES`` is given, those features are prepended) If the list of features is empty, ``ape.EnvironmentIncomplete`` is raised. """ # check APE_PREPEND_FEATURES features = os.environ.get('APE_PREPEND_FEATURES', '').split() # features can be specified inline in PRODUCT_EQUATION inline_features = os.environ.get('PRODUCT_EQUATION', '').split() if inline_features: # append inline features features += inline_features else: # fallback: features are specified in equation file feature_file = os.environ.get('PRODUCT_EQUATION_FILENAME', '') if feature_file: # append features from equation file features += get_features_from_equation_file(feature_file) else: if not features: raise EnvironmentIncomplete( 'Error running ape:\n' 'Either the PRODUCT_EQUATION or ' 'PRODUCT_EQUATION_FILENAME environment ' 'variable needs to be set!' ) # run ape with features selected run(sys.argv, features=features)
python
def main(): """ Entry point when used via command line. Features are given using the environment variable ``PRODUCT_EQUATION``. If it is not set, ``PRODUCT_EQUATION_FILENAME`` is tried: if it points to an existing equation file that selection is used. (if ``APE_PREPEND_FEATURES`` is given, those features are prepended) If the list of features is empty, ``ape.EnvironmentIncomplete`` is raised. """ # check APE_PREPEND_FEATURES features = os.environ.get('APE_PREPEND_FEATURES', '').split() # features can be specified inline in PRODUCT_EQUATION inline_features = os.environ.get('PRODUCT_EQUATION', '').split() if inline_features: # append inline features features += inline_features else: # fallback: features are specified in equation file feature_file = os.environ.get('PRODUCT_EQUATION_FILENAME', '') if feature_file: # append features from equation file features += get_features_from_equation_file(feature_file) else: if not features: raise EnvironmentIncomplete( 'Error running ape:\n' 'Either the PRODUCT_EQUATION or ' 'PRODUCT_EQUATION_FILENAME environment ' 'variable needs to be set!' ) # run ape with features selected run(sys.argv, features=features)
[ "def", "main", "(", ")", ":", "# check APE_PREPEND_FEATURES", "features", "=", "os", ".", "environ", ".", "get", "(", "'APE_PREPEND_FEATURES'", ",", "''", ")", ".", "split", "(", ")", "# features can be specified inline in PRODUCT_EQUATION", "inline_features", "=", ...
Entry point when used via command line. Features are given using the environment variable ``PRODUCT_EQUATION``. If it is not set, ``PRODUCT_EQUATION_FILENAME`` is tried: if it points to an existing equation file that selection is used. (if ``APE_PREPEND_FEATURES`` is given, those features are prepended) If the list of features is empty, ``ape.EnvironmentIncomplete`` is raised.
[ "Entry", "point", "when", "used", "via", "command", "line", "." ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/main.py#L137-L172
hzdg/django-ecstatic
ecstatic/management/commands/eccollect.py
CollectNewMixin.collect
def collect(self): """ Perform the bulk of the work of collectstatic. Split off from handle_noargs() to facilitate testing. """ if self.symlink: if sys.platform == 'win32': raise CommandError("Symlinking is not supported by this " "platform (%s)." % sys.platform) if not self.local: raise CommandError("Can't symlink to a remote destination.") if self.clear: self.clear_dir('') handler = self._get_handler() do_post_process = self.post_process and hasattr(self.storage, 'post_process') found_files = SortedDict() for finder in finders.get_finders(): for path, storage in finder.list(self.ignore_patterns): # Prefix the relative path if the source storage contains it if getattr(storage, 'prefix', None): prefixed_path = os.path.join(storage.prefix, path) else: prefixed_path = path if prefixed_path not in found_files: found_files[prefixed_path] = (storage, path) handler(path, prefixed_path, storage) if self.progressive_post_process and do_post_process: try: self._post_process( {prefixed_path: (storage, path)}, self.dry_run) except ValueError as e: message = ('%s current storage requires all files' ' to have been collected first. Try ' ' ecstatic.storage.CachedStaticFilesStorage' \ % e) raise ValueError(message) if not self.progressive_post_process and do_post_process: self._post_process(found_files, self.dry_run) return { 'modified': self.copied_files + self.symlinked_files, 'unmodified': self.unmodified_files, 'post_processed': self.post_processed_files, }
python
def collect(self): """ Perform the bulk of the work of collectstatic. Split off from handle_noargs() to facilitate testing. """ if self.symlink: if sys.platform == 'win32': raise CommandError("Symlinking is not supported by this " "platform (%s)." % sys.platform) if not self.local: raise CommandError("Can't symlink to a remote destination.") if self.clear: self.clear_dir('') handler = self._get_handler() do_post_process = self.post_process and hasattr(self.storage, 'post_process') found_files = SortedDict() for finder in finders.get_finders(): for path, storage in finder.list(self.ignore_patterns): # Prefix the relative path if the source storage contains it if getattr(storage, 'prefix', None): prefixed_path = os.path.join(storage.prefix, path) else: prefixed_path = path if prefixed_path not in found_files: found_files[prefixed_path] = (storage, path) handler(path, prefixed_path, storage) if self.progressive_post_process and do_post_process: try: self._post_process( {prefixed_path: (storage, path)}, self.dry_run) except ValueError as e: message = ('%s current storage requires all files' ' to have been collected first. Try ' ' ecstatic.storage.CachedStaticFilesStorage' \ % e) raise ValueError(message) if not self.progressive_post_process and do_post_process: self._post_process(found_files, self.dry_run) return { 'modified': self.copied_files + self.symlinked_files, 'unmodified': self.unmodified_files, 'post_processed': self.post_processed_files, }
[ "def", "collect", "(", "self", ")", ":", "if", "self", ".", "symlink", ":", "if", "sys", ".", "platform", "==", "'win32'", ":", "raise", "CommandError", "(", "\"Symlinking is not supported by this \"", "\"platform (%s).\"", "%", "sys", ".", "platform", ")", "i...
Perform the bulk of the work of collectstatic. Split off from handle_noargs() to facilitate testing.
[ "Perform", "the", "bulk", "of", "the", "work", "of", "collectstatic", "." ]
train
https://github.com/hzdg/django-ecstatic/blob/e2b9bd57ae19938449315457b31130c8df831911/ecstatic/management/commands/eccollect.py#L43-L94
hzdg/django-ecstatic
ecstatic/management/commands/eccollect.py
CollectNewMixin.compare
def compare(self, path, prefixed_path, source_storage): """ Returns True if the file should be copied. """ # First try a method on the command named compare_<comparison_method> # If that doesn't exist, create a comparitor that calls methods on the # storage with the name <comparison_method>, passing them the name. comparitor = getattr(self, 'compare_%s' % self.comparison_method, None) if not comparitor: comparitor = self._create_comparitor(self.comparison_method) return comparitor(path, prefixed_path, source_storage)
python
def compare(self, path, prefixed_path, source_storage): """ Returns True if the file should be copied. """ # First try a method on the command named compare_<comparison_method> # If that doesn't exist, create a comparitor that calls methods on the # storage with the name <comparison_method>, passing them the name. comparitor = getattr(self, 'compare_%s' % self.comparison_method, None) if not comparitor: comparitor = self._create_comparitor(self.comparison_method) return comparitor(path, prefixed_path, source_storage)
[ "def", "compare", "(", "self", ",", "path", ",", "prefixed_path", ",", "source_storage", ")", ":", "# First try a method on the command named compare_<comparison_method>", "# If that doesn't exist, create a comparitor that calls methods on the", "# storage with the name <comparison_metho...
Returns True if the file should be copied.
[ "Returns", "True", "if", "the", "file", "should", "be", "copied", "." ]
train
https://github.com/hzdg/django-ecstatic/blob/e2b9bd57ae19938449315457b31130c8df831911/ecstatic/management/commands/eccollect.py#L124-L134
calve/prof
prof/session.py
initiate_session
def initiate_session(config): """ Initiate a session globally used in prof : + Retrive the cookie + Log to prof Returns an initiated session """ global baseurl baseurl = config['DEFAULT']['baseurl'] if 'session' in config['DEFAULT']: cookies = { 'PHPSESSID': config['DEFAULT']['session'] } prof_session.cookies = requests.utils.cookiejar_from_dict(cookies) try: valid = verify_session(prof_session, baseurl) if not valid: # Looks like this session is not valid anymore, try to get a new one get_session(prof_session, baseurl, config) return prof_session except: print("{baseurl} not reachable. Verify your connection".format(baseurl=baseurl)) exit(1)
python
def initiate_session(config): """ Initiate a session globally used in prof : + Retrive the cookie + Log to prof Returns an initiated session """ global baseurl baseurl = config['DEFAULT']['baseurl'] if 'session' in config['DEFAULT']: cookies = { 'PHPSESSID': config['DEFAULT']['session'] } prof_session.cookies = requests.utils.cookiejar_from_dict(cookies) try: valid = verify_session(prof_session, baseurl) if not valid: # Looks like this session is not valid anymore, try to get a new one get_session(prof_session, baseurl, config) return prof_session except: print("{baseurl} not reachable. Verify your connection".format(baseurl=baseurl)) exit(1)
[ "def", "initiate_session", "(", "config", ")", ":", "global", "baseurl", "baseurl", "=", "config", "[", "'DEFAULT'", "]", "[", "'baseurl'", "]", "if", "'session'", "in", "config", "[", "'DEFAULT'", "]", ":", "cookies", "=", "{", "'PHPSESSID'", ":", "config...
Initiate a session globally used in prof : + Retrive the cookie + Log to prof Returns an initiated session
[ "Initiate", "a", "session", "globally", "used", "in", "prof", ":", "+", "Retrive", "the", "cookie", "+", "Log", "to", "prof" ]
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/session.py#L20-L43
calve/prof
prof/session.py
verify_session
def verify_session(session, baseurl): """ Check that this session is still valid on this baseurl, ie, we get a list of projects """ request = session.post(baseurl+"/select_projet.php") return VERIFY_SESSION_STRING in request.content.decode('iso-8859-1')
python
def verify_session(session, baseurl): """ Check that this session is still valid on this baseurl, ie, we get a list of projects """ request = session.post(baseurl+"/select_projet.php") return VERIFY_SESSION_STRING in request.content.decode('iso-8859-1')
[ "def", "verify_session", "(", "session", ",", "baseurl", ")", ":", "request", "=", "session", ".", "post", "(", "baseurl", "+", "\"/select_projet.php\"", ")", "return", "VERIFY_SESSION_STRING", "in", "request", ".", "content", ".", "decode", "(", "'iso-8859-1'",...
Check that this session is still valid on this baseurl, ie, we get a list of projects
[ "Check", "that", "this", "session", "is", "still", "valid", "on", "this", "baseurl", "ie", "we", "get", "a", "list", "of", "projects" ]
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/session.py#L46-L51
calve/prof
prof/session.py
get_session
def get_session(session, baseurl, config): """ Try to get a valid session for this baseurl, using login found in config. This function invoques Firefox if necessary """ # Read proxy for firefox if environ.get("HTTP_PROXY"): myProxy = environ.get("HTTP_PROXY") proxy = Proxy({ 'proxyType': ProxyType.MANUAL, 'httpProxy': myProxy, 'ftpProxy': myProxy, 'sslProxy': myProxy, 'noProxy': '' # set this value as desired }) else: proxy = None if 'login' in config['DEFAULT']: login, password = credentials(config['DEFAULT']['login']) else: login, password = credentials() browser = webdriver.Firefox(proxy=proxy) browser.get(baseurl) browser.find_element_by_name('login').send_keys(login) browser.find_element_by_name('passwd').send_keys(password) cookie = {'PHPSESSID': browser.get_cookie('PHPSESSID')['value']} prof_session.cookies = requests.utils.cookiejar_from_dict(cookie) print("Please log using firefox") while True: try: browser.find_element_by_css_selector("select") break except: sleep(0.5) browser.close() set_sessid(cookie['PHPSESSID']) if not verify_session(session, baseurl): print("Cannot get a valid session, retry") get_session(session, baseurl, {'DEFAULT': {}})
python
def get_session(session, baseurl, config): """ Try to get a valid session for this baseurl, using login found in config. This function invoques Firefox if necessary """ # Read proxy for firefox if environ.get("HTTP_PROXY"): myProxy = environ.get("HTTP_PROXY") proxy = Proxy({ 'proxyType': ProxyType.MANUAL, 'httpProxy': myProxy, 'ftpProxy': myProxy, 'sslProxy': myProxy, 'noProxy': '' # set this value as desired }) else: proxy = None if 'login' in config['DEFAULT']: login, password = credentials(config['DEFAULT']['login']) else: login, password = credentials() browser = webdriver.Firefox(proxy=proxy) browser.get(baseurl) browser.find_element_by_name('login').send_keys(login) browser.find_element_by_name('passwd').send_keys(password) cookie = {'PHPSESSID': browser.get_cookie('PHPSESSID')['value']} prof_session.cookies = requests.utils.cookiejar_from_dict(cookie) print("Please log using firefox") while True: try: browser.find_element_by_css_selector("select") break except: sleep(0.5) browser.close() set_sessid(cookie['PHPSESSID']) if not verify_session(session, baseurl): print("Cannot get a valid session, retry") get_session(session, baseurl, {'DEFAULT': {}})
[ "def", "get_session", "(", "session", ",", "baseurl", ",", "config", ")", ":", "# Read proxy for firefox", "if", "environ", ".", "get", "(", "\"HTTP_PROXY\"", ")", ":", "myProxy", "=", "environ", ".", "get", "(", "\"HTTP_PROXY\"", ")", "proxy", "=", "Proxy",...
Try to get a valid session for this baseurl, using login found in config. This function invoques Firefox if necessary
[ "Try", "to", "get", "a", "valid", "session", "for", "this", "baseurl", "using", "login", "found", "in", "config", ".", "This", "function", "invoques", "Firefox", "if", "necessary" ]
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/session.py#L54-L95
calve/prof
prof/session.py
credentials
def credentials(login=None): """ Find user credentials. We should have parsed the command line for a ``--login`` option. We will try to find credentials in environment variables. We will ask user if we cannot find any in arguments nor environment """ if not login: login = environ.get("PROF_LOGIN") password = environ.get("PROF_PASSWORD") if not login: try: login = input("login? ") print("\t\tDon't get prompted everytime. Store your login in the ``~/.profrc`` config file") except KeyboardInterrupt: exit(0) if not password: try: password = getpass.getpass("pass for {0} ? ".format(login)) except KeyboardInterrupt: exit(0) return (login, password)
python
def credentials(login=None): """ Find user credentials. We should have parsed the command line for a ``--login`` option. We will try to find credentials in environment variables. We will ask user if we cannot find any in arguments nor environment """ if not login: login = environ.get("PROF_LOGIN") password = environ.get("PROF_PASSWORD") if not login: try: login = input("login? ") print("\t\tDon't get prompted everytime. Store your login in the ``~/.profrc`` config file") except KeyboardInterrupt: exit(0) if not password: try: password = getpass.getpass("pass for {0} ? ".format(login)) except KeyboardInterrupt: exit(0) return (login, password)
[ "def", "credentials", "(", "login", "=", "None", ")", ":", "if", "not", "login", ":", "login", "=", "environ", ".", "get", "(", "\"PROF_LOGIN\"", ")", "password", "=", "environ", ".", "get", "(", "\"PROF_PASSWORD\"", ")", "if", "not", "login", ":", "tr...
Find user credentials. We should have parsed the command line for a ``--login`` option. We will try to find credentials in environment variables. We will ask user if we cannot find any in arguments nor environment
[ "Find", "user", "credentials", ".", "We", "should", "have", "parsed", "the", "command", "line", "for", "a", "--", "login", "option", ".", "We", "will", "try", "to", "find", "credentials", "in", "environment", "variables", ".", "We", "will", "ask", "user", ...
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/session.py#L98-L118
gr33ndata/dysl
dysl/dyslib/lm.py
LM.display
def display(self): ''' Displays statistics about our LM ''' voc_list = [] doc_ids = self.term_count_n.keys() doc_ids.sort() for doc_id in doc_ids: ngrams = len(self.term_count_n[doc_id]['ngrams']) print 'n-Grams (doc %s): %d' % (str(doc_id), ngrams) ngrams1 = len(self.term_count_n_1[doc_id]['ngrams']) print '(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1) voc_list.append(ngrams) print 'Classed Vocabularies:', voc_list print '' corpus_ngrams = len(self.corpus_count_n['ngrams']) print 'n-Grams (collection): %d' % (corpus_ngrams) corpus_ngrams1 = len(self.corpus_count_n_1['ngrams']) print '(n-1)-Grams (collection): %d' % (corpus_ngrams1) self.unseen_counts.display()
python
def display(self): ''' Displays statistics about our LM ''' voc_list = [] doc_ids = self.term_count_n.keys() doc_ids.sort() for doc_id in doc_ids: ngrams = len(self.term_count_n[doc_id]['ngrams']) print 'n-Grams (doc %s): %d' % (str(doc_id), ngrams) ngrams1 = len(self.term_count_n_1[doc_id]['ngrams']) print '(n-1)-Grams (doc %s): %d' % (str(doc_id), ngrams1) voc_list.append(ngrams) print 'Classed Vocabularies:', voc_list print '' corpus_ngrams = len(self.corpus_count_n['ngrams']) print 'n-Grams (collection): %d' % (corpus_ngrams) corpus_ngrams1 = len(self.corpus_count_n_1['ngrams']) print '(n-1)-Grams (collection): %d' % (corpus_ngrams1) self.unseen_counts.display()
[ "def", "display", "(", "self", ")", ":", "voc_list", "=", "[", "]", "doc_ids", "=", "self", ".", "term_count_n", ".", "keys", "(", ")", "doc_ids", ".", "sort", "(", ")", "for", "doc_id", "in", "doc_ids", ":", "ngrams", "=", "len", "(", "self", ".",...
Displays statistics about our LM
[ "Displays", "statistics", "about", "our", "LM" ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L133-L152
gr33ndata/dysl
dysl/dyslib/lm.py
LM.get_ngram_counts
def get_ngram_counts(self): ''' Returns a list of n-gram counts Array of classes counts and last item is for corpus ''' ngram_counts = { 'classes': [], 'corpus': 0 } doc_ids = self.term_count_n.keys() doc_ids.sort() for doc_id in doc_ids: print self.term_count_n[doc_id] class_ngrams = len(self.term_count_n[doc_id]['ngrams']) ngram_counts['classes'].append(class_ngrams) corpus_ngrams = len(self.corpus_count_n['ngrams']) ngram_counts['corpus'] = corpus_ngrams return ngram_counts
python
def get_ngram_counts(self): ''' Returns a list of n-gram counts Array of classes counts and last item is for corpus ''' ngram_counts = { 'classes': [], 'corpus': 0 } doc_ids = self.term_count_n.keys() doc_ids.sort() for doc_id in doc_ids: print self.term_count_n[doc_id] class_ngrams = len(self.term_count_n[doc_id]['ngrams']) ngram_counts['classes'].append(class_ngrams) corpus_ngrams = len(self.corpus_count_n['ngrams']) ngram_counts['corpus'] = corpus_ngrams return ngram_counts
[ "def", "get_ngram_counts", "(", "self", ")", ":", "ngram_counts", "=", "{", "'classes'", ":", "[", "]", ",", "'corpus'", ":", "0", "}", "doc_ids", "=", "self", ".", "term_count_n", ".", "keys", "(", ")", "doc_ids", ".", "sort", "(", ")", "for", "doc_...
Returns a list of n-gram counts Array of classes counts and last item is for corpus
[ "Returns", "a", "list", "of", "n", "-", "gram", "counts", "Array", "of", "classes", "counts", "and", "last", "item", "is", "for", "corpus" ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L156-L172
gr33ndata/dysl
dysl/dyslib/lm.py
LM.to_ngrams
def to_ngrams(self, terms): ''' Converts terms to all possibe ngrams terms: list of terms ''' if len(terms) <= self.n: return terms if self.n == 1: n_grams = [[term] for term in terms] else: n_grams = [] for i in range(0,len(terms)-self.n+1): n_grams.append(terms[i:i+self.n]) return n_grams
python
def to_ngrams(self, terms): ''' Converts terms to all possibe ngrams terms: list of terms ''' if len(terms) <= self.n: return terms if self.n == 1: n_grams = [[term] for term in terms] else: n_grams = [] for i in range(0,len(terms)-self.n+1): n_grams.append(terms[i:i+self.n]) return n_grams
[ "def", "to_ngrams", "(", "self", ",", "terms", ")", ":", "if", "len", "(", "terms", ")", "<=", "self", ".", "n", ":", "return", "terms", "if", "self", ".", "n", "==", "1", ":", "n_grams", "=", "[", "[", "term", "]", "for", "term", "in", "terms"...
Converts terms to all possibe ngrams terms: list of terms
[ "Converts", "terms", "to", "all", "possibe", "ngrams", "terms", ":", "list", "of", "terms" ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L204-L216
gr33ndata/dysl
dysl/dyslib/lm.py
LM.lr_padding
def lr_padding(self, terms): ''' Pad doc from the left and right before adding, depending on what's in self.lpad and self.rpad If any of them is '', then don't pad there. ''' lpad = rpad = [] if self.lpad: lpad = [self.lpad] * (self.n - 1) if self.rpad: rpad = [self.rpad] * (self.n - 1) return lpad + terms + rpad
python
def lr_padding(self, terms): ''' Pad doc from the left and right before adding, depending on what's in self.lpad and self.rpad If any of them is '', then don't pad there. ''' lpad = rpad = [] if self.lpad: lpad = [self.lpad] * (self.n - 1) if self.rpad: rpad = [self.rpad] * (self.n - 1) return lpad + terms + rpad
[ "def", "lr_padding", "(", "self", ",", "terms", ")", ":", "lpad", "=", "rpad", "=", "[", "]", "if", "self", ".", "lpad", ":", "lpad", "=", "[", "self", ".", "lpad", "]", "*", "(", "self", ".", "n", "-", "1", ")", "if", "self", ".", "rpad", ...
Pad doc from the left and right before adding, depending on what's in self.lpad and self.rpad If any of them is '', then don't pad there.
[ "Pad", "doc", "from", "the", "left", "and", "right", "before", "adding", "depending", "on", "what", "s", "in", "self", ".", "lpad", "and", "self", ".", "rpad", "If", "any", "of", "them", "is", "then", "don", "t", "pad", "there", "." ]
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L218-L229
gr33ndata/dysl
dysl/dyslib/lm.py
LM.add_doc
def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1): ''' Add new document to our Language Model (training phase) doc_id is used here, so we build seperate LF for each doc_id I.e. if you call it more than once with same doc_id, then all terms given via doc_terms will contribute to same LM doc_terms: list of words in document to be added doc_length: the length of the document, you can provide it yourself, otherwise, we use len(doc_terms) instead. ''' if doc_length == -1: self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms)) else: self.update_lengths(doc_id=doc_id, doc_length=int(doc_length)) for term in doc_terms: self.vocabulary.add(term) terms = self.lr_padding(doc_terms) ngrams = self.to_ngrams(terms) self.update_counts(doc_id, ngrams)
python
def add_doc(self, doc_id ='', doc_terms=[], doc_length=-1): ''' Add new document to our Language Model (training phase) doc_id is used here, so we build seperate LF for each doc_id I.e. if you call it more than once with same doc_id, then all terms given via doc_terms will contribute to same LM doc_terms: list of words in document to be added doc_length: the length of the document, you can provide it yourself, otherwise, we use len(doc_terms) instead. ''' if doc_length == -1: self.update_lengths(doc_id=doc_id, doc_length=len(doc_terms)) else: self.update_lengths(doc_id=doc_id, doc_length=int(doc_length)) for term in doc_terms: self.vocabulary.add(term) terms = self.lr_padding(doc_terms) ngrams = self.to_ngrams(terms) self.update_counts(doc_id, ngrams)
[ "def", "add_doc", "(", "self", ",", "doc_id", "=", "''", ",", "doc_terms", "=", "[", "]", ",", "doc_length", "=", "-", "1", ")", ":", "if", "doc_length", "==", "-", "1", ":", "self", ".", "update_lengths", "(", "doc_id", "=", "doc_id", ",", "doc_le...
Add new document to our Language Model (training phase) doc_id is used here, so we build seperate LF for each doc_id I.e. if you call it more than once with same doc_id, then all terms given via doc_terms will contribute to same LM doc_terms: list of words in document to be added doc_length: the length of the document, you can provide it yourself, otherwise, we use len(doc_terms) instead.
[ "Add", "new", "document", "to", "our", "Language", "Model", "(", "training", "phase", ")", "doc_id", "is", "used", "here", "so", "we", "build", "seperate", "LF", "for", "each", "doc_id", "I", ".", "e", ".", "if", "you", "call", "it", "more", "than", ...
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L290-L308
gr33ndata/dysl
dysl/dyslib/lm.py
LM.calculate
def calculate(self, doc_terms=[], actual_id='', doc_length=-1): ''' Given a set of terms, doc_terms We find the doc in training data (calc_id), whose LM is more likely to produce those terms Then return the data structure calculated back doc_length is passed to pr_ngram() and pr_doc() it is up to them to use it or not. normally, it should be ignored if doc_length == -1 calculated{ prob: calculated probability Pr(calc_id/doc_terms) calc_id: Document ID in training data. actual_id: Just returned back as passed to us. seen_unseen_count: Counts for terms seen/unseen in training data } ''' calculated = { 'prob': -1, 'calc_id': '', 'actual_id': actual_id, 'seen_unseen_count': (0,0), 'all_probs': [] } terms = self.lr_padding(doc_terms) ngrams = self.to_ngrams(terms) for doc_id in self.term_count_n: #print '\n', doc_id, ':' doc_pr = 0 new_doc = True seen_count = 0 unseen_count = 0 for ngram in ngrams: (ngram_pr, ngram_seen) = self.pr_ngram(doc_id, ngram, new_doc=new_doc, log=True, logbase=2, doc_length=doc_length) doc_pr += ngram_pr new_doc = False if ngram_seen: seen_count += 1 else: unseen_count += 1 doc_pr += self.pr_doc(doc_id, doc_length=doc_length) if self.verbose: print doc_id, actual_id, doc_pr calculated['all_probs'].append(doc_pr) if calculated['prob'] == -1 or doc_pr < calculated['prob']: calculated['prob'] = doc_pr calculated['calc_id'] = doc_id calculated['seen_unseen_count'] = (seen_count, unseen_count) self.unseen_counts.per_doc(doc_id=calculated['actual_id'], seen_unseen=calculated['seen_unseen_count']) self.unseen_counts.per_cic(calculated_id=calculated['calc_id'], actual_id=calculated['actual_id'], seen_unseen=calculated['seen_unseen_count']) return calculated
python
def calculate(self, doc_terms=[], actual_id='', doc_length=-1): ''' Given a set of terms, doc_terms We find the doc in training data (calc_id), whose LM is more likely to produce those terms Then return the data structure calculated back doc_length is passed to pr_ngram() and pr_doc() it is up to them to use it or not. normally, it should be ignored if doc_length == -1 calculated{ prob: calculated probability Pr(calc_id/doc_terms) calc_id: Document ID in training data. actual_id: Just returned back as passed to us. seen_unseen_count: Counts for terms seen/unseen in training data } ''' calculated = { 'prob': -1, 'calc_id': '', 'actual_id': actual_id, 'seen_unseen_count': (0,0), 'all_probs': [] } terms = self.lr_padding(doc_terms) ngrams = self.to_ngrams(terms) for doc_id in self.term_count_n: #print '\n', doc_id, ':' doc_pr = 0 new_doc = True seen_count = 0 unseen_count = 0 for ngram in ngrams: (ngram_pr, ngram_seen) = self.pr_ngram(doc_id, ngram, new_doc=new_doc, log=True, logbase=2, doc_length=doc_length) doc_pr += ngram_pr new_doc = False if ngram_seen: seen_count += 1 else: unseen_count += 1 doc_pr += self.pr_doc(doc_id, doc_length=doc_length) if self.verbose: print doc_id, actual_id, doc_pr calculated['all_probs'].append(doc_pr) if calculated['prob'] == -1 or doc_pr < calculated['prob']: calculated['prob'] = doc_pr calculated['calc_id'] = doc_id calculated['seen_unseen_count'] = (seen_count, unseen_count) self.unseen_counts.per_doc(doc_id=calculated['actual_id'], seen_unseen=calculated['seen_unseen_count']) self.unseen_counts.per_cic(calculated_id=calculated['calc_id'], actual_id=calculated['actual_id'], seen_unseen=calculated['seen_unseen_count']) return calculated
[ "def", "calculate", "(", "self", ",", "doc_terms", "=", "[", "]", ",", "actual_id", "=", "''", ",", "doc_length", "=", "-", "1", ")", ":", "calculated", "=", "{", "'prob'", ":", "-", "1", ",", "'calc_id'", ":", "''", ",", "'actual_id'", ":", "actua...
Given a set of terms, doc_terms We find the doc in training data (calc_id), whose LM is more likely to produce those terms Then return the data structure calculated back doc_length is passed to pr_ngram() and pr_doc() it is up to them to use it or not. normally, it should be ignored if doc_length == -1 calculated{ prob: calculated probability Pr(calc_id/doc_terms) calc_id: Document ID in training data. actual_id: Just returned back as passed to us. seen_unseen_count: Counts for terms seen/unseen in training data }
[ "Given", "a", "set", "of", "terms", "doc_terms", "We", "find", "the", "doc", "in", "training", "data", "(", "calc_id", ")", "whose", "LM", "is", "more", "likely", "to", "produce", "those", "terms", "Then", "return", "the", "data", "structure", "calculated"...
train
https://github.com/gr33ndata/dysl/blob/649c1d6a1761f47d49a9842e7389f6df52039155/dysl/dyslib/lm.py#L413-L466
jamesabel/pressenter2exit
pressenter2exit/pressenter2exitgui.py
PressEnter2ExitGUI.run
def run(self): """ pop up a dialog box and return when the user has closed it """ response = None root = tkinter.Tk() root.withdraw() while response is not True: response = tkinter.messagebox.askokcancel(title=self.title, message=self.pre_message) if self.post_message: print(self.post_message) self.exit_time = time.time()
python
def run(self): """ pop up a dialog box and return when the user has closed it """ response = None root = tkinter.Tk() root.withdraw() while response is not True: response = tkinter.messagebox.askokcancel(title=self.title, message=self.pre_message) if self.post_message: print(self.post_message) self.exit_time = time.time()
[ "def", "run", "(", "self", ")", ":", "response", "=", "None", "root", "=", "tkinter", ".", "Tk", "(", ")", "root", ".", "withdraw", "(", ")", "while", "response", "is", "not", "True", ":", "response", "=", "tkinter", ".", "messagebox", ".", "askokcan...
pop up a dialog box and return when the user has closed it
[ "pop", "up", "a", "dialog", "box", "and", "return", "when", "the", "user", "has", "closed", "it" ]
train
https://github.com/jamesabel/pressenter2exit/blob/bea51dd0dfb2cb61fe9a0e61f0ccb799d9e16dd9/pressenter2exit/pressenter2exitgui.py#L29-L40
MatthewScholefield/petact
petact/petact.py
download
def download(url, file=None): """ Pass file as a filename, open file object, or None to return the request bytes Args: url (str): URL of file to download file (Union[str, io, None]): One of the following: - Filename of output file - File opened in binary write mode - None: Return raw bytes instead Returns: Union[bytes, None]: Bytes of file if file is None """ import urllib.request import shutil if isinstance(file, str): file = open(file, 'wb') try: with urllib.request.urlopen(url) as response: if file: shutil.copyfileobj(response, file) else: return response.read() finally: if file: file.close()
python
def download(url, file=None): """ Pass file as a filename, open file object, or None to return the request bytes Args: url (str): URL of file to download file (Union[str, io, None]): One of the following: - Filename of output file - File opened in binary write mode - None: Return raw bytes instead Returns: Union[bytes, None]: Bytes of file if file is None """ import urllib.request import shutil if isinstance(file, str): file = open(file, 'wb') try: with urllib.request.urlopen(url) as response: if file: shutil.copyfileobj(response, file) else: return response.read() finally: if file: file.close()
[ "def", "download", "(", "url", ",", "file", "=", "None", ")", ":", "import", "urllib", ".", "request", "import", "shutil", "if", "isinstance", "(", "file", ",", "str", ")", ":", "file", "=", "open", "(", "file", ",", "'wb'", ")", "try", ":", "with"...
Pass file as a filename, open file object, or None to return the request bytes Args: url (str): URL of file to download file (Union[str, io, None]): One of the following: - Filename of output file - File opened in binary write mode - None: Return raw bytes instead Returns: Union[bytes, None]: Bytes of file if file is None
[ "Pass", "file", "as", "a", "filename", "open", "file", "object", "or", "None", "to", "return", "the", "request", "bytes" ]
train
https://github.com/MatthewScholefield/petact/blob/76988a4e38c451b2e39c1efc217c46e66c83fe94/petact/petact.py#L32-L58
MatthewScholefield/petact
petact/petact.py
download_extract_tar
def download_extract_tar(tar_url, folder, tar_filename=''): """ Download and extract the tar at the url to the given folder Args: tar_url (str): URL of tar file to download folder (str): Location of parent directory to extract to. Doesn't have to exist tar_filename (str): Location to download tar. Default is to a temp file """ try: makedirs(folder) except OSError: if not isdir(folder): raise data_file = tar_filename if not data_file: fd, data_file = mkstemp('.tar.gz') download(tar_url, os.fdopen(fd, 'wb')) else: download(tar_url, data_file) with tarfile.open(data_file) as tar: tar.extractall(path=folder)
python
def download_extract_tar(tar_url, folder, tar_filename=''): """ Download and extract the tar at the url to the given folder Args: tar_url (str): URL of tar file to download folder (str): Location of parent directory to extract to. Doesn't have to exist tar_filename (str): Location to download tar. Default is to a temp file """ try: makedirs(folder) except OSError: if not isdir(folder): raise data_file = tar_filename if not data_file: fd, data_file = mkstemp('.tar.gz') download(tar_url, os.fdopen(fd, 'wb')) else: download(tar_url, data_file) with tarfile.open(data_file) as tar: tar.extractall(path=folder)
[ "def", "download_extract_tar", "(", "tar_url", ",", "folder", ",", "tar_filename", "=", "''", ")", ":", "try", ":", "makedirs", "(", "folder", ")", "except", "OSError", ":", "if", "not", "isdir", "(", "folder", ")", ":", "raise", "data_file", "=", "tar_f...
Download and extract the tar at the url to the given folder Args: tar_url (str): URL of tar file to download folder (str): Location of parent directory to extract to. Doesn't have to exist tar_filename (str): Location to download tar. Default is to a temp file
[ "Download", "and", "extract", "the", "tar", "at", "the", "url", "to", "the", "given", "folder" ]
train
https://github.com/MatthewScholefield/petact/blob/76988a4e38c451b2e39c1efc217c46e66c83fe94/petact/petact.py#L61-L83
MatthewScholefield/petact
petact/petact.py
install_package
def install_package(tar_url, folder, md5_url='{tar_url}.md5', on_download=lambda: None, on_complete=lambda: None): """ Install or update a tar package that has an md5 Args: tar_url (str): URL of package to download folder (str): Location to extract tar. Will be created if doesn't exist md5_url (str): URL of md5 to use to check for updates on_download (Callable): Function that gets called when downloading a new update on_complete (Callable): Function that gets called when a new download is complete Returns: bool: Whether the package was updated """ data_file = join(folder, basename(tar_url)) md5_url = md5_url.format(tar_url=tar_url) try: remote_md5 = download(md5_url).decode('utf-8').split(' ')[0] except (UnicodeDecodeError, URLError): raise ValueError('Invalid MD5 url: ' + md5_url) if remote_md5 != calc_md5(data_file): on_download() if isfile(data_file): try: with tarfile.open(data_file) as tar: for i in reversed(list(tar)): try: os.remove(join(folder, i.path)) except OSError: pass except (OSError, EOFError): pass download_extract_tar(tar_url, folder, data_file) on_complete() if remote_md5 != calc_md5(data_file): raise ValueError('MD5 url does not match tar: ' + md5_url) return True return False
python
def install_package(tar_url, folder, md5_url='{tar_url}.md5', on_download=lambda: None, on_complete=lambda: None): """ Install or update a tar package that has an md5 Args: tar_url (str): URL of package to download folder (str): Location to extract tar. Will be created if doesn't exist md5_url (str): URL of md5 to use to check for updates on_download (Callable): Function that gets called when downloading a new update on_complete (Callable): Function that gets called when a new download is complete Returns: bool: Whether the package was updated """ data_file = join(folder, basename(tar_url)) md5_url = md5_url.format(tar_url=tar_url) try: remote_md5 = download(md5_url).decode('utf-8').split(' ')[0] except (UnicodeDecodeError, URLError): raise ValueError('Invalid MD5 url: ' + md5_url) if remote_md5 != calc_md5(data_file): on_download() if isfile(data_file): try: with tarfile.open(data_file) as tar: for i in reversed(list(tar)): try: os.remove(join(folder, i.path)) except OSError: pass except (OSError, EOFError): pass download_extract_tar(tar_url, folder, data_file) on_complete() if remote_md5 != calc_md5(data_file): raise ValueError('MD5 url does not match tar: ' + md5_url) return True return False
[ "def", "install_package", "(", "tar_url", ",", "folder", ",", "md5_url", "=", "'{tar_url}.md5'", ",", "on_download", "=", "lambda", ":", "None", ",", "on_complete", "=", "lambda", ":", "None", ")", ":", "data_file", "=", "join", "(", "folder", ",", "basena...
Install or update a tar package that has an md5 Args: tar_url (str): URL of package to download folder (str): Location to extract tar. Will be created if doesn't exist md5_url (str): URL of md5 to use to check for updates on_download (Callable): Function that gets called when downloading a new update on_complete (Callable): Function that gets called when a new download is complete Returns: bool: Whether the package was updated
[ "Install", "or", "update", "a", "tar", "package", "that", "has", "an", "md5" ]
train
https://github.com/MatthewScholefield/petact/blob/76988a4e38c451b2e39c1efc217c46e66c83fe94/petact/petact.py#L86-L126
randomdude999/rule_n
rule_n.py
_process_cell
def _process_cell(i, state, finite=False): """Process 3 cells and return a value from 0 to 7. """ op_1 = state[i - 1] op_2 = state[i] if i == len(state) - 1: if finite: op_3 = state[0] else: op_3 = 0 else: op_3 = state[i + 1] result = 0 for i, val in enumerate([op_3, op_2, op_1]): if val: result += 2**i return result
python
def _process_cell(i, state, finite=False): """Process 3 cells and return a value from 0 to 7. """ op_1 = state[i - 1] op_2 = state[i] if i == len(state) - 1: if finite: op_3 = state[0] else: op_3 = 0 else: op_3 = state[i + 1] result = 0 for i, val in enumerate([op_3, op_2, op_1]): if val: result += 2**i return result
[ "def", "_process_cell", "(", "i", ",", "state", ",", "finite", "=", "False", ")", ":", "op_1", "=", "state", "[", "i", "-", "1", "]", "op_2", "=", "state", "[", "i", "]", "if", "i", "==", "len", "(", "state", ")", "-", "1", ":", "if", "finite...
Process 3 cells and return a value from 0 to 7.
[ "Process", "3", "cells", "and", "return", "a", "value", "from", "0", "to", "7", "." ]
train
https://github.com/randomdude999/rule_n/blob/4d8d72e71a9f1eaacb193d5b4383fba9f8cf67a6/rule_n.py#L73-L88
randomdude999/rule_n
rule_n.py
_remove_lead_trail_false
def _remove_lead_trail_false(bool_list): """Remove leading and trailing false's from a list""" # The internet can be a wonderful place... for i in (0, -1): while bool_list and not bool_list[i]: bool_list.pop(i) return bool_list
python
def _remove_lead_trail_false(bool_list): """Remove leading and trailing false's from a list""" # The internet can be a wonderful place... for i in (0, -1): while bool_list and not bool_list[i]: bool_list.pop(i) return bool_list
[ "def", "_remove_lead_trail_false", "(", "bool_list", ")", ":", "# The internet can be a wonderful place...", "for", "i", "in", "(", "0", ",", "-", "1", ")", ":", "while", "bool_list", "and", "not", "bool_list", "[", "i", "]", ":", "bool_list", ".", "pop", "(...
Remove leading and trailing false's from a list
[ "Remove", "leading", "and", "trailing", "false", "s", "from", "a", "list" ]
train
https://github.com/randomdude999/rule_n/blob/4d8d72e71a9f1eaacb193d5b4383fba9f8cf67a6/rule_n.py#L91-L97
randomdude999/rule_n
rule_n.py
_crop_list_to_size
def _crop_list_to_size(l, size): """Make a list a certain size""" for x in range(size - len(l)): l.append(False) for x in range(len(l) - size): l.pop() return l
python
def _crop_list_to_size(l, size): """Make a list a certain size""" for x in range(size - len(l)): l.append(False) for x in range(len(l) - size): l.pop() return l
[ "def", "_crop_list_to_size", "(", "l", ",", "size", ")", ":", "for", "x", "in", "range", "(", "size", "-", "len", "(", "l", ")", ")", ":", "l", ".", "append", "(", "False", ")", "for", "x", "in", "range", "(", "len", "(", "l", ")", "-", "size...
Make a list a certain size
[ "Make", "a", "list", "a", "certain", "size" ]
train
https://github.com/randomdude999/rule_n/blob/4d8d72e71a9f1eaacb193d5b4383fba9f8cf67a6/rule_n.py#L100-L106
randomdude999/rule_n
rule_n.py
RuleN.process
def process(self, state): """Process a state and return the next state Usage: out = rule_110.process([True, False, True]) len(out) # 5, because a False is added to either side out == [True, True, True, True, False] out = rule_110.process([False, True, False, True]) len(out) # still 5, because leading / trailing False's are removed out2 = rule_110.process([1, 0, 1]) # Any data type in the list is okay, as # long as it's boolean value is correct out == out2 """ if not isinstance(state, list): raise TypeError("state must be list") if self.finite_canvas: state = _crop_list_to_size(state, self.canvas_size) else: state = _remove_lead_trail_false(state) state.insert(0, self.default_val) state.append(self.default_val) new_state = [] for i in range(0, len(state)): result = _process_cell(i, state, finite=self.finite_canvas) new_state.append(self.rules[result]) return new_state
python
def process(self, state): """Process a state and return the next state Usage: out = rule_110.process([True, False, True]) len(out) # 5, because a False is added to either side out == [True, True, True, True, False] out = rule_110.process([False, True, False, True]) len(out) # still 5, because leading / trailing False's are removed out2 = rule_110.process([1, 0, 1]) # Any data type in the list is okay, as # long as it's boolean value is correct out == out2 """ if not isinstance(state, list): raise TypeError("state must be list") if self.finite_canvas: state = _crop_list_to_size(state, self.canvas_size) else: state = _remove_lead_trail_false(state) state.insert(0, self.default_val) state.append(self.default_val) new_state = [] for i in range(0, len(state)): result = _process_cell(i, state, finite=self.finite_canvas) new_state.append(self.rules[result]) return new_state
[ "def", "process", "(", "self", ",", "state", ")", ":", "if", "not", "isinstance", "(", "state", ",", "list", ")", ":", "raise", "TypeError", "(", "\"state must be list\"", ")", "if", "self", ".", "finite_canvas", ":", "state", "=", "_crop_list_to_size", "(...
Process a state and return the next state Usage: out = rule_110.process([True, False, True]) len(out) # 5, because a False is added to either side out == [True, True, True, True, False] out = rule_110.process([False, True, False, True]) len(out) # still 5, because leading / trailing False's are removed out2 = rule_110.process([1, 0, 1]) # Any data type in the list is okay, as # long as it's boolean value is correct out == out2
[ "Process", "a", "state", "and", "return", "the", "next", "state", "Usage", ":" ]
train
https://github.com/randomdude999/rule_n/blob/4d8d72e71a9f1eaacb193d5b4383fba9f8cf67a6/rule_n.py#L206-L231
randomdude999/rule_n
rule_n.py
RuleN.iterate
def iterate(self, state): """Process a starting state over and over again. Example: for x in rule_110.iterate(state): # Do something with the current state here # Note: You should break this yourself # This breaks automatically if the previous state was the same as the # current one, but that's not gonna happen on an infinite canvas """ cur_state = state old_state = cur_state while True: cur_state = self.process(cur_state) if old_state == cur_state: break old_state = cur_state yield cur_state
python
def iterate(self, state): """Process a starting state over and over again. Example: for x in rule_110.iterate(state): # Do something with the current state here # Note: You should break this yourself # This breaks automatically if the previous state was the same as the # current one, but that's not gonna happen on an infinite canvas """ cur_state = state old_state = cur_state while True: cur_state = self.process(cur_state) if old_state == cur_state: break old_state = cur_state yield cur_state
[ "def", "iterate", "(", "self", ",", "state", ")", ":", "cur_state", "=", "state", "old_state", "=", "cur_state", "while", "True", ":", "cur_state", "=", "self", ".", "process", "(", "cur_state", ")", "if", "old_state", "==", "cur_state", ":", "break", "o...
Process a starting state over and over again. Example: for x in rule_110.iterate(state): # Do something with the current state here # Note: You should break this yourself # This breaks automatically if the previous state was the same as the # current one, but that's not gonna happen on an infinite canvas
[ "Process", "a", "starting", "state", "over", "and", "over", "again", ".", "Example", ":" ]
train
https://github.com/randomdude999/rule_n/blob/4d8d72e71a9f1eaacb193d5b4383fba9f8cf67a6/rule_n.py#L233-L249
bioidiap/gridtk
gridtk/local.py
JobManagerLocal.submit
def submit(self, command_line, name = None, array = None, dependencies = [], exec_dir = None, log_dir = None, dry_run = False, stop_on_failure = False, **kwargs): """Submits a job that will be executed on the local machine during a call to "run". All kwargs will simply be ignored.""" # remove duplicate dependencies dependencies = sorted(list(set(dependencies))) # add job to database self.lock() job = add_job(self.session, command_line=command_line, name=name, dependencies=dependencies, array=array, exec_dir=exec_dir, log_dir=log_dir, stop_on_failure=stop_on_failure) logger.info("Added job '%s' to the database", job) if dry_run: print("Would have added the Job", job, "to the database to be executed locally.") self.session.delete(job) logger.info("Deleted job '%s' from the database due to dry-run option", job) job_id = None else: job_id = job.unique # return the new job id self.unlock() return job_id
python
def submit(self, command_line, name = None, array = None, dependencies = [], exec_dir = None, log_dir = None, dry_run = False, stop_on_failure = False, **kwargs): """Submits a job that will be executed on the local machine during a call to "run". All kwargs will simply be ignored.""" # remove duplicate dependencies dependencies = sorted(list(set(dependencies))) # add job to database self.lock() job = add_job(self.session, command_line=command_line, name=name, dependencies=dependencies, array=array, exec_dir=exec_dir, log_dir=log_dir, stop_on_failure=stop_on_failure) logger.info("Added job '%s' to the database", job) if dry_run: print("Would have added the Job", job, "to the database to be executed locally.") self.session.delete(job) logger.info("Deleted job '%s' from the database due to dry-run option", job) job_id = None else: job_id = job.unique # return the new job id self.unlock() return job_id
[ "def", "submit", "(", "self", ",", "command_line", ",", "name", "=", "None", ",", "array", "=", "None", ",", "dependencies", "=", "[", "]", ",", "exec_dir", "=", "None", ",", "log_dir", "=", "None", ",", "dry_run", "=", "False", ",", "stop_on_failure",...
Submits a job that will be executed on the local machine during a call to "run". All kwargs will simply be ignored.
[ "Submits", "a", "job", "that", "will", "be", "executed", "on", "the", "local", "machine", "during", "a", "call", "to", "run", ".", "All", "kwargs", "will", "simply", "be", "ignored", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/local.py#L41-L62
bioidiap/gridtk
gridtk/local.py
JobManagerLocal.resubmit
def resubmit(self, job_ids = None, also_success = False, running_jobs = False, new_command=None, keep_logs=False, **kwargs): """Re-submit jobs automatically""" self.lock() # iterate over all jobs jobs = self.get_jobs(job_ids) if new_command is not None: if len(jobs) == 1: jobs[0].set_command_line(new_command) else: logger.warn("Ignoring new command since no single job id was specified") accepted_old_status = ('submitted', 'success', 'failure') if also_success else ('submitted', 'failure',) for job in jobs: # check if this job needs re-submission if running_jobs or job.status in accepted_old_status: if job.queue_name != 'local' and job.status == 'executing': logger.error("Cannot re-submit job '%s' locally since it is still running in the grid. Use 'jman stop' to stop it\'s execution!", job) else: # re-submit job to the grid logger.info("Re-submitted job '%s' to the database", job) if not keep_logs: self.delete_logs(job) job.submit('local') self.session.commit() self.unlock()
python
def resubmit(self, job_ids = None, also_success = False, running_jobs = False, new_command=None, keep_logs=False, **kwargs): """Re-submit jobs automatically""" self.lock() # iterate over all jobs jobs = self.get_jobs(job_ids) if new_command is not None: if len(jobs) == 1: jobs[0].set_command_line(new_command) else: logger.warn("Ignoring new command since no single job id was specified") accepted_old_status = ('submitted', 'success', 'failure') if also_success else ('submitted', 'failure',) for job in jobs: # check if this job needs re-submission if running_jobs or job.status in accepted_old_status: if job.queue_name != 'local' and job.status == 'executing': logger.error("Cannot re-submit job '%s' locally since it is still running in the grid. Use 'jman stop' to stop it\'s execution!", job) else: # re-submit job to the grid logger.info("Re-submitted job '%s' to the database", job) if not keep_logs: self.delete_logs(job) job.submit('local') self.session.commit() self.unlock()
[ "def", "resubmit", "(", "self", ",", "job_ids", "=", "None", ",", "also_success", "=", "False", ",", "running_jobs", "=", "False", ",", "new_command", "=", "None", ",", "keep_logs", "=", "False", ",", "*", "*", "kwargs", ")", ":", "self", ".", "lock", ...
Re-submit jobs automatically
[ "Re", "-", "submit", "jobs", "automatically" ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/local.py#L65-L89
bioidiap/gridtk
gridtk/local.py
JobManagerLocal.stop_jobs
def stop_jobs(self, job_ids=None): """Resets the status of the job to 'submitted' when they are labeled as 'executing'.""" self.lock() jobs = self.get_jobs(job_ids) for job in jobs: if job.status in ('executing', 'queued', 'waiting') and job.queue_name == 'local': logger.info("Reset job '%s' (%s) in the database", job.name, self._format_log(job.id)) job.submit() self.session.commit() self.unlock()
python
def stop_jobs(self, job_ids=None): """Resets the status of the job to 'submitted' when they are labeled as 'executing'.""" self.lock() jobs = self.get_jobs(job_ids) for job in jobs: if job.status in ('executing', 'queued', 'waiting') and job.queue_name == 'local': logger.info("Reset job '%s' (%s) in the database", job.name, self._format_log(job.id)) job.submit() self.session.commit() self.unlock()
[ "def", "stop_jobs", "(", "self", ",", "job_ids", "=", "None", ")", ":", "self", ".", "lock", "(", ")", "jobs", "=", "self", ".", "get_jobs", "(", "job_ids", ")", "for", "job", "in", "jobs", ":", "if", "job", ".", "status", "in", "(", "'executing'",...
Resets the status of the job to 'submitted' when they are labeled as 'executing'.
[ "Resets", "the", "status", "of", "the", "job", "to", "submitted", "when", "they", "are", "labeled", "as", "executing", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/local.py#L92-L103
bioidiap/gridtk
gridtk/local.py
JobManagerLocal.stop_job
def stop_job(self, job_id, array_id = None): """Resets the status of the given to 'submitted' when they are labeled as 'executing'.""" self.lock() job, array_job = self._job_and_array(job_id, array_id) if job is not None: if job.status in ('executing', 'queued', 'waiting'): logger.info("Reset job '%s' (%s) in the database", job.name, self._format_log(job.id)) job.status = 'submitted' if array_job is not None and array_job.status in ('executing', 'queued', 'waiting'): logger.debug("Reset array job '%s' in the database", array_job) array_job.status = 'submitted' if array_job is None: for array_job in job.array: if array_job.status in ('executing', 'queued', 'waiting'): logger.debug("Reset array job '%s' in the database", array_job) array_job.status = 'submitted' self.session.commit() self.unlock()
python
def stop_job(self, job_id, array_id = None): """Resets the status of the given to 'submitted' when they are labeled as 'executing'.""" self.lock() job, array_job = self._job_and_array(job_id, array_id) if job is not None: if job.status in ('executing', 'queued', 'waiting'): logger.info("Reset job '%s' (%s) in the database", job.name, self._format_log(job.id)) job.status = 'submitted' if array_job is not None and array_job.status in ('executing', 'queued', 'waiting'): logger.debug("Reset array job '%s' in the database", array_job) array_job.status = 'submitted' if array_job is None: for array_job in job.array: if array_job.status in ('executing', 'queued', 'waiting'): logger.debug("Reset array job '%s' in the database", array_job) array_job.status = 'submitted' self.session.commit() self.unlock()
[ "def", "stop_job", "(", "self", ",", "job_id", ",", "array_id", "=", "None", ")", ":", "self", ".", "lock", "(", ")", "job", ",", "array_job", "=", "self", ".", "_job_and_array", "(", "job_id", ",", "array_id", ")", "if", "job", "is", "not", "None", ...
Resets the status of the given to 'submitted' when they are labeled as 'executing'.
[ "Resets", "the", "status", "of", "the", "given", "to", "submitted", "when", "they", "are", "labeled", "as", "executing", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/local.py#L105-L125
bioidiap/gridtk
gridtk/local.py
JobManagerLocal._run_parallel_job
def _run_parallel_job(self, job_id, array_id = None, no_log = False, nice = None, verbosity = 0): """Executes the code for this job on the local machine.""" environ = copy.deepcopy(os.environ) environ['JOB_ID'] = str(job_id) if array_id: environ['SGE_TASK_ID'] = str(array_id) else: environ['SGE_TASK_ID'] = 'undefined' # generate call to the wrapper script command = [self.wrapper_script, '-l%sd'%("v"*verbosity), self._database, 'run-job'] if nice is not None: command = ['nice', '-n%d'%nice] + command job, array_job = self._job_and_array(job_id, array_id) if job is None: # rare case: job was deleted before starting return None logger.info("Starting execution of Job '%s' (%s)", job.name, self._format_log(job_id, array_id, len(job.array))) # create log files if no_log or job.log_dir is None: out, err = sys.stdout, sys.stderr else: makedirs_safe(job.log_dir) # create line-buffered files for writing output and error status if array_job is not None: out, err = open(array_job.std_out_file(), 'w', 1), open(array_job.std_err_file(), 'w', 1) else: out, err = open(job.std_out_file(), 'w', 1), open(job.std_err_file(), 'w', 1) # return the subprocess pipe to the process try: return subprocess.Popen(command, env=environ, stdout=out, stderr=err, bufsize=1) except OSError as e: logger.error("Could not execute job '%s' (%s) locally\n- reason:\t%s\n- command line:\t%s\n- directory:\t%s\n- command:\t%s", job.name, self._format_log(job_id, array_id, len(job.array)), e, " ".join(job.get_command_line()), "." if job.exec_dir is None else job.exec_dir, " ".join(command)) job.finish(117, array_id) # ASCII 'O' return None
python
def _run_parallel_job(self, job_id, array_id = None, no_log = False, nice = None, verbosity = 0): """Executes the code for this job on the local machine.""" environ = copy.deepcopy(os.environ) environ['JOB_ID'] = str(job_id) if array_id: environ['SGE_TASK_ID'] = str(array_id) else: environ['SGE_TASK_ID'] = 'undefined' # generate call to the wrapper script command = [self.wrapper_script, '-l%sd'%("v"*verbosity), self._database, 'run-job'] if nice is not None: command = ['nice', '-n%d'%nice] + command job, array_job = self._job_and_array(job_id, array_id) if job is None: # rare case: job was deleted before starting return None logger.info("Starting execution of Job '%s' (%s)", job.name, self._format_log(job_id, array_id, len(job.array))) # create log files if no_log or job.log_dir is None: out, err = sys.stdout, sys.stderr else: makedirs_safe(job.log_dir) # create line-buffered files for writing output and error status if array_job is not None: out, err = open(array_job.std_out_file(), 'w', 1), open(array_job.std_err_file(), 'w', 1) else: out, err = open(job.std_out_file(), 'w', 1), open(job.std_err_file(), 'w', 1) # return the subprocess pipe to the process try: return subprocess.Popen(command, env=environ, stdout=out, stderr=err, bufsize=1) except OSError as e: logger.error("Could not execute job '%s' (%s) locally\n- reason:\t%s\n- command line:\t%s\n- directory:\t%s\n- command:\t%s", job.name, self._format_log(job_id, array_id, len(job.array)), e, " ".join(job.get_command_line()), "." if job.exec_dir is None else job.exec_dir, " ".join(command)) job.finish(117, array_id) # ASCII 'O' return None
[ "def", "_run_parallel_job", "(", "self", ",", "job_id", ",", "array_id", "=", "None", ",", "no_log", "=", "False", ",", "nice", "=", "None", ",", "verbosity", "=", "0", ")", ":", "environ", "=", "copy", ".", "deepcopy", "(", "os", ".", "environ", ")"...
Executes the code for this job on the local machine.
[ "Executes", "the", "code", "for", "this", "job", "on", "the", "local", "machine", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/local.py#L131-L169
bioidiap/gridtk
gridtk/local.py
JobManagerLocal.run_scheduler
def run_scheduler(self, parallel_jobs = 1, job_ids = None, sleep_time = 0.1, die_when_finished = False, no_log = False, nice = None, verbosity = 0): """Starts the scheduler, which is constantly checking for jobs that should be ran.""" running_tasks = [] finished_tasks = set() try: # keep the scheduler alive until every job is finished or the KeyboardInterrupt is caught while True: # Flag that might be set in some rare cases, and that prevents the scheduler to die repeat_execution = False # FIRST, try if there are finished processes for task_index in range(len(running_tasks)-1, -1, -1): task = running_tasks[task_index] process = task[0] if process.poll() is not None: # process ended job_id = task[1] array_id = task[2] if len(task) > 2 else None self.lock() job, array_job = self._job_and_array(job_id, array_id) if job is not None: jj = array_job if array_job is not None else job result = "%s (%d)" % (jj.status, jj.result) if jj.result is not None else "%s (?)" % jj.status if jj.status not in ('success', 'failure'): logger.error("Job '%s' (%s) finished with status '%s' instead of 'success' or 'failure'. Usually this means an internal error. Check your wrapper_script parameter!", job.name, self._format_log(job_id, array_id), jj.status) raise StopIteration("Job did not finish correctly.") logger.info("Job '%s' (%s) finished execution with result '%s'", job.name, self._format_log(job_id, array_id), result) self.unlock() finished_tasks.add(job_id) # in any case, remove the job from the list del running_tasks[task_index] # SECOND, check if new jobs can be submitted; THIS NEEDS TO LOCK THE DATABASE if len(running_tasks) < parallel_jobs: # get all unfinished jobs: self.lock() jobs = self.get_jobs(job_ids) # put all new jobs into the queue for job in jobs: if job.status == 'submitted' and job.queue_name == 'local': job.queue() # get all unfinished jobs that are submitted to the local queue unfinished_jobs = [job for job in jobs if job.status in ('queued', 'executing') and job.queue_name == 'local'] for job in unfinished_jobs: if job.array: # find array jobs that can run queued_array_jobs = [array_job for array_job in job.array if array_job.status == 'queued'] if not len(queued_array_jobs): job.finish(0, -1) repeat_execution = True else: # there are new array jobs to run for i in range(min(parallel_jobs - len(running_tasks), len(queued_array_jobs))): array_job = queued_array_jobs[i] # start a new job from the array process = self._run_parallel_job(job.unique, array_job.id, no_log=no_log, nice=nice, verbosity=verbosity) if process is None: continue running_tasks.append((process, job.unique, array_job.id)) # we here set the status to executing manually to avoid jobs to be run twice # e.g., if the loop is executed while the asynchronous job did not start yet array_job.status = 'executing' job.status = 'executing' if len(running_tasks) == parallel_jobs: break else: if job.status == 'queued': # start a new job process = self._run_parallel_job(job.unique, no_log=no_log, nice=nice, verbosity=verbosity) if process is None: continue running_tasks.append((process, job.unique)) # we here set the status to executing manually to avoid jobs to be run twice # e.g., if the loop is executed while the asynchronous job did not start yet job.status = 'executing' if len(running_tasks) == parallel_jobs: break self.session.commit() self.unlock() # if after the submission of jobs there are no jobs running, we should have finished all the queue. if die_when_finished and not repeat_execution and len(running_tasks) == 0: logger.info("Stopping task scheduler since there are no more jobs running.") break # THIRD: sleep the desired amount of time before re-checking time.sleep(sleep_time) # This is the only way to stop: you have to interrupt the scheduler except (KeyboardInterrupt, StopIteration): if hasattr(self, 'session'): self.unlock() logger.info("Stopping task scheduler due to user interrupt.") for task in running_tasks: logger.warn("Killing job '%s' that was still running.", self._format_log(task[1], task[2] if len(task) > 2 else None)) try: task[0].kill() except OSError as e: logger.error("Killing job '%s' was not successful: '%s'", self._format_log(task[1], task[2] if len(task) > 2 else None), e) self.stop_job(task[1]) # stop all jobs that are currently running or queued self.stop_jobs(job_ids) # check the result of the jobs that we have run, and return the list of failed jobs self.lock() jobs = self.get_jobs(finished_tasks) failures = [job.unique for job in jobs if job.status != 'success'] self.unlock() return sorted(failures)
python
def run_scheduler(self, parallel_jobs = 1, job_ids = None, sleep_time = 0.1, die_when_finished = False, no_log = False, nice = None, verbosity = 0): """Starts the scheduler, which is constantly checking for jobs that should be ran.""" running_tasks = [] finished_tasks = set() try: # keep the scheduler alive until every job is finished or the KeyboardInterrupt is caught while True: # Flag that might be set in some rare cases, and that prevents the scheduler to die repeat_execution = False # FIRST, try if there are finished processes for task_index in range(len(running_tasks)-1, -1, -1): task = running_tasks[task_index] process = task[0] if process.poll() is not None: # process ended job_id = task[1] array_id = task[2] if len(task) > 2 else None self.lock() job, array_job = self._job_and_array(job_id, array_id) if job is not None: jj = array_job if array_job is not None else job result = "%s (%d)" % (jj.status, jj.result) if jj.result is not None else "%s (?)" % jj.status if jj.status not in ('success', 'failure'): logger.error("Job '%s' (%s) finished with status '%s' instead of 'success' or 'failure'. Usually this means an internal error. Check your wrapper_script parameter!", job.name, self._format_log(job_id, array_id), jj.status) raise StopIteration("Job did not finish correctly.") logger.info("Job '%s' (%s) finished execution with result '%s'", job.name, self._format_log(job_id, array_id), result) self.unlock() finished_tasks.add(job_id) # in any case, remove the job from the list del running_tasks[task_index] # SECOND, check if new jobs can be submitted; THIS NEEDS TO LOCK THE DATABASE if len(running_tasks) < parallel_jobs: # get all unfinished jobs: self.lock() jobs = self.get_jobs(job_ids) # put all new jobs into the queue for job in jobs: if job.status == 'submitted' and job.queue_name == 'local': job.queue() # get all unfinished jobs that are submitted to the local queue unfinished_jobs = [job for job in jobs if job.status in ('queued', 'executing') and job.queue_name == 'local'] for job in unfinished_jobs: if job.array: # find array jobs that can run queued_array_jobs = [array_job for array_job in job.array if array_job.status == 'queued'] if not len(queued_array_jobs): job.finish(0, -1) repeat_execution = True else: # there are new array jobs to run for i in range(min(parallel_jobs - len(running_tasks), len(queued_array_jobs))): array_job = queued_array_jobs[i] # start a new job from the array process = self._run_parallel_job(job.unique, array_job.id, no_log=no_log, nice=nice, verbosity=verbosity) if process is None: continue running_tasks.append((process, job.unique, array_job.id)) # we here set the status to executing manually to avoid jobs to be run twice # e.g., if the loop is executed while the asynchronous job did not start yet array_job.status = 'executing' job.status = 'executing' if len(running_tasks) == parallel_jobs: break else: if job.status == 'queued': # start a new job process = self._run_parallel_job(job.unique, no_log=no_log, nice=nice, verbosity=verbosity) if process is None: continue running_tasks.append((process, job.unique)) # we here set the status to executing manually to avoid jobs to be run twice # e.g., if the loop is executed while the asynchronous job did not start yet job.status = 'executing' if len(running_tasks) == parallel_jobs: break self.session.commit() self.unlock() # if after the submission of jobs there are no jobs running, we should have finished all the queue. if die_when_finished and not repeat_execution and len(running_tasks) == 0: logger.info("Stopping task scheduler since there are no more jobs running.") break # THIRD: sleep the desired amount of time before re-checking time.sleep(sleep_time) # This is the only way to stop: you have to interrupt the scheduler except (KeyboardInterrupt, StopIteration): if hasattr(self, 'session'): self.unlock() logger.info("Stopping task scheduler due to user interrupt.") for task in running_tasks: logger.warn("Killing job '%s' that was still running.", self._format_log(task[1], task[2] if len(task) > 2 else None)) try: task[0].kill() except OSError as e: logger.error("Killing job '%s' was not successful: '%s'", self._format_log(task[1], task[2] if len(task) > 2 else None), e) self.stop_job(task[1]) # stop all jobs that are currently running or queued self.stop_jobs(job_ids) # check the result of the jobs that we have run, and return the list of failed jobs self.lock() jobs = self.get_jobs(finished_tasks) failures = [job.unique for job in jobs if job.status != 'success'] self.unlock() return sorted(failures)
[ "def", "run_scheduler", "(", "self", ",", "parallel_jobs", "=", "1", ",", "job_ids", "=", "None", ",", "sleep_time", "=", "0.1", ",", "die_when_finished", "=", "False", ",", "no_log", "=", "False", ",", "nice", "=", "None", ",", "verbosity", "=", "0", ...
Starts the scheduler, which is constantly checking for jobs that should be ran.
[ "Starts", "the", "scheduler", "which", "is", "constantly", "checking", "for", "jobs", "that", "should", "be", "ran", "." ]
train
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/local.py#L175-L286
mozilla/socorrolib
socorrolib/lib/prioritize.py
makeDependencyMap
def makeDependencyMap(aMap): """ create a dependency data structure as follows: - Each key in aMap represents an item that depends on each item in the iterable which is that key's value - Each Node represents an item which is a precursor to its parents and depends on its children Returns a map whose keys are the items described in aMap and whose values are the dependency (sub)tree for that item Thus, for aMap = {a:(b,c), b:(d,), c:[]}, returns {a:Node(a),b:Node(b),c:Node(c),d:Node(d)} where - Node(a) has no parent and children: Node(b) and Node(c) - Node(b) has parent: Node(a) and child: Node(d) - Node(c) has parent: Node(a) and no child - Node(d) which was not a key in aMap was created. It has parent: Node(b) and no child This map is used to find the precursors for a given item by using BottomUpVisitor on the Node associated with that item """ index = {} for i in aMap.keys(): iNode = index.get(i,None) if not iNode: iNode = Node(i) index[i] = iNode for c in aMap[i]: cNode = index.get(c,None) if not cNode: cNode = Node(c) index[c] = cNode iNode.addChild(cNode) return index
python
def makeDependencyMap(aMap): """ create a dependency data structure as follows: - Each key in aMap represents an item that depends on each item in the iterable which is that key's value - Each Node represents an item which is a precursor to its parents and depends on its children Returns a map whose keys are the items described in aMap and whose values are the dependency (sub)tree for that item Thus, for aMap = {a:(b,c), b:(d,), c:[]}, returns {a:Node(a),b:Node(b),c:Node(c),d:Node(d)} where - Node(a) has no parent and children: Node(b) and Node(c) - Node(b) has parent: Node(a) and child: Node(d) - Node(c) has parent: Node(a) and no child - Node(d) which was not a key in aMap was created. It has parent: Node(b) and no child This map is used to find the precursors for a given item by using BottomUpVisitor on the Node associated with that item """ index = {} for i in aMap.keys(): iNode = index.get(i,None) if not iNode: iNode = Node(i) index[i] = iNode for c in aMap[i]: cNode = index.get(c,None) if not cNode: cNode = Node(c) index[c] = cNode iNode.addChild(cNode) return index
[ "def", "makeDependencyMap", "(", "aMap", ")", ":", "index", "=", "{", "}", "for", "i", "in", "aMap", ".", "keys", "(", ")", ":", "iNode", "=", "index", ".", "get", "(", "i", ",", "None", ")", "if", "not", "iNode", ":", "iNode", "=", "Node", "("...
create a dependency data structure as follows: - Each key in aMap represents an item that depends on each item in the iterable which is that key's value - Each Node represents an item which is a precursor to its parents and depends on its children Returns a map whose keys are the items described in aMap and whose values are the dependency (sub)tree for that item Thus, for aMap = {a:(b,c), b:(d,), c:[]}, returns {a:Node(a),b:Node(b),c:Node(c),d:Node(d)} where - Node(a) has no parent and children: Node(b) and Node(c) - Node(b) has parent: Node(a) and child: Node(d) - Node(c) has parent: Node(a) and no child - Node(d) which was not a key in aMap was created. It has parent: Node(b) and no child This map is used to find the precursors for a given item by using BottomUpVisitor on the Node associated with that item
[ "create", "a", "dependency", "data", "structure", "as", "follows", ":", "-", "Each", "key", "in", "aMap", "represents", "an", "item", "that", "depends", "on", "each", "item", "in", "the", "iterable", "which", "is", "that", "key", "s", "value", "-", "Each...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/prioritize.py#L103-L129
mozilla/socorrolib
socorrolib/lib/prioritize.py
debugTreePrint
def debugTreePrint(node,pfx="->"): """Purely a debugging aid: Ascii-art picture of a tree descended from node""" print pfx,node.item for c in node.children: debugTreePrint(c," "+pfx)
python
def debugTreePrint(node,pfx="->"): """Purely a debugging aid: Ascii-art picture of a tree descended from node""" print pfx,node.item for c in node.children: debugTreePrint(c," "+pfx)
[ "def", "debugTreePrint", "(", "node", ",", "pfx", "=", "\"->\"", ")", ":", "print", "pfx", ",", "node", ".", "item", "for", "c", "in", "node", ".", "children", ":", "debugTreePrint", "(", "c", ",", "\" \"", "+", "pfx", ")" ]
Purely a debugging aid: Ascii-art picture of a tree descended from node
[ "Purely", "a", "debugging", "aid", ":", "Ascii", "-", "art", "picture", "of", "a", "tree", "descended", "from", "node" ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/prioritize.py#L131-L135
mozilla/socorrolib
socorrolib/lib/prioritize.py
dependencyOrder
def dependencyOrder(aMap, aList = None): """ Given descriptions of dependencies in aMap and an optional list of items in aList if not aList, aList = aMap.keys() Returns a list containing each element of aList and all its precursors so that every precursor of any element in the returned list is seen before that dependent element. If aMap contains cycles, something will happen. It may not be pretty... """ dependencyMap = makeDependencyMap(aMap) outputList = [] if not aList: aList = aMap.keys() items = [] v = BottomUpVisitor() for item in aList: try: v.visit(dependencyMap[item]) except KeyError: outputList.append(item) outputList = [x.item for x in v.history]+outputList return outputList
python
def dependencyOrder(aMap, aList = None): """ Given descriptions of dependencies in aMap and an optional list of items in aList if not aList, aList = aMap.keys() Returns a list containing each element of aList and all its precursors so that every precursor of any element in the returned list is seen before that dependent element. If aMap contains cycles, something will happen. It may not be pretty... """ dependencyMap = makeDependencyMap(aMap) outputList = [] if not aList: aList = aMap.keys() items = [] v = BottomUpVisitor() for item in aList: try: v.visit(dependencyMap[item]) except KeyError: outputList.append(item) outputList = [x.item for x in v.history]+outputList return outputList
[ "def", "dependencyOrder", "(", "aMap", ",", "aList", "=", "None", ")", ":", "dependencyMap", "=", "makeDependencyMap", "(", "aMap", ")", "outputList", "=", "[", "]", "if", "not", "aList", ":", "aList", "=", "aMap", ".", "keys", "(", ")", "items", "=", ...
Given descriptions of dependencies in aMap and an optional list of items in aList if not aList, aList = aMap.keys() Returns a list containing each element of aList and all its precursors so that every precursor of any element in the returned list is seen before that dependent element. If aMap contains cycles, something will happen. It may not be pretty...
[ "Given", "descriptions", "of", "dependencies", "in", "aMap", "and", "an", "optional", "list", "of", "items", "in", "aList", "if", "not", "aList", "aList", "=", "aMap", ".", "keys", "()", "Returns", "a", "list", "containing", "each", "element", "of", "aList...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/prioritize.py#L137-L157
mozilla/socorrolib
socorrolib/lib/prioritize.py
Node.addChild
def addChild(self,item): """ When you add a child to a Node, you are adding yourself as a parent to the child You cannot have the same node as a child more than once. If you add a Node, it is used. If you add a non-node, a new child Node is created. Thus: You cannot add a child as an item which is a Node. (You can, however, construct such a node, and add it as a child) """ if not isinstance(item,Node): item = Node(item) if item in self.children: return item self.children.append(item) item.parents.add(self) return item
python
def addChild(self,item): """ When you add a child to a Node, you are adding yourself as a parent to the child You cannot have the same node as a child more than once. If you add a Node, it is used. If you add a non-node, a new child Node is created. Thus: You cannot add a child as an item which is a Node. (You can, however, construct such a node, and add it as a child) """ if not isinstance(item,Node): item = Node(item) if item in self.children: return item self.children.append(item) item.parents.add(self) return item
[ "def", "addChild", "(", "self", ",", "item", ")", ":", "if", "not", "isinstance", "(", "item", ",", "Node", ")", ":", "item", "=", "Node", "(", "item", ")", "if", "item", "in", "self", ".", "children", ":", "return", "item", "self", ".", "children"...
When you add a child to a Node, you are adding yourself as a parent to the child You cannot have the same node as a child more than once. If you add a Node, it is used. If you add a non-node, a new child Node is created. Thus: You cannot add a child as an item which is a Node. (You can, however, construct such a node, and add it as a child)
[ "When", "you", "add", "a", "child", "to", "a", "Node", "you", "are", "adding", "yourself", "as", "a", "parent", "to", "the", "child", "You", "cannot", "have", "the", "same", "node", "as", "a", "child", "more", "than", "once", ".", "If", "you", "add",...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/prioritize.py#L17-L30
django-fluent/fluentcms-emailtemplates
fluentcms_emailtemplates/rendering.py
html_to_text
def html_to_text(html, base_url='', bodywidth=CONFIG_DEFAULT): """ Convert a HTML mesasge to plain text. """ def _patched_handle_charref(c): self = h charref = self.charref(c) if self.code or self.pre: charref = cgi.escape(charref) self.o(charref, 1) def _patched_handle_entityref(c): self = h entityref = self.entityref(c) if self.code or self.pre: # this expression was inversed. entityref = cgi.escape(entityref) self.o(entityref, 1) h = HTML2Text(baseurl=base_url, bodywidth=config.BODY_WIDTH if bodywidth is CONFIG_DEFAULT else bodywidth) h.handle_entityref = _patched_handle_entityref h.handle_charref = _patched_handle_charref return h.handle(html).rstrip()
python
def html_to_text(html, base_url='', bodywidth=CONFIG_DEFAULT): """ Convert a HTML mesasge to plain text. """ def _patched_handle_charref(c): self = h charref = self.charref(c) if self.code or self.pre: charref = cgi.escape(charref) self.o(charref, 1) def _patched_handle_entityref(c): self = h entityref = self.entityref(c) if self.code or self.pre: # this expression was inversed. entityref = cgi.escape(entityref) self.o(entityref, 1) h = HTML2Text(baseurl=base_url, bodywidth=config.BODY_WIDTH if bodywidth is CONFIG_DEFAULT else bodywidth) h.handle_entityref = _patched_handle_entityref h.handle_charref = _patched_handle_charref return h.handle(html).rstrip()
[ "def", "html_to_text", "(", "html", ",", "base_url", "=", "''", ",", "bodywidth", "=", "CONFIG_DEFAULT", ")", ":", "def", "_patched_handle_charref", "(", "c", ")", ":", "self", "=", "h", "charref", "=", "self", ".", "charref", "(", "c", ")", "if", "sel...
Convert a HTML mesasge to plain text.
[ "Convert", "a", "HTML", "mesasge", "to", "plain", "text", "." ]
train
https://github.com/django-fluent/fluentcms-emailtemplates/blob/29f032dab9f60d05db852d2a1adcbd16e18017d1/fluentcms_emailtemplates/rendering.py#L40-L61
django-fluent/fluentcms-emailtemplates
fluentcms_emailtemplates/rendering.py
replace_fields
def replace_fields(text, context, autoescape=None, errors='inline'): """ Allow simple field replacements, using the python str.format() syntax. When a string is passed that is tagged with :func:`~django.utils.safestring.mark_safe`, the context variables will be escaped before replacement. This function is used instead of lazily using Django templates, which can also the {% load %} stuff and {% include %} things. """ raise_errors = errors == 'raise' ignore_errors = errors == 'ignore' inline_errors = errors == 'inline' if autoescape is None: # When passing a real template context, use it's autoescape setting. # Otherwise, default to true. autoescape = getattr(context, 'autoescape', True) is_safe_string = isinstance(text, SafeData) if is_safe_string and autoescape: escape_function = conditional_escape escape_error = lambda x: u"<span style='color:red;'>{0}</span>".format(x) else: escape_function = force_text escape_error = six.text_type # Using str.format() may raise a KeyError when some fields are not provided. # Instead, simulate its' behavior to make sure all items that were found will be replaced. start = 0 new_text = [] for match in RE_FORMAT.finditer(text): new_text.append(text[start:match.start()]) start = match.end() # See if the element was found key = match.group('var') try: value = context[key] except KeyError: logger.debug("Missing key %s in email template %s!", key, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!missing {0}!!".format(key))) continue # See if further processing is needed. attr = match.group('attr') if attr: try: value = getattr(value, attr) except AttributeError: logger.debug("Missing attribute %s in email template %s!", attr, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!invalid attribute {0}.{1}!!".format(key, attr))) continue format = match.group('format') if format: try: template = u"{0" + format + "}" value = template.format(value) except ValueError: logger.debug("Invalid format %s in email template %s!", format, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!invalid format {0}!!".format(format))) continue else: value = escape_function(value) # Add the value new_text.append(value) # Add remainder, and join new_text.append(text[start:]) new_text = u"".join(new_text) # Convert back to safestring if it was passed that way if is_safe_string: return mark_safe(new_text) else: return new_text
python
def replace_fields(text, context, autoescape=None, errors='inline'): """ Allow simple field replacements, using the python str.format() syntax. When a string is passed that is tagged with :func:`~django.utils.safestring.mark_safe`, the context variables will be escaped before replacement. This function is used instead of lazily using Django templates, which can also the {% load %} stuff and {% include %} things. """ raise_errors = errors == 'raise' ignore_errors = errors == 'ignore' inline_errors = errors == 'inline' if autoescape is None: # When passing a real template context, use it's autoescape setting. # Otherwise, default to true. autoescape = getattr(context, 'autoescape', True) is_safe_string = isinstance(text, SafeData) if is_safe_string and autoescape: escape_function = conditional_escape escape_error = lambda x: u"<span style='color:red;'>{0}</span>".format(x) else: escape_function = force_text escape_error = six.text_type # Using str.format() may raise a KeyError when some fields are not provided. # Instead, simulate its' behavior to make sure all items that were found will be replaced. start = 0 new_text = [] for match in RE_FORMAT.finditer(text): new_text.append(text[start:match.start()]) start = match.end() # See if the element was found key = match.group('var') try: value = context[key] except KeyError: logger.debug("Missing key %s in email template %s!", key, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!missing {0}!!".format(key))) continue # See if further processing is needed. attr = match.group('attr') if attr: try: value = getattr(value, attr) except AttributeError: logger.debug("Missing attribute %s in email template %s!", attr, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!invalid attribute {0}.{1}!!".format(key, attr))) continue format = match.group('format') if format: try: template = u"{0" + format + "}" value = template.format(value) except ValueError: logger.debug("Invalid format %s in email template %s!", format, match.group(0)) if raise_errors: raise elif ignore_errors: new_text.append(match.group(0)) elif inline_errors: new_text.append(escape_error("!!invalid format {0}!!".format(format))) continue else: value = escape_function(value) # Add the value new_text.append(value) # Add remainder, and join new_text.append(text[start:]) new_text = u"".join(new_text) # Convert back to safestring if it was passed that way if is_safe_string: return mark_safe(new_text) else: return new_text
[ "def", "replace_fields", "(", "text", ",", "context", ",", "autoescape", "=", "None", ",", "errors", "=", "'inline'", ")", ":", "raise_errors", "=", "errors", "==", "'raise'", "ignore_errors", "=", "errors", "==", "'ignore'", "inline_errors", "=", "errors", ...
Allow simple field replacements, using the python str.format() syntax. When a string is passed that is tagged with :func:`~django.utils.safestring.mark_safe`, the context variables will be escaped before replacement. This function is used instead of lazily using Django templates, which can also the {% load %} stuff and {% include %} things.
[ "Allow", "simple", "field", "replacements", "using", "the", "python", "str", ".", "format", "()", "syntax", "." ]
train
https://github.com/django-fluent/fluentcms-emailtemplates/blob/29f032dab9f60d05db852d2a1adcbd16e18017d1/fluentcms_emailtemplates/rendering.py#L65-L157
django-fluent/fluentcms-emailtemplates
fluentcms_emailtemplates/rendering.py
render_email_template
def render_email_template(email_template, base_url, extra_context=None, user=None): """ Render the email template. :type email_template: fluentcms_emailtemplates.models.EmailTemplate :type base_url: str :type extra_context: dict | None :type user: django.contrib.auth.models.User :return: The subject, html and text content :rtype: fluentcms_emailtemplates.rendering.EmailContent """ dummy_request = _get_dummy_request(base_url, user) context_user = user or extra_context.get('user', None) context_data = { 'request': dummy_request, 'email_template': email_template, 'email_format': 'html', 'user': user, # Common replacements 'first_name': context_user.first_name if context_user else '', 'last_name': context_user.last_name if context_user else '', 'full_name': context_user.get_full_name() if context_user else '', 'email': context_user.email if context_user else '', 'site': extra_context.get('site', None) or { 'domain': dummy_request.get_host(), 'name': dummy_request.get_host(), } } if extra_context: context_data.update(extra_context) # Make sure the templates and i18n are identical to the emailtemplate language. # This is the same as the current Django language, unless the object was explicitly fetched in a different language. with switch_language(email_template): # Get the body content context_data['body'] = _render_email_placeholder(dummy_request, email_template, base_url, context_data) context_data['subject'] = subject = replace_fields(email_template.subject, context_data, autoescape=False) # Merge that with the HTML templates. context = RequestContext(dummy_request).flatten() context.update(context_data) html = render_to_string(email_template.get_html_templates(), context, request=dummy_request) html, url_changes = _make_links_absolute(html, base_url) # Render the Text template. # Disable auto escaping context['email_format'] = 'text' text = render_to_string(email_template.get_text_templates(), context, request=dummy_request) text = _make_text_links_absolute(text, url_changes) return EmailContent(subject, text, html)
python
def render_email_template(email_template, base_url, extra_context=None, user=None): """ Render the email template. :type email_template: fluentcms_emailtemplates.models.EmailTemplate :type base_url: str :type extra_context: dict | None :type user: django.contrib.auth.models.User :return: The subject, html and text content :rtype: fluentcms_emailtemplates.rendering.EmailContent """ dummy_request = _get_dummy_request(base_url, user) context_user = user or extra_context.get('user', None) context_data = { 'request': dummy_request, 'email_template': email_template, 'email_format': 'html', 'user': user, # Common replacements 'first_name': context_user.first_name if context_user else '', 'last_name': context_user.last_name if context_user else '', 'full_name': context_user.get_full_name() if context_user else '', 'email': context_user.email if context_user else '', 'site': extra_context.get('site', None) or { 'domain': dummy_request.get_host(), 'name': dummy_request.get_host(), } } if extra_context: context_data.update(extra_context) # Make sure the templates and i18n are identical to the emailtemplate language. # This is the same as the current Django language, unless the object was explicitly fetched in a different language. with switch_language(email_template): # Get the body content context_data['body'] = _render_email_placeholder(dummy_request, email_template, base_url, context_data) context_data['subject'] = subject = replace_fields(email_template.subject, context_data, autoescape=False) # Merge that with the HTML templates. context = RequestContext(dummy_request).flatten() context.update(context_data) html = render_to_string(email_template.get_html_templates(), context, request=dummy_request) html, url_changes = _make_links_absolute(html, base_url) # Render the Text template. # Disable auto escaping context['email_format'] = 'text' text = render_to_string(email_template.get_text_templates(), context, request=dummy_request) text = _make_text_links_absolute(text, url_changes) return EmailContent(subject, text, html)
[ "def", "render_email_template", "(", "email_template", ",", "base_url", ",", "extra_context", "=", "None", ",", "user", "=", "None", ")", ":", "dummy_request", "=", "_get_dummy_request", "(", "base_url", ",", "user", ")", "context_user", "=", "user", "or", "ex...
Render the email template. :type email_template: fluentcms_emailtemplates.models.EmailTemplate :type base_url: str :type extra_context: dict | None :type user: django.contrib.auth.models.User :return: The subject, html and text content :rtype: fluentcms_emailtemplates.rendering.EmailContent
[ "Render", "the", "email", "template", "." ]
train
https://github.com/django-fluent/fluentcms-emailtemplates/blob/29f032dab9f60d05db852d2a1adcbd16e18017d1/fluentcms_emailtemplates/rendering.py#L160-L211
django-fluent/fluentcms-emailtemplates
fluentcms_emailtemplates/rendering.py
_get_dummy_request
def _get_dummy_request(base_url, user): """ Create a dummy request. Use the ``base_url``, so code can use ``request.build_absolute_uri()`` to create absolute URLs. """ split_url = urlsplit(base_url) is_secure = split_url[0] == 'https' dummy_request = RequestFactory(HTTP_HOST=split_url[1]).get('/', secure=is_secure) dummy_request.is_secure = lambda: is_secure dummy_request.user = user or AnonymousUser() dummy_request.site = None # Workaround for wagtail.contrib.settings.context_processors return dummy_request
python
def _get_dummy_request(base_url, user): """ Create a dummy request. Use the ``base_url``, so code can use ``request.build_absolute_uri()`` to create absolute URLs. """ split_url = urlsplit(base_url) is_secure = split_url[0] == 'https' dummy_request = RequestFactory(HTTP_HOST=split_url[1]).get('/', secure=is_secure) dummy_request.is_secure = lambda: is_secure dummy_request.user = user or AnonymousUser() dummy_request.site = None # Workaround for wagtail.contrib.settings.context_processors return dummy_request
[ "def", "_get_dummy_request", "(", "base_url", ",", "user", ")", ":", "split_url", "=", "urlsplit", "(", "base_url", ")", "is_secure", "=", "split_url", "[", "0", "]", "==", "'https'", "dummy_request", "=", "RequestFactory", "(", "HTTP_HOST", "=", "split_url", ...
Create a dummy request. Use the ``base_url``, so code can use ``request.build_absolute_uri()`` to create absolute URLs.
[ "Create", "a", "dummy", "request", ".", "Use", "the", "base_url", "so", "code", "can", "use", "request", ".", "build_absolute_uri", "()", "to", "create", "absolute", "URLs", "." ]
train
https://github.com/django-fluent/fluentcms-emailtemplates/blob/29f032dab9f60d05db852d2a1adcbd16e18017d1/fluentcms_emailtemplates/rendering.py#L214-L225
django-fluent/fluentcms-emailtemplates
fluentcms_emailtemplates/rendering.py
_render_email_placeholder
def _render_email_placeholder(request, email_template, base_url, context): """ Internal rendering of the placeholder/contentitems. This a simple variation of render_placeholder(), making is possible to render both a HTML and text item in a single call. Caching is currently not implemented. :rtype: fluentcms_emailtemplates.rendering.EmailBodyContent """ placeholder = email_template.contents items = placeholder.get_content_items(email_template) if not items: # NOTES: performs query # There are no items, fetch the fallback language. language_code = fc_appsettings.FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE items = placeholder.get_content_items(email_template, limit_parent_language=False).translated(language_code) html_fragments = [] text_fragments = [] for instance in items: plugin = instance.plugin html_part = _render_html(plugin, request, instance, context) text_part = _render_text(plugin, request, instance, context, base_url) html_fragments.append(html_part) text_fragments.append(text_part) html_body = u"".join(html_fragments) text_body = u"".join(text_fragments) return EmailBodyContent(text_body, html_body)
python
def _render_email_placeholder(request, email_template, base_url, context): """ Internal rendering of the placeholder/contentitems. This a simple variation of render_placeholder(), making is possible to render both a HTML and text item in a single call. Caching is currently not implemented. :rtype: fluentcms_emailtemplates.rendering.EmailBodyContent """ placeholder = email_template.contents items = placeholder.get_content_items(email_template) if not items: # NOTES: performs query # There are no items, fetch the fallback language. language_code = fc_appsettings.FLUENT_CONTENTS_DEFAULT_LANGUAGE_CODE items = placeholder.get_content_items(email_template, limit_parent_language=False).translated(language_code) html_fragments = [] text_fragments = [] for instance in items: plugin = instance.plugin html_part = _render_html(plugin, request, instance, context) text_part = _render_text(plugin, request, instance, context, base_url) html_fragments.append(html_part) text_fragments.append(text_part) html_body = u"".join(html_fragments) text_body = u"".join(text_fragments) return EmailBodyContent(text_body, html_body)
[ "def", "_render_email_placeholder", "(", "request", ",", "email_template", ",", "base_url", ",", "context", ")", ":", "placeholder", "=", "email_template", ".", "contents", "items", "=", "placeholder", ".", "get_content_items", "(", "email_template", ")", "if", "n...
Internal rendering of the placeholder/contentitems. This a simple variation of render_placeholder(), making is possible to render both a HTML and text item in a single call. Caching is currently not implemented. :rtype: fluentcms_emailtemplates.rendering.EmailBodyContent
[ "Internal", "rendering", "of", "the", "placeholder", "/", "contentitems", "." ]
train
https://github.com/django-fluent/fluentcms-emailtemplates/blob/29f032dab9f60d05db852d2a1adcbd16e18017d1/fluentcms_emailtemplates/rendering.py#L229-L260
django-fluent/fluentcms-emailtemplates
fluentcms_emailtemplates/rendering.py
_make_links_absolute
def _make_links_absolute(html, base_url): """ Make all links absolute. """ url_changes = [] soup = BeautifulSoup(html) for tag in soup.find_all('a', href=True): old = tag['href'] fixed = urljoin(base_url, old) if old != fixed: url_changes.append((old, fixed)) tag['href'] = fixed for tag in soup.find_all('img', src=True): old = tag['src'] fixed = urljoin(base_url, old) if old != fixed: url_changes.append((old, fixed)) tag['src'] = fixed return mark_safe(six.text_type(soup)), url_changes
python
def _make_links_absolute(html, base_url): """ Make all links absolute. """ url_changes = [] soup = BeautifulSoup(html) for tag in soup.find_all('a', href=True): old = tag['href'] fixed = urljoin(base_url, old) if old != fixed: url_changes.append((old, fixed)) tag['href'] = fixed for tag in soup.find_all('img', src=True): old = tag['src'] fixed = urljoin(base_url, old) if old != fixed: url_changes.append((old, fixed)) tag['src'] = fixed return mark_safe(six.text_type(soup)), url_changes
[ "def", "_make_links_absolute", "(", "html", ",", "base_url", ")", ":", "url_changes", "=", "[", "]", "soup", "=", "BeautifulSoup", "(", "html", ")", "for", "tag", "in", "soup", ".", "find_all", "(", "'a'", ",", "href", "=", "True", ")", ":", "old", "...
Make all links absolute.
[ "Make", "all", "links", "absolute", "." ]
train
https://github.com/django-fluent/fluentcms-emailtemplates/blob/29f032dab9f60d05db852d2a1adcbd16e18017d1/fluentcms_emailtemplates/rendering.py#L281-L302
mozilla/socorrolib
socorrolib/lib/datetimeutil.py
string_to_datetime
def string_to_datetime(date): """Return a datetime.datetime instance with tzinfo. I.e. a timezone aware datetime instance. Acceptable formats for input are: * 2012-01-10T12:13:14 * 2012-01-10T12:13:14.98765 * 2012-01-10T12:13:14.98765+03:00 * 2012-01-10T12:13:14.98765Z * 2012-01-10 12:13:14 * 2012-01-10 12:13:14.98765 * 2012-01-10 12:13:14.98765+03:00 * 2012-01-10 12:13:14.98765Z But also, some more odd ones (probably because of legacy): * 2012-01-10 * ['2012-01-10', '12:13:14'] """ if date is None: return None if isinstance(date, datetime.datetime): if not date.tzinfo: date = date.replace(tzinfo=UTC) return date if isinstance(date, list): date = 'T'.join(date) if isinstance(date, basestring): if len(date) <= len('2000-01-01'): return (datetime.datetime .strptime(date, '%Y-%m-%d') .replace(tzinfo=UTC)) else: try: parsed = isodate.parse_datetime(date) except ValueError: # e.g. '2012-01-10 12:13:14Z' becomes '2012-01-10T12:13:14Z' parsed = isodate.parse_datetime( re.sub('(\d)\s(\d)', r'\1T\2', date) ) if not parsed.tzinfo: parsed = parsed.replace(tzinfo=UTC) return parsed raise ValueError("date not a parsable string")
python
def string_to_datetime(date): """Return a datetime.datetime instance with tzinfo. I.e. a timezone aware datetime instance. Acceptable formats for input are: * 2012-01-10T12:13:14 * 2012-01-10T12:13:14.98765 * 2012-01-10T12:13:14.98765+03:00 * 2012-01-10T12:13:14.98765Z * 2012-01-10 12:13:14 * 2012-01-10 12:13:14.98765 * 2012-01-10 12:13:14.98765+03:00 * 2012-01-10 12:13:14.98765Z But also, some more odd ones (probably because of legacy): * 2012-01-10 * ['2012-01-10', '12:13:14'] """ if date is None: return None if isinstance(date, datetime.datetime): if not date.tzinfo: date = date.replace(tzinfo=UTC) return date if isinstance(date, list): date = 'T'.join(date) if isinstance(date, basestring): if len(date) <= len('2000-01-01'): return (datetime.datetime .strptime(date, '%Y-%m-%d') .replace(tzinfo=UTC)) else: try: parsed = isodate.parse_datetime(date) except ValueError: # e.g. '2012-01-10 12:13:14Z' becomes '2012-01-10T12:13:14Z' parsed = isodate.parse_datetime( re.sub('(\d)\s(\d)', r'\1T\2', date) ) if not parsed.tzinfo: parsed = parsed.replace(tzinfo=UTC) return parsed raise ValueError("date not a parsable string")
[ "def", "string_to_datetime", "(", "date", ")", ":", "if", "date", "is", "None", ":", "return", "None", "if", "isinstance", "(", "date", ",", "datetime", ".", "datetime", ")", ":", "if", "not", "date", ".", "tzinfo", ":", "date", "=", "date", ".", "re...
Return a datetime.datetime instance with tzinfo. I.e. a timezone aware datetime instance. Acceptable formats for input are: * 2012-01-10T12:13:14 * 2012-01-10T12:13:14.98765 * 2012-01-10T12:13:14.98765+03:00 * 2012-01-10T12:13:14.98765Z * 2012-01-10 12:13:14 * 2012-01-10 12:13:14.98765 * 2012-01-10 12:13:14.98765+03:00 * 2012-01-10 12:13:14.98765Z But also, some more odd ones (probably because of legacy): * 2012-01-10 * ['2012-01-10', '12:13:14']
[ "Return", "a", "datetime", ".", "datetime", "instance", "with", "tzinfo", ".", "I", ".", "e", ".", "a", "timezone", "aware", "datetime", "instance", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/datetimeutil.py#L46-L91
mozilla/socorrolib
socorrolib/lib/datetimeutil.py
date_to_string
def date_to_string(date): """Transform a date or datetime object into a string and return it. Examples: >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC)) '2012-01-03T12:23:34+00:00' >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34)) '2012-01-03T12:23:34' >>> date_to_string(datetime.date(2012, 1, 3)) '2012-01-03' """ if isinstance(date, datetime.datetime): # Create an ISO 8601 datetime string date_str = date.strftime('%Y-%m-%dT%H:%M:%S') tzstr = date.strftime('%z') if tzstr: # Yes, this is ugly. And no, I haven't found a better way to have a # truly ISO 8601 datetime with timezone in Python. date_str = '%s%s:%s' % (date_str, tzstr[0:3], tzstr[3:5]) elif isinstance(date, datetime.date): # Create an ISO 8601 date string date_str = date.strftime('%Y-%m-%d') else: raise TypeError('Argument is not a date or datetime. ') return date_str
python
def date_to_string(date): """Transform a date or datetime object into a string and return it. Examples: >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC)) '2012-01-03T12:23:34+00:00' >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34)) '2012-01-03T12:23:34' >>> date_to_string(datetime.date(2012, 1, 3)) '2012-01-03' """ if isinstance(date, datetime.datetime): # Create an ISO 8601 datetime string date_str = date.strftime('%Y-%m-%dT%H:%M:%S') tzstr = date.strftime('%z') if tzstr: # Yes, this is ugly. And no, I haven't found a better way to have a # truly ISO 8601 datetime with timezone in Python. date_str = '%s%s:%s' % (date_str, tzstr[0:3], tzstr[3:5]) elif isinstance(date, datetime.date): # Create an ISO 8601 date string date_str = date.strftime('%Y-%m-%d') else: raise TypeError('Argument is not a date or datetime. ') return date_str
[ "def", "date_to_string", "(", "date", ")", ":", "if", "isinstance", "(", "date", ",", "datetime", ".", "datetime", ")", ":", "# Create an ISO 8601 datetime string", "date_str", "=", "date", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S'", ")", "tzstr", "=", "date", ...
Transform a date or datetime object into a string and return it. Examples: >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34, tzinfo=UTC)) '2012-01-03T12:23:34+00:00' >>> date_to_string(datetime.datetime(2012, 1, 3, 12, 23, 34)) '2012-01-03T12:23:34' >>> date_to_string(datetime.date(2012, 1, 3)) '2012-01-03'
[ "Transform", "a", "date", "or", "datetime", "object", "into", "a", "string", "and", "return", "it", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/datetimeutil.py#L94-L120
mozilla/socorrolib
socorrolib/lib/datetimeutil.py
uuid_to_date
def uuid_to_date(uuid, century='20'): """Return a date created from the last 6 digits of a uuid. Arguments: uuid The unique identifier to parse. century The first 2 digits to assume in the year. Default is '20'. Examples: >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201') datetime.date(2012, 2, 1) >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201', '18') datetime.date(1812, 2, 1) """ day = int(uuid[-2:]) month = int(uuid[-4:-2]) year = int('%s%s' % (century, uuid[-6:-4])) return datetime.date(year=year, month=month, day=day)
python
def uuid_to_date(uuid, century='20'): """Return a date created from the last 6 digits of a uuid. Arguments: uuid The unique identifier to parse. century The first 2 digits to assume in the year. Default is '20'. Examples: >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201') datetime.date(2012, 2, 1) >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201', '18') datetime.date(1812, 2, 1) """ day = int(uuid[-2:]) month = int(uuid[-4:-2]) year = int('%s%s' % (century, uuid[-6:-4])) return datetime.date(year=year, month=month, day=day)
[ "def", "uuid_to_date", "(", "uuid", ",", "century", "=", "'20'", ")", ":", "day", "=", "int", "(", "uuid", "[", "-", "2", ":", "]", ")", "month", "=", "int", "(", "uuid", "[", "-", "4", ":", "-", "2", "]", ")", "year", "=", "int", "(", "'%s...
Return a date created from the last 6 digits of a uuid. Arguments: uuid The unique identifier to parse. century The first 2 digits to assume in the year. Default is '20'. Examples: >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201') datetime.date(2012, 2, 1) >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201', '18') datetime.date(1812, 2, 1)
[ "Return", "a", "date", "created", "from", "the", "last", "6", "digits", "of", "a", "uuid", "." ]
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/datetimeutil.py#L123-L142
etcher-be/epab
epab/utils/_gitignore.py
add_to_gitignore
def add_to_gitignore(line: str): """ Adds a line to the .gitignore file of the repo Args: line: line to add """ if not line.endswith('\n'): line = f'{line}\n' if GIT_IGNORE.exists(): if line in GIT_IGNORE.read_text(encoding='utf8'): return previous_content = GIT_IGNORE.read_text(encoding='utf8') else: previous_content = '' GIT_IGNORE.write_text(previous_content + line, encoding='utf8')
python
def add_to_gitignore(line: str): """ Adds a line to the .gitignore file of the repo Args: line: line to add """ if not line.endswith('\n'): line = f'{line}\n' if GIT_IGNORE.exists(): if line in GIT_IGNORE.read_text(encoding='utf8'): return previous_content = GIT_IGNORE.read_text(encoding='utf8') else: previous_content = '' GIT_IGNORE.write_text(previous_content + line, encoding='utf8')
[ "def", "add_to_gitignore", "(", "line", ":", "str", ")", ":", "if", "not", "line", ".", "endswith", "(", "'\\n'", ")", ":", "line", "=", "f'{line}\\n'", "if", "GIT_IGNORE", ".", "exists", "(", ")", ":", "if", "line", "in", "GIT_IGNORE", ".", "read_text...
Adds a line to the .gitignore file of the repo Args: line: line to add
[ "Adds", "a", "line", "to", "the", ".", "gitignore", "file", "of", "the", "repo" ]
train
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_gitignore.py#L11-L26
ishxiao/aps
aps/hardware_info.py
hardware_info
def hardware_info(): """ Returns basic hardware information about the computer. Gives actual number of CPU's in the machine, even when hyperthreading is turned on. Returns ------- info : dict Dictionary containing cpu and memory information. """ try: if sys.platform == 'darwin': out = _mac_hardware_info() elif sys.platform == 'win32': out = _win_hardware_info() elif sys.platform in ['linux', 'linux2']: out = _linux_hardware_info() else: out = {} except: return {} else: return out
python
def hardware_info(): """ Returns basic hardware information about the computer. Gives actual number of CPU's in the machine, even when hyperthreading is turned on. Returns ------- info : dict Dictionary containing cpu and memory information. """ try: if sys.platform == 'darwin': out = _mac_hardware_info() elif sys.platform == 'win32': out = _win_hardware_info() elif sys.platform in ['linux', 'linux2']: out = _linux_hardware_info() else: out = {} except: return {} else: return out
[ "def", "hardware_info", "(", ")", ":", "try", ":", "if", "sys", ".", "platform", "==", "'darwin'", ":", "out", "=", "_mac_hardware_info", "(", ")", "elif", "sys", ".", "platform", "==", "'win32'", ":", "out", "=", "_win_hardware_info", "(", ")", "elif", ...
Returns basic hardware information about the computer. Gives actual number of CPU's in the machine, even when hyperthreading is turned on. Returns ------- info : dict Dictionary containing cpu and memory information.
[ "Returns", "basic", "hardware", "information", "about", "the", "computer", "." ]
train
https://github.com/ishxiao/aps/blob/faa4329b26eed257a0ca45df57561eff1a3dd133/aps/hardware_info.py#L74-L99
BD2KOnFHIR/i2b2model
i2b2model/scripts/removefacts.py
clear_i2b2_tables
def clear_i2b2_tables(tables: I2B2Tables, uploadid: int) -> None: """ Remove all entries in the i2b2 tables for uploadid. :param tables: :param uploadid: :return: """ # This is a static function to support the removefacts operation print("Deleted {} patient_dimension records" .format(PatientDimension.delete_upload_id(tables, uploadid))) print("Deleted {} patient_mapping records" .format(PatientMapping.delete_upload_id(tables, uploadid))) print("Deleted {} observation_fact records" .format(ObservationFact.delete_upload_id(tables, uploadid))) print("Deleted {} visit_dimension records" .format(VisitDimension.delete_upload_id(tables, uploadid))) print("Deleted {} encounter_mapping records" .format(EncounterMapping.delete_upload_id(tables, uploadid)))
python
def clear_i2b2_tables(tables: I2B2Tables, uploadid: int) -> None: """ Remove all entries in the i2b2 tables for uploadid. :param tables: :param uploadid: :return: """ # This is a static function to support the removefacts operation print("Deleted {} patient_dimension records" .format(PatientDimension.delete_upload_id(tables, uploadid))) print("Deleted {} patient_mapping records" .format(PatientMapping.delete_upload_id(tables, uploadid))) print("Deleted {} observation_fact records" .format(ObservationFact.delete_upload_id(tables, uploadid))) print("Deleted {} visit_dimension records" .format(VisitDimension.delete_upload_id(tables, uploadid))) print("Deleted {} encounter_mapping records" .format(EncounterMapping.delete_upload_id(tables, uploadid)))
[ "def", "clear_i2b2_tables", "(", "tables", ":", "I2B2Tables", ",", "uploadid", ":", "int", ")", "->", "None", ":", "# This is a static function to support the removefacts operation", "print", "(", "\"Deleted {} patient_dimension records\"", ".", "format", "(", "PatientDimen...
Remove all entries in the i2b2 tables for uploadid. :param tables: :param uploadid: :return:
[ "Remove", "all", "entries", "in", "the", "i2b2", "tables", "for", "uploadid", ".", ":", "param", "tables", ":", ":", "param", "uploadid", ":", ":", "return", ":" ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/scripts/removefacts.py#L19-L36
BD2KOnFHIR/i2b2model
i2b2model/scripts/removefacts.py
create_parser
def create_parser() -> FileAwareParser: """ Create a command line parser :return: parser """ parser = FileAwareParser(description="Clear data from FHIR observation fact table", prog="removefacts", use_defaults=False) parser.add_argument("-ss", "--sourcesystem", metavar="SOURCE SYSTEM CODE", help="Sourcesystem code") parser.add_argument("-u", "--uploadid", metavar="UPLOAD IDENTIFIER", help="Upload identifer -- uniquely identifies this batch", type=int, nargs='*') add_connection_args(parser, strong_config_file=False) parser.add_argument("-p", "--testprefix", metavar="SS PREFIX", help=f"Sourcesystem_cd prefix for test suite functions (Default: {default_test_prefix}") parser.add_argument("--testlist", help="List leftover test suite entries", action="store_true") parser.add_argument("--removetestlist", help="Remove leftover test suite entries", action="store_true") return parser
python
def create_parser() -> FileAwareParser: """ Create a command line parser :return: parser """ parser = FileAwareParser(description="Clear data from FHIR observation fact table", prog="removefacts", use_defaults=False) parser.add_argument("-ss", "--sourcesystem", metavar="SOURCE SYSTEM CODE", help="Sourcesystem code") parser.add_argument("-u", "--uploadid", metavar="UPLOAD IDENTIFIER", help="Upload identifer -- uniquely identifies this batch", type=int, nargs='*') add_connection_args(parser, strong_config_file=False) parser.add_argument("-p", "--testprefix", metavar="SS PREFIX", help=f"Sourcesystem_cd prefix for test suite functions (Default: {default_test_prefix}") parser.add_argument("--testlist", help="List leftover test suite entries", action="store_true") parser.add_argument("--removetestlist", help="Remove leftover test suite entries", action="store_true") return parser
[ "def", "create_parser", "(", ")", "->", "FileAwareParser", ":", "parser", "=", "FileAwareParser", "(", "description", "=", "\"Clear data from FHIR observation fact table\"", ",", "prog", "=", "\"removefacts\"", ",", "use_defaults", "=", "False", ")", "parser", ".", ...
Create a command line parser :return: parser
[ "Create", "a", "command", "line", "parser", ":", "return", ":", "parser" ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/scripts/removefacts.py#L52-L68
BD2KOnFHIR/i2b2model
i2b2model/scripts/removefacts.py
remove_facts
def remove_facts(argv: List[str]) -> bool: """ Convert a set of FHIR resources into their corresponding i2b2 counterparts. :param argv: Command line arguments. See: create_parser for details :return: """ parser = create_parser() local_opts = parser.parse_args(argv) # Pull everything from the actual command line if not (local_opts.uploadid or local_opts.sourcesystem or local_opts.testlist or local_opts.removetestlist): parser.error("Option must be one of: -ss, -u, --testlist, --removetestlist") if (local_opts.testlist or local_opts.removetestlist) and (local_opts.uploadid or local_opts.sourcesystem): parser.error("Cannot combine -ss or -u option with testlist options. Use -p to specify ss prefix") opts, _ = parser.parse_known_args(parser.decode_file_args(argv)) # Include the options file if opts is None: return False opts.uploadid = local_opts.uploadid opts.sourcesystem = local_opts.sourcesystem process_parsed_args(opts, parser.error) # Update CRC and Meta table connection information if opts.uploadid: for uploadid in opts.uploadid: print("---> Removing entries for id {}".format(uploadid)) clear_i2b2_tables(I2B2Tables(opts), uploadid) if opts.sourcesystem: print("---> Removing entries for sourcesystem_cd {}".format(opts.sourcesystem)) clear_i2b2_sourcesystems(I2B2Tables(opts), opts.sourcesystem) if opts.testlist: opts.testprefix = opts.testprefix if (opts and opts.testprefix) else default_test_prefix print(f"---> Listing orphan test elements for sourcesystem_cd starting with {opts.testprefix}") list_test_artifacts(opts) if opts.removetestlist: opts.testprefix = opts.testprefix if (opts and opts.testprefix) else default_test_prefix print(f"---> Removing orphan test elements for sourcesystem_cd starting with {opts.testprefix}") remove_test_artifacts(opts) return True
python
def remove_facts(argv: List[str]) -> bool: """ Convert a set of FHIR resources into their corresponding i2b2 counterparts. :param argv: Command line arguments. See: create_parser for details :return: """ parser = create_parser() local_opts = parser.parse_args(argv) # Pull everything from the actual command line if not (local_opts.uploadid or local_opts.sourcesystem or local_opts.testlist or local_opts.removetestlist): parser.error("Option must be one of: -ss, -u, --testlist, --removetestlist") if (local_opts.testlist or local_opts.removetestlist) and (local_opts.uploadid or local_opts.sourcesystem): parser.error("Cannot combine -ss or -u option with testlist options. Use -p to specify ss prefix") opts, _ = parser.parse_known_args(parser.decode_file_args(argv)) # Include the options file if opts is None: return False opts.uploadid = local_opts.uploadid opts.sourcesystem = local_opts.sourcesystem process_parsed_args(opts, parser.error) # Update CRC and Meta table connection information if opts.uploadid: for uploadid in opts.uploadid: print("---> Removing entries for id {}".format(uploadid)) clear_i2b2_tables(I2B2Tables(opts), uploadid) if opts.sourcesystem: print("---> Removing entries for sourcesystem_cd {}".format(opts.sourcesystem)) clear_i2b2_sourcesystems(I2B2Tables(opts), opts.sourcesystem) if opts.testlist: opts.testprefix = opts.testprefix if (opts and opts.testprefix) else default_test_prefix print(f"---> Listing orphan test elements for sourcesystem_cd starting with {opts.testprefix}") list_test_artifacts(opts) if opts.removetestlist: opts.testprefix = opts.testprefix if (opts and opts.testprefix) else default_test_prefix print(f"---> Removing orphan test elements for sourcesystem_cd starting with {opts.testprefix}") remove_test_artifacts(opts) return True
[ "def", "remove_facts", "(", "argv", ":", "List", "[", "str", "]", ")", "->", "bool", ":", "parser", "=", "create_parser", "(", ")", "local_opts", "=", "parser", ".", "parse_args", "(", "argv", ")", "# Pull everything from the actual command line", "if", "not",...
Convert a set of FHIR resources into their corresponding i2b2 counterparts. :param argv: Command line arguments. See: create_parser for details :return:
[ "Convert", "a", "set", "of", "FHIR", "resources", "into", "their", "corresponding", "i2b2", "counterparts", ".", ":", "param", "argv", ":", "Command", "line", "arguments", ".", "See", ":", "create_parser", "for", "details", ":", "return", ":" ]
train
https://github.com/BD2KOnFHIR/i2b2model/blob/9d49bb53b0733dd83ab5b716014865e270a3c903/i2b2model/scripts/removefacts.py#L119-L157
duniter/duniter-python-api
duniterpy/api/bma/wot.py
add
async def add(client: Client, identity_signed_raw: str) -> ClientResponse: """ POST identity raw document :param client: Client to connect to the api :param identity_signed_raw: Identity raw document :return: """ return await client.post(MODULE + '/add', {'identity': identity_signed_raw}, rtype=RESPONSE_AIOHTTP)
python
async def add(client: Client, identity_signed_raw: str) -> ClientResponse: """ POST identity raw document :param client: Client to connect to the api :param identity_signed_raw: Identity raw document :return: """ return await client.post(MODULE + '/add', {'identity': identity_signed_raw}, rtype=RESPONSE_AIOHTTP)
[ "async", "def", "add", "(", "client", ":", "Client", ",", "identity_signed_raw", ":", "str", ")", "->", "ClientResponse", ":", "return", "await", "client", ".", "post", "(", "MODULE", "+", "'/add'", ",", "{", "'identity'", ":", "identity_signed_raw", "}", ...
POST identity raw document :param client: Client to connect to the api :param identity_signed_raw: Identity raw document :return:
[ "POST", "identity", "raw", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/wot.py#L317-L325
duniter/duniter-python-api
duniterpy/api/bma/wot.py
certify
async def certify(client: Client, certification_signed_raw: str) -> ClientResponse: """ POST certification raw document :param client: Client to connect to the api :param certification_signed_raw: Certification raw document :return: """ return await client.post(MODULE + '/certify', {'cert': certification_signed_raw}, rtype=RESPONSE_AIOHTTP)
python
async def certify(client: Client, certification_signed_raw: str) -> ClientResponse: """ POST certification raw document :param client: Client to connect to the api :param certification_signed_raw: Certification raw document :return: """ return await client.post(MODULE + '/certify', {'cert': certification_signed_raw}, rtype=RESPONSE_AIOHTTP)
[ "async", "def", "certify", "(", "client", ":", "Client", ",", "certification_signed_raw", ":", "str", ")", "->", "ClientResponse", ":", "return", "await", "client", ".", "post", "(", "MODULE", "+", "'/certify'", ",", "{", "'cert'", ":", "certification_signed_r...
POST certification raw document :param client: Client to connect to the api :param certification_signed_raw: Certification raw document :return:
[ "POST", "certification", "raw", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/wot.py#L328-L336
duniter/duniter-python-api
duniterpy/api/bma/wot.py
revoke
async def revoke(client: Client, revocation_signed_raw: str) -> ClientResponse: """ POST revocation document :param client: Client to connect to the api :param revocation_signed_raw: Certification raw document :return: """ return await client.post(MODULE + '/revoke', {'revocation': revocation_signed_raw}, rtype=RESPONSE_AIOHTTP)
python
async def revoke(client: Client, revocation_signed_raw: str) -> ClientResponse: """ POST revocation document :param client: Client to connect to the api :param revocation_signed_raw: Certification raw document :return: """ return await client.post(MODULE + '/revoke', {'revocation': revocation_signed_raw}, rtype=RESPONSE_AIOHTTP)
[ "async", "def", "revoke", "(", "client", ":", "Client", ",", "revocation_signed_raw", ":", "str", ")", "->", "ClientResponse", ":", "return", "await", "client", ".", "post", "(", "MODULE", "+", "'/revoke'", ",", "{", "'revocation'", ":", "revocation_signed_raw...
POST revocation document :param client: Client to connect to the api :param revocation_signed_raw: Certification raw document :return:
[ "POST", "revocation", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/wot.py#L339-L347
duniter/duniter-python-api
duniterpy/api/bma/wot.py
lookup
async def lookup(client: Client, search: str) -> dict: """ GET UID/Public key data :param client: Client to connect to the api :param search: UID or public key :return: """ return await client.get(MODULE + '/lookup/%s' % search, schema=LOOKUP_SCHEMA)
python
async def lookup(client: Client, search: str) -> dict: """ GET UID/Public key data :param client: Client to connect to the api :param search: UID or public key :return: """ return await client.get(MODULE + '/lookup/%s' % search, schema=LOOKUP_SCHEMA)
[ "async", "def", "lookup", "(", "client", ":", "Client", ",", "search", ":", "str", ")", "->", "dict", ":", "return", "await", "client", ".", "get", "(", "MODULE", "+", "'/lookup/%s'", "%", "search", ",", "schema", "=", "LOOKUP_SCHEMA", ")" ]
GET UID/Public key data :param client: Client to connect to the api :param search: UID or public key :return:
[ "GET", "UID", "/", "Public", "key", "data" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/wot.py#L350-L358
duniter/duniter-python-api
duniterpy/api/bma/wot.py
certifiers_of
async def certifiers_of(client: Client, search: str) -> dict: """ GET UID/Public key certifiers :param client: Client to connect to the api :param search: UID or public key :return: """ return await client.get(MODULE + '/certifiers-of/%s' % search, schema=CERTIFICATIONS_SCHEMA)
python
async def certifiers_of(client: Client, search: str) -> dict: """ GET UID/Public key certifiers :param client: Client to connect to the api :param search: UID or public key :return: """ return await client.get(MODULE + '/certifiers-of/%s' % search, schema=CERTIFICATIONS_SCHEMA)
[ "async", "def", "certifiers_of", "(", "client", ":", "Client", ",", "search", ":", "str", ")", "->", "dict", ":", "return", "await", "client", ".", "get", "(", "MODULE", "+", "'/certifiers-of/%s'", "%", "search", ",", "schema", "=", "CERTIFICATIONS_SCHEMA", ...
GET UID/Public key certifiers :param client: Client to connect to the api :param search: UID or public key :return:
[ "GET", "UID", "/", "Public", "key", "certifiers" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/wot.py#L361-L369