repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
pndurette/gTTS | gtts/tokenizer/tokenizer_cases.py | tone_marks | python | def tone_marks():
return RegexBuilder(
pattern_args=symbols.TONE_MARKS,
pattern_func=lambda x: u"(?<={}).".format(x)).regex | Keep tone-modifying punctuation by matching following character.
Assumes the `tone_marks` pre-processor was run for cases where there might
not be any space after a tone-modifying punctuation mark. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L5-L13 | null | # -*- coding: utf-8 -*-
from gtts.tokenizer import RegexBuilder, symbols
def period_comma():
"""Period and comma case.
Match if not preceded by ".<letter>" and only if followed by space.
Won't cut in the middle/after dotted abbreviations; won't cut numbers.
Note:
Won't match if a dotted abbreviation ends a sentence.
Note:
Won't match the end of a sentence if not followed by a space.
"""
return RegexBuilder(
pattern_args=symbols.PERIOD_COMMA,
pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex
def colon():
"""Colon case.
Match a colon ":" only if not preceeded by a digit.
Mainly to prevent a cut in the middle of time notations e.g. 10:01
"""
return RegexBuilder(
pattern_args=symbols.COLON,
pattern_func=lambda x: r"(?<!\d){}".format(x)).regex
def other_punctuation():
"""Match other punctuation.
Match other punctuation to split on; punctuation that naturally
inserts a break in speech.
"""
punc = ''.join(
set(symbols.ALL_PUNC) -
set(symbols.TONE_MARKS) -
set(symbols.PERIOD_COMMA) -
set(symbols.COLON))
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex
def legacy_all_punctuation(): # pragma: no cover b/c tested but Coveralls: ¯\_(ツ)_/¯
"""Match all punctuation.
Use as only tokenizer case to mimic gTTS 1.x tokenization.
"""
punc = symbols.ALL_PUNC
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex
|
pndurette/gTTS | gtts/tokenizer/tokenizer_cases.py | period_comma | python | def period_comma():
return RegexBuilder(
pattern_args=symbols.PERIOD_COMMA,
pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex | Period and comma case.
Match if not preceded by ".<letter>" and only if followed by space.
Won't cut in the middle/after dotted abbreviations; won't cut numbers.
Note:
Won't match if a dotted abbreviation ends a sentence.
Note:
Won't match the end of a sentence if not followed by a space. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L16-L31 | null | # -*- coding: utf-8 -*-
from gtts.tokenizer import RegexBuilder, symbols
def tone_marks():
"""Keep tone-modifying punctuation by matching following character.
Assumes the `tone_marks` pre-processor was run for cases where there might
not be any space after a tone-modifying punctuation mark.
"""
return RegexBuilder(
pattern_args=symbols.TONE_MARKS,
pattern_func=lambda x: u"(?<={}).".format(x)).regex
def colon():
"""Colon case.
Match a colon ":" only if not preceeded by a digit.
Mainly to prevent a cut in the middle of time notations e.g. 10:01
"""
return RegexBuilder(
pattern_args=symbols.COLON,
pattern_func=lambda x: r"(?<!\d){}".format(x)).regex
def other_punctuation():
"""Match other punctuation.
Match other punctuation to split on; punctuation that naturally
inserts a break in speech.
"""
punc = ''.join(
set(symbols.ALL_PUNC) -
set(symbols.TONE_MARKS) -
set(symbols.PERIOD_COMMA) -
set(symbols.COLON))
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex
def legacy_all_punctuation(): # pragma: no cover b/c tested but Coveralls: ¯\_(ツ)_/¯
"""Match all punctuation.
Use as only tokenizer case to mimic gTTS 1.x tokenization.
"""
punc = symbols.ALL_PUNC
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex
|
pndurette/gTTS | gtts/tokenizer/tokenizer_cases.py | colon | python | def colon():
return RegexBuilder(
pattern_args=symbols.COLON,
pattern_func=lambda x: r"(?<!\d){}".format(x)).regex | Colon case.
Match a colon ":" only if not preceeded by a digit.
Mainly to prevent a cut in the middle of time notations e.g. 10:01 | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L34-L43 | null | # -*- coding: utf-8 -*-
from gtts.tokenizer import RegexBuilder, symbols
def tone_marks():
"""Keep tone-modifying punctuation by matching following character.
Assumes the `tone_marks` pre-processor was run for cases where there might
not be any space after a tone-modifying punctuation mark.
"""
return RegexBuilder(
pattern_args=symbols.TONE_MARKS,
pattern_func=lambda x: u"(?<={}).".format(x)).regex
def period_comma():
"""Period and comma case.
Match if not preceded by ".<letter>" and only if followed by space.
Won't cut in the middle/after dotted abbreviations; won't cut numbers.
Note:
Won't match if a dotted abbreviation ends a sentence.
Note:
Won't match the end of a sentence if not followed by a space.
"""
return RegexBuilder(
pattern_args=symbols.PERIOD_COMMA,
pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex
def other_punctuation():
"""Match other punctuation.
Match other punctuation to split on; punctuation that naturally
inserts a break in speech.
"""
punc = ''.join(
set(symbols.ALL_PUNC) -
set(symbols.TONE_MARKS) -
set(symbols.PERIOD_COMMA) -
set(symbols.COLON))
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex
def legacy_all_punctuation(): # pragma: no cover b/c tested but Coveralls: ¯\_(ツ)_/¯
"""Match all punctuation.
Use as only tokenizer case to mimic gTTS 1.x tokenization.
"""
punc = symbols.ALL_PUNC
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex
|
pndurette/gTTS | gtts/tokenizer/tokenizer_cases.py | other_punctuation | python | def other_punctuation():
punc = ''.join(
set(symbols.ALL_PUNC) -
set(symbols.TONE_MARKS) -
set(symbols.PERIOD_COMMA) -
set(symbols.COLON))
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex | Match other punctuation.
Match other punctuation to split on; punctuation that naturally
inserts a break in speech. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L46-L60 | null | # -*- coding: utf-8 -*-
from gtts.tokenizer import RegexBuilder, symbols
def tone_marks():
"""Keep tone-modifying punctuation by matching following character.
Assumes the `tone_marks` pre-processor was run for cases where there might
not be any space after a tone-modifying punctuation mark.
"""
return RegexBuilder(
pattern_args=symbols.TONE_MARKS,
pattern_func=lambda x: u"(?<={}).".format(x)).regex
def period_comma():
"""Period and comma case.
Match if not preceded by ".<letter>" and only if followed by space.
Won't cut in the middle/after dotted abbreviations; won't cut numbers.
Note:
Won't match if a dotted abbreviation ends a sentence.
Note:
Won't match the end of a sentence if not followed by a space.
"""
return RegexBuilder(
pattern_args=symbols.PERIOD_COMMA,
pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex
def colon():
"""Colon case.
Match a colon ":" only if not preceeded by a digit.
Mainly to prevent a cut in the middle of time notations e.g. 10:01
"""
return RegexBuilder(
pattern_args=symbols.COLON,
pattern_func=lambda x: r"(?<!\d){}".format(x)).regex
def legacy_all_punctuation(): # pragma: no cover b/c tested but Coveralls: ¯\_(ツ)_/¯
"""Match all punctuation.
Use as only tokenizer case to mimic gTTS 1.x tokenization.
"""
punc = symbols.ALL_PUNC
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex
|
pndurette/gTTS | gtts/tokenizer/tokenizer_cases.py | legacy_all_punctuation | python | def legacy_all_punctuation(): # pragma: no cover b/c tested but Coveralls: ¯\_(ツ)_/¯
punc = symbols.ALL_PUNC
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex | Match all punctuation.
Use as only tokenizer case to mimic gTTS 1.x tokenization. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/tokenizer_cases.py#L63-L71 | null | # -*- coding: utf-8 -*-
from gtts.tokenizer import RegexBuilder, symbols
def tone_marks():
"""Keep tone-modifying punctuation by matching following character.
Assumes the `tone_marks` pre-processor was run for cases where there might
not be any space after a tone-modifying punctuation mark.
"""
return RegexBuilder(
pattern_args=symbols.TONE_MARKS,
pattern_func=lambda x: u"(?<={}).".format(x)).regex
def period_comma():
"""Period and comma case.
Match if not preceded by ".<letter>" and only if followed by space.
Won't cut in the middle/after dotted abbreviations; won't cut numbers.
Note:
Won't match if a dotted abbreviation ends a sentence.
Note:
Won't match the end of a sentence if not followed by a space.
"""
return RegexBuilder(
pattern_args=symbols.PERIOD_COMMA,
pattern_func=lambda x: r"(?<!\.[a-z]){} ".format(x)).regex
def colon():
"""Colon case.
Match a colon ":" only if not preceeded by a digit.
Mainly to prevent a cut in the middle of time notations e.g. 10:01
"""
return RegexBuilder(
pattern_args=symbols.COLON,
pattern_func=lambda x: r"(?<!\d){}".format(x)).regex
def other_punctuation():
"""Match other punctuation.
Match other punctuation to split on; punctuation that naturally
inserts a break in speech.
"""
punc = ''.join(
set(symbols.ALL_PUNC) -
set(symbols.TONE_MARKS) -
set(symbols.PERIOD_COMMA) -
set(symbols.COLON))
return RegexBuilder(
pattern_args=punc,
pattern_func=lambda x: u"{}".format(x)).regex
|
pndurette/gTTS | gtts/cli.py | validate_text | python | def validate_text(ctx, param, text):
if not text and 'file' not in ctx.params:
# No <text> and no <file>
raise click.BadParameter(
"<text> or -f/--file <file> required")
if text and 'file' in ctx.params:
# Both <text> and <file>
raise click.BadParameter(
"<text> and -f/--file <file> can't be used together")
return text | Validation callback for the <text> argument.
Ensures <text> (arg) and <file> (opt) are mutually exclusive | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/cli.py#L45-L57 | null | # -*- coding: utf-8 -*-
from gtts import gTTS, gTTSError, __version__
from gtts.lang import tts_langs
import click
import logging
import logging.config
# Click settings
CONTEXT_SETTINGS = {
'help_option_names': ['-h', '--help']
}
# Logger settings
LOGGER_SETTINGS = {
'version': 1,
'formatters': {
'default': {
'format': '%(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default'
}
},
'loggers': {
'gtts': {
'handlers': ['console'],
'level': 'WARNING'
}
}
}
# Logger
logging.config.dictConfig(LOGGER_SETTINGS)
log = logging.getLogger('gtts')
def sys_encoding():
"""Charset to use for --file <path>|- (stdin)"""
return 'utf8'
def validate_lang(ctx, param, lang):
"""Validation callback for the <lang> option.
Ensures <lang> is a supported language unless the <nocheck> flag is set
"""
if ctx.params['nocheck']:
return lang
try:
if lang not in tts_langs():
raise click.UsageError(
"'%s' not in list of supported languages.\n"
"Use --all to list languages or "
"add --nocheck to disable language check." % lang)
else:
# The language is valid.
# No need to let gTTS re-validate.
ctx.params['nocheck'] = True
except RuntimeError as e:
# Only case where the <nocheck> flag can be False
# Non-fatal. gTTS will try to re-validate.
log.debug(str(e), exc_info=True)
return lang
def print_languages(ctx, param, value):
"""Callback for <all> flag.
Prints formatted sorted list of supported languages and exits
"""
if not value or ctx.resilient_parsing:
return
try:
langs = tts_langs()
langs_str_list = sorted("{}: {}".format(k, langs[k]) for k in langs)
click.echo(' ' + '\n '.join(langs_str_list))
except RuntimeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.ClickException("Couldn't fetch language list.")
ctx.exit()
def set_debug(ctx, param, debug):
"""Callback for <debug> flag.
Sets logger level to DEBUG
"""
if debug:
log.setLevel(logging.DEBUG)
return
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('text', metavar='<text>', required=False, callback=validate_text)
@click.option(
'-f',
'--file',
metavar='<file>',
# For py2.7/unicode. If encoding not None Click uses io.open
type=click.File(encoding=sys_encoding()),
help="Read from <file> instead of <text>.")
@click.option(
'-o',
'--output',
metavar='<file>',
type=click.File(mode='wb'),
help="Write to <file> instead of stdout.")
@click.option(
'-s',
'--slow',
default=False,
is_flag=True,
help="Read more slowly.")
@click.option(
'-l',
'--lang',
metavar='<lang>',
default='en',
show_default=True,
callback=validate_lang,
help="IETF language tag. Language to speak in. List documented tags with --all.")
@click.option(
'--nocheck',
default=False,
is_flag=True,
is_eager=True, # Prioritize <nocheck> to ensure it gets set before <lang>
help="Disable strict IETF language tag checking. Allow undocumented tags.")
@click.option(
'--all',
default=False,
is_flag=True,
is_eager=True,
expose_value=False,
callback=print_languages,
help="Print all documented available IETF language tags and exit.")
@click.option(
'--debug',
default=False,
is_flag=True,
is_eager=True, # Prioritize <debug> to see debug logs of callbacks
expose_value=False,
callback=set_debug,
help="Show debug information.")
@click.version_option(version=__version__)
def tts_cli(text, file, output, slow, lang, nocheck):
""" Read <text> to mp3 format using Google Translate's Text-to-Speech API
(set <text> or --file <file> to - for standard input)
"""
# stdin for <text>
if text == '-':
text = click.get_text_stream('stdin').read()
# stdout (when no <output>)
if not output:
output = click.get_binary_stream('stdout')
# <file> input (stdin on '-' is handled by click.File)
if file:
try:
text = file.read()
except UnicodeDecodeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.FileError(
file.name,
"<file> must be encoded using '%s'." %
sys_encoding())
# TTS
try:
tts = gTTS(
text=text,
lang=lang,
slow=slow,
lang_check=not nocheck)
tts.write_to_fp(output)
except (ValueError, AssertionError) as e:
raise click.UsageError(str(e))
except gTTSError as e:
raise click.ClickException(str(e))
|
pndurette/gTTS | gtts/cli.py | validate_lang | python | def validate_lang(ctx, param, lang):
if ctx.params['nocheck']:
return lang
try:
if lang not in tts_langs():
raise click.UsageError(
"'%s' not in list of supported languages.\n"
"Use --all to list languages or "
"add --nocheck to disable language check." % lang)
else:
# The language is valid.
# No need to let gTTS re-validate.
ctx.params['nocheck'] = True
except RuntimeError as e:
# Only case where the <nocheck> flag can be False
# Non-fatal. gTTS will try to re-validate.
log.debug(str(e), exc_info=True)
return lang | Validation callback for the <lang> option.
Ensures <lang> is a supported language unless the <nocheck> flag is set | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/cli.py#L60-L82 | [
"def tts_langs():\n \"\"\"Languages Google Text-to-Speech supports.\n\n Returns:\n dict: A dictionnary of the type `{ '<lang>': '<name>'}`\n\n Where `<lang>` is an IETF language tag such as `en` or `pt-br`,\n and `<name>` is the full English name of the language, such as\n `English` or `Portuguese (Brazil)`.\n\n The dictionnary returned combines languages from two origins:\n\n - Languages fetched automatically from Google Translate\n - Languages that are undocumented variations that were observed to work and\n present different dialects or accents.\n\n \"\"\"\n try:\n langs = dict()\n langs.update(_fetch_langs())\n langs.update(_extra_langs())\n log.debug(\"langs: %s\", langs)\n return langs\n except Exception as e:\n raise RuntimeError(\"Unable to get language list: %s\" % str(e))\n"
] | # -*- coding: utf-8 -*-
from gtts import gTTS, gTTSError, __version__
from gtts.lang import tts_langs
import click
import logging
import logging.config
# Click settings
CONTEXT_SETTINGS = {
'help_option_names': ['-h', '--help']
}
# Logger settings
LOGGER_SETTINGS = {
'version': 1,
'formatters': {
'default': {
'format': '%(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default'
}
},
'loggers': {
'gtts': {
'handlers': ['console'],
'level': 'WARNING'
}
}
}
# Logger
logging.config.dictConfig(LOGGER_SETTINGS)
log = logging.getLogger('gtts')
def sys_encoding():
"""Charset to use for --file <path>|- (stdin)"""
return 'utf8'
def validate_text(ctx, param, text):
"""Validation callback for the <text> argument.
Ensures <text> (arg) and <file> (opt) are mutually exclusive
"""
if not text and 'file' not in ctx.params:
# No <text> and no <file>
raise click.BadParameter(
"<text> or -f/--file <file> required")
if text and 'file' in ctx.params:
# Both <text> and <file>
raise click.BadParameter(
"<text> and -f/--file <file> can't be used together")
return text
def print_languages(ctx, param, value):
"""Callback for <all> flag.
Prints formatted sorted list of supported languages and exits
"""
if not value or ctx.resilient_parsing:
return
try:
langs = tts_langs()
langs_str_list = sorted("{}: {}".format(k, langs[k]) for k in langs)
click.echo(' ' + '\n '.join(langs_str_list))
except RuntimeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.ClickException("Couldn't fetch language list.")
ctx.exit()
def set_debug(ctx, param, debug):
"""Callback for <debug> flag.
Sets logger level to DEBUG
"""
if debug:
log.setLevel(logging.DEBUG)
return
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('text', metavar='<text>', required=False, callback=validate_text)
@click.option(
'-f',
'--file',
metavar='<file>',
# For py2.7/unicode. If encoding not None Click uses io.open
type=click.File(encoding=sys_encoding()),
help="Read from <file> instead of <text>.")
@click.option(
'-o',
'--output',
metavar='<file>',
type=click.File(mode='wb'),
help="Write to <file> instead of stdout.")
@click.option(
'-s',
'--slow',
default=False,
is_flag=True,
help="Read more slowly.")
@click.option(
'-l',
'--lang',
metavar='<lang>',
default='en',
show_default=True,
callback=validate_lang,
help="IETF language tag. Language to speak in. List documented tags with --all.")
@click.option(
'--nocheck',
default=False,
is_flag=True,
is_eager=True, # Prioritize <nocheck> to ensure it gets set before <lang>
help="Disable strict IETF language tag checking. Allow undocumented tags.")
@click.option(
'--all',
default=False,
is_flag=True,
is_eager=True,
expose_value=False,
callback=print_languages,
help="Print all documented available IETF language tags and exit.")
@click.option(
'--debug',
default=False,
is_flag=True,
is_eager=True, # Prioritize <debug> to see debug logs of callbacks
expose_value=False,
callback=set_debug,
help="Show debug information.")
@click.version_option(version=__version__)
def tts_cli(text, file, output, slow, lang, nocheck):
""" Read <text> to mp3 format using Google Translate's Text-to-Speech API
(set <text> or --file <file> to - for standard input)
"""
# stdin for <text>
if text == '-':
text = click.get_text_stream('stdin').read()
# stdout (when no <output>)
if not output:
output = click.get_binary_stream('stdout')
# <file> input (stdin on '-' is handled by click.File)
if file:
try:
text = file.read()
except UnicodeDecodeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.FileError(
file.name,
"<file> must be encoded using '%s'." %
sys_encoding())
# TTS
try:
tts = gTTS(
text=text,
lang=lang,
slow=slow,
lang_check=not nocheck)
tts.write_to_fp(output)
except (ValueError, AssertionError) as e:
raise click.UsageError(str(e))
except gTTSError as e:
raise click.ClickException(str(e))
|
pndurette/gTTS | gtts/cli.py | print_languages | python | def print_languages(ctx, param, value):
if not value or ctx.resilient_parsing:
return
try:
langs = tts_langs()
langs_str_list = sorted("{}: {}".format(k, langs[k]) for k in langs)
click.echo(' ' + '\n '.join(langs_str_list))
except RuntimeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.ClickException("Couldn't fetch language list.")
ctx.exit() | Callback for <all> flag.
Prints formatted sorted list of supported languages and exits | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/cli.py#L85-L98 | [
"def tts_langs():\n \"\"\"Languages Google Text-to-Speech supports.\n\n Returns:\n dict: A dictionnary of the type `{ '<lang>': '<name>'}`\n\n Where `<lang>` is an IETF language tag such as `en` or `pt-br`,\n and `<name>` is the full English name of the language, such as\n `English` or `Portuguese (Brazil)`.\n\n The dictionnary returned combines languages from two origins:\n\n - Languages fetched automatically from Google Translate\n - Languages that are undocumented variations that were observed to work and\n present different dialects or accents.\n\n \"\"\"\n try:\n langs = dict()\n langs.update(_fetch_langs())\n langs.update(_extra_langs())\n log.debug(\"langs: %s\", langs)\n return langs\n except Exception as e:\n raise RuntimeError(\"Unable to get language list: %s\" % str(e))\n"
] | # -*- coding: utf-8 -*-
from gtts import gTTS, gTTSError, __version__
from gtts.lang import tts_langs
import click
import logging
import logging.config
# Click settings
CONTEXT_SETTINGS = {
'help_option_names': ['-h', '--help']
}
# Logger settings
LOGGER_SETTINGS = {
'version': 1,
'formatters': {
'default': {
'format': '%(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default'
}
},
'loggers': {
'gtts': {
'handlers': ['console'],
'level': 'WARNING'
}
}
}
# Logger
logging.config.dictConfig(LOGGER_SETTINGS)
log = logging.getLogger('gtts')
def sys_encoding():
"""Charset to use for --file <path>|- (stdin)"""
return 'utf8'
def validate_text(ctx, param, text):
"""Validation callback for the <text> argument.
Ensures <text> (arg) and <file> (opt) are mutually exclusive
"""
if not text and 'file' not in ctx.params:
# No <text> and no <file>
raise click.BadParameter(
"<text> or -f/--file <file> required")
if text and 'file' in ctx.params:
# Both <text> and <file>
raise click.BadParameter(
"<text> and -f/--file <file> can't be used together")
return text
def validate_lang(ctx, param, lang):
"""Validation callback for the <lang> option.
Ensures <lang> is a supported language unless the <nocheck> flag is set
"""
if ctx.params['nocheck']:
return lang
try:
if lang not in tts_langs():
raise click.UsageError(
"'%s' not in list of supported languages.\n"
"Use --all to list languages or "
"add --nocheck to disable language check." % lang)
else:
# The language is valid.
# No need to let gTTS re-validate.
ctx.params['nocheck'] = True
except RuntimeError as e:
# Only case where the <nocheck> flag can be False
# Non-fatal. gTTS will try to re-validate.
log.debug(str(e), exc_info=True)
return lang
def set_debug(ctx, param, debug):
"""Callback for <debug> flag.
Sets logger level to DEBUG
"""
if debug:
log.setLevel(logging.DEBUG)
return
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('text', metavar='<text>', required=False, callback=validate_text)
@click.option(
'-f',
'--file',
metavar='<file>',
# For py2.7/unicode. If encoding not None Click uses io.open
type=click.File(encoding=sys_encoding()),
help="Read from <file> instead of <text>.")
@click.option(
'-o',
'--output',
metavar='<file>',
type=click.File(mode='wb'),
help="Write to <file> instead of stdout.")
@click.option(
'-s',
'--slow',
default=False,
is_flag=True,
help="Read more slowly.")
@click.option(
'-l',
'--lang',
metavar='<lang>',
default='en',
show_default=True,
callback=validate_lang,
help="IETF language tag. Language to speak in. List documented tags with --all.")
@click.option(
'--nocheck',
default=False,
is_flag=True,
is_eager=True, # Prioritize <nocheck> to ensure it gets set before <lang>
help="Disable strict IETF language tag checking. Allow undocumented tags.")
@click.option(
'--all',
default=False,
is_flag=True,
is_eager=True,
expose_value=False,
callback=print_languages,
help="Print all documented available IETF language tags and exit.")
@click.option(
'--debug',
default=False,
is_flag=True,
is_eager=True, # Prioritize <debug> to see debug logs of callbacks
expose_value=False,
callback=set_debug,
help="Show debug information.")
@click.version_option(version=__version__)
def tts_cli(text, file, output, slow, lang, nocheck):
""" Read <text> to mp3 format using Google Translate's Text-to-Speech API
(set <text> or --file <file> to - for standard input)
"""
# stdin for <text>
if text == '-':
text = click.get_text_stream('stdin').read()
# stdout (when no <output>)
if not output:
output = click.get_binary_stream('stdout')
# <file> input (stdin on '-' is handled by click.File)
if file:
try:
text = file.read()
except UnicodeDecodeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.FileError(
file.name,
"<file> must be encoded using '%s'." %
sys_encoding())
# TTS
try:
tts = gTTS(
text=text,
lang=lang,
slow=slow,
lang_check=not nocheck)
tts.write_to_fp(output)
except (ValueError, AssertionError) as e:
raise click.UsageError(str(e))
except gTTSError as e:
raise click.ClickException(str(e))
|
pndurette/gTTS | gtts/cli.py | tts_cli | python | def tts_cli(text, file, output, slow, lang, nocheck):
# stdin for <text>
if text == '-':
text = click.get_text_stream('stdin').read()
# stdout (when no <output>)
if not output:
output = click.get_binary_stream('stdout')
# <file> input (stdin on '-' is handled by click.File)
if file:
try:
text = file.read()
except UnicodeDecodeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.FileError(
file.name,
"<file> must be encoded using '%s'." %
sys_encoding())
# TTS
try:
tts = gTTS(
text=text,
lang=lang,
slow=slow,
lang_check=not nocheck)
tts.write_to_fp(output)
except (ValueError, AssertionError) as e:
raise click.UsageError(str(e))
except gTTSError as e:
raise click.ClickException(str(e)) | Read <text> to mp3 format using Google Translate's Text-to-Speech API
(set <text> or --file <file> to - for standard input) | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/cli.py#L162-L197 | [
"def sys_encoding():\n \"\"\"Charset to use for --file <path>|- (stdin)\"\"\"\n return 'utf8'\n",
"def write_to_fp(self, fp):\n \"\"\"Do the TTS API request and write bytes to a file-like object.\n\n Args:\n fp (file object): Any file-like object to write the ``mp3`` to.\n\n Raises:\n :class:`gTTSError`: When there's an error with the API request.\n TypeError: When ``fp`` is not a file-like object that takes bytes.\n\n \"\"\"\n # When disabling ssl verify in requests (for proxies and firewalls),\n # urllib3 prints an insecure warning on stdout. We disable that.\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n text_parts = self._tokenize(self.text)\n log.debug(\"text_parts: %i\", len(text_parts))\n assert text_parts, 'No text to send to TTS API'\n\n for idx, part in enumerate(text_parts):\n try:\n # Calculate token\n part_tk = self.token.calculate_token(part)\n except requests.exceptions.RequestException as e: # pragma: no cover\n log.debug(str(e), exc_info=True)\n raise gTTSError(\n \"Connection error during token calculation: %s\" %\n str(e))\n\n payload = {'ie': 'UTF-8',\n 'q': part,\n 'tl': self.lang,\n 'ttsspeed': self.speed,\n 'total': len(text_parts),\n 'idx': idx,\n 'client': 'tw-ob',\n 'textlen': _len(part),\n 'tk': part_tk}\n\n log.debug(\"payload-%i: %s\", idx, payload)\n\n try:\n # Request\n r = requests.get(self.GOOGLE_TTS_URL,\n params=payload,\n headers=self.GOOGLE_TTS_HEADERS,\n proxies=urllib.request.getproxies(),\n verify=False)\n\n log.debug(\"headers-%i: %s\", idx, r.request.headers)\n log.debug(\"url-%i: %s\", idx, r.request.url)\n log.debug(\"status-%i: %s\", idx, r.status_code)\n\n r.raise_for_status()\n except requests.exceptions.HTTPError:\n # Request successful, bad response\n raise gTTSError(tts=self, response=r)\n except requests.exceptions.RequestException as e: # pragma: no cover\n # Request failed\n raise gTTSError(str(e))\n\n try:\n # Write\n for chunk in r.iter_content(chunk_size=1024):\n fp.write(chunk)\n log.debug(\"part-%i written to %s\", idx, fp)\n except (AttributeError, TypeError) as e:\n raise 
TypeError(\n \"'fp' is not a file-like object or it does not take bytes: %s\" %\n str(e))\n"
] | # -*- coding: utf-8 -*-
from gtts import gTTS, gTTSError, __version__
from gtts.lang import tts_langs
import click
import logging
import logging.config
# Click settings
CONTEXT_SETTINGS = {
'help_option_names': ['-h', '--help']
}
# Logger settings
LOGGER_SETTINGS = {
'version': 1,
'formatters': {
'default': {
'format': '%(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'default'
}
},
'loggers': {
'gtts': {
'handlers': ['console'],
'level': 'WARNING'
}
}
}
# Logger
logging.config.dictConfig(LOGGER_SETTINGS)
log = logging.getLogger('gtts')
def sys_encoding():
"""Charset to use for --file <path>|- (stdin)"""
return 'utf8'
def validate_text(ctx, param, text):
"""Validation callback for the <text> argument.
Ensures <text> (arg) and <file> (opt) are mutually exclusive
"""
if not text and 'file' not in ctx.params:
# No <text> and no <file>
raise click.BadParameter(
"<text> or -f/--file <file> required")
if text and 'file' in ctx.params:
# Both <text> and <file>
raise click.BadParameter(
"<text> and -f/--file <file> can't be used together")
return text
def validate_lang(ctx, param, lang):
"""Validation callback for the <lang> option.
Ensures <lang> is a supported language unless the <nocheck> flag is set
"""
if ctx.params['nocheck']:
return lang
try:
if lang not in tts_langs():
raise click.UsageError(
"'%s' not in list of supported languages.\n"
"Use --all to list languages or "
"add --nocheck to disable language check." % lang)
else:
# The language is valid.
# No need to let gTTS re-validate.
ctx.params['nocheck'] = True
except RuntimeError as e:
# Only case where the <nocheck> flag can be False
# Non-fatal. gTTS will try to re-validate.
log.debug(str(e), exc_info=True)
return lang
def print_languages(ctx, param, value):
"""Callback for <all> flag.
Prints formatted sorted list of supported languages and exits
"""
if not value or ctx.resilient_parsing:
return
try:
langs = tts_langs()
langs_str_list = sorted("{}: {}".format(k, langs[k]) for k in langs)
click.echo(' ' + '\n '.join(langs_str_list))
except RuntimeError as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise click.ClickException("Couldn't fetch language list.")
ctx.exit()
def set_debug(ctx, param, debug):
"""Callback for <debug> flag.
Sets logger level to DEBUG
"""
if debug:
log.setLevel(logging.DEBUG)
return
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('text', metavar='<text>', required=False, callback=validate_text)
@click.option(
'-f',
'--file',
metavar='<file>',
# For py2.7/unicode. If encoding not None Click uses io.open
type=click.File(encoding=sys_encoding()),
help="Read from <file> instead of <text>.")
@click.option(
'-o',
'--output',
metavar='<file>',
type=click.File(mode='wb'),
help="Write to <file> instead of stdout.")
@click.option(
'-s',
'--slow',
default=False,
is_flag=True,
help="Read more slowly.")
@click.option(
'-l',
'--lang',
metavar='<lang>',
default='en',
show_default=True,
callback=validate_lang,
help="IETF language tag. Language to speak in. List documented tags with --all.")
@click.option(
'--nocheck',
default=False,
is_flag=True,
is_eager=True, # Prioritize <nocheck> to ensure it gets set before <lang>
help="Disable strict IETF language tag checking. Allow undocumented tags.")
@click.option(
'--all',
default=False,
is_flag=True,
is_eager=True,
expose_value=False,
callback=print_languages,
help="Print all documented available IETF language tags and exit.")
@click.option(
'--debug',
default=False,
is_flag=True,
is_eager=True, # Prioritize <debug> to see debug logs of callbacks
expose_value=False,
callback=set_debug,
help="Show debug information.")
@click.version_option(version=__version__)
|
pndurette/gTTS | gtts/tts.py | gTTS.write_to_fp | python | def write_to_fp(self, fp):
# When disabling ssl verify in requests (for proxies and firewalls),
# urllib3 prints an insecure warning on stdout. We disable that.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
text_parts = self._tokenize(self.text)
log.debug("text_parts: %i", len(text_parts))
assert text_parts, 'No text to send to TTS API'
for idx, part in enumerate(text_parts):
try:
# Calculate token
part_tk = self.token.calculate_token(part)
except requests.exceptions.RequestException as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise gTTSError(
"Connection error during token calculation: %s" %
str(e))
payload = {'ie': 'UTF-8',
'q': part,
'tl': self.lang,
'ttsspeed': self.speed,
'total': len(text_parts),
'idx': idx,
'client': 'tw-ob',
'textlen': _len(part),
'tk': part_tk}
log.debug("payload-%i: %s", idx, payload)
try:
# Request
r = requests.get(self.GOOGLE_TTS_URL,
params=payload,
headers=self.GOOGLE_TTS_HEADERS,
proxies=urllib.request.getproxies(),
verify=False)
log.debug("headers-%i: %s", idx, r.request.headers)
log.debug("url-%i: %s", idx, r.request.url)
log.debug("status-%i: %s", idx, r.status_code)
r.raise_for_status()
except requests.exceptions.HTTPError:
# Request successful, bad response
raise gTTSError(tts=self, response=r)
except requests.exceptions.RequestException as e: # pragma: no cover
# Request failed
raise gTTSError(str(e))
try:
# Write
for chunk in r.iter_content(chunk_size=1024):
fp.write(chunk)
log.debug("part-%i written to %s", idx, fp)
except (AttributeError, TypeError) as e:
raise TypeError(
"'fp' is not a file-like object or it does not take bytes: %s" %
str(e)) | Do the TTS API request and write bytes to a file-like object.
Args:
fp (file object): Any file-like object to write the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
TypeError: When ``fp`` is not a file-like object that takes bytes. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tts.py#L167-L236 | [
"def _len(text):\n \"\"\"Same as `len(text)` for a string but that decodes\n `text` first in Python 2.x\n\n Args:\n text (string): string to get the size of.\n\n Returns:\n int: the size of the string.\n \"\"\"\n try:\n # Python 2\n return len(unicode(text))\n except NameError: # pragma: no cover\n # Python 3\n return len(text)\n",
"def _tokenize(self, text):\n # Pre-clean\n text = text.strip()\n\n # Apply pre-processors\n for pp in self.pre_processor_funcs:\n log.debug(\"pre-processing: %s\", pp)\n text = pp(text)\n\n if _len(text) <= self.GOOGLE_TTS_MAX_CHARS:\n return _clean_tokens([text])\n\n # Tokenize\n log.debug(\"tokenizing: %s\", self.tokenizer_func)\n tokens = self.tokenizer_func(text)\n\n # Clean\n tokens = _clean_tokens(tokens)\n\n # Minimize\n min_tokens = []\n for t in tokens:\n min_tokens += _minimize(t, ' ', self.GOOGLE_TTS_MAX_CHARS)\n return min_tokens\n"
] | class gTTS:
"""gTTS -- Google Text-to-Speech.
An interface to Google Translate's Text-to-Speech API.
Args:
text (string): The text to be read.
lang (string, optional): The language (IETF language tag) to
read the text in. Defaults to 'en'.
slow (bool, optional): Reads text more slowly. Defaults to ``False``.
lang_check (bool, optional): Strictly enforce an existing ``lang``,
to catch a language error early. If set to ``True``,
a ``ValueError`` is raised if ``lang`` doesn't exist.
Default is ``True``.
pre_processor_funcs (list): A list of zero or more functions that are
called to transform (pre-process) text before tokenizing. Those
functions must take a string and return a string. Defaults to::
[
pre_processors.tone_marks,
pre_processors.end_of_line,
pre_processors.abbreviations,
pre_processors.word_sub
]
tokenizer_func (callable): A function that takes in a string and
returns a list of string (tokens). Defaults to::
Tokenizer([
tokenizer_cases.tone_marks,
tokenizer_cases.period_comma,
tokenizer_cases.colon,
tokenizer_cases.other_punctuation
]).run
See Also:
:doc:`Pre-processing and tokenizing <tokenizer>`
Raises:
AssertionError: When ``text`` is ``None`` or empty; when there's nothing
left to speak after pre-precessing, tokenizing and cleaning.
ValueError: When ``lang_check`` is ``True`` and ``lang`` is not supported.
RuntimeError: When ``lang_check`` is ``True`` but there's an error loading
the languages dictionnary.
"""
GOOGLE_TTS_MAX_CHARS = 100 # Max characters the Google TTS API takes at a time
GOOGLE_TTS_URL = "https://translate.google.com/translate_tts"
GOOGLE_TTS_HEADERS = {
"Referer": "http://translate.google.com/",
"User-Agent":
"Mozilla/5.0 (Windows NT 10.0; WOW64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/47.0.2526.106 Safari/537.36"
}
def __init__(
self,
text,
lang='en',
slow=False,
lang_check=True,
pre_processor_funcs=[
pre_processors.tone_marks,
pre_processors.end_of_line,
pre_processors.abbreviations,
pre_processors.word_sub
],
tokenizer_func=Tokenizer([
tokenizer_cases.tone_marks,
tokenizer_cases.period_comma,
tokenizer_cases.colon,
tokenizer_cases.other_punctuation
]).run
):
# Debug
for k, v in locals().items():
if k == 'self':
continue
log.debug("%s: %s", k, v)
# Text
assert text, 'No text to speak'
self.text = text
# Language
if lang_check:
try:
langs = tts_langs()
if lang.lower() not in langs:
raise ValueError("Language not supported: %s" % lang)
except RuntimeError as e:
log.debug(str(e), exc_info=True)
log.warning(str(e))
self.lang_check = lang_check
self.lang = lang.lower()
# Read speed
if slow:
self.speed = Speed.SLOW
else:
self.speed = Speed.NORMAL
# Pre-processors and tokenizer
self.pre_processor_funcs = pre_processor_funcs
self.tokenizer_func = tokenizer_func
# Google Translate token
self.token = gtts_token.Token()
def _tokenize(self, text):
# Pre-clean
text = text.strip()
# Apply pre-processors
for pp in self.pre_processor_funcs:
log.debug("pre-processing: %s", pp)
text = pp(text)
if _len(text) <= self.GOOGLE_TTS_MAX_CHARS:
return _clean_tokens([text])
# Tokenize
log.debug("tokenizing: %s", self.tokenizer_func)
tokens = self.tokenizer_func(text)
# Clean
tokens = _clean_tokens(tokens)
# Minimize
min_tokens = []
for t in tokens:
min_tokens += _minimize(t, ' ', self.GOOGLE_TTS_MAX_CHARS)
return min_tokens
def save(self, savefile):
"""Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
"""
with open(str(savefile), 'wb') as f:
self.write_to_fp(f)
log.debug("Saved to %s", savefile)
|
pndurette/gTTS | gtts/tts.py | gTTS.save | python | def save(self, savefile):
with open(str(savefile), 'wb') as f:
self.write_to_fp(f)
log.debug("Saved to %s", savefile) | Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tts.py#L238-L250 | [
"def write_to_fp(self, fp):\n \"\"\"Do the TTS API request and write bytes to a file-like object.\n\n Args:\n fp (file object): Any file-like object to write the ``mp3`` to.\n\n Raises:\n :class:`gTTSError`: When there's an error with the API request.\n TypeError: When ``fp`` is not a file-like object that takes bytes.\n\n \"\"\"\n # When disabling ssl verify in requests (for proxies and firewalls),\n # urllib3 prints an insecure warning on stdout. We disable that.\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n text_parts = self._tokenize(self.text)\n log.debug(\"text_parts: %i\", len(text_parts))\n assert text_parts, 'No text to send to TTS API'\n\n for idx, part in enumerate(text_parts):\n try:\n # Calculate token\n part_tk = self.token.calculate_token(part)\n except requests.exceptions.RequestException as e: # pragma: no cover\n log.debug(str(e), exc_info=True)\n raise gTTSError(\n \"Connection error during token calculation: %s\" %\n str(e))\n\n payload = {'ie': 'UTF-8',\n 'q': part,\n 'tl': self.lang,\n 'ttsspeed': self.speed,\n 'total': len(text_parts),\n 'idx': idx,\n 'client': 'tw-ob',\n 'textlen': _len(part),\n 'tk': part_tk}\n\n log.debug(\"payload-%i: %s\", idx, payload)\n\n try:\n # Request\n r = requests.get(self.GOOGLE_TTS_URL,\n params=payload,\n headers=self.GOOGLE_TTS_HEADERS,\n proxies=urllib.request.getproxies(),\n verify=False)\n\n log.debug(\"headers-%i: %s\", idx, r.request.headers)\n log.debug(\"url-%i: %s\", idx, r.request.url)\n log.debug(\"status-%i: %s\", idx, r.status_code)\n\n r.raise_for_status()\n except requests.exceptions.HTTPError:\n # Request successful, bad response\n raise gTTSError(tts=self, response=r)\n except requests.exceptions.RequestException as e: # pragma: no cover\n # Request failed\n raise gTTSError(str(e))\n\n try:\n # Write\n for chunk in r.iter_content(chunk_size=1024):\n fp.write(chunk)\n log.debug(\"part-%i written to %s\", idx, fp)\n except (AttributeError, TypeError) as e:\n raise 
TypeError(\n \"'fp' is not a file-like object or it does not take bytes: %s\" %\n str(e))\n"
] | class gTTS:
"""gTTS -- Google Text-to-Speech.
An interface to Google Translate's Text-to-Speech API.
Args:
text (string): The text to be read.
lang (string, optional): The language (IETF language tag) to
read the text in. Defaults to 'en'.
slow (bool, optional): Reads text more slowly. Defaults to ``False``.
lang_check (bool, optional): Strictly enforce an existing ``lang``,
to catch a language error early. If set to ``True``,
a ``ValueError`` is raised if ``lang`` doesn't exist.
Default is ``True``.
pre_processor_funcs (list): A list of zero or more functions that are
called to transform (pre-process) text before tokenizing. Those
functions must take a string and return a string. Defaults to::
[
pre_processors.tone_marks,
pre_processors.end_of_line,
pre_processors.abbreviations,
pre_processors.word_sub
]
tokenizer_func (callable): A function that takes in a string and
returns a list of string (tokens). Defaults to::
Tokenizer([
tokenizer_cases.tone_marks,
tokenizer_cases.period_comma,
tokenizer_cases.colon,
tokenizer_cases.other_punctuation
]).run
See Also:
:doc:`Pre-processing and tokenizing <tokenizer>`
Raises:
AssertionError: When ``text`` is ``None`` or empty; when there's nothing
left to speak after pre-precessing, tokenizing and cleaning.
ValueError: When ``lang_check`` is ``True`` and ``lang`` is not supported.
RuntimeError: When ``lang_check`` is ``True`` but there's an error loading
the languages dictionnary.
"""
GOOGLE_TTS_MAX_CHARS = 100 # Max characters the Google TTS API takes at a time
GOOGLE_TTS_URL = "https://translate.google.com/translate_tts"
GOOGLE_TTS_HEADERS = {
"Referer": "http://translate.google.com/",
"User-Agent":
"Mozilla/5.0 (Windows NT 10.0; WOW64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/47.0.2526.106 Safari/537.36"
}
def __init__(
self,
text,
lang='en',
slow=False,
lang_check=True,
pre_processor_funcs=[
pre_processors.tone_marks,
pre_processors.end_of_line,
pre_processors.abbreviations,
pre_processors.word_sub
],
tokenizer_func=Tokenizer([
tokenizer_cases.tone_marks,
tokenizer_cases.period_comma,
tokenizer_cases.colon,
tokenizer_cases.other_punctuation
]).run
):
# Debug
for k, v in locals().items():
if k == 'self':
continue
log.debug("%s: %s", k, v)
# Text
assert text, 'No text to speak'
self.text = text
# Language
if lang_check:
try:
langs = tts_langs()
if lang.lower() not in langs:
raise ValueError("Language not supported: %s" % lang)
except RuntimeError as e:
log.debug(str(e), exc_info=True)
log.warning(str(e))
self.lang_check = lang_check
self.lang = lang.lower()
# Read speed
if slow:
self.speed = Speed.SLOW
else:
self.speed = Speed.NORMAL
# Pre-processors and tokenizer
self.pre_processor_funcs = pre_processor_funcs
self.tokenizer_func = tokenizer_func
# Google Translate token
self.token = gtts_token.Token()
def _tokenize(self, text):
# Pre-clean
text = text.strip()
# Apply pre-processors
for pp in self.pre_processor_funcs:
log.debug("pre-processing: %s", pp)
text = pp(text)
if _len(text) <= self.GOOGLE_TTS_MAX_CHARS:
return _clean_tokens([text])
# Tokenize
log.debug("tokenizing: %s", self.tokenizer_func)
tokens = self.tokenizer_func(text)
# Clean
tokens = _clean_tokens(tokens)
# Minimize
min_tokens = []
for t in tokens:
min_tokens += _minimize(t, ' ', self.GOOGLE_TTS_MAX_CHARS)
return min_tokens
def write_to_fp(self, fp):
"""Do the TTS API request and write bytes to a file-like object.
Args:
fp (file object): Any file-like object to write the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
TypeError: When ``fp`` is not a file-like object that takes bytes.
"""
# When disabling ssl verify in requests (for proxies and firewalls),
# urllib3 prints an insecure warning on stdout. We disable that.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
text_parts = self._tokenize(self.text)
log.debug("text_parts: %i", len(text_parts))
assert text_parts, 'No text to send to TTS API'
for idx, part in enumerate(text_parts):
try:
# Calculate token
part_tk = self.token.calculate_token(part)
except requests.exceptions.RequestException as e: # pragma: no cover
log.debug(str(e), exc_info=True)
raise gTTSError(
"Connection error during token calculation: %s" %
str(e))
payload = {'ie': 'UTF-8',
'q': part,
'tl': self.lang,
'ttsspeed': self.speed,
'total': len(text_parts),
'idx': idx,
'client': 'tw-ob',
'textlen': _len(part),
'tk': part_tk}
log.debug("payload-%i: %s", idx, payload)
try:
# Request
r = requests.get(self.GOOGLE_TTS_URL,
params=payload,
headers=self.GOOGLE_TTS_HEADERS,
proxies=urllib.request.getproxies(),
verify=False)
log.debug("headers-%i: %s", idx, r.request.headers)
log.debug("url-%i: %s", idx, r.request.url)
log.debug("status-%i: %s", idx, r.status_code)
r.raise_for_status()
except requests.exceptions.HTTPError:
# Request successful, bad response
raise gTTSError(tts=self, response=r)
except requests.exceptions.RequestException as e: # pragma: no cover
# Request failed
raise gTTSError(str(e))
try:
# Write
for chunk in r.iter_content(chunk_size=1024):
fp.write(chunk)
log.debug("part-%i written to %s", idx, fp)
except (AttributeError, TypeError) as e:
raise TypeError(
"'fp' is not a file-like object or it does not take bytes: %s" %
str(e))
|
pndurette/gTTS | gtts/tts.py | gTTSError.infer_msg | python | def infer_msg(self, tts, rsp):
# rsp should be <requests.Response>
# http://docs.python-requests.org/en/master/api/
status = rsp.status_code
reason = rsp.reason
cause = "Unknown"
if status == 403:
cause = "Bad token or upstream API changes"
elif status == 404 and not tts.lang_check:
cause = "Unsupported language '%s'" % self.tts.lang
elif status >= 500:
cause = "Uptream API error. Try again later."
return "%i (%s) from TTS API. Probable cause: %s" % (
status, reason, cause) | Attempt to guess what went wrong by using known
information (e.g. http response) and observed behaviour | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tts.py#L267-L286 | null | class gTTSError(Exception):
"""Exception that uses context to present a meaningful error message"""
def __init__(self, msg=None, **kwargs):
self.tts = kwargs.pop('tts', None)
self.rsp = kwargs.pop('response', None)
if msg:
self.msg = msg
elif self.tts is not None and self.rsp is not None:
self.msg = self.infer_msg(self.tts, self.rsp)
else:
self.msg = None
super(gTTSError, self).__init__(self.msg)
|
pndurette/gTTS | gtts/utils.py | _minimize | python | def _minimize(the_string, delim, max_size):
# Remove `delim` from start of `the_string`
# i.e. prevent a recursive infinite loop on `the_string[0:0]`
# if `the_string` starts with `delim` and is larger than `max_size`
if the_string.startswith(delim):
the_string = the_string[_len(delim):]
if _len(the_string) > max_size:
try:
# Find the highest index of `delim` in `the_string[0:max_size]`
# i.e. `the_string` will be cut in half on `delim` index
idx = the_string.rindex(delim, 0, max_size)
except ValueError:
# `delim` not found in `the_string`, index becomes `max_size`
# i.e. `the_string` will be cut in half arbitrarily on `max_size`
idx = max_size
# Call itself again for `the_string[idx:]`
return [the_string[:idx]] + \
_minimize(the_string[idx:], delim, max_size)
else:
return [the_string] | Recursively split a string in the largest chunks
possible from the highest position of a delimiter all the way
to a maximum size
Args:
the_string (string): The string to split.
delim (string): The delimiter to split on.
max_size (int): The maximum size of a chunk.
Returns:
list: the minimized string in tokens
Every chunk size will be at minimum `the_string[0:idx]` where `idx`
is the highest index of `delim` found in `the_string`; and at maximum
`the_string[0:max_size]` if no `delim` was found in `the_string`.
In the latter case, the split will occur at `the_string[max_size]`
which can be any character. The function runs itself again on the rest of
`the_string` (`the_string[idx:]`) until no chunk is larger than `max_size`. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/utils.py#L13-L53 | [
"def _len(text):\n \"\"\"Same as `len(text)` for a string but that decodes\n `text` first in Python 2.x\n\n Args:\n text (string): string to get the size of.\n\n Returns:\n int: the size of the string.\n \"\"\"\n try:\n # Python 2\n return len(unicode(text))\n except NameError: # pragma: no cover\n # Python 3\n return len(text)\n",
"def _minimize(the_string, delim, max_size):\n \"\"\"Recursively split a string in the largest chunks\n possible from the highest position of a delimiter all the way\n to a maximum size\n\n Args:\n the_string (string): The string to split.\n delim (string): The delimiter to split on.\n max_size (int): The maximum size of a chunk.\n\n Returns:\n list: the minimized string in tokens\n\n Every chunk size will be at minimum `the_string[0:idx]` where `idx`\n is the highest index of `delim` found in `the_string`; and at maximum\n `the_string[0:max_size]` if no `delim` was found in `the_string`.\n In the latter case, the split will occur at `the_string[max_size]`\n which can be any character. The function runs itself again on the rest of\n `the_string` (`the_string[idx:]`) until no chunk is larger than `max_size`.\n\n \"\"\"\n # Remove `delim` from start of `the_string`\n # i.e. prevent a recursive infinite loop on `the_string[0:0]`\n # if `the_string` starts with `delim` and is larger than `max_size`\n if the_string.startswith(delim):\n the_string = the_string[_len(delim):]\n\n if _len(the_string) > max_size:\n try:\n # Find the highest index of `delim` in `the_string[0:max_size]`\n # i.e. `the_string` will be cut in half on `delim` index\n idx = the_string.rindex(delim, 0, max_size)\n except ValueError:\n # `delim` not found in `the_string`, index becomes `max_size`\n # i.e. `the_string` will be cut in half arbitrarily on `max_size`\n idx = max_size\n # Call itself again for `the_string[idx:]`\n return [the_string[:idx]] + \\\n _minimize(the_string[idx:], delim, max_size)\n else:\n return [the_string]\n"
] | # -*- coding: utf-8 -*-
from gtts.tokenizer.symbols import ALL_PUNC as punc
from string import whitespace as ws
import re
_ALL_PUNC_OR_SPACE = re.compile(u"^[{}]*$".format(re.escape(punc + ws)))
"""Regex that matches if an entire line is only comprised
of whitespace and punctuation
"""
def _len(text):
"""Same as `len(text)` for a string but that decodes
`text` first in Python 2.x
Args:
text (string): string to get the size of.
Returns:
int: the size of the string.
"""
try:
# Python 2
return len(unicode(text))
except NameError: # pragma: no cover
# Python 3
return len(text)
def _clean_tokens(tokens):
"""Clean a list of strings
Args:
tokens (list): a list of strings (tokens) to clean.
Returns:
list: stripped strings `tokens` without the original elements
that only consisted of whitespace and/or punctuation characters.
"""
return [t.strip() for t in tokens if not _ALL_PUNC_OR_SPACE.match(t)]
|
pndurette/gTTS | gtts/tokenizer/core.py | PreProcessorRegex.run | python | def run(self, text):
for regex in self.regexes:
text = regex.sub(self.repl, text)
return text | Run each regex substitution on ``text``.
Args:
text (string): the input text.
Returns:
string: text after all substitutions have been sequentially
applied. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/core.py#L127-L140 | null | class PreProcessorRegex():
"""Regex-based substitution text pre-processor.
Runs a series of regex substitutions (``re.sub``) from each ``regex`` of a
:class:`gtts.tokenizer.core.RegexBuilder` with an extra ``repl``
replacement parameter.
Args:
search_args (iteratable): String element(s) to be each passed to
``search_func`` to create a regex pattern. Each element is
``re.escape``'d before being passed.
search_func (callable): A 'template' function that should take a
string and return a string. It should take an element of
``search_args`` and return a valid regex search pattern string.
repl (string): The common replacement passed to the ``sub`` method for
each ``regex``. Can be a raw string (the case of a regex
backreference, for example)
flags: ``re`` flag(s) to compile with each `regex`.
Example:
Add "!" after the words "lorem" or "ipsum", while ignoring case::
>>> import re
>>> words = ['lorem', 'ipsum']
>>> pp = PreProcessorRegex(words,
... lambda x: "({})".format(x), r'\\1!',
... re.IGNORECASE)
In this case, the regex is a group and the replacement uses its
backreference ``\\1`` (as a raw string). Looking at ``pp`` we get the
following list of search/replacement pairs::
>>> print(pp)
(re.compile('(lorem)', re.IGNORECASE), repl='\1!'),
(re.compile('(ipsum)', re.IGNORECASE), repl='\1!')
It can then be run on any string of text::
>>> pp.run("LOREM ipSuM")
"LOREM! ipSuM!"
See :mod:`gtts.tokenizer.pre_processors` for more examples.
"""
def __init__(self, search_args, search_func, repl, flags=0):
self.repl = repl
# Create regex list
self.regexes = []
for arg in search_args:
rb = RegexBuilder([arg], search_func, flags)
self.regexes.append(rb.regex)
def __repr__(self): # pragma: no cover
subs_strs = []
for r in self.regexes:
subs_strs.append("({}, repl='{}')".format(r, self.repl))
return ", ".join(subs_strs)
|
pndurette/gTTS | gtts/tokenizer/core.py | PreProcessorSub.run | python | def run(self, text):
for pp in self.pre_processors:
text = pp.run(text)
return text | Run each substitution on ``text``.
Args:
text (string): the input text.
Returns:
string: text after all substitutions have been sequentially
applied. | train | https://github.com/pndurette/gTTS/blob/b01ac4eb22d40c6241202e202d0418ccf4f98460/gtts/tokenizer/core.py#L196-L209 | null | class PreProcessorSub():
"""Simple substitution text preprocessor.
Performs string-for-string substitution from list a find/replace pairs.
It abstracts :class:`gtts.tokenizer.core.PreProcessorRegex` with a default
simple substitution regex.
Args:
sub_pairs (list): A list of tuples of the style
``(<search str>, <replace str>)``
ignore_case (bool): Ignore case during search. Defaults to ``True``.
Example:
Replace all occurences of "Mac" to "PC" and "Firefox" to "Chrome"::
>>> sub_pairs = [('Mac', 'PC'), ('Firefox', 'Chrome')]
>>> pp = PreProcessorSub(sub_pairs)
Looking at the ``pp``, we get the following list of
search (regex)/replacement pairs::
>>> print(pp)
(re.compile('Mac', re.IGNORECASE), repl='PC'),
(re.compile('Firefox', re.IGNORECASE), repl='Chrome')
It can then be run on any string of text::
>>> pp.run("I use firefox on my mac")
"I use Chrome on my PC"
See :mod:`gtts.tokenizer.pre_processors` for more examples.
"""
def __init__(self, sub_pairs, ignore_case=True):
def search_func(x):
return u"{}".format(x)
flags = re.I if ignore_case else 0
# Create pre-processor list
self.pre_processors = []
for sub_pair in sub_pairs:
pattern, repl = sub_pair
pp = PreProcessorRegex([pattern], search_func, repl, flags)
self.pre_processors.append(pp)
def __repr__(self): # pragma: no cover
return ", ".join([str(pp) for pp in self.pre_processors])
|
myusuf3/delorean | delorean/interface.py | parse | python | def parse(datetime_str, timezone=None, isofirst=True, dayfirst=True, yearfirst=True):
# parse string to datetime object
dt = None
if isofirst:
try:
dt = isocapture(datetime_str)
except Exception:
pass
if dt is None:
dt = capture(datetime_str, dayfirst=dayfirst, yearfirst=yearfirst)
if timezone:
dt = dt.replace(tzinfo=None)
do = Delorean(datetime=dt, timezone=timezone)
elif dt.tzinfo is None:
# assuming datetime object passed in is UTC
do = Delorean(datetime=dt, timezone='UTC')
elif isinstance(dt.tzinfo, tzoffset):
utcoffset = dt.tzinfo.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (utcoffset.seconds + utcoffset.days * 24 * 3600) * 10**6) / 10**6)
tz = pytz.FixedOffset(total_seconds / 60)
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
elif isinstance(dt.tzinfo, tzlocal):
tz = get_localzone()
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
else:
dt = pytz.utc.normalize(dt)
# making dt naive so we can pass it to Delorean
dt = dt.replace(tzinfo=None)
# if parse string has tzinfo we return a normalized UTC
# delorean object that represents the time.
do = Delorean(datetime=dt, timezone='UTC')
return do | Parses a datetime string and returns a `Delorean` object.
:param datetime_str: The string to be interpreted into a `Delorean` object.
:param timezone: Pass this parameter and the returned Delorean object will be normalized to this timezone. Any
offsets passed as part of datetime_str will be ignored.
:param isofirst: try to parse string as date in ISO format before everything else.
:param dayfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the day
(True) or month (False). If yearfirst is set to True, this distinguishes between YDM and YMD.
:param yearfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the
year. If True, the first number is taken to be the year, otherwise the last number is taken to be the year.
.. testsetup::
from delorean import Delorean
from delorean import parse
.. doctest::
>>> parse('2015-01-01 00:01:02')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
If a fixed offset is provided in the datetime_str, it will be parsed and the returned `Delorean` object will store a
`pytz.FixedOffest` as it's timezone.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
If the timezone argument is supplied, the returned Delorean object will be in the timezone supplied. Any offsets in
the datetime_str will be ignored.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0500', timezone='US/Pacific')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='US/Pacific')
If an unambiguous timezone is detected in the datetime string, a Delorean object with that datetime and
timezone will be returned.
.. doctest::
>>> parse('2015-01-01 00:01:02 PST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='America/Los_Angeles')
However if the provided timezone is ambiguous, parse will ignore the timezone and return a `Delorean` object in UTC
time.
>>> parse('2015-01-01 00:01:02 EST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC') | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L15-L105 | null | from datetime import datetime
import pytz
from dateutil.rrule import rrule, DAILY, HOURLY, MONTHLY, YEARLY
from dateutil.parser import parse as capture, isoparse as isocapture
from dateutil.tz import tzlocal
from dateutil.tz import tzoffset
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidDatetime
from .dates import Delorean, is_datetime_naive, datetime_timezone
def range_daily(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
DAILY stops
"""
return stops(start=start, stop=stop, freq=DAILY, timezone=timezone, count=count)
def range_hourly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
HOURLY stops
"""
return stops(start=start, stop=stop, freq=HOURLY, timezone=timezone, count=count)
def range_monthly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
MONTHLY stops
"""
return stops(start=start, stop=stop, freq=MONTHLY, timezone=timezone, count=count)
def range_yearly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
YEARLY stops
"""
return stops(start=start, stop=stop, freq=YEARLY, timezone=timezone, count=count)
def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None, byhour=None, byminute=None,
bysecond=None, timezone='UTC', start=None, stop=None):
"""
This will create a list of delorean objects the apply to
setting possed in.
"""
# check to see if datetimees passed in are naive if so process them
# with given timezone.
if all([(start is None or is_datetime_naive(start)),
(stop is None or is_datetime_naive(stop))]):
pass
else:
raise DeloreanInvalidDatetime('Provide a naive datetime object')
# if no datetimes are passed in create a proper datetime object for
# start default because default in dateutil is datetime.now() :(
if start is None:
start = datetime_timezone(timezone)
for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,
bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,
byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,
bysecond=bysecond, until=stop, dtstart=start):
# make the delorean object
# yield it.
# doing this to make sure delorean receives a naive datetime.
dt = dt.replace(tzinfo=None)
d = Delorean(datetime=dt, timezone=timezone)
yield d
def epoch(s):
dt = datetime.utcfromtimestamp(s)
return Delorean(datetime=dt, timezone='UTC')
def flux():
print("If you put your mind to it, you can accomplish anything.")
def utcnow():
"""
Return a Delorean object for the current UTC date and time, setting the timezone to UTC.
"""
return Delorean()
def now(timezone=None):
"""
Return a Delorean object for the current local date and time, setting the timezone to the local timezone of the
caller by default.
:param Optional[datetime.tzinfo] timezone: A custom timezone to use when computing the time.
:rtype: delorean.dates.Delorean
"""
return Delorean(timezone=timezone or get_localzone())
|
myusuf3/delorean | delorean/interface.py | range_daily | python | def range_daily(start=None, stop=None, timezone='UTC', count=None):
return stops(start=start, stop=stop, freq=DAILY, timezone=timezone, count=count) | This an alternative way to generating sets of Delorean objects with
DAILY stops | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L108-L113 | [
"def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,\n bymonth=None, bymonthday=None, byyearday=None, byeaster=None,\n byweekno=None, byweekday=None, byhour=None, byminute=None,\n bysecond=None, timezone='UTC', start=None, stop=None):\n \"\"\"\n This will create a list of delorean objects the apply to\n setting possed in.\n \"\"\"\n # check to see if datetimees passed in are naive if so process them\n # with given timezone.\n if all([(start is None or is_datetime_naive(start)),\n (stop is None or is_datetime_naive(stop))]):\n pass\n else:\n raise DeloreanInvalidDatetime('Provide a naive datetime object')\n\n # if no datetimes are passed in create a proper datetime object for\n # start default because default in dateutil is datetime.now() :(\n if start is None:\n start = datetime_timezone(timezone)\n\n for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,\n bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,\n byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,\n bysecond=bysecond, until=stop, dtstart=start):\n # make the delorean object\n # yield it.\n # doing this to make sure delorean receives a naive datetime.\n dt = dt.replace(tzinfo=None)\n d = Delorean(datetime=dt, timezone=timezone)\n yield d\n"
] | from datetime import datetime
import pytz
from dateutil.rrule import rrule, DAILY, HOURLY, MONTHLY, YEARLY
from dateutil.parser import parse as capture, isoparse as isocapture
from dateutil.tz import tzlocal
from dateutil.tz import tzoffset
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidDatetime
from .dates import Delorean, is_datetime_naive, datetime_timezone
def parse(datetime_str, timezone=None, isofirst=True, dayfirst=True, yearfirst=True):
"""
Parses a datetime string and returns a `Delorean` object.
:param datetime_str: The string to be interpreted into a `Delorean` object.
:param timezone: Pass this parameter and the returned Delorean object will be normalized to this timezone. Any
offsets passed as part of datetime_str will be ignored.
:param isofirst: try to parse string as date in ISO format before everything else.
:param dayfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the day
(True) or month (False). If yearfirst is set to True, this distinguishes between YDM and YMD.
:param yearfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the
year. If True, the first number is taken to be the year, otherwise the last number is taken to be the year.
.. testsetup::
from delorean import Delorean
from delorean import parse
.. doctest::
>>> parse('2015-01-01 00:01:02')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
If a fixed offset is provided in the datetime_str, it will be parsed and the returned `Delorean` object will store a
`pytz.FixedOffest` as it's timezone.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
If the timezone argument is supplied, the returned Delorean object will be in the timezone supplied. Any offsets in
the datetime_str will be ignored.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0500', timezone='US/Pacific')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='US/Pacific')
If an unambiguous timezone is detected in the datetime string, a Delorean object with that datetime and
timezone will be returned.
.. doctest::
>>> parse('2015-01-01 00:01:02 PST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='America/Los_Angeles')
However if the provided timezone is ambiguous, parse will ignore the timezone and return a `Delorean` object in UTC
time.
>>> parse('2015-01-01 00:01:02 EST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
"""
# parse string to datetime object
dt = None
if isofirst:
try:
dt = isocapture(datetime_str)
except Exception:
pass
if dt is None:
dt = capture(datetime_str, dayfirst=dayfirst, yearfirst=yearfirst)
if timezone:
dt = dt.replace(tzinfo=None)
do = Delorean(datetime=dt, timezone=timezone)
elif dt.tzinfo is None:
# assuming datetime object passed in is UTC
do = Delorean(datetime=dt, timezone='UTC')
elif isinstance(dt.tzinfo, tzoffset):
utcoffset = dt.tzinfo.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (utcoffset.seconds + utcoffset.days * 24 * 3600) * 10**6) / 10**6)
tz = pytz.FixedOffset(total_seconds / 60)
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
elif isinstance(dt.tzinfo, tzlocal):
tz = get_localzone()
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
else:
dt = pytz.utc.normalize(dt)
# making dt naive so we can pass it to Delorean
dt = dt.replace(tzinfo=None)
# if parse string has tzinfo we return a normalized UTC
# delorean object that represents the time.
do = Delorean(datetime=dt, timezone='UTC')
return do
def range_hourly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
HOURLY stops
"""
return stops(start=start, stop=stop, freq=HOURLY, timezone=timezone, count=count)
def range_monthly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
MONTHLY stops
"""
return stops(start=start, stop=stop, freq=MONTHLY, timezone=timezone, count=count)
def range_yearly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
YEARLY stops
"""
return stops(start=start, stop=stop, freq=YEARLY, timezone=timezone, count=count)
def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None, byhour=None, byminute=None,
bysecond=None, timezone='UTC', start=None, stop=None):
"""
This will create a list of delorean objects the apply to
setting possed in.
"""
# check to see if datetimees passed in are naive if so process them
# with given timezone.
if all([(start is None or is_datetime_naive(start)),
(stop is None or is_datetime_naive(stop))]):
pass
else:
raise DeloreanInvalidDatetime('Provide a naive datetime object')
# if no datetimes are passed in create a proper datetime object for
# start default because default in dateutil is datetime.now() :(
if start is None:
start = datetime_timezone(timezone)
for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,
bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,
byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,
bysecond=bysecond, until=stop, dtstart=start):
# make the delorean object
# yield it.
# doing this to make sure delorean receives a naive datetime.
dt = dt.replace(tzinfo=None)
d = Delorean(datetime=dt, timezone=timezone)
yield d
def epoch(s):
dt = datetime.utcfromtimestamp(s)
return Delorean(datetime=dt, timezone='UTC')
def flux():
print("If you put your mind to it, you can accomplish anything.")
def utcnow():
"""
Return a Delorean object for the current UTC date and time, setting the timezone to UTC.
"""
return Delorean()
def now(timezone=None):
"""
Return a Delorean object for the current local date and time, setting the timezone to the local timezone of the
caller by default.
:param Optional[datetime.tzinfo] timezone: A custom timezone to use when computing the time.
:rtype: delorean.dates.Delorean
"""
return Delorean(timezone=timezone or get_localzone())
|
myusuf3/delorean | delorean/interface.py | range_hourly | python | def range_hourly(start=None, stop=None, timezone='UTC', count=None):
return stops(start=start, stop=stop, freq=HOURLY, timezone=timezone, count=count) | This an alternative way to generating sets of Delorean objects with
HOURLY stops | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L116-L121 | [
"def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,\n bymonth=None, bymonthday=None, byyearday=None, byeaster=None,\n byweekno=None, byweekday=None, byhour=None, byminute=None,\n bysecond=None, timezone='UTC', start=None, stop=None):\n \"\"\"\n This will create a list of delorean objects the apply to\n setting possed in.\n \"\"\"\n # check to see if datetimees passed in are naive if so process them\n # with given timezone.\n if all([(start is None or is_datetime_naive(start)),\n (stop is None or is_datetime_naive(stop))]):\n pass\n else:\n raise DeloreanInvalidDatetime('Provide a naive datetime object')\n\n # if no datetimes are passed in create a proper datetime object for\n # start default because default in dateutil is datetime.now() :(\n if start is None:\n start = datetime_timezone(timezone)\n\n for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,\n bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,\n byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,\n bysecond=bysecond, until=stop, dtstart=start):\n # make the delorean object\n # yield it.\n # doing this to make sure delorean receives a naive datetime.\n dt = dt.replace(tzinfo=None)\n d = Delorean(datetime=dt, timezone=timezone)\n yield d\n"
] | from datetime import datetime
import pytz
from dateutil.rrule import rrule, DAILY, HOURLY, MONTHLY, YEARLY
from dateutil.parser import parse as capture, isoparse as isocapture
from dateutil.tz import tzlocal
from dateutil.tz import tzoffset
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidDatetime
from .dates import Delorean, is_datetime_naive, datetime_timezone
def parse(datetime_str, timezone=None, isofirst=True, dayfirst=True, yearfirst=True):
"""
Parses a datetime string and returns a `Delorean` object.
:param datetime_str: The string to be interpreted into a `Delorean` object.
:param timezone: Pass this parameter and the returned Delorean object will be normalized to this timezone. Any
offsets passed as part of datetime_str will be ignored.
:param isofirst: try to parse string as date in ISO format before everything else.
:param dayfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the day
(True) or month (False). If yearfirst is set to True, this distinguishes between YDM and YMD.
:param yearfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the
year. If True, the first number is taken to be the year, otherwise the last number is taken to be the year.
.. testsetup::
from delorean import Delorean
from delorean import parse
.. doctest::
>>> parse('2015-01-01 00:01:02')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
If a fixed offset is provided in the datetime_str, it will be parsed and the returned `Delorean` object will store a
`pytz.FixedOffest` as it's timezone.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
If the timezone argument is supplied, the returned Delorean object will be in the timezone supplied. Any offsets in
the datetime_str will be ignored.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0500', timezone='US/Pacific')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='US/Pacific')
If an unambiguous timezone is detected in the datetime string, a Delorean object with that datetime and
timezone will be returned.
.. doctest::
>>> parse('2015-01-01 00:01:02 PST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='America/Los_Angeles')
However if the provided timezone is ambiguous, parse will ignore the timezone and return a `Delorean` object in UTC
time.
>>> parse('2015-01-01 00:01:02 EST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
"""
# parse string to datetime object
dt = None
if isofirst:
try:
dt = isocapture(datetime_str)
except Exception:
pass
if dt is None:
dt = capture(datetime_str, dayfirst=dayfirst, yearfirst=yearfirst)
if timezone:
dt = dt.replace(tzinfo=None)
do = Delorean(datetime=dt, timezone=timezone)
elif dt.tzinfo is None:
# assuming datetime object passed in is UTC
do = Delorean(datetime=dt, timezone='UTC')
elif isinstance(dt.tzinfo, tzoffset):
utcoffset = dt.tzinfo.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (utcoffset.seconds + utcoffset.days * 24 * 3600) * 10**6) / 10**6)
tz = pytz.FixedOffset(total_seconds / 60)
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
elif isinstance(dt.tzinfo, tzlocal):
tz = get_localzone()
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
else:
dt = pytz.utc.normalize(dt)
# making dt naive so we can pass it to Delorean
dt = dt.replace(tzinfo=None)
# if parse string has tzinfo we return a normalized UTC
# delorean object that represents the time.
do = Delorean(datetime=dt, timezone='UTC')
return do
def range_daily(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
DAILY stops
"""
return stops(start=start, stop=stop, freq=DAILY, timezone=timezone, count=count)
def range_monthly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
MONTHLY stops
"""
return stops(start=start, stop=stop, freq=MONTHLY, timezone=timezone, count=count)
def range_yearly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
YEARLY stops
"""
return stops(start=start, stop=stop, freq=YEARLY, timezone=timezone, count=count)
def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None, byhour=None, byminute=None,
bysecond=None, timezone='UTC', start=None, stop=None):
"""
This will create a list of delorean objects the apply to
setting possed in.
"""
# check to see if datetimees passed in are naive if so process them
# with given timezone.
if all([(start is None or is_datetime_naive(start)),
(stop is None or is_datetime_naive(stop))]):
pass
else:
raise DeloreanInvalidDatetime('Provide a naive datetime object')
# if no datetimes are passed in create a proper datetime object for
# start default because default in dateutil is datetime.now() :(
if start is None:
start = datetime_timezone(timezone)
for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,
bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,
byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,
bysecond=bysecond, until=stop, dtstart=start):
# make the delorean object
# yield it.
# doing this to make sure delorean receives a naive datetime.
dt = dt.replace(tzinfo=None)
d = Delorean(datetime=dt, timezone=timezone)
yield d
def epoch(s):
dt = datetime.utcfromtimestamp(s)
return Delorean(datetime=dt, timezone='UTC')
def flux():
print("If you put your mind to it, you can accomplish anything.")
def utcnow():
"""
Return a Delorean object for the current UTC date and time, setting the timezone to UTC.
"""
return Delorean()
def now(timezone=None):
"""
Return a Delorean object for the current local date and time, setting the timezone to the local timezone of the
caller by default.
:param Optional[datetime.tzinfo] timezone: A custom timezone to use when computing the time.
:rtype: delorean.dates.Delorean
"""
return Delorean(timezone=timezone or get_localzone())
|
myusuf3/delorean | delorean/interface.py | range_monthly | python | def range_monthly(start=None, stop=None, timezone='UTC', count=None):
return stops(start=start, stop=stop, freq=MONTHLY, timezone=timezone, count=count) | This an alternative way to generating sets of Delorean objects with
MONTHLY stops | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L124-L129 | [
"def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,\n bymonth=None, bymonthday=None, byyearday=None, byeaster=None,\n byweekno=None, byweekday=None, byhour=None, byminute=None,\n bysecond=None, timezone='UTC', start=None, stop=None):\n \"\"\"\n This will create a list of delorean objects the apply to\n setting possed in.\n \"\"\"\n # check to see if datetimees passed in are naive if so process them\n # with given timezone.\n if all([(start is None or is_datetime_naive(start)),\n (stop is None or is_datetime_naive(stop))]):\n pass\n else:\n raise DeloreanInvalidDatetime('Provide a naive datetime object')\n\n # if no datetimes are passed in create a proper datetime object for\n # start default because default in dateutil is datetime.now() :(\n if start is None:\n start = datetime_timezone(timezone)\n\n for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,\n bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,\n byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,\n bysecond=bysecond, until=stop, dtstart=start):\n # make the delorean object\n # yield it.\n # doing this to make sure delorean receives a naive datetime.\n dt = dt.replace(tzinfo=None)\n d = Delorean(datetime=dt, timezone=timezone)\n yield d\n"
] | from datetime import datetime
import pytz
from dateutil.rrule import rrule, DAILY, HOURLY, MONTHLY, YEARLY
from dateutil.parser import parse as capture, isoparse as isocapture
from dateutil.tz import tzlocal
from dateutil.tz import tzoffset
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidDatetime
from .dates import Delorean, is_datetime_naive, datetime_timezone
def parse(datetime_str, timezone=None, isofirst=True, dayfirst=True, yearfirst=True):
"""
Parses a datetime string and returns a `Delorean` object.
:param datetime_str: The string to be interpreted into a `Delorean` object.
:param timezone: Pass this parameter and the returned Delorean object will be normalized to this timezone. Any
offsets passed as part of datetime_str will be ignored.
:param isofirst: try to parse string as date in ISO format before everything else.
:param dayfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the day
(True) or month (False). If yearfirst is set to True, this distinguishes between YDM and YMD.
:param yearfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the
year. If True, the first number is taken to be the year, otherwise the last number is taken to be the year.
.. testsetup::
from delorean import Delorean
from delorean import parse
.. doctest::
>>> parse('2015-01-01 00:01:02')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
If a fixed offset is provided in the datetime_str, it will be parsed and the returned `Delorean` object will store a
`pytz.FixedOffest` as it's timezone.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
If the timezone argument is supplied, the returned Delorean object will be in the timezone supplied. Any offsets in
the datetime_str will be ignored.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0500', timezone='US/Pacific')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='US/Pacific')
If an unambiguous timezone is detected in the datetime string, a Delorean object with that datetime and
timezone will be returned.
.. doctest::
>>> parse('2015-01-01 00:01:02 PST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='America/Los_Angeles')
However if the provided timezone is ambiguous, parse will ignore the timezone and return a `Delorean` object in UTC
time.
>>> parse('2015-01-01 00:01:02 EST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
"""
# parse string to datetime object
dt = None
if isofirst:
try:
dt = isocapture(datetime_str)
except Exception:
pass
if dt is None:
dt = capture(datetime_str, dayfirst=dayfirst, yearfirst=yearfirst)
if timezone:
dt = dt.replace(tzinfo=None)
do = Delorean(datetime=dt, timezone=timezone)
elif dt.tzinfo is None:
# assuming datetime object passed in is UTC
do = Delorean(datetime=dt, timezone='UTC')
elif isinstance(dt.tzinfo, tzoffset):
utcoffset = dt.tzinfo.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (utcoffset.seconds + utcoffset.days * 24 * 3600) * 10**6) / 10**6)
tz = pytz.FixedOffset(total_seconds / 60)
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
elif isinstance(dt.tzinfo, tzlocal):
tz = get_localzone()
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
else:
dt = pytz.utc.normalize(dt)
# making dt naive so we can pass it to Delorean
dt = dt.replace(tzinfo=None)
# if parse string has tzinfo we return a normalized UTC
# delorean object that represents the time.
do = Delorean(datetime=dt, timezone='UTC')
return do
def range_daily(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
DAILY stops
"""
return stops(start=start, stop=stop, freq=DAILY, timezone=timezone, count=count)
def range_hourly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
HOURLY stops
"""
return stops(start=start, stop=stop, freq=HOURLY, timezone=timezone, count=count)
def range_yearly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
YEARLY stops
"""
return stops(start=start, stop=stop, freq=YEARLY, timezone=timezone, count=count)
def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None, byhour=None, byminute=None,
bysecond=None, timezone='UTC', start=None, stop=None):
"""
This will create a list of delorean objects the apply to
setting possed in.
"""
# check to see if datetimees passed in are naive if so process them
# with given timezone.
if all([(start is None or is_datetime_naive(start)),
(stop is None or is_datetime_naive(stop))]):
pass
else:
raise DeloreanInvalidDatetime('Provide a naive datetime object')
# if no datetimes are passed in create a proper datetime object for
# start default because default in dateutil is datetime.now() :(
if start is None:
start = datetime_timezone(timezone)
for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,
bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,
byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,
bysecond=bysecond, until=stop, dtstart=start):
# make the delorean object
# yield it.
# doing this to make sure delorean receives a naive datetime.
dt = dt.replace(tzinfo=None)
d = Delorean(datetime=dt, timezone=timezone)
yield d
def epoch(s):
dt = datetime.utcfromtimestamp(s)
return Delorean(datetime=dt, timezone='UTC')
def flux():
print("If you put your mind to it, you can accomplish anything.")
def utcnow():
"""
Return a Delorean object for the current UTC date and time, setting the timezone to UTC.
"""
return Delorean()
def now(timezone=None):
"""
Return a Delorean object for the current local date and time, setting the timezone to the local timezone of the
caller by default.
:param Optional[datetime.tzinfo] timezone: A custom timezone to use when computing the time.
:rtype: delorean.dates.Delorean
"""
return Delorean(timezone=timezone or get_localzone())
|
myusuf3/delorean | delorean/interface.py | range_yearly | python | def range_yearly(start=None, stop=None, timezone='UTC', count=None):
return stops(start=start, stop=stop, freq=YEARLY, timezone=timezone, count=count) | This an alternative way to generating sets of Delorean objects with
YEARLY stops | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L132-L137 | [
"def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,\n bymonth=None, bymonthday=None, byyearday=None, byeaster=None,\n byweekno=None, byweekday=None, byhour=None, byminute=None,\n bysecond=None, timezone='UTC', start=None, stop=None):\n \"\"\"\n This will create a list of delorean objects the apply to\n setting possed in.\n \"\"\"\n # check to see if datetimees passed in are naive if so process them\n # with given timezone.\n if all([(start is None or is_datetime_naive(start)),\n (stop is None or is_datetime_naive(stop))]):\n pass\n else:\n raise DeloreanInvalidDatetime('Provide a naive datetime object')\n\n # if no datetimes are passed in create a proper datetime object for\n # start default because default in dateutil is datetime.now() :(\n if start is None:\n start = datetime_timezone(timezone)\n\n for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,\n bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,\n byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,\n bysecond=bysecond, until=stop, dtstart=start):\n # make the delorean object\n # yield it.\n # doing this to make sure delorean receives a naive datetime.\n dt = dt.replace(tzinfo=None)\n d = Delorean(datetime=dt, timezone=timezone)\n yield d\n"
] | from datetime import datetime
import pytz
from dateutil.rrule import rrule, DAILY, HOURLY, MONTHLY, YEARLY
from dateutil.parser import parse as capture, isoparse as isocapture
from dateutil.tz import tzlocal
from dateutil.tz import tzoffset
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidDatetime
from .dates import Delorean, is_datetime_naive, datetime_timezone
def parse(datetime_str, timezone=None, isofirst=True, dayfirst=True, yearfirst=True):
"""
Parses a datetime string and returns a `Delorean` object.
:param datetime_str: The string to be interpreted into a `Delorean` object.
:param timezone: Pass this parameter and the returned Delorean object will be normalized to this timezone. Any
offsets passed as part of datetime_str will be ignored.
:param isofirst: try to parse string as date in ISO format before everything else.
:param dayfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the day
(True) or month (False). If yearfirst is set to True, this distinguishes between YDM and YMD.
:param yearfirst: Whether to interpret the first value in an ambiguous 3-integer date (ex. 01/05/09) as the
year. If True, the first number is taken to be the year, otherwise the last number is taken to be the year.
.. testsetup::
from delorean import Delorean
from delorean import parse
.. doctest::
>>> parse('2015-01-01 00:01:02')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
If a fixed offset is provided in the datetime_str, it will be parsed and the returned `Delorean` object will store a
`pytz.FixedOffest` as it's timezone.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0800')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone=pytz.FixedOffset(-480))
If the timezone argument is supplied, the returned Delorean object will be in the timezone supplied. Any offsets in
the datetime_str will be ignored.
.. doctest::
>>> parse('2015-01-01 00:01:02 -0500', timezone='US/Pacific')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='US/Pacific')
If an unambiguous timezone is detected in the datetime string, a Delorean object with that datetime and
timezone will be returned.
.. doctest::
>>> parse('2015-01-01 00:01:02 PST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='America/Los_Angeles')
However if the provided timezone is ambiguous, parse will ignore the timezone and return a `Delorean` object in UTC
time.
>>> parse('2015-01-01 00:01:02 EST')
Delorean(datetime=datetime.datetime(2015, 1, 1, 0, 1, 2), timezone='UTC')
"""
# parse string to datetime object
dt = None
if isofirst:
try:
dt = isocapture(datetime_str)
except Exception:
pass
if dt is None:
dt = capture(datetime_str, dayfirst=dayfirst, yearfirst=yearfirst)
if timezone:
dt = dt.replace(tzinfo=None)
do = Delorean(datetime=dt, timezone=timezone)
elif dt.tzinfo is None:
# assuming datetime object passed in is UTC
do = Delorean(datetime=dt, timezone='UTC')
elif isinstance(dt.tzinfo, tzoffset):
utcoffset = dt.tzinfo.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (utcoffset.seconds + utcoffset.days * 24 * 3600) * 10**6) / 10**6)
tz = pytz.FixedOffset(total_seconds / 60)
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
elif isinstance(dt.tzinfo, tzlocal):
tz = get_localzone()
dt = dt.replace(tzinfo=None)
do = Delorean(dt, timezone=tz)
else:
dt = pytz.utc.normalize(dt)
# making dt naive so we can pass it to Delorean
dt = dt.replace(tzinfo=None)
# if parse string has tzinfo we return a normalized UTC
# delorean object that represents the time.
do = Delorean(datetime=dt, timezone='UTC')
return do
def range_daily(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
DAILY stops
"""
return stops(start=start, stop=stop, freq=DAILY, timezone=timezone, count=count)
def range_hourly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
HOURLY stops
"""
return stops(start=start, stop=stop, freq=HOURLY, timezone=timezone, count=count)
def range_monthly(start=None, stop=None, timezone='UTC', count=None):
"""
This an alternative way to generating sets of Delorean objects with
MONTHLY stops
"""
return stops(start=start, stop=stop, freq=MONTHLY, timezone=timezone, count=count)
def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None, byhour=None, byminute=None,
bysecond=None, timezone='UTC', start=None, stop=None):
"""
This will create a list of delorean objects the apply to
setting possed in.
"""
# check to see if datetimees passed in are naive if so process them
# with given timezone.
if all([(start is None or is_datetime_naive(start)),
(stop is None or is_datetime_naive(stop))]):
pass
else:
raise DeloreanInvalidDatetime('Provide a naive datetime object')
# if no datetimes are passed in create a proper datetime object for
# start default because default in dateutil is datetime.now() :(
if start is None:
start = datetime_timezone(timezone)
for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,
bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,
byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,
bysecond=bysecond, until=stop, dtstart=start):
# make the delorean object
# yield it.
# doing this to make sure delorean receives a naive datetime.
dt = dt.replace(tzinfo=None)
d = Delorean(datetime=dt, timezone=timezone)
yield d
def epoch(s):
dt = datetime.utcfromtimestamp(s)
return Delorean(datetime=dt, timezone='UTC')
def flux():
print("If you put your mind to it, you can accomplish anything.")
def utcnow():
"""
Return a Delorean object for the current UTC date and time, setting the timezone to UTC.
"""
return Delorean()
def now(timezone=None):
"""
Return a Delorean object for the current local date and time, setting the timezone to the local timezone of the
caller by default.
:param Optional[datetime.tzinfo] timezone: A custom timezone to use when computing the time.
:rtype: delorean.dates.Delorean
"""
return Delorean(timezone=timezone or get_localzone())
|
myusuf3/delorean | delorean/interface.py | stops | python | def stops(freq, interval=1, count=None, wkst=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None, byhour=None, byminute=None,
bysecond=None, timezone='UTC', start=None, stop=None):
# check to see if datetimees passed in are naive if so process them
# with given timezone.
if all([(start is None or is_datetime_naive(start)),
(stop is None or is_datetime_naive(stop))]):
pass
else:
raise DeloreanInvalidDatetime('Provide a naive datetime object')
# if no datetimes are passed in create a proper datetime object for
# start default because default in dateutil is datetime.now() :(
if start is None:
start = datetime_timezone(timezone)
for dt in rrule(freq, interval=interval, count=count, wkst=wkst, bysetpos=bysetpos,
bymonth=bymonth, bymonthday=bymonthday, byyearday=byyearday, byeaster=byeaster,
byweekno=byweekno, byweekday=byweekday, byhour=byhour, byminute=byminute,
bysecond=bysecond, until=stop, dtstart=start):
# make the delorean object
# yield it.
# doing this to make sure delorean receives a naive datetime.
dt = dt.replace(tzinfo=None)
d = Delorean(datetime=dt, timezone=timezone)
yield d | This will create a list of delorean objects the apply to
setting possed in. | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L140-L170 | [
"def datetime_timezone(tz):\n \"\"\"\n This method given a timezone returns a localized datetime object.\n \"\"\"\n utc_datetime_naive = datetime.utcnow()\n # return a localized datetime to UTC\n utc_localized_datetime = localize(utc_datetime_naive, 'UTC')\n # normalize the datetime to given timezone\n normalized_datetime = normalize(utc_localized_datetime, tz)\n return normalized_datetime\n",
"def is_datetime_naive(dt):\n \"\"\"\n This method returns true if the datetime is naive else returns false\n \"\"\"\n if dt.tzinfo is None:\n return True\n else:\n return False\n"
] | from datetime import datetime
import pytz
from dateutil.rrule import rrule, DAILY, HOURLY, MONTHLY, YEARLY
from dateutil.parser import parse as capture, isoparse as isocapture
from dateutil.tz import tzlocal
from dateutil.tz import tzoffset
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidDatetime
from .dates import Delorean, is_datetime_naive, datetime_timezone
def parse(datetime_str, timezone=None, isofirst=True, dayfirst=True, yearfirst=True):
    """Parse a datetime string into a :class:`Delorean` object.

    :param datetime_str: string to interpret as a datetime.
    :param timezone: when given, the result is localized to this timezone and
        any offset contained in ``datetime_str`` is discarded.
    :param isofirst: attempt strict ISO-8601 parsing before the fuzzy parser.
    :param dayfirst: treat the first value of an ambiguous 3-integer date
        (e.g. 01/05/09) as the day rather than the month; with ``yearfirst``
        this distinguishes YDM from YMD.
    :param yearfirst: treat the first value of an ambiguous 3-integer date
        as the year.

    A fixed numeric offset in the string becomes a ``pytz.FixedOffset``
    timezone; dateutil's local zone is replaced by the concrete local zone;
    any other aware datetime is normalized to UTC.
    """
    parsed = None
    # Prefer the strict ISO-8601 parser when asked; silently fall back to
    # the general-purpose parser on any failure.
    if isofirst:
        try:
            parsed = isocapture(datetime_str)
        except Exception:
            parsed = None
    if parsed is None:
        parsed = capture(datetime_str, dayfirst=dayfirst, yearfirst=yearfirst)
    if timezone:
        # A caller-supplied timezone wins over anything found in the string.
        return Delorean(datetime=parsed.replace(tzinfo=None), timezone=timezone)
    if parsed.tzinfo is None:
        # No zone information at all: assume the string described UTC.
        return Delorean(datetime=parsed, timezone='UTC')
    if isinstance(parsed.tzinfo, tzoffset):
        # Fixed numeric offset: carry it over as a pytz.FixedOffset (minutes).
        offset = parsed.tzinfo.utcoffset(None)
        seconds = (
            (offset.microseconds
             + (offset.seconds + offset.days * 24 * 3600) * 10**6) / 10**6)
        zone = pytz.FixedOffset(seconds / 60)
        return Delorean(parsed.replace(tzinfo=None), timezone=zone)
    if isinstance(parsed.tzinfo, tzlocal):
        # dateutil's "local" zone: swap in the concrete local zone object.
        return Delorean(parsed.replace(tzinfo=None), timezone=get_localzone())
    # Any other aware datetime: normalize to UTC and return a UTC Delorean.
    parsed = pytz.utc.normalize(parsed)
    return Delorean(datetime=parsed.replace(tzinfo=None), timezone='UTC')
def range_daily(start=None, stop=None, timezone='UTC', count=None):
    """Yield Delorean objects at one-day intervals.

    Convenience wrapper around :func:`stops` with ``freq=DAILY``.
    """
    return stops(freq=DAILY, start=start, stop=stop, timezone=timezone, count=count)
def range_hourly(start=None, stop=None, timezone='UTC', count=None):
    """Yield Delorean objects at one-hour intervals.

    Convenience wrapper around :func:`stops` with ``freq=HOURLY``.
    """
    return stops(freq=HOURLY, start=start, stop=stop, timezone=timezone, count=count)
def range_monthly(start=None, stop=None, timezone='UTC', count=None):
    """Yield Delorean objects at one-month intervals.

    Convenience wrapper around :func:`stops` with ``freq=MONTHLY``.
    """
    return stops(freq=MONTHLY, start=start, stop=stop, timezone=timezone, count=count)
def range_yearly(start=None, stop=None, timezone='UTC', count=None):
    """Yield Delorean objects at one-year intervals.

    Convenience wrapper around :func:`stops` with ``freq=YEARLY``.
    """
    return stops(freq=YEARLY, start=start, stop=stop, timezone=timezone, count=count)
def epoch(s):
    """Return a UTC :class:`Delorean` for the POSIX timestamp ``s`` (seconds)."""
    return Delorean(datetime=datetime.utcfromtimestamp(s), timezone='UTC')
def flux():
    """Easter egg: print the Doc's words of encouragement."""
    message = "If you put your mind to it, you can accomplish anything."
    print(message)
def utcnow():
    """Return a :class:`Delorean` for the current UTC date and time."""
    # Delorean's default constructor already produces "now" in UTC.
    return Delorean()
def now(timezone=None):
    """Return a :class:`Delorean` for the current local date and time.

    :param Optional[datetime.tzinfo] timezone: custom timezone to compute the
        time in; defaults to the caller's local timezone.
    :rtype: delorean.dates.Delorean
    """
    zone = timezone if timezone else get_localzone()
    return Delorean(timezone=zone)
|
myusuf3/delorean | delorean/dates.py | _move_datetime | python | def _move_datetime(dt, direction, delta):
if direction == 'next':
dt = dt + delta
elif direction == 'last':
dt = dt - delta
else:
pass
# raise some delorean error here
return dt | Move datetime given delta by given direction | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L45-L56 | null | import sys
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from functools import partial
from functools import update_wrapper
import humanize
import pytz
from babel.dates import format_datetime
from dateutil.tz import tzoffset
from dateutil.relativedelta import relativedelta
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidTimezone
def get_total_second(td):
    """Return the number of seconds in timedelta ``td`` as a float.

    :param td: a ``datetime.timedelta``.
    :returns: total seconds with microsecond (10**-6) resolution.
    """
    # timedelta.total_seconds() performs exactly the arithmetic this helper
    # used to spell out: (micros + (secs + days*86400) * 10**6) / 10**6.
    return td.total_seconds()
def is_datetime_naive(dt):
    """Return True when ``dt`` carries no tzinfo, i.e. it is naive.

    :param dt: a ``datetime.datetime``.
    """
    # Direct boolean expression replaces the redundant if/else that
    # returned True/False explicitly.
    return dt.tzinfo is None
def is_datetime_instance(dt):
    """Validate that ``dt`` is either ``None`` or a ``datetime`` instance.

    :raises ValueError: when ``dt`` is a non-None value of any other type.
    """
    if dt is not None and not isinstance(dt, datetime):
        raise ValueError('Please provide a datetime instance to Delorean')
def move_datetime_day(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` days toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(days=+num_shifts))
def move_datetime_namedday(dt, direction, unit):
    """Shift ``dt`` to the 'next' or 'last' occurrence of a named weekday.

    :param dt: datetime to move.
    :param direction: 'next' or 'last'.
    :param unit: weekday name, e.g. 'monday' (case-insensitive).

    Moving 'next' from the target weekday itself lands a full week ahead;
    moving 'last' from the target weekday lands a full week back.
    """
    TOTAL_DAYS = 7
    days = {
        'monday': 1,
        'tuesday': 2,
        'wednesday': 3,
        'thursday': 4,
        'friday': 5,
        'saturday': 6,
        'sunday': 7,
    }
    # Bug fix: strftime('%A') yields a locale-dependent weekday name, which
    # raised KeyError under non-English locales.  isoweekday() is locale
    # independent and matches the 1=Monday..7=Sunday mapping above.
    current_day = dt.isoweekday()
    target_day = days[unit.lower()]
    if direction == 'next':
        if current_day < target_day:
            delta_days = target_day - current_day
        else:
            delta_days = (target_day - current_day) + TOTAL_DAYS
    elif direction == 'last':
        if current_day <= target_day:
            delta_days = (current_day - target_day) + TOTAL_DAYS
        else:
            delta_days = current_day - target_day
    delta = relativedelta(days=+delta_days)
    return _move_datetime(dt, direction, delta)
def move_datetime_month(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` months toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(months=+num_shifts))
def move_datetime_week(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` weeks toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(weeks=+num_shifts))
def move_datetime_year(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` years toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(years=+num_shifts))
def move_datetime_hour(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` hours toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(hours=+num_shifts))
def move_datetime_minute(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` minutes toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(minutes=+num_shifts))
def move_datetime_second(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` seconds toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(seconds=+num_shifts))
def datetime_timezone(tz):
    """Return the current moment as an aware datetime in timezone ``tz``."""
    # Take naive UTC "now", make it timezone-aware, then convert into ``tz``.
    aware_utc = localize(datetime.utcnow(), 'UTC')
    return normalize(aware_utc, tz)
def localize(dt, tz):
    """Attach timezone ``tz`` (a tzinfo object or zone name) to naive ``dt``."""
    zone = tz if isinstance(tz, tzinfo) else pytz.timezone(tz)
    return zone.localize(dt)
def normalize(dt, tz):
    """Convert aware datetime ``dt`` into timezone ``tz`` (tzinfo or zone name)."""
    zone = tz if isinstance(tz, tzinfo) else pytz.timezone(tz)
    return zone.normalize(dt)
class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | move_datetime_month | python | def move_datetime_month(dt, direction, num_shifts):
delta = relativedelta(months=+num_shifts)
return _move_datetime(dt, direction, delta) | Move datetime 1 month in the chosen direction.
unit is a no-op, to keep the API the same as the day case | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L95-L101 | [
"def _move_datetime(dt, direction, delta):\n \"\"\"\n Move datetime given delta by given direction\n \"\"\"\n if direction == 'next':\n dt = dt + delta\n elif direction == 'last':\n dt = dt - delta\n else:\n pass\n # raise some delorean error here\n return dt\n"
] | import sys
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from functools import partial
from functools import update_wrapper
import humanize
import pytz
from babel.dates import format_datetime
from dateutil.tz import tzoffset
from dateutil.relativedelta import relativedelta
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidTimezone
def get_total_second(td):
    """Return the number of seconds in timedelta ``td`` as a float.

    :param td: a ``datetime.timedelta``.
    :returns: total seconds with microsecond (10**-6) resolution.
    """
    # timedelta.total_seconds() performs exactly the arithmetic this helper
    # used to spell out: (micros + (secs + days*86400) * 10**6) / 10**6.
    return td.total_seconds()
def is_datetime_naive(dt):
    """Return True when ``dt`` carries no tzinfo, i.e. it is naive.

    :param dt: a ``datetime.datetime``.
    """
    # Direct boolean expression replaces the redundant if/else that
    # returned True/False explicitly.
    return dt.tzinfo is None
def is_datetime_instance(dt):
    """Validate that ``dt`` is either ``None`` or a ``datetime`` instance.

    :raises ValueError: when ``dt`` is a non-None value of any other type.
    """
    if dt is not None and not isinstance(dt, datetime):
        raise ValueError('Please provide a datetime instance to Delorean')
def _move_datetime(dt, direction, delta):
"""
Move datetime given delta by given direction
"""
if direction == 'next':
dt = dt + delta
elif direction == 'last':
dt = dt - delta
else:
pass
# raise some delorean error here
return dt
def move_datetime_day(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` days toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(days=+num_shifts))
def move_datetime_namedday(dt, direction, unit):
    """Shift ``dt`` to the 'next' or 'last' occurrence of a named weekday.

    :param dt: datetime to move.
    :param direction: 'next' or 'last'.
    :param unit: weekday name, e.g. 'monday' (case-insensitive).

    Moving 'next' from the target weekday itself lands a full week ahead;
    moving 'last' from the target weekday lands a full week back.
    """
    TOTAL_DAYS = 7
    days = {
        'monday': 1,
        'tuesday': 2,
        'wednesday': 3,
        'thursday': 4,
        'friday': 5,
        'saturday': 6,
        'sunday': 7,
    }
    # Bug fix: strftime('%A') yields a locale-dependent weekday name, which
    # raised KeyError under non-English locales.  isoweekday() is locale
    # independent and matches the 1=Monday..7=Sunday mapping above.
    current_day = dt.isoweekday()
    target_day = days[unit.lower()]
    if direction == 'next':
        if current_day < target_day:
            delta_days = target_day - current_day
        else:
            delta_days = (target_day - current_day) + TOTAL_DAYS
    elif direction == 'last':
        if current_day <= target_day:
            delta_days = (current_day - target_day) + TOTAL_DAYS
        else:
            delta_days = current_day - target_day
    delta = relativedelta(days=+delta_days)
    return _move_datetime(dt, direction, delta)
def move_datetime_week(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` weeks toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(weeks=+num_shifts))
def move_datetime_year(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` years toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(years=+num_shifts))
def move_datetime_hour(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` hours toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(hours=+num_shifts))
def move_datetime_minute(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` minutes toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(minutes=+num_shifts))
def move_datetime_second(dt, direction, num_shifts):
    """Shift ``dt`` by ``num_shifts`` seconds toward ``direction`` ('next'/'last')."""
    return _move_datetime(dt, direction, relativedelta(seconds=+num_shifts))
def datetime_timezone(tz):
    """Return the current moment as an aware datetime in timezone ``tz``."""
    # Take naive UTC "now", make it timezone-aware, then convert into ``tz``.
    aware_utc = localize(datetime.utcnow(), 'UTC')
    return normalize(aware_utc, tz)
def localize(dt, tz):
    """Attach timezone ``tz`` (a tzinfo object or zone name) to naive ``dt``."""
    zone = tz if isinstance(tz, tzinfo) else pytz.timezone(tz)
    return zone.localize(dt)
def normalize(dt, tz):
    """Convert aware datetime ``dt`` into timezone ``tz`` (tzinfo or zone name)."""
    zone = tz if isinstance(tz, tzinfo) else pytz.timezone(tz)
    return zone.normalize(dt)
class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | move_datetime_week | python | def move_datetime_week(dt, direction, num_shifts):
delta = relativedelta(weeks=+num_shifts)
return _move_datetime(dt, direction, delta) | Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L104-L110 | [
"def _move_datetime(dt, direction, delta):\n \"\"\"\n Move datetime given delta by given direction\n \"\"\"\n if direction == 'next':\n dt = dt + delta\n elif direction == 'last':\n dt = dt - delta\n else:\n pass\n # raise some delorean error here\n return dt\n"
] | import sys
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from functools import partial
from functools import update_wrapper
import humanize
import pytz
from babel.dates import format_datetime
from dateutil.tz import tzoffset
from dateutil.relativedelta import relativedelta
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidTimezone
def get_total_second(td):
"""
This method takes a timedelta and return the number of seconds it
represents with the resolution of 10**6
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def is_datetime_naive(dt):
"""
This method returns true if the datetime is naive else returns false
"""
if dt.tzinfo is None:
return True
else:
return False
def is_datetime_instance(dt):
if dt is None:
return
if not isinstance(dt, datetime):
raise ValueError('Please provide a datetime instance to Delorean')
def _move_datetime(dt, direction, delta):
"""
Move datetime given delta by given direction
"""
if direction == 'next':
dt = dt + delta
elif direction == 'last':
dt = dt - delta
else:
pass
# raise some delorean error here
return dt
def move_datetime_day(dt, direction, num_shifts):
delta = relativedelta(days=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_namedday(dt, direction, unit):
TOTAL_DAYS = 7
days = {
'monday': 1,
'tuesday': 2,
'wednesday': 3,
'thursday': 4,
'friday': 5,
'saturday': 6,
'sunday': 7,
}
current_day = days[dt.strftime('%A').lower()]
target_day = days[unit.lower()]
if direction == 'next':
if current_day < target_day:
delta_days = target_day - current_day
else:
delta_days = (target_day - current_day) + TOTAL_DAYS
elif direction == 'last':
if current_day <= target_day:
delta_days = (current_day - target_day) + TOTAL_DAYS
else:
delta_days = current_day - target_day
delta = relativedelta(days=+delta_days)
return _move_datetime(dt, direction, delta)
def move_datetime_month(dt, direction, num_shifts):
"""
Move datetime 1 month in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(months=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_year(dt, direction, num_shifts):
"""
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_hour(dt, direction, num_shifts):
delta = relativedelta(hours=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_minute(dt, direction, num_shifts):
delta = relativedelta(minutes=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_second(dt, direction, num_shifts):
delta = relativedelta(seconds=+num_shifts)
return _move_datetime(dt, direction, delta)
def datetime_timezone(tz):
"""
This method given a timezone returns a localized datetime object.
"""
utc_datetime_naive = datetime.utcnow()
# return a localized datetime to UTC
utc_localized_datetime = localize(utc_datetime_naive, 'UTC')
# normalize the datetime to given timezone
normalized_datetime = normalize(utc_localized_datetime, tz)
return normalized_datetime
def localize(dt, tz):
"""
Given a naive datetime object this method will return a localized
datetime object
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
return tz.localize(dt)
def normalize(dt, tz):
"""
Given a object with a timezone return a datetime object
normalized to the proper timezone.
This means take the give localized datetime and returns the
datetime normalized to match the specificed timezone.
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
dt = tz.normalize(dt)
return dt
class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | move_datetime_year | python | def move_datetime_year(dt, direction, num_shifts):
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta) | Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L113-L119 | [
"def _move_datetime(dt, direction, delta):\n \"\"\"\n Move datetime given delta by given direction\n \"\"\"\n if direction == 'next':\n dt = dt + delta\n elif direction == 'last':\n dt = dt - delta\n else:\n pass\n # raise some delorean error here\n return dt\n"
] | import sys
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from functools import partial
from functools import update_wrapper
import humanize
import pytz
from babel.dates import format_datetime
from dateutil.tz import tzoffset
from dateutil.relativedelta import relativedelta
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidTimezone
def get_total_second(td):
"""
This method takes a timedelta and return the number of seconds it
represents with the resolution of 10**6
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def is_datetime_naive(dt):
"""
This method returns true if the datetime is naive else returns false
"""
if dt.tzinfo is None:
return True
else:
return False
def is_datetime_instance(dt):
if dt is None:
return
if not isinstance(dt, datetime):
raise ValueError('Please provide a datetime instance to Delorean')
def _move_datetime(dt, direction, delta):
"""
Move datetime given delta by given direction
"""
if direction == 'next':
dt = dt + delta
elif direction == 'last':
dt = dt - delta
else:
pass
# raise some delorean error here
return dt
def move_datetime_day(dt, direction, num_shifts):
delta = relativedelta(days=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_namedday(dt, direction, unit):
TOTAL_DAYS = 7
days = {
'monday': 1,
'tuesday': 2,
'wednesday': 3,
'thursday': 4,
'friday': 5,
'saturday': 6,
'sunday': 7,
}
current_day = days[dt.strftime('%A').lower()]
target_day = days[unit.lower()]
if direction == 'next':
if current_day < target_day:
delta_days = target_day - current_day
else:
delta_days = (target_day - current_day) + TOTAL_DAYS
elif direction == 'last':
if current_day <= target_day:
delta_days = (current_day - target_day) + TOTAL_DAYS
else:
delta_days = current_day - target_day
delta = relativedelta(days=+delta_days)
return _move_datetime(dt, direction, delta)
def move_datetime_month(dt, direction, num_shifts):
"""
Move datetime 1 month in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(months=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_week(dt, direction, num_shifts):
"""
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(weeks=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_hour(dt, direction, num_shifts):
delta = relativedelta(hours=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_minute(dt, direction, num_shifts):
delta = relativedelta(minutes=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_second(dt, direction, num_shifts):
delta = relativedelta(seconds=+num_shifts)
return _move_datetime(dt, direction, delta)
def datetime_timezone(tz):
"""
This method given a timezone returns a localized datetime object.
"""
utc_datetime_naive = datetime.utcnow()
# return a localized datetime to UTC
utc_localized_datetime = localize(utc_datetime_naive, 'UTC')
# normalize the datetime to given timezone
normalized_datetime = normalize(utc_localized_datetime, tz)
return normalized_datetime
def localize(dt, tz):
"""
Given a naive datetime object this method will return a localized
datetime object
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
return tz.localize(dt)
def normalize(dt, tz):
"""
Given a object with a timezone return a datetime object
normalized to the proper timezone.
This means take the give localized datetime and returns the
datetime normalized to match the specificed timezone.
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
dt = tz.normalize(dt)
return dt
class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | datetime_timezone | python | def datetime_timezone(tz):
utc_datetime_naive = datetime.utcnow()
# return a localized datetime to UTC
utc_localized_datetime = localize(utc_datetime_naive, 'UTC')
# normalize the datetime to given timezone
normalized_datetime = normalize(utc_localized_datetime, tz)
return normalized_datetime | This method given a timezone returns a localized datetime object. | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L137-L146 | [
"def normalize(dt, tz):\n \"\"\"\n Given a object with a timezone return a datetime object\n normalized to the proper timezone.\n\n This means take the give localized datetime and returns the\n datetime normalized to match the specificed timezone.\n \"\"\"\n if not isinstance(tz, tzinfo):\n tz = pytz.timezone(tz)\n dt = tz.normalize(dt)\n return dt\n",
"def localize(dt, tz):\n \"\"\"\n Given a naive datetime object this method will return a localized\n datetime object\n \"\"\"\n if not isinstance(tz, tzinfo):\n tz = pytz.timezone(tz)\n\n return tz.localize(dt)\n"
] | import sys
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from functools import partial
from functools import update_wrapper
import humanize
import pytz
from babel.dates import format_datetime
from dateutil.tz import tzoffset
from dateutil.relativedelta import relativedelta
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidTimezone
def get_total_second(td):
"""
This method takes a timedelta and return the number of seconds it
represents with the resolution of 10**6
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def is_datetime_naive(dt):
"""
This method returns true if the datetime is naive else returns false
"""
if dt.tzinfo is None:
return True
else:
return False
def is_datetime_instance(dt):
if dt is None:
return
if not isinstance(dt, datetime):
raise ValueError('Please provide a datetime instance to Delorean')
def _move_datetime(dt, direction, delta):
"""
Move datetime given delta by given direction
"""
if direction == 'next':
dt = dt + delta
elif direction == 'last':
dt = dt - delta
else:
pass
# raise some delorean error here
return dt
def move_datetime_day(dt, direction, num_shifts):
delta = relativedelta(days=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_namedday(dt, direction, unit):
TOTAL_DAYS = 7
days = {
'monday': 1,
'tuesday': 2,
'wednesday': 3,
'thursday': 4,
'friday': 5,
'saturday': 6,
'sunday': 7,
}
current_day = days[dt.strftime('%A').lower()]
target_day = days[unit.lower()]
if direction == 'next':
if current_day < target_day:
delta_days = target_day - current_day
else:
delta_days = (target_day - current_day) + TOTAL_DAYS
elif direction == 'last':
if current_day <= target_day:
delta_days = (current_day - target_day) + TOTAL_DAYS
else:
delta_days = current_day - target_day
delta = relativedelta(days=+delta_days)
return _move_datetime(dt, direction, delta)
def move_datetime_month(dt, direction, num_shifts):
"""
Move datetime 1 month in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(months=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_week(dt, direction, num_shifts):
"""
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(weeks=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_year(dt, direction, num_shifts):
"""
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_hour(dt, direction, num_shifts):
delta = relativedelta(hours=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_minute(dt, direction, num_shifts):
delta = relativedelta(minutes=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_second(dt, direction, num_shifts):
delta = relativedelta(seconds=+num_shifts)
return _move_datetime(dt, direction, delta)
def localize(dt, tz):
"""
Given a naive datetime object this method will return a localized
datetime object
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
return tz.localize(dt)
def normalize(dt, tz):
"""
Given a object with a timezone return a datetime object
normalized to the proper timezone.
This means take the give localized datetime and returns the
datetime normalized to match the specificed timezone.
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
dt = tz.normalize(dt)
return dt
class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | localize | python | def localize(dt, tz):
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
return tz.localize(dt) | Given a naive datetime object this method will return a localized
datetime object | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L149-L157 | null | import sys
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from functools import partial
from functools import update_wrapper
import humanize
import pytz
from babel.dates import format_datetime
from dateutil.tz import tzoffset
from dateutil.relativedelta import relativedelta
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidTimezone
def get_total_second(td):
"""
This method takes a timedelta and return the number of seconds it
represents with the resolution of 10**6
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def is_datetime_naive(dt):
"""
This method returns true if the datetime is naive else returns false
"""
if dt.tzinfo is None:
return True
else:
return False
def is_datetime_instance(dt):
if dt is None:
return
if not isinstance(dt, datetime):
raise ValueError('Please provide a datetime instance to Delorean')
def _move_datetime(dt, direction, delta):
"""
Move datetime given delta by given direction
"""
if direction == 'next':
dt = dt + delta
elif direction == 'last':
dt = dt - delta
else:
pass
# raise some delorean error here
return dt
def move_datetime_day(dt, direction, num_shifts):
delta = relativedelta(days=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_namedday(dt, direction, unit):
TOTAL_DAYS = 7
days = {
'monday': 1,
'tuesday': 2,
'wednesday': 3,
'thursday': 4,
'friday': 5,
'saturday': 6,
'sunday': 7,
}
current_day = days[dt.strftime('%A').lower()]
target_day = days[unit.lower()]
if direction == 'next':
if current_day < target_day:
delta_days = target_day - current_day
else:
delta_days = (target_day - current_day) + TOTAL_DAYS
elif direction == 'last':
if current_day <= target_day:
delta_days = (current_day - target_day) + TOTAL_DAYS
else:
delta_days = current_day - target_day
delta = relativedelta(days=+delta_days)
return _move_datetime(dt, direction, delta)
def move_datetime_month(dt, direction, num_shifts):
"""
Move datetime 1 month in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(months=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_week(dt, direction, num_shifts):
"""
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(weeks=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_year(dt, direction, num_shifts):
"""
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_hour(dt, direction, num_shifts):
delta = relativedelta(hours=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_minute(dt, direction, num_shifts):
delta = relativedelta(minutes=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_second(dt, direction, num_shifts):
delta = relativedelta(seconds=+num_shifts)
return _move_datetime(dt, direction, delta)
def datetime_timezone(tz):
"""
This method given a timezone returns a localized datetime object.
"""
utc_datetime_naive = datetime.utcnow()
# return a localized datetime to UTC
utc_localized_datetime = localize(utc_datetime_naive, 'UTC')
# normalize the datetime to given timezone
normalized_datetime = normalize(utc_localized_datetime, tz)
return normalized_datetime
def normalize(dt, tz):
"""
Given a object with a timezone return a datetime object
normalized to the proper timezone.
This means take the give localized datetime and returns the
datetime normalized to match the specificed timezone.
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
dt = tz.normalize(dt)
return dt
class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | normalize | python | def normalize(dt, tz):
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
dt = tz.normalize(dt)
return dt | Given a object with a timezone return a datetime object
normalized to the proper timezone.
This means take the give localized datetime and returns the
datetime normalized to match the specificed timezone. | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L160-L171 | null | import sys
from datetime import datetime
from datetime import timedelta
from datetime import tzinfo
from functools import partial
from functools import update_wrapper
import humanize
import pytz
from babel.dates import format_datetime
from dateutil.tz import tzoffset
from dateutil.relativedelta import relativedelta
from tzlocal import get_localzone
from .exceptions import DeloreanInvalidTimezone
def get_total_second(td):
"""
This method takes a timedelta and return the number of seconds it
represents with the resolution of 10**6
"""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
def is_datetime_naive(dt):
"""
This method returns true if the datetime is naive else returns false
"""
if dt.tzinfo is None:
return True
else:
return False
def is_datetime_instance(dt):
if dt is None:
return
if not isinstance(dt, datetime):
raise ValueError('Please provide a datetime instance to Delorean')
def _move_datetime(dt, direction, delta):
"""
Move datetime given delta by given direction
"""
if direction == 'next':
dt = dt + delta
elif direction == 'last':
dt = dt - delta
else:
pass
# raise some delorean error here
return dt
def move_datetime_day(dt, direction, num_shifts):
delta = relativedelta(days=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_namedday(dt, direction, unit):
TOTAL_DAYS = 7
days = {
'monday': 1,
'tuesday': 2,
'wednesday': 3,
'thursday': 4,
'friday': 5,
'saturday': 6,
'sunday': 7,
}
current_day = days[dt.strftime('%A').lower()]
target_day = days[unit.lower()]
if direction == 'next':
if current_day < target_day:
delta_days = target_day - current_day
else:
delta_days = (target_day - current_day) + TOTAL_DAYS
elif direction == 'last':
if current_day <= target_day:
delta_days = (current_day - target_day) + TOTAL_DAYS
else:
delta_days = current_day - target_day
delta = relativedelta(days=+delta_days)
return _move_datetime(dt, direction, delta)
def move_datetime_month(dt, direction, num_shifts):
"""
Move datetime 1 month in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(months=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_week(dt, direction, num_shifts):
"""
Move datetime 1 week in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(weeks=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_year(dt, direction, num_shifts):
"""
Move datetime 1 year in the chosen direction.
unit is a no-op, to keep the API the same as the day case
"""
delta = relativedelta(years=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_hour(dt, direction, num_shifts):
delta = relativedelta(hours=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_minute(dt, direction, num_shifts):
delta = relativedelta(minutes=+num_shifts)
return _move_datetime(dt, direction, delta)
def move_datetime_second(dt, direction, num_shifts):
delta = relativedelta(seconds=+num_shifts)
return _move_datetime(dt, direction, delta)
def datetime_timezone(tz):
"""
This method given a timezone returns a localized datetime object.
"""
utc_datetime_naive = datetime.utcnow()
# return a localized datetime to UTC
utc_localized_datetime = localize(utc_datetime_naive, 'UTC')
# normalize the datetime to given timezone
normalized_datetime = normalize(utc_localized_datetime, tz)
return normalized_datetime
def localize(dt, tz):
"""
Given a naive datetime object this method will return a localized
datetime object
"""
if not isinstance(tz, tzinfo):
tz = pytz.timezone(tz)
return tz.localize(dt)
class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | Delorean._shift_date | python | def _shift_date(self, direction, unit, *args):
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone) | Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L292-L315 | null | class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | Delorean.truncate | python | def truncate(self, s):
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self | Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific') | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L336-L371 | null | class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | Delorean.shift | python | def shift(self, timezone):
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self | Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC') | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L466-L489 | null | class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | Delorean.epoch | python | def epoch(self):
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec) | Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0 | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L492-L512 | [
"def get_total_second(td):\n \"\"\"\n This method takes a timedelta and return the number of seconds it\n represents with the resolution of 10**6\n \"\"\"\n return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6\n"
] | class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | Delorean.replace | python | def replace(self, **kwargs):
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone) | Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC') | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L553-L570 | null | class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | Delorean.humanize | python | def humanize(self):
now = self.now(self.timezone)
return humanize.naturaltime(now - self) | Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago' | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L572-L590 | [
"def now(cls, timezone=None):\n if timezone:\n return cls(timezone=timezone)\n else:\n return cls(timezone=get_localzone())\n"
] | class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def format_datetime(self, format='medium', locale='en_US'):
"""
Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier
"""
return format_datetime(self._dt, format=format, locale=locale)
|
myusuf3/delorean | delorean/dates.py | Delorean.format_datetime | python | def format_datetime(self, format='medium', locale='en_US'):
return format_datetime(self._dt, format=format, locale=locale) | Return a date string formatted to the given pattern.
.. testsetup::
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 30), timezone='US/Pacific')
>>> d.format_datetime(locale='en_US')
u'Jan 1, 2015, 12:30:00 PM'
>>> d.format_datetime(format='long', locale='de_DE')
u'1. Januar 2015 12:30:00 -0800'
:param format: one of "full", "long", "medium", "short", or a custom datetime pattern
:param locale: a locale identifier | train | https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/dates.py#L592-L613 | null | class Delorean(object):
"""
The class `Delorean <Delorean>` object. This method accepts naive
datetime objects, with a string timezone.
"""
_VALID_SHIFT_DIRECTIONS = ('last', 'next')
_VALID_SHIFT_UNITS = ('second', 'minute', 'hour', 'day', 'week',
'month', 'year', 'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday')
def __init__(self, datetime=None, timezone=None):
# maybe set timezone on the way in here. if here set it if not
# use UTC
is_datetime_instance(datetime)
if datetime:
if is_datetime_naive(datetime):
if timezone:
if isinstance(timezone, tzoffset):
utcoffset = timezone.utcoffset(None)
total_seconds = (
(utcoffset.microseconds + (
utcoffset.seconds + utcoffset.days * 24 * 3600) * 10 ** 6) / 10 ** 6)
self._tzinfo = pytz.FixedOffset(total_seconds / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = localize(datetime, self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
# TODO(mlew, 2015-08-09):
# Should we really throw an error here, or should this
# default to UTC?)
raise DeloreanInvalidTimezone('Provide a valid timezone')
else:
self._tzinfo = datetime.tzinfo
self._dt = datetime
else:
if timezone:
if isinstance(timezone, tzoffset):
self._tzinfo = pytz.FixedOffset(timezone.utcoffset(None).total_seconds() / 60)
elif isinstance(timezone, tzinfo):
self._tzinfo = timezone
else:
self._tzinfo = pytz.timezone(timezone)
self._dt = datetime_timezone(self._tzinfo)
self._tzinfo = self._dt.tzinfo
else:
self._tzinfo = pytz.utc
self._dt = datetime_timezone('UTC')
def __repr__(self):
dt = self.datetime.replace(tzinfo=None)
if isinstance(self.timezone, pytz._FixedOffset):
tz = self.timezone
else:
tz = self.timezone.tzname(None)
return 'Delorean(datetime=%r, timezone=%r)' % (dt, tz)
def __eq__(self, other):
if isinstance(other, Delorean):
return self.epoch == other.epoch
return False
def __lt__(self, other):
return self.epoch < other.epoch
def __gt__(self, other):
return self.epoch > other.epoch
def __ge__(self, other):
return self.epoch >= other.epoch
def __le__(self, other):
return self.epoch <= other.epoch
def __ne__(self, other):
return not self == other
def __add__(self, other):
if not isinstance(other, timedelta):
raise TypeError("Delorean objects can only be added with timedelta objects")
dt = self._dt + other
return Delorean(datetime=dt, timezone=self.timezone)
def __sub__(self, other):
if isinstance(other, timedelta):
dt = self._dt - other
return Delorean(datetime=dt, timezone=self.timezone)
elif isinstance(other, Delorean):
return self._dt - other._dt
else:
raise TypeError("Delorean objects can only be subtracted with timedelta or other Delorean objects")
def __getattr__(self, name):
"""
Implement __getattr__ to call `shift_date` function when function
called does not exist
"""
func_parts = name.split('_')
# is the func we are trying to call the right length?
if len(func_parts) != 2:
raise AttributeError
# is the function we are trying to call valid?
if (func_parts[0] not in self._VALID_SHIFT_DIRECTIONS or
func_parts[1] not in self._VALID_SHIFT_UNITS):
return AttributeError
# dispatch our function
func = partial(self._shift_date, func_parts[0], func_parts[1])
# update our partial with self.shift_date attributes
update_wrapper(func, self._shift_date)
return func
def _shift_date(self, direction, unit, *args):
"""
Shift datetime in `direction` in _VALID_SHIFT_DIRECTIONS and by some
unit in _VALID_SHIFTS and shift that amount by some multiple,
defined by by args[0] if it exists
"""
this_module = sys.modules[__name__]
num_shifts = 1
if len(args) > 0:
num_shifts = int(args[0])
if unit in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday']:
shift_func = move_datetime_namedday
dt = shift_func(self._dt, direction, unit)
if num_shifts > 1:
for n in range(num_shifts - 1):
dt = shift_func(dt, direction, unit)
else:
shift_func = getattr(this_module, 'move_datetime_%s' % unit)
dt = shift_func(self._dt, direction, num_shifts)
return Delorean(datetime=dt.replace(tzinfo=None), timezone=self.timezone)
@property
def timezone(self):
"""
Returns a valid tzinfo object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='UTC')
>>> d.timezone
<UTC>
"""
return self._tzinfo
def truncate(self, s):
"""
Truncate the delorian object to the nearest s
(second, minute, hour, day, month, year)
This is a destructive method, modifies the internal datetime
object associated with the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 10), timezone='US/Pacific')
>>> d.truncate('hour')
Delorean(datetime=datetime.datetime(2015, 1, 1, 12, 0), timezone='US/Pacific')
"""
if s == 'second':
self._dt = self._dt.replace(microsecond=0)
elif s == 'minute':
self._dt = self._dt.replace(second=0, microsecond=0)
elif s == 'hour':
self._dt = self._dt.replace(minute=0, second=0, microsecond=0)
elif s == 'day':
self._dt = self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
elif s == 'month':
self._dt = self._dt.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
elif s == 'year':
self._dt = self._dt.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
else:
raise ValueError("Invalid truncation level")
return self
@property
def naive(self):
"""
Returns a naive datetime object associated with the Delorean
object, this method simply converts the localize datetime to UTC
and removes the tzinfo that is associated with it modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.naive
datetime.datetime(2015, 1, 1, 8, 0)
"""
self.shift('UTC')
return self._dt.replace(tzinfo=None)
@classmethod
def now(cls, timezone=None):
if timezone:
return cls(timezone=timezone)
else:
return cls(timezone=get_localzone())
@classmethod
def utcnow(cls):
return cls()
@property
def midnight(self):
"""
Returns midnight for datetime associated with
the Delorean object modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.midnight
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self._dt.replace(hour=0, minute=0, second=0, microsecond=0)
@property
def start_of_day(self):
"""
Returns the start of the day for datetime assoicated
with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.start_of_day
datetime.datetime(2015, 1, 1, 0, 0, tzinfo=<UTC>)
"""
return self.midnight
@property
def end_of_day(self):
"""
Returns the end of the day for the datetime
assocaited with the Delorean object, modifying the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12), timezone='UTC')
>>> d.end_of_day
datetime.datetime(2015, 1, 1, 23, 59, 59, 999999, tzinfo=<UTC>)
"""
return self._dt.replace(hour=23, minute=59, second=59, microsecond=999999)
def shift(self, timezone):
"""
Shifts the timezone from the current timezone to the specified timezone associated with the Delorean object,
modifying the Delorean object and returning the modified object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.shift('UTC')
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 0), timezone='UTC')
"""
try:
self._tzinfo = pytz.timezone(timezone)
except pytz.UnknownTimeZoneError:
raise DeloreanInvalidTimezone('Provide a valid timezone')
self._dt = self._tzinfo.normalize(self._dt.astimezone(self._tzinfo))
self._tzinfo = self._dt.tzinfo
return self
@property
def epoch(self):
"""
Returns the total seconds since epoch associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1), timezone='US/Pacific')
>>> d.epoch
1420099200.0
"""
epoch_sec = pytz.utc.localize(datetime.utcfromtimestamp(0))
now_sec = pytz.utc.normalize(self._dt)
delta_sec = now_sec - epoch_sec
return get_total_second(delta_sec)
@property
def date(self):
"""
Returns the actual date object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='US/Pacific')
>>> d.date
datetime.date(2015, 1, 1)
"""
return self._dt.date()
@property
def datetime(self):
"""
Returns the actual datetime object associated with
the Delorean object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.datetime
datetime.datetime(2015, 1, 1, 12, 15, tzinfo=<UTC>)
"""
return self._dt
def replace(self, **kwargs):
"""
Returns a new Delorean object after applying replace on the
existing datetime object.
.. testsetup::
from datetime import datetime
from delorean import Delorean
.. doctest::
>>> d = Delorean(datetime(2015, 1, 1, 12, 15), timezone='UTC')
>>> d.replace(hour=8)
Delorean(datetime=datetime.datetime(2015, 1, 1, 8, 15), timezone='UTC')
"""
return Delorean(datetime=self._dt.replace(**kwargs), timezone=self.timezone)
def humanize(self):
"""
Humanize relative to now:
.. testsetup::
from datetime import timedelta
from delorean import Delorean
.. doctest::
>>> past = Delorean.utcnow() - timedelta(hours=1)
>>> past.humanize()
'an hour ago'
"""
now = self.now(self.timezone)
return humanize.naturaltime(now - self)
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.get_head | python | def get_head(self) -> Commit:
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch) | Get the head commit.
:return: Commit of the head commit | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L81-L88 | null | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
"""
Checkout the repo at the speficied commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
"""
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
"""
Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follow: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits were those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits
"""
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.get_list_commits | python | def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit) | Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L90-L100 | [
"def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:\n \"\"\"\n Build a PyDriller commit object from a GitPython commit object.\n This is internal of PyDriller, I don't think users generally will need\n it.\n\n :param GitCommit commit: GitPython commit\n :return: Commit commit: PyDriller commit\n \"\"\"\n return Commit(commit, self.path, self.main_branch)\n"
] | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
    """
    GitPython command-line wrapper (``Git``) for this repository.

    :return: Git
    """
    return self._open_git()
@property
def repo(self):
    """
    GitPython repository object (``Repo``) for this repository.

    :return: Repo
    """
    return self._open_repository()
def _open_git(self) -> Git:
    # Build a fresh command-line wrapper rooted at the repository path.
    return Git(str(self.path))
def _open_repository(self) -> Repo:
    # Open the repository and, on first use only, discover which branch
    # is the main one.
    opened = Repo(str(self.path))
    if self.main_branch is None:
        self._discover_main_branch(opened)
    return opened
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
    """
    Get the head commit.

    :return: Commit of the head commit
    """
    return Commit(self.repo.head.commit, self.path, self.main_branch)
def get_commit(self, commit_id: str) -> Commit:
    """
    Get the specified commit.

    :param str commit_id: hash of the commit to analyze
    :return: Commit
    """
    gp_commit = self.repo.commit(commit_id)
    return Commit(gp_commit, self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
    """
    Build a PyDriller commit object from a GitPython commit object.
    This is internal to PyDriller; users generally will not need it.

    :param GitCommit commit: GitPython commit
    :return: Commit commit: PyDriller commit
    """
    return Commit(commit, self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
    """
    Checkout the repo at the specified commit.

    BE CAREFUL: this will change the state of the repo, hence it should
    *not* be used with more than 1 thread.

    :param _hash: commit hash to checkout
    """
    with self.lock:
        # Drop any leftover temporary branch, then detach onto a fresh
        # '_PD' branch at the requested commit.
        self._delete_tmp_branch()
        self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
    # Remove the temporary '_PD' branch used by checkout(), if present.
    try:
        if self.repo.active_branch.name == '_PD':
            # We are on _PD: move back to the main branch before
            # deleting it.
            self.git.checkout('-f', self.main_branch)
        self.repo.delete_head('_PD', force=True)
    except GitCommandError:
        logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
    """
    Reset the state of the repo, checking out the main branch and
    discarding local changes (-f option).
    """
    with self.lock:
        self.git.checkout('-f', self.main_branch)
        self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
    """
    Obtain the tagged commit.

    :param str tag: the tag
    :return: Commit commit: the commit the tag referred to
    """
    try:
        tag_ref = self.repo.tags[tag]
        return self.get_commit(tag_ref.commit.hexsha)
    except (IndexError, AttributeError):
        logger.debug('Tag %s not found', tag)
        raise
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
                                    modification: Modification = None) \
        -> Set[str]:
    """
    Given the Commit object, returns the set of commits that last
    "touched" the lines that are modified in the files included in the
    commit. It applies SZZ.
    The algorithm works as follows: (for every file in the commit)

    1- obtain the diff

    2- obtain the list of deleted lines

    3- blame the file and obtain the commits where those lines were added

    A single Modification can also be passed as parameter; in that case
    only this file will be analyzed.

    :param Commit commit: the commit to analyze
    :param Modification modification: single modification to analyze
    :return: the set containing all the bug inducing commits
    """
    buggy_commits = set()
    if modification is not None:
        modifications = [modification]
    else:
        modifications = commit.modifications

    for mod in modifications:
        path = mod.new_path
        if mod.change_type == ModificationType.RENAME or \
                mod.change_type == ModificationType.DELETE:
            # For renames and deletions the content lives at the old path.
            path = mod.old_path
        deleted_lines = self.parse_diff(mod.diff)['deleted']

        try:
            # Blame the parent of the commit under analysis.
            blame = self.git.blame(commit.hash + '^',
                                   '--', path).split('\n')
            for num_line, line in deleted_lines:
                if not self._useless_line(line.strip()):
                    buggy_commit = blame[num_line - 1].split(' ')[
                        0].replace('^', '')
                    buggy_commits.add(self.get_commit(buggy_commit).hash)
        except GitCommandError:
            # Log-message typo fixed: "Could not found" -> "Could not find".
            logger.debug(
                "Could not find file %s in commit %s. Probably a double "
                "rename!", mod.filename, commit.hash)
    return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
    """
    Given a filepath, return all the commits that modified this file
    (following renames).

    :param str filepath: path to the file
    :return: the list of commits' hash
    """
    normalized = str(Path(filepath))
    try:
        return self.git.log("--follow", "--format=%H",
                            normalized).split('\n')
    except GitCommandError:
        logger.debug("Could not find information of file %s", normalized)
        return []
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.get_commit | python | def get_commit(self, commit_id: str) -> Commit:
return Commit(self.repo.commit(commit_id), self.path, self.main_branch) | Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L102-L109 | null | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
"""
Checkout the repo at the specified commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
"""
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
"""
Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follows: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits where those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits
"""
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.get_commit_from_gitpython | python | def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
return Commit(commit, self.path, self.main_branch) | Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L111-L120 | null | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
"""
Checkout the repo at the specified commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
"""
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
"""
Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follows: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits where those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits
"""
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.checkout | python | def checkout(self, _hash: str) -> None:
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD') | Checkout the repo at the specified commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L122-L132 | [
"def _delete_tmp_branch(self) -> None:\n try:\n # we are already in _PD, so checkout the master branch before\n # deleting it\n if self.repo.active_branch.name == '_PD':\n self.git.checkout('-f', self.main_branch)\n self.repo.delete_head('_PD', force=True)\n except GitCommandError:\n logger.debug(\"Branch _PD not found\")\n"
] | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
"""
Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follows: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits where those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits
"""
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.files | python | def files(self) -> List[str]:
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all | Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L144-L156 | null | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
"""
Checkout the repo at the specified commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
"""
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
"""
Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follow: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits were those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits
"""
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.reset | python | def reset(self) -> None:
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch() | Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option). | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L158-L167 | [
"def _delete_tmp_branch(self) -> None:\n try:\n # we are already in _PD, so checkout the master branch before\n # deleting it\n if self.repo.active_branch.name == '_PD':\n self.git.checkout('-f', self.main_branch)\n self.repo.delete_head('_PD', force=True)\n except GitCommandError:\n logger.debug(\"Branch _PD not found\")\n"
] | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
"""
Checkout the repo at the speficied commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
"""
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
"""
Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follow: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits were those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits
"""
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.get_commit_from_tag | python | def get_commit_from_tag(self, tag: str) -> Commit:
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise | Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L177-L189 | [
"def get_commit(self, commit_id: str) -> Commit:\n \"\"\"\n Get the specified commit.\n\n :param str commit_id: hash of the commit to analyze\n :return: Commit\n \"\"\"\n return Commit(self.repo.commit(commit_id), self.path, self.main_branch)\n"
] | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
"""
Checkout the repo at the speficied commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
"""
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
"""
Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follow: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits were those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits
"""
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.parse_diff | python | def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines | Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L191-L229 | [
"def _get_line_numbers(self, line):\n token = line.split(\" \")\n numbers_old_file = token[1]\n numbers_new_file = token[2]\n delete_line_number = int(numbers_old_file.split(\",\")[0]\n .replace(\"-\", \"\")) - 1\n additions_line_number = int(numbers_new_file.split(\",\")[0]) - 1\n return delete_line_number, additions_line_number\n"
] | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
"""
Checkout the repo at the speficied commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
"""
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
"""
Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follow: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits were those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits
"""
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.get_commits_last_modified_lines | python | def get_commits_last_modified_lines(self, commit: Commit,
modification: Modification = None) \
-> Set[str]:
buggy_commits = set()
if modification is not None:
modifications = [modification]
else:
modifications = commit.modifications
for mod in modifications:
path = mod.new_path
if mod.change_type == ModificationType.RENAME or \
mod.change_type == ModificationType.DELETE:
path = mod.old_path
deleted_lines = self.parse_diff(mod.diff)['deleted']
try:
blame = self.git.blame(commit.hash + '^',
'--', path).split('\n')
for num_line, line in deleted_lines:
if not self._useless_line(line.strip()):
buggy_commit = blame[num_line - 1].split(' ')[
0].replace('^', '')
buggy_commits.add(self.get_commit(buggy_commit).hash)
except GitCommandError:
logger.debug(
"Could not found file %s in commit %s. Probably a double "
"rename!", mod.filename, commit.hash)
return buggy_commits | Given the Commit object, returns the set of commits that last
"touched" the lines that are modified in the files included in the
commit. It applies SZZ.
The algorithm works as follow: (for every file in the commit)
1- obtain the diff
2- obtain the list of deleted lines
3- blame the file and obtain the commits were those lines were added
Can also be passed as parameter a single Modification, in this case
only this file
will be analyzed.
:param Commit commit: the commit to analyze
:param Modification modification: single modification to analyze
:return: the set containing all the bug inducing commits | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L240-L289 | [
"def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:\n \"\"\"\n Given a diff, returns a dictionary with the added and deleted lines.\n The dictionary has 2 keys: \"added\" and \"deleted\", each containing the\n corresponding added or deleted lines. For both keys, the value is a\n list of Tuple (int, str), corresponding to (number of line in the file,\n actual line).\n\n\n :param str diff: diff of the commit\n :return: Dictionary\n \"\"\"\n lines = diff.split('\\n')\n modified_lines = {'added': [], 'deleted': []}\n\n count_deletions = 0\n count_additions = 0\n\n for line in lines:\n line = line.rstrip()\n count_deletions += 1\n count_additions += 1\n\n if line.startswith('@@'):\n count_deletions, count_additions = self._get_line_numbers(line)\n\n if line.startswith('-'):\n modified_lines['deleted'].append((count_deletions, line[1:]))\n count_additions -= 1\n\n if line.startswith('+'):\n modified_lines['added'].append((count_additions, line[1:]))\n count_deletions -= 1\n\n if line == r'\\ No newline at end of file':\n count_deletions -= 1\n count_additions -= 1\n\n return modified_lines\n",
"def _useless_line(self, line: str):\n # this covers comments in Java and Python, as well as empty lines.\n # More have to be added!\n return not line or \\\n line.startswith('//') or \\\n line.startswith('#') or \\\n line.startswith(\"/*\") or \\\n line.startswith(\"'''\") or \\\n line.startswith('\"\"\"') or \\\n line.startswith(\"*\")\n"
] | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
    def checkout(self, _hash: str) -> None:
        """
        Checkout the repo at the specified commit.
        BE CAREFUL: this will change the state of the repo, hence it should
        *not* be used with more than 1 thread.

        :param _hash: commit hash to checkout
        """
        with self.lock:
            # Remove any leftover '_PD' working branch from a previous
            # checkout, then recreate it at the requested commit.
            # '-f' discards local changes in the working tree.
            self._delete_tmp_branch()
            self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
    def get_commit_from_tag(self, tag: str) -> Commit:
        """
        Obtain the tagged commit.

        :param str tag: the tag
        :return: Commit commit: the commit the tag referred to
        :raises IndexError, AttributeError: when the tag does not exist or
            does not point at a commit (logged, then re-raised)
        """
        try:
            selected_tag = self.repo.tags[tag]
            return self.get_commit(selected_tag.commit.hexsha)
        except (IndexError, AttributeError):
            # Presumably GitPython raises IndexError for an unknown tag name
            # and AttributeError when the ref has no commit — TODO confirm
            # against GitPython's TagReference documentation.
            logger.debug('Tag %s not found', tag)
            raise
    def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
        """
        Given a diff, returns a dictionary with the added and deleted lines.
        The dictionary has 2 keys: "added" and "deleted", each containing the
        corresponding added or deleted lines. For both keys, the value is a
        list of Tuple (int, str), corresponding to (number of line in the
        file, actual line).

        :param str diff: diff of the commit
        :return: Dictionary
        """
        lines = diff.split('\n')
        modified_lines = {'added': [], 'deleted': []}

        # Running line counters for the old file (deletions) and the new
        # file (additions); both are (re)initialized at every hunk header.
        count_deletions = 0
        count_additions = 0

        for line in lines:
            line = line.rstrip()
            # Tentatively advance both counters; the branches below undo
            # the increment on the side the line does NOT belong to.
            count_deletions += 1
            count_additions += 1

            if line.startswith('@@'):
                # Hunk header "@@ -a,b +c,d @@": reset both counters to the
                # declared starting line numbers.
                count_deletions, count_additions = self._get_line_numbers(line)

            # NOTE(review): file-header lines ("--- a/x" / "+++ b/x") would
            # also match the two branches below — assumes the diff text
            # starts at a hunk header; confirm with callers.
            if line.startswith('-'):
                # Deleted line: exists only in the old file, so the new
                # file's counter must not advance.
                modified_lines['deleted'].append((count_deletions, line[1:]))
                count_additions -= 1

            if line.startswith('+'):
                # Added line: exists only in the new file.
                modified_lines['added'].append((count_additions, line[1:]))
                count_deletions -= 1

            if line == r'\ No newline at end of file':
                # Marker line belongs to neither file: undo both increments.
                count_deletions -= 1
                count_additions -= 1

        return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
def get_commits_modified_file(self, filepath: str) -> List[str]:
"""
Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash
"""
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits
|
ishepard/pydriller | pydriller/git_repository.py | GitRepository.get_commits_modified_file | python | def get_commits_modified_file(self, filepath: str) -> List[str]:
path = str(Path(filepath))
commits = []
try:
commits = self.git.log("--follow", "--format=%H", path).split('\n')
except GitCommandError:
logger.debug("Could not find information of file %s", path)
return commits | Given a filepath, returns all the commits that modified this file
(following renames).
:param str filepath: path to the file
:return: the list of commits' hash | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/git_repository.py#L302-L318 | null | class GitRepository:
"""
Class representing a repository in Git. It contains most of the logic of
PyDriller: obtaining the list of commits, checkout, reset, etc.
"""
def __init__(self, path: str):
"""
Init the Git RepositoryMining.
:param str path: path to the repository
"""
self.path = Path(path)
self.project_name = self.path.name
self.main_branch = None
self.lock = Lock()
@property
def git(self):
"""
GitPython object Git.
:return: Git
"""
return self._open_git()
@property
def repo(self):
"""
GitPython object Repo.
:return: Repo
"""
return self._open_repository()
def _open_git(self) -> Git:
return Git(str(self.path))
def _open_repository(self) -> Repo:
repo = Repo(str(self.path))
if self.main_branch is None:
self._discover_main_branch(repo)
return repo
def _discover_main_branch(self, repo):
self.main_branch = repo.active_branch.name
def get_head(self) -> Commit:
"""
Get the head commit.
:return: Commit of the head commit
"""
head_commit = self.repo.head.commit
return Commit(head_commit, self.path, self.main_branch)
def get_list_commits(self, branch: str = None,
reverse_order: bool = True) \
-> Generator[Commit, None, None]:
"""
Return a generator of commits of all the commits in the repo.
:return: Generator[Commit], the generator of all the commits in the
repo
"""
for commit in self.repo.iter_commits(branch, reverse=reverse_order):
yield self.get_commit_from_gitpython(commit)
def get_commit(self, commit_id: str) -> Commit:
"""
Get the specified commit.
:param str commit_id: hash of the commit to analyze
:return: Commit
"""
return Commit(self.repo.commit(commit_id), self.path, self.main_branch)
def get_commit_from_gitpython(self, commit: GitCommit) -> Commit:
"""
Build a PyDriller commit object from a GitPython commit object.
This is internal of PyDriller, I don't think users generally will need
it.
:param GitCommit commit: GitPython commit
:return: Commit commit: PyDriller commit
"""
return Commit(commit, self.path, self.main_branch)
def checkout(self, _hash: str) -> None:
"""
Checkout the repo at the speficied commit.
BE CAREFUL: this will change the state of the repo, hence it should
*not* be used with more than 1 thread.
:param _hash: commit hash to checkout
"""
with self.lock:
self._delete_tmp_branch()
self.git.checkout('-f', _hash, b='_PD')
def _delete_tmp_branch(self) -> None:
try:
# we are already in _PD, so checkout the master branch before
# deleting it
if self.repo.active_branch.name == '_PD':
self.git.checkout('-f', self.main_branch)
self.repo.delete_head('_PD', force=True)
except GitCommandError:
logger.debug("Branch _PD not found")
def files(self) -> List[str]:
"""
Obtain the list of the files (excluding .git directory).
:return: List[str], the list of the files
"""
_all = []
for path, _, files in os.walk(str(self.path)):
if '.git' in path:
continue
for name in files:
_all.append(os.path.join(path, name))
return _all
def reset(self) -> None:
"""
Reset the state of the repo, checking out the main branch and
discarding
local changes (-f option).
"""
with self.lock:
self.git.checkout('-f', self.main_branch)
self._delete_tmp_branch()
def total_commits(self) -> int:
"""
Calculate total number of commits.
:return: the total number of commits
"""
return len(list(self.get_list_commits()))
def get_commit_from_tag(self, tag: str) -> Commit:
"""
Obtain the tagged commit.
:param str tag: the tag
:return: Commit commit: the commit the tag referred to
"""
try:
selected_tag = self.repo.tags[tag]
return self.get_commit(selected_tag.commit.hexsha)
except (IndexError, AttributeError):
logger.debug('Tag %s not found', tag)
raise
def parse_diff(self, diff: str) -> Dict[str, List[Tuple[int, str]]]:
"""
Given a diff, returns a dictionary with the added and deleted lines.
The dictionary has 2 keys: "added" and "deleted", each containing the
corresponding added or deleted lines. For both keys, the value is a
list of Tuple (int, str), corresponding to (number of line in the file,
actual line).
:param str diff: diff of the commit
:return: Dictionary
"""
lines = diff.split('\n')
modified_lines = {'added': [], 'deleted': []}
count_deletions = 0
count_additions = 0
for line in lines:
line = line.rstrip()
count_deletions += 1
count_additions += 1
if line.startswith('@@'):
count_deletions, count_additions = self._get_line_numbers(line)
if line.startswith('-'):
modified_lines['deleted'].append((count_deletions, line[1:]))
count_additions -= 1
if line.startswith('+'):
modified_lines['added'].append((count_additions, line[1:]))
count_deletions -= 1
if line == r'\ No newline at end of file':
count_deletions -= 1
count_additions -= 1
return modified_lines
def _get_line_numbers(self, line):
token = line.split(" ")
numbers_old_file = token[1]
numbers_new_file = token[2]
delete_line_number = int(numbers_old_file.split(",")[0]
.replace("-", "")) - 1
additions_line_number = int(numbers_new_file.split(",")[0]) - 1
return delete_line_number, additions_line_number
    def get_commits_last_modified_lines(self, commit: Commit,
                                        modification: Modification = None) \
            -> Set[str]:
        """
        Given the Commit object, returns the set of commits that last
        "touched" the lines that are modified in the files included in the
        commit. It applies SZZ.
        The algorithm works as follows: (for every file in the commit)

        1- obtain the diff

        2- obtain the list of deleted lines

        3- blame the file and obtain the commits where those lines were added

        Can also be passed as parameter a single Modification, in this case
        only this file will be analyzed.

        :param Commit commit: the commit to analyze
        :param Modification modification: single modification to analyze
        :return: the set containing all the bug inducing commits
        """
        buggy_commits = set()

        # Analyze either the single given modification or every file
        # touched by the commit.
        if modification is not None:
            modifications = [modification]
        else:
            modifications = commit.modifications

        for mod in modifications:
            path = mod.new_path
            if mod.change_type == ModificationType.RENAME or \
                    mod.change_type == ModificationType.DELETE:
                # Renamed/deleted files only exist under this name in the
                # parent commit, so blame the old path.
                path = mod.old_path
            deleted_lines = self.parse_diff(mod.diff)['deleted']
            try:
                # Blame the parent commit (hash^): we want who last touched
                # the lines *before* this commit deleted them.
                blame = self.git.blame(commit.hash + '^',
                                       '--', path).split('\n')
                for num_line, line in deleted_lines:
                    # Comments and empty lines are skipped: removing them
                    # does not implicate the commit that introduced them.
                    if not self._useless_line(line.strip()):
                        # The blamed hash is the first token of the blame
                        # output line; '^' marks boundary commits and must
                        # be stripped before lookup.
                        buggy_commit = blame[num_line - 1].split(' ')[
                            0].replace('^', '')
                        buggy_commits.add(self.get_commit(buggy_commit).hash)
            except GitCommandError:
                logger.debug(
                    "Could not found file %s in commit %s. Probably a double "
                    "rename!", mod.filename, commit.hash)

        return buggy_commits
def _useless_line(self, line: str):
# this covers comments in Java and Python, as well as empty lines.
# More have to be added!
return not line or \
line.startswith('//') or \
line.startswith('#') or \
line.startswith("/*") or \
line.startswith("'''") or \
line.startswith('"""') or \
line.startswith("*")
|
ishepard/pydriller | pydriller/repository_mining.py | RepositoryMining.traverse_commits | python | def traverse_commits(self) -> Generator[Commit, None, None]:
if isinstance(self._path_to_repo, str):
self._path_to_repo = [self._path_to_repo]
for path_repo in self._path_to_repo:
# if it is a remote repo, clone it first in a temporary folder!
if self._isremote(path_repo):
tmp_folder = tempfile.TemporaryDirectory()
path_repo = self._clone_remote_repos(tmp_folder.name,
path_repo)
git_repo = GitRepository(path_repo)
self._sanity_check_filters(git_repo)
self._check_timezones()
logger.info('Analyzing git repository in %s', git_repo.path)
if self._filepath is not None:
self._filepath_commits = git_repo.get_commits_modified_file(
self._filepath)
for commit in git_repo.get_list_commits(self._only_in_branch,
not self._reversed_order):
logger.info('Commit #%s in %s from %s', commit.hash,
commit.committer_date,
commit.author.name)
if self._is_commit_filtered(commit):
logger.info('Commit #%s filtered', commit.hash)
continue
yield commit | Analyze all the specified commits (all of them by default), returning
a generator of commits. | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/repository_mining.py#L176-L213 | [
"def _isremote(self, repo: str) -> bool:\n return repo.startswith(\"git@\") or repo.startswith(\"https://\")\n"
] | class RepositoryMining:
"""
This is the main class of PyDriller, responsible for running the study.
"""
# pylint: disable=R0902,R0913,R0914
def __init__(self, path_to_repo: Union[str, List[str]],
single: str = None,
since: datetime = None, to: datetime = None,
from_commit: str = None, to_commit: str = None,
from_tag: str = None, to_tag: str = None,
reversed_order: bool = False,
only_in_branch: str = None,
only_modifications_with_file_types: List[str] = None,
only_no_merge: bool = False,
only_authors: List[str] = None,
only_commits: List[str] = None,
filepath: str = None):
"""
Init a repository mining. The only required parameter is
"path_to_repo": to analyze a
single repo, pass the absolute path to the repo; if you need to
analyze more
repos, pass a list of absolute paths.
Furthermore, PyDriller supports local and remote repositories: if
you pass a path to a
repo, PyDriller will run the study on that repo; if you pass an URL,
PyDriller will clone
the repo in a temporary folder, run the study, and delete the
temporary folder.
:param Union[str,List[str]] path_to_repo: absolute path (or list of
absolute paths) to the repository(ies) to analyze
:param str single: hash of a single commit to analyze
:param datetime since: starting date
:param datetime to: ending date
:param str from_commit: starting commit (only if `since` is None)
:param str to_commit: ending commit (only if `to` is None)
:param str from_tag: starting the analysis from specified tag (only
if `since` and `from_commit` are None)
:param str to_tag: ending the analysis from specified tag (only if
`to` and `to_commit` are None)
:param bool reversed_order: whether the commits should be analyzed
in reversed order
:param str only_in_branch: only commits in this branch will be analyzed
:param List[str] only_modifications_with_file_types: only
modifications with that file types will be analyzed
:param bool only_no_merge: if True, merges will not be analyzed
:param List[str] only_authors: only commits of these authors will be
analyzed (the check is done on the username, NOT the email)
:param List[str] only_commits: only these commits will be analyzed
:param str filepath: only commits that modified this file will be
analyzed
"""
self._sanity_check_repos(path_to_repo)
self._path_to_repo = path_to_repo
self._from_commit = from_commit
self._to_commit = to_commit
self._from_tag = from_tag
self._to_tag = to_tag
self._single = single
self._since = since
self._to = to
self._reversed_order = reversed_order
self._only_in_branch = only_in_branch
self._only_modifications_with_file_types = \
only_modifications_with_file_types
self._only_no_merge = only_no_merge
self._only_authors = only_authors
self._only_commits = only_commits
self._filepath = filepath
self._filepath_commits = None
def _sanity_check_repos(self, path_to_repo):
if not isinstance(path_to_repo, str) and \
not isinstance(path_to_repo, list):
raise Exception("The path to the repo has to be of type "
"'string' or 'list of strings'!")
def _sanity_check_filters(self, git_repo: GitRepository):
# If single is defined, no other filters should be
if self._single is not None:
if not self._check_filters_none([self._since,
self._to,
self._from_commit,
self._to_commit,
self._from_tag,
self._to_tag]):
raise Exception('You can not specify a single commit with '
'other filters')
# If from_commit is defined, since should not be
if self._from_commit is not None:
if not self._check_filters_none([self._since, self._from_tag]):
raise Exception('You can not specify both <since date> '
'and <from commit>')
self._since = git_repo.get_commit(self._from_commit).committer_date
# If from_tag is defined, since and from_commit should not be
if self._from_tag is not None:
if not self._check_filters_none([self._since, self._from_commit]):
raise Exception('You can not specify <since date> or '
'<from commit> when using <from tag>')
self._since = git_repo.get_commit_from_tag(
self._from_tag).committer_date
# If to_commit is defined, to should not be
if self._to_commit is not None:
if not self._check_filters_none([self._to, self._to_tag]):
raise Exception('You can not specify both <to date> '
'and <to commit>')
self._to = git_repo.get_commit(self._to_commit).committer_date
# If to_tag is defined, to and to_commit should not be
if self._to_tag is not None:
if not self._check_filters_none([self._to, self._to_commit]):
raise Exception('You can not specify <to date> or <to commit> '
'when using <to tag>')
self._to = git_repo.get_commit_from_tag(
self._to_tag).committer_date
def _check_filters_none(self, filters: List):
for filter in filters:
if filter is not None:
return False
return True
def _isremote(self, repo: str) -> bool:
return repo.startswith("git@") or repo.startswith("https://")
def _clone_remote_repos(self, tmp_folder: str, repo: str) -> str:
repo_folder = os.path.join(tmp_folder,
self._get_repo_name_from_url(repo))
logger.info("Cloning %s in temporary folder %s", repo, repo_folder)
Repo.clone_from(url=repo, to_path=repo_folder)
return repo_folder
def _is_commit_filtered(self, commit: Commit): # pylint: disable=R0911
if self._single is not None and commit.hash != self._single:
logger.debug(
'Commit filtered because is not the defined in single')
return True
if (self._since is not None and commit.committer_date < self._since) \
or (self._to is not None and commit.committer_date > self._to):
return True
if self._only_modifications_with_file_types is not None:
if not self._has_modification_with_file_type(commit):
logger.debug('Commit filtered for modification types')
return True
if self._only_no_merge is True and commit.merge is True:
logger.debug('Commit filtered for no merge')
return True
if self._only_authors is not None and commit.author.name not in \
self._only_authors:
logger.debug("Commit filtered for author")
return True
if self._only_commits is not None and commit.hash not in \
self._only_commits:
logger.debug("Commit filtered because it is not one of the "
"specified commits")
return True
if self._filepath_commits is not None and commit.hash not in \
self._filepath_commits:
logger.debug("Commit filtered because it did not modify the "
"specified file")
return True
return False
def _has_modification_with_file_type(self, commit):
for mod in commit.modifications:
if mod.filename.endswith(
tuple(self._only_modifications_with_file_types)):
return True
return False
def _check_timezones(self):
if self._since is not None:
self._since = self._replace_timezone(self._since)
if self._to is not None:
self._to = self._replace_timezone(self._to)
    def _replace_timezone(self, dt: datetime):
        # A datetime is naive when tzinfo is missing or its utcoffset is
        # undefined; normalize those to UTC so comparisons against the
        # timezone-aware commit dates do not raise TypeError.
        if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
            dt = dt.replace(tzinfo=pytz.utc)
        return dt
def _get_repo_name_from_url(self, url: str) -> str:
last_slash_index = url.rfind("/")
last_suffix_index = url.rfind(".git")
if last_suffix_index < 0:
last_suffix_index = len(url)
if last_slash_index < 0 or last_suffix_index <= last_slash_index:
raise Exception("Badly formatted url {}".format(url))
return url[last_slash_index + 1:last_suffix_index]
|
ishepard/pydriller | pydriller/domain/commit.py | Modification.added | python | def added(self) -> int:
added = 0
for line in self.diff.replace('\r', '').split("\n"):
if line.startswith('+') and not line.startswith('+++'):
added += 1
return added | Return the total number of added lines in the file.
:return: int lines_added | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L105-L115 | null | class Modification: # pylint: disable=R0902
"""
This class contains information regarding a modified file in a commit.
"""
def __init__(self, old_path: str, new_path: str,
change_type: ModificationType,
diff_and_sc: Dict[str, str]):
"""
Initialize a modification. A modification carries on information
regarding the changed file. Normally, you shouldn't initialize a new
one.
"""
self._old_path = Path(old_path) if old_path is not None else None
self._new_path = Path(new_path) if new_path is not None else None
self.change_type = change_type
self.diff = diff_and_sc['diff']
self.source_code = diff_and_sc['source_code']
self._nloc = None
self._complexity = None
self._token_count = None
self._function_list = []
@property
@property
def removed(self):
"""
Return the total number of deleted lines in the file.
:return: int lines_deleted
"""
removed = 0
for line in self.diff.replace('\r', '').split("\n"):
if line.startswith('-') and not line.startswith('---'):
removed += 1
return removed
@property
def old_path(self):
"""
Old path of the file. Can be None if the file is added.
:return: str old_path
"""
if self._old_path:
return str(self._old_path)
return self._old_path
@property
def new_path(self):
"""
New path of the file. Can be None if the file is deleted.
:return: str new_path
"""
if self._new_path:
return str(self._new_path)
return self._new_path
@property
def filename(self) -> str:
"""
Return the filename. Given a path-like-string (e.g.
"/Users/dspadini/pydriller/myfile.py") returns only the filename
(e.g. "myfile.py")
:return: str filename
"""
if self._new_path is not None and str(self._new_path) != "/dev/null":
path = self._new_path
else:
path = self._old_path
return path.name
@property
def nloc(self) -> int:
"""
Calculate the LOC of the file.
:return: LOC of the file
"""
self._calculate_metrics()
return self._nloc
@property
def complexity(self) -> int:
"""
Calculate the Cyclomatic Complexity of the file.
:return: Cyclomatic Complexity of the file
"""
self._calculate_metrics()
return self._complexity
@property
def token_count(self) -> int:
"""
Calculate the token count of functions.
:return: token count
"""
self._calculate_metrics()
return self._token_count
@property
def methods(self) -> List[Method]:
"""
Return the list of methods in the file. Every method
contains various information like complexity, loc, name,
number of parameters, etc.
:return: list of methods
"""
self._calculate_metrics()
return self._function_list
def _calculate_metrics(self):
if self.source_code and self._nloc is None:
l = lizard.analyze_file.analyze_source_code(self.filename,
self.source_code)
self._nloc = l.nloc
self._complexity = l.CCN
self._token_count = l.token_count
for func in l.function_list:
self._function_list.append(Method(func))
def __eq__(self, other):
if not isinstance(other, Modification):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
def __str__(self):
return (
'MODIFICATION\n' +
'Old Path: {}\n'.format(self.old_path) +
'New Path: {}\n'.format(self.new_path) +
'Type: {}\n'.format(self.change_type.name) +
'Diff: {}\n'.format(self.diff) +
'Source code: {}\n'.format(self.source_code)
)
|
ishepard/pydriller | pydriller/domain/commit.py | Modification.removed | python | def removed(self):
removed = 0
for line in self.diff.replace('\r', '').split("\n"):
if line.startswith('-') and not line.startswith('---'):
removed += 1
return removed | Return the total number of deleted lines in the file.
:return: int lines_deleted | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L118-L128 | null | class Modification: # pylint: disable=R0902
"""
This class contains information regarding a modified file in a commit.
"""
def __init__(self, old_path: str, new_path: str,
change_type: ModificationType,
diff_and_sc: Dict[str, str]):
"""
Initialize a modification. A modification carries on information
regarding the changed file. Normally, you shouldn't initialize a new
one.
"""
self._old_path = Path(old_path) if old_path is not None else None
self._new_path = Path(new_path) if new_path is not None else None
self.change_type = change_type
self.diff = diff_and_sc['diff']
self.source_code = diff_and_sc['source_code']
self._nloc = None
self._complexity = None
self._token_count = None
self._function_list = []
@property
def added(self) -> int:
"""
Return the total number of added lines in the file.
:return: int lines_added
"""
added = 0
for line in self.diff.replace('\r', '').split("\n"):
if line.startswith('+') and not line.startswith('+++'):
added += 1
return added
@property
@property
def old_path(self):
"""
Old path of the file. Can be None if the file is added.
:return: str old_path
"""
if self._old_path:
return str(self._old_path)
return self._old_path
@property
def new_path(self):
"""
New path of the file. Can be None if the file is deleted.
:return: str new_path
"""
if self._new_path:
return str(self._new_path)
return self._new_path
@property
def filename(self) -> str:
"""
Return the filename. Given a path-like-string (e.g.
"/Users/dspadini/pydriller/myfile.py") returns only the filename
(e.g. "myfile.py")
:return: str filename
"""
if self._new_path is not None and str(self._new_path) != "/dev/null":
path = self._new_path
else:
path = self._old_path
return path.name
@property
def nloc(self) -> int:
"""
Calculate the LOC of the file.
:return: LOC of the file
"""
self._calculate_metrics()
return self._nloc
@property
def complexity(self) -> int:
"""
Calculate the Cyclomatic Complexity of the file.
:return: Cyclomatic Complexity of the file
"""
self._calculate_metrics()
return self._complexity
@property
def token_count(self) -> int:
"""
Calculate the token count of functions.
:return: token count
"""
self._calculate_metrics()
return self._token_count
@property
def methods(self) -> List[Method]:
"""
Return the list of methods in the file. Every method
contains various information like complexity, loc, name,
number of parameters, etc.
:return: list of methods
"""
self._calculate_metrics()
return self._function_list
def _calculate_metrics(self):
if self.source_code and self._nloc is None:
l = lizard.analyze_file.analyze_source_code(self.filename,
self.source_code)
self._nloc = l.nloc
self._complexity = l.CCN
self._token_count = l.token_count
for func in l.function_list:
self._function_list.append(Method(func))
def __eq__(self, other):
if not isinstance(other, Modification):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
def __str__(self):
return (
'MODIFICATION\n' +
'Old Path: {}\n'.format(self.old_path) +
'New Path: {}\n'.format(self.new_path) +
'Type: {}\n'.format(self.change_type.name) +
'Diff: {}\n'.format(self.diff) +
'Source code: {}\n'.format(self.source_code)
)
|
ishepard/pydriller | pydriller/domain/commit.py | Modification.filename | python | def filename(self) -> str:
if self._new_path is not None and str(self._new_path) != "/dev/null":
path = self._new_path
else:
path = self._old_path
return path.name | Return the filename. Given a path-like-string (e.g.
"/Users/dspadini/pydriller/myfile.py") returns only the filename
(e.g. "myfile.py")
:return: str filename | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L153-L166 | null | class Modification: # pylint: disable=R0902
"""
This class contains information regarding a modified file in a commit.
"""
def __init__(self, old_path: str, new_path: str,
change_type: ModificationType,
diff_and_sc: Dict[str, str]):
"""
Initialize a modification. A modification carries on information
regarding the changed file. Normally, you shouldn't initialize a new
one.
"""
self._old_path = Path(old_path) if old_path is not None else None
self._new_path = Path(new_path) if new_path is not None else None
self.change_type = change_type
self.diff = diff_and_sc['diff']
self.source_code = diff_and_sc['source_code']
self._nloc = None
self._complexity = None
self._token_count = None
self._function_list = []
@property
def added(self) -> int:
"""
Return the total number of added lines in the file.
:return: int lines_added
"""
added = 0
for line in self.diff.replace('\r', '').split("\n"):
if line.startswith('+') and not line.startswith('+++'):
added += 1
return added
@property
def removed(self):
"""
Return the total number of deleted lines in the file.
:return: int lines_deleted
"""
removed = 0
for line in self.diff.replace('\r', '').split("\n"):
if line.startswith('-') and not line.startswith('---'):
removed += 1
return removed
@property
def old_path(self):
"""
Old path of the file. Can be None if the file is added.
:return: str old_path
"""
if self._old_path:
return str(self._old_path)
return self._old_path
@property
def new_path(self):
"""
New path of the file. Can be None if the file is deleted.
:return: str new_path
"""
if self._new_path:
return str(self._new_path)
return self._new_path
@property
@property
def nloc(self) -> int:
"""
Calculate the LOC of the file.
:return: LOC of the file
"""
self._calculate_metrics()
return self._nloc
@property
def complexity(self) -> int:
"""
Calculate the Cyclomatic Complexity of the file.
:return: Cyclomatic Complexity of the file
"""
self._calculate_metrics()
return self._complexity
@property
def token_count(self) -> int:
"""
Calculate the token count of functions.
:return: token count
"""
self._calculate_metrics()
return self._token_count
@property
def methods(self) -> List[Method]:
"""
Return the list of methods in the file. Every method
contains various information like complexity, loc, name,
number of parameters, etc.
:return: list of methods
"""
self._calculate_metrics()
return self._function_list
def _calculate_metrics(self):
if self.source_code and self._nloc is None:
l = lizard.analyze_file.analyze_source_code(self.filename,
self.source_code)
self._nloc = l.nloc
self._complexity = l.CCN
self._token_count = l.token_count
for func in l.function_list:
self._function_list.append(Method(func))
def __eq__(self, other):
if not isinstance(other, Modification):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
def __str__(self):
return (
'MODIFICATION\n' +
'Old Path: {}\n'.format(self.old_path) +
'New Path: {}\n'.format(self.new_path) +
'Type: {}\n'.format(self.change_type.name) +
'Diff: {}\n'.format(self.diff) +
'Source code: {}\n'.format(self.source_code)
)
|
ishepard/pydriller | pydriller/domain/commit.py | Commit.author | python | def author(self) -> Developer:
return Developer(self._c_object.author.name,
self._c_object.author.email) | Return the author of the commit as a Developer object.
:return: author | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L273-L280 | null | class Commit:
"""
Class representing a Commit. Contains all the important information such
as hash, author, dates, and modified files.
"""
def __init__(self, commit: GitCommit, project_path: Path,
main_branch: str) -> None:
"""
Create a commit object.
:param commit: GitPython Commit object
:param project_path: path to the project (temporary folder in case
of a remote repository)
:param main_branch: main branch of the repo
"""
self._c_object = commit
self._main_branch = main_branch
self.project_path = project_path
self._modifications = None
self._branches = None
@property
def hash(self) -> str:
"""
Return the SHA of the commit.
:return: str hash
"""
return self._c_object.hexsha
@property
def author(self) -> "Developer":
    """
    Return the author of the commit as a Developer object.
    :return: author
    """
    # Restored: a stray second @property had been left stacked on
    # `committer` where this accessor was removed, which would have made
    # `committer` a property-of-a-property (broken descriptor).
    return Developer(self._c_object.author.name,
                     self._c_object.author.email)
@property
def committer(self) -> "Developer":
    """
    Return the committer of the commit as a Developer object.
    :return: committer
    """
    return Developer(self._c_object.committer.name,
                     self._c_object.committer.email)
@property
def project_name(self) -> str:
"""
Return the project name.
:return: project name
"""
return self.project_path.name
@property
def author_date(self) -> datetime:
"""
Return the authored datetime.
:return: datetime author_datetime
"""
return self._c_object.authored_datetime
@property
def committer_date(self) -> datetime:
"""
Return the committed datetime.
:return: datetime committer_datetime
"""
return self._c_object.committed_datetime
@property
def author_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.author_tz_offset
@property
def committer_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.committer_tz_offset
@property
def msg(self) -> str:
"""
Return commit message.
:return: str commit_message
"""
return self._c_object.message.strip()
@property
def parents(self) -> List[str]:
"""
Return the list of parents SHAs.
:return: List[str] parents
"""
parents = []
for p in self._c_object.parents:
parents.append(p.hexsha)
return parents
@property
def merge(self) -> bool:
"""
Return True if the commit is a merge, False otherwise.
:return: bool merge
"""
return len(self._c_object.parents) > 1
@property
def modifications(self) -> List[Modification]:
"""
Return a list of modified files.
:return: List[Modification] modifications
"""
if self._modifications is None:
self._modifications = self._get_modifications()
return self._modifications
def _get_modifications(self):
repo = Repo(str(self.project_path))
commit = self._c_object
if self.parents:
# the commit has a parent
diff_index = self._c_object.parents[0].diff(commit,
create_patch=True)
else:
# this is the first commit of the repo. Comparing it with git
# NULL TREE
parent = repo.tree(NULL_TREE)
diff_index = parent.diff(commit.tree, create_patch=True)
return self._parse_diff(diff_index)
def _parse_diff(self, diff_index) -> List[Modification]:
modifications_list = []
for diff in diff_index:
old_path = diff.a_path
new_path = diff.b_path
change_type = self._from_change_to_modification_type(diff)
diff_and_sc = {
'diff': '',
'source_code': ''
}
try:
diff_and_sc['diff'] = diff.diff.decode('utf-8')
diff_and_sc['source_code'] = diff.b_blob.data_stream.read()\
.decode('utf-8')
except (UnicodeDecodeError, AttributeError, ValueError):
logger.debug('Could not load source code or the diff of a '
'file in commit %s', self._c_object.hexsha)
modifications_list.append(Modification(old_path, new_path,
change_type, diff_and_sc))
return modifications_list
@property
def in_main_branch(self) -> bool:
"""
Return True if the commit is in the main branch, False otherwise.
:return: bool in_main_branch
"""
return self._main_branch in self.branches
@property
def branches(self) -> Set[str]:
"""
Return the set of branches that contain the commit.
:return: set(str) branches
"""
if self._branches is None:
self._branches = self._get_branches()
return self._branches
def _get_branches(self):
c_git = Git(str(self.project_path))
branches = set()
for branch in set(c_git.branch('--contains', self.hash).split('\n')):
branches.add(branch.strip().replace('* ', ''))
return branches
# pylint disable=R0902
def _from_change_to_modification_type(self, diff: Diff):
if diff.new_file:
return ModificationType.ADD
if diff.deleted_file:
return ModificationType.DELETE
if diff.renamed_file:
return ModificationType.RENAME
if diff.a_blob and diff.b_blob and diff.a_blob != diff.b_blob:
return ModificationType.MODIFY
return ModificationType.UNKNOWN
def __eq__(self, other):
if not isinstance(other, Commit):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
def __str__(self):
return (
'Hash: {}\n'.format(self.hash) +
'Author: {}\n'.format(self.author.name) +
'Author email: {}\n'.format(self.author.email) +
'Committer: {}\n'.format(self.committer.name) +
'Committer email: {}\n'.format(self.committer.email) +
'Author date: {}\n'.format(
self.author_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Committer date: {}\n'.format(
self.committer_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Message: {}\n'.format(self.msg) +
'Parent: {}\n'.format("\n".join(map(str, self.parents))) +
'Merge: {}\n'.format(self.merge) +
'Modifications: \n{}'.format(
"\n".join(map(str, self.modifications))) +
'Branches: \n{}'.format("\n".join(map(str, self.branches))) +
'In main branch: {}\n'.format(self.in_main_branch)
)
|
ishepard/pydriller | pydriller/domain/commit.py | Commit.committer | python | def committer(self) -> Developer:
return Developer(self._c_object.committer.name,
self._c_object.committer.email) | Return the committer of the commit as a Developer object.
:return: committer | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L283-L290 | null | class Commit:
"""
Class representing a Commit. Contains all the important information such
as hash, author, dates, and modified files.
"""
def __init__(self, commit: GitCommit, project_path: Path,
main_branch: str) -> None:
"""
Create a commit object.
:param commit: GitPython Commit object
:param project_path: path to the project (temporary folder in case
of a remote repository)
:param main_branch: main branch of the repo
"""
self._c_object = commit
self._main_branch = main_branch
self.project_path = project_path
self._modifications = None
self._branches = None
@property
def hash(self) -> str:
"""
Return the SHA of the commit.
:return: str hash
"""
return self._c_object.hexsha
@property
def author(self) -> Developer:
"""
Return the author of the commit as a Developer object.
:return: author
"""
return Developer(self._c_object.author.name,
self._c_object.author.email)
@property
def committer(self) -> "Developer":
    """
    Return the committer of the commit as a Developer object.
    :return: committer
    """
    # Restored: a stray second @property had been left stacked on
    # `project_name` where this accessor was removed (property-of-property
    # is a broken descriptor).
    return Developer(self._c_object.committer.name,
                     self._c_object.committer.email)
@property
def project_name(self) -> str:
    """
    Return the project name.
    :return: project name
    """
    return self.project_path.name
@property
def author_date(self) -> datetime:
"""
Return the authored datetime.
:return: datetime author_datetime
"""
return self._c_object.authored_datetime
@property
def committer_date(self) -> datetime:
"""
Return the committed datetime.
:return: datetime committer_datetime
"""
return self._c_object.committed_datetime
@property
def author_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.author_tz_offset
@property
def committer_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.committer_tz_offset
@property
def msg(self) -> str:
"""
Return commit message.
:return: str commit_message
"""
return self._c_object.message.strip()
@property
def parents(self) -> List[str]:
"""
Return the list of parents SHAs.
:return: List[str] parents
"""
parents = []
for p in self._c_object.parents:
parents.append(p.hexsha)
return parents
@property
def merge(self) -> bool:
"""
Return True if the commit is a merge, False otherwise.
:return: bool merge
"""
return len(self._c_object.parents) > 1
@property
def modifications(self) -> List[Modification]:
"""
Return a list of modified files.
:return: List[Modification] modifications
"""
if self._modifications is None:
self._modifications = self._get_modifications()
return self._modifications
def _get_modifications(self):
repo = Repo(str(self.project_path))
commit = self._c_object
if self.parents:
# the commit has a parent
diff_index = self._c_object.parents[0].diff(commit,
create_patch=True)
else:
# this is the first commit of the repo. Comparing it with git
# NULL TREE
parent = repo.tree(NULL_TREE)
diff_index = parent.diff(commit.tree, create_patch=True)
return self._parse_diff(diff_index)
def _parse_diff(self, diff_index) -> List[Modification]:
modifications_list = []
for diff in diff_index:
old_path = diff.a_path
new_path = diff.b_path
change_type = self._from_change_to_modification_type(diff)
diff_and_sc = {
'diff': '',
'source_code': ''
}
try:
diff_and_sc['diff'] = diff.diff.decode('utf-8')
diff_and_sc['source_code'] = diff.b_blob.data_stream.read()\
.decode('utf-8')
except (UnicodeDecodeError, AttributeError, ValueError):
logger.debug('Could not load source code or the diff of a '
'file in commit %s', self._c_object.hexsha)
modifications_list.append(Modification(old_path, new_path,
change_type, diff_and_sc))
return modifications_list
@property
def in_main_branch(self) -> bool:
"""
Return True if the commit is in the main branch, False otherwise.
:return: bool in_main_branch
"""
return self._main_branch in self.branches
@property
def branches(self) -> Set[str]:
"""
Return the set of branches that contain the commit.
:return: set(str) branches
"""
if self._branches is None:
self._branches = self._get_branches()
return self._branches
def _get_branches(self):
c_git = Git(str(self.project_path))
branches = set()
for branch in set(c_git.branch('--contains', self.hash).split('\n')):
branches.add(branch.strip().replace('* ', ''))
return branches
# pylint disable=R0902
def _from_change_to_modification_type(self, diff: Diff):
if diff.new_file:
return ModificationType.ADD
if diff.deleted_file:
return ModificationType.DELETE
if diff.renamed_file:
return ModificationType.RENAME
if diff.a_blob and diff.b_blob and diff.a_blob != diff.b_blob:
return ModificationType.MODIFY
return ModificationType.UNKNOWN
def __eq__(self, other):
if not isinstance(other, Commit):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
def __str__(self):
return (
'Hash: {}\n'.format(self.hash) +
'Author: {}\n'.format(self.author.name) +
'Author email: {}\n'.format(self.author.email) +
'Committer: {}\n'.format(self.committer.name) +
'Committer email: {}\n'.format(self.committer.email) +
'Author date: {}\n'.format(
self.author_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Committer date: {}\n'.format(
self.committer_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Message: {}\n'.format(self.msg) +
'Parent: {}\n'.format("\n".join(map(str, self.parents))) +
'Merge: {}\n'.format(self.merge) +
'Modifications: \n{}'.format(
"\n".join(map(str, self.modifications))) +
'Branches: \n{}'.format("\n".join(map(str, self.branches))) +
'In main branch: {}\n'.format(self.in_main_branch)
)
|
ishepard/pydriller | pydriller/domain/commit.py | Commit.parents | python | def parents(self) -> List[str]:
parents = []
for p in self._c_object.parents:
parents.append(p.hexsha)
return parents | Return the list of parents SHAs.
:return: List[str] parents | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L347-L356 | null | class Commit:
"""
Class representing a Commit. Contains all the important information such
as hash, author, dates, and modified files.
"""
def __init__(self, commit: GitCommit, project_path: Path,
main_branch: str) -> None:
"""
Create a commit object.
:param commit: GitPython Commit object
:param project_path: path to the project (temporary folder in case
of a remote repository)
:param main_branch: main branch of the repo
"""
self._c_object = commit
self._main_branch = main_branch
self.project_path = project_path
self._modifications = None
self._branches = None
@property
def hash(self) -> str:
"""
Return the SHA of the commit.
:return: str hash
"""
return self._c_object.hexsha
@property
def author(self) -> Developer:
"""
Return the author of the commit as a Developer object.
:return: author
"""
return Developer(self._c_object.author.name,
self._c_object.author.email)
@property
def committer(self) -> Developer:
"""
Return the committer of the commit as a Developer object.
:return: committer
"""
return Developer(self._c_object.committer.name,
self._c_object.committer.email)
@property
def project_name(self) -> str:
"""
Return the project name.
:return: project name
"""
return self.project_path.name
@property
def author_date(self) -> datetime:
"""
Return the authored datetime.
:return: datetime author_datetime
"""
return self._c_object.authored_datetime
@property
def committer_date(self) -> datetime:
"""
Return the committed datetime.
:return: datetime committer_datetime
"""
return self._c_object.committed_datetime
@property
def author_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.author_tz_offset
@property
def committer_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.committer_tz_offset
@property
def msg(self) -> str:
"""
Return commit message.
:return: str commit_message
"""
return self._c_object.message.strip()
@property
def parents(self) -> List[str]:
    """
    Return the list of parents SHAs.
    :return: List[str] parents
    """
    # Restored: a stray second @property had been left stacked on `merge`
    # where this accessor was removed. Other members (e.g.
    # _get_modifications) read self.parents, so it must exist.
    parents = []
    for p in self._c_object.parents:
        parents.append(p.hexsha)
    return parents
@property
def merge(self) -> bool:
    """
    Return True if the commit is a merge, False otherwise.
    :return: bool merge
    """
    return len(self._c_object.parents) > 1
@property
def modifications(self) -> List[Modification]:
"""
Return a list of modified files.
:return: List[Modification] modifications
"""
if self._modifications is None:
self._modifications = self._get_modifications()
return self._modifications
def _get_modifications(self):
repo = Repo(str(self.project_path))
commit = self._c_object
if self.parents:
# the commit has a parent
diff_index = self._c_object.parents[0].diff(commit,
create_patch=True)
else:
# this is the first commit of the repo. Comparing it with git
# NULL TREE
parent = repo.tree(NULL_TREE)
diff_index = parent.diff(commit.tree, create_patch=True)
return self._parse_diff(diff_index)
def _parse_diff(self, diff_index) -> List[Modification]:
modifications_list = []
for diff in diff_index:
old_path = diff.a_path
new_path = diff.b_path
change_type = self._from_change_to_modification_type(diff)
diff_and_sc = {
'diff': '',
'source_code': ''
}
try:
diff_and_sc['diff'] = diff.diff.decode('utf-8')
diff_and_sc['source_code'] = diff.b_blob.data_stream.read()\
.decode('utf-8')
except (UnicodeDecodeError, AttributeError, ValueError):
logger.debug('Could not load source code or the diff of a '
'file in commit %s', self._c_object.hexsha)
modifications_list.append(Modification(old_path, new_path,
change_type, diff_and_sc))
return modifications_list
@property
def in_main_branch(self) -> bool:
"""
Return True if the commit is in the main branch, False otherwise.
:return: bool in_main_branch
"""
return self._main_branch in self.branches
@property
def branches(self) -> Set[str]:
"""
Return the set of branches that contain the commit.
:return: set(str) branches
"""
if self._branches is None:
self._branches = self._get_branches()
return self._branches
def _get_branches(self):
c_git = Git(str(self.project_path))
branches = set()
for branch in set(c_git.branch('--contains', self.hash).split('\n')):
branches.add(branch.strip().replace('* ', ''))
return branches
# pylint disable=R0902
def _from_change_to_modification_type(self, diff: Diff):
if diff.new_file:
return ModificationType.ADD
if diff.deleted_file:
return ModificationType.DELETE
if diff.renamed_file:
return ModificationType.RENAME
if diff.a_blob and diff.b_blob and diff.a_blob != diff.b_blob:
return ModificationType.MODIFY
return ModificationType.UNKNOWN
def __eq__(self, other):
if not isinstance(other, Commit):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
def __str__(self):
return (
'Hash: {}\n'.format(self.hash) +
'Author: {}\n'.format(self.author.name) +
'Author email: {}\n'.format(self.author.email) +
'Committer: {}\n'.format(self.committer.name) +
'Committer email: {}\n'.format(self.committer.email) +
'Author date: {}\n'.format(
self.author_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Committer date: {}\n'.format(
self.committer_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Message: {}\n'.format(self.msg) +
'Parent: {}\n'.format("\n".join(map(str, self.parents))) +
'Merge: {}\n'.format(self.merge) +
'Modifications: \n{}'.format(
"\n".join(map(str, self.modifications))) +
'Branches: \n{}'.format("\n".join(map(str, self.branches))) +
'In main branch: {}\n'.format(self.in_main_branch)
)
|
ishepard/pydriller | pydriller/domain/commit.py | Commit.modifications | python | def modifications(self) -> List[Modification]:
if self._modifications is None:
self._modifications = self._get_modifications()
return self._modifications | Return a list of modified files.
:return: List[Modification] modifications | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L368-L377 | [
"def _get_modifications(self):\n repo = Repo(str(self.project_path))\n commit = self._c_object\n\n if self.parents:\n # the commit has a parent\n diff_index = self._c_object.parents[0].diff(commit,\n create_patch=True)\n else:\n # this is the first commit of the repo. Comparing it with git\n # NULL TREE\n parent = repo.tree(NULL_TREE)\n diff_index = parent.diff(commit.tree, create_patch=True)\n\n return self._parse_diff(diff_index)\n"
] | class Commit:
"""
Class representing a Commit. Contains all the important information such
as hash, author, dates, and modified files.
"""
def __init__(self, commit: GitCommit, project_path: Path,
main_branch: str) -> None:
"""
Create a commit object.
:param commit: GitPython Commit object
:param project_path: path to the project (temporary folder in case
of a remote repository)
:param main_branch: main branch of the repo
"""
self._c_object = commit
self._main_branch = main_branch
self.project_path = project_path
self._modifications = None
self._branches = None
@property
def hash(self) -> str:
"""
Return the SHA of the commit.
:return: str hash
"""
return self._c_object.hexsha
@property
def author(self) -> Developer:
"""
Return the author of the commit as a Developer object.
:return: author
"""
return Developer(self._c_object.author.name,
self._c_object.author.email)
@property
def committer(self) -> Developer:
"""
Return the committer of the commit as a Developer object.
:return: committer
"""
return Developer(self._c_object.committer.name,
self._c_object.committer.email)
@property
def project_name(self) -> str:
"""
Return the project name.
:return: project name
"""
return self.project_path.name
@property
def author_date(self) -> datetime:
"""
Return the authored datetime.
:return: datetime author_datetime
"""
return self._c_object.authored_datetime
@property
def committer_date(self) -> datetime:
"""
Return the committed datetime.
:return: datetime committer_datetime
"""
return self._c_object.committed_datetime
@property
def author_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.author_tz_offset
@property
def committer_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.committer_tz_offset
@property
def msg(self) -> str:
"""
Return commit message.
:return: str commit_message
"""
return self._c_object.message.strip()
@property
def parents(self) -> List[str]:
"""
Return the list of parents SHAs.
:return: List[str] parents
"""
parents = []
for p in self._c_object.parents:
parents.append(p.hexsha)
return parents
@property
def merge(self) -> bool:
"""
Return True if the commit is a merge, False otherwise.
:return: bool merge
"""
return len(self._c_object.parents) > 1
@property
def modifications(self) -> List["Modification"]:
    """
    Return a list of modified files.
    :return: List[Modification] modifications
    """
    # Restored: the @property decorator had been left attached to the
    # private helper below, turning it into a (wrong) attribute. The
    # result is cached because diff parsing is expensive.
    if self._modifications is None:
        self._modifications = self._get_modifications()
    return self._modifications
def _get_modifications(self):
    # Diff against the first parent, or against git's NULL tree for the
    # very first commit of the repository.
    repo = Repo(str(self.project_path))
    commit = self._c_object
    if self.parents:
        # the commit has a parent
        diff_index = self._c_object.parents[0].diff(commit,
                                                    create_patch=True)
    else:
        # this is the first commit of the repo. Comparing it with git
        # NULL TREE
        parent = repo.tree(NULL_TREE)
        diff_index = parent.diff(commit.tree, create_patch=True)
    return self._parse_diff(diff_index)
def _parse_diff(self, diff_index) -> List[Modification]:
modifications_list = []
for diff in diff_index:
old_path = diff.a_path
new_path = diff.b_path
change_type = self._from_change_to_modification_type(diff)
diff_and_sc = {
'diff': '',
'source_code': ''
}
try:
diff_and_sc['diff'] = diff.diff.decode('utf-8')
diff_and_sc['source_code'] = diff.b_blob.data_stream.read()\
.decode('utf-8')
except (UnicodeDecodeError, AttributeError, ValueError):
logger.debug('Could not load source code or the diff of a '
'file in commit %s', self._c_object.hexsha)
modifications_list.append(Modification(old_path, new_path,
change_type, diff_and_sc))
return modifications_list
@property
def in_main_branch(self) -> bool:
"""
Return True if the commit is in the main branch, False otherwise.
:return: bool in_main_branch
"""
return self._main_branch in self.branches
@property
def branches(self) -> Set[str]:
"""
Return the set of branches that contain the commit.
:return: set(str) branches
"""
if self._branches is None:
self._branches = self._get_branches()
return self._branches
def _get_branches(self):
c_git = Git(str(self.project_path))
branches = set()
for branch in set(c_git.branch('--contains', self.hash).split('\n')):
branches.add(branch.strip().replace('* ', ''))
return branches
# pylint disable=R0902
def _from_change_to_modification_type(self, diff: Diff):
if diff.new_file:
return ModificationType.ADD
if diff.deleted_file:
return ModificationType.DELETE
if diff.renamed_file:
return ModificationType.RENAME
if diff.a_blob and diff.b_blob and diff.a_blob != diff.b_blob:
return ModificationType.MODIFY
return ModificationType.UNKNOWN
def __eq__(self, other):
if not isinstance(other, Commit):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
def __str__(self):
return (
'Hash: {}\n'.format(self.hash) +
'Author: {}\n'.format(self.author.name) +
'Author email: {}\n'.format(self.author.email) +
'Committer: {}\n'.format(self.committer.name) +
'Committer email: {}\n'.format(self.committer.email) +
'Author date: {}\n'.format(
self.author_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Committer date: {}\n'.format(
self.committer_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Message: {}\n'.format(self.msg) +
'Parent: {}\n'.format("\n".join(map(str, self.parents))) +
'Merge: {}\n'.format(self.merge) +
'Modifications: \n{}'.format(
"\n".join(map(str, self.modifications))) +
'Branches: \n{}'.format("\n".join(map(str, self.branches))) +
'In main branch: {}\n'.format(self.in_main_branch)
)
|
ishepard/pydriller | pydriller/domain/commit.py | Commit.branches | python | def branches(self) -> Set[str]:
if self._branches is None:
self._branches = self._get_branches()
return self._branches | Return the set of branches that contain the commit.
:return: set(str) branches | train | https://github.com/ishepard/pydriller/blob/71facb32afa085d5ddf0081beba34d00d57b8080/pydriller/domain/commit.py#L430-L439 | [
"def _get_branches(self):\n c_git = Git(str(self.project_path))\n branches = set()\n for branch in set(c_git.branch('--contains', self.hash).split('\\n')):\n branches.add(branch.strip().replace('* ', ''))\n return branches\n"
] | class Commit:
"""
Class representing a Commit. Contains all the important information such
as hash, author, dates, and modified files.
"""
def __init__(self, commit: GitCommit, project_path: Path,
main_branch: str) -> None:
"""
Create a commit object.
:param commit: GitPython Commit object
:param project_path: path to the project (temporary folder in case
of a remote repository)
:param main_branch: main branch of the repo
"""
self._c_object = commit
self._main_branch = main_branch
self.project_path = project_path
self._modifications = None
self._branches = None
@property
def hash(self) -> str:
"""
Return the SHA of the commit.
:return: str hash
"""
return self._c_object.hexsha
@property
def author(self) -> Developer:
"""
Return the author of the commit as a Developer object.
:return: author
"""
return Developer(self._c_object.author.name,
self._c_object.author.email)
@property
def committer(self) -> Developer:
"""
Return the committer of the commit as a Developer object.
:return: committer
"""
return Developer(self._c_object.committer.name,
self._c_object.committer.email)
@property
def project_name(self) -> str:
"""
Return the project name.
:return: project name
"""
return self.project_path.name
@property
def author_date(self) -> datetime:
"""
Return the authored datetime.
:return: datetime author_datetime
"""
return self._c_object.authored_datetime
@property
def committer_date(self) -> datetime:
"""
Return the committed datetime.
:return: datetime committer_datetime
"""
return self._c_object.committed_datetime
@property
def author_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.author_tz_offset
@property
def committer_timezone(self) -> int:
"""
Author timezone expressed in seconds from epoch.
:return: int timezone
"""
return self._c_object.committer_tz_offset
@property
def msg(self) -> str:
"""
Return commit message.
:return: str commit_message
"""
return self._c_object.message.strip()
@property
def parents(self) -> List[str]:
"""
Return the list of parents SHAs.
:return: List[str] parents
"""
parents = []
for p in self._c_object.parents:
parents.append(p.hexsha)
return parents
@property
def merge(self) -> bool:
"""
Return True if the commit is a merge, False otherwise.
:return: bool merge
"""
return len(self._c_object.parents) > 1
@property
def modifications(self) -> List[Modification]:
"""
Return a list of modified files.
:return: List[Modification] modifications
"""
if self._modifications is None:
self._modifications = self._get_modifications()
return self._modifications
def _get_modifications(self):
repo = Repo(str(self.project_path))
commit = self._c_object
if self.parents:
# the commit has a parent
diff_index = self._c_object.parents[0].diff(commit,
create_patch=True)
else:
# this is the first commit of the repo. Comparing it with git
# NULL TREE
parent = repo.tree(NULL_TREE)
diff_index = parent.diff(commit.tree, create_patch=True)
return self._parse_diff(diff_index)
def _parse_diff(self, diff_index) -> List[Modification]:
modifications_list = []
for diff in diff_index:
old_path = diff.a_path
new_path = diff.b_path
change_type = self._from_change_to_modification_type(diff)
diff_and_sc = {
'diff': '',
'source_code': ''
}
try:
diff_and_sc['diff'] = diff.diff.decode('utf-8')
diff_and_sc['source_code'] = diff.b_blob.data_stream.read()\
.decode('utf-8')
except (UnicodeDecodeError, AttributeError, ValueError):
logger.debug('Could not load source code or the diff of a '
'file in commit %s', self._c_object.hexsha)
modifications_list.append(Modification(old_path, new_path,
change_type, diff_and_sc))
return modifications_list
@property
def in_main_branch(self) -> bool:
"""
Return True if the commit is in the main branch, False otherwise.
:return: bool in_main_branch
"""
return self._main_branch in self.branches
@property
def branches(self) -> Set[str]:
    """
    Return the set of branches that contain the commit.
    :return: set(str) branches
    """
    # Restored: the @property decorator had been left attached to the
    # private helper below. The result is cached because it shells out
    # to `git branch --contains`.
    if self._branches is None:
        self._branches = self._get_branches()
    return self._branches
def _get_branches(self):
    # Strip the "* " marker git puts on the currently checked-out branch.
    c_git = Git(str(self.project_path))
    branches = set()
    for branch in set(c_git.branch('--contains', self.hash).split('\n')):
        branches.add(branch.strip().replace('* ', ''))
    return branches
# pylint disable=R0902
def _from_change_to_modification_type(self, diff: Diff):
if diff.new_file:
return ModificationType.ADD
if diff.deleted_file:
return ModificationType.DELETE
if diff.renamed_file:
return ModificationType.RENAME
if diff.a_blob and diff.b_blob and diff.a_blob != diff.b_blob:
return ModificationType.MODIFY
return ModificationType.UNKNOWN
def __eq__(self, other):
if not isinstance(other, Commit):
return NotImplemented
if self is other:
return True
return self.__dict__ == other.__dict__
def __str__(self):
return (
'Hash: {}\n'.format(self.hash) +
'Author: {}\n'.format(self.author.name) +
'Author email: {}\n'.format(self.author.email) +
'Committer: {}\n'.format(self.committer.name) +
'Committer email: {}\n'.format(self.committer.email) +
'Author date: {}\n'.format(
self.author_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Committer date: {}\n'.format(
self.committer_date.strftime("%Y-%m-%d %H:%M:%S")) +
'Message: {}\n'.format(self.msg) +
'Parent: {}\n'.format("\n".join(map(str, self.parents))) +
'Merge: {}\n'.format(self.merge) +
'Modifications: \n{}'.format(
"\n".join(map(str, self.modifications))) +
'Branches: \n{}'.format("\n".join(map(str, self.branches))) +
'In main branch: {}\n'.format(self.in_main_branch)
)
|
pmneila/morphsnakes | examples.py | visual_callback_2d | python | def visual_callback_2d(background, fig=None):
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
ax1.imshow(background, cmap=plt.cm.gray)
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)
plt.pause(0.001)
def callback(levelset):
if ax1.collections:
del ax1.collections[0]
ax1.contour(levelset, [0.5], colors='r')
ax_u.set_data(levelset)
fig.canvas.draw()
plt.pause(0.001)
return callback | Returns a callback than can be passed as the argument `iter_callback`
of `morphological_geodesic_active_contour` and
`morphological_chan_vese` for visualizing the evolution
of the levelsets. Only works for 2D images.
Parameters
----------
background : (M, N) array
Image to be plotted as the background of the visual evolution.
fig : matplotlib.figure.Figure
Figure where results will be drawn. If not given, a new figure
will be created.
Returns
-------
callback : Python function
A function that receives a levelset and updates the current plot
accordingly. This can be passed as the `iter_callback` argument of
`morphological_geodesic_active_contour` and
`morphological_chan_vese`. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/examples.py#L25-L70 | null |
import os
import logging
import numpy as np
from imageio import imread
import matplotlib
from matplotlib import pyplot as plt
import morphsnakes as ms
# in case you are running on machine without display, e.g. server
if os.environ.get('DISPLAY', '') == '':
logging.warning('No display found. Using non-interactive Agg backend.')
# NOTE(review): matplotlib.use() is called here *after* pyplot has
# already been imported above; on some matplotlib versions the backend
# switch may not take effect at this point — consider
# plt.switch_backend('Agg'). TODO confirm.
matplotlib.use('Agg')
PATH_IMG_NODULE = 'images/mama07ORI.bmp'
PATH_IMG_STARFISH = 'images/seastar2.png'
PATH_IMG_LAKES = 'images/lakes3.jpg'
PATH_IMG_CAMERA = 'images/camera.png'
PATH_IMG_COINS = 'images/coins.png'
PATH_ARRAY_CONFOCAL = 'images/confocal.npy'
def visual_callback_2d(background, fig=None):
    """
    Returns a callback than can be passed as the argument `iter_callback`
    of `morphological_geodesic_active_contour` and
    `morphological_chan_vese` for visualizing the evolution
    of the levelsets. Only works for 2D images.
    Parameters
    ----------
    background : (M, N) array
        Image to be plotted as the background of the visual evolution.
    fig : matplotlib.figure.Figure
        Figure where results will be drawn. If not given, a new figure
        will be created.
    Returns
    -------
    callback : Python function
        A function that receives a levelset and updates the current plot
        accordingly. This can be passed as the `iter_callback` argument of
        `morphological_geodesic_active_contour` and
        `morphological_chan_vese`.
    """
    # Prepare the visual environment.
    if fig is None:
        fig = plt.figure()
    fig.clf()
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.imshow(background, cmap=plt.cm.gray)
    ax2 = fig.add_subplot(1, 2, 2)
    ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)
    plt.pause(0.001)

    # Handle to the previously drawn contour so it can be erased on the
    # next call. Mutating `ax1.collections` directly (the old approach,
    # `del ax1.collections[0]`) was deprecated in matplotlib 3.5 and
    # raises an error in 3.7+.
    state = {'contour': None}

    def callback(levelset):
        prev = state['contour']
        if prev is not None:
            try:
                # ContourSet is a single removable Artist since
                # matplotlib 3.8.
                prev.remove()
            except (AttributeError, NotImplementedError):
                # Older versions expose the per-level artists in
                # `.collections`.
                for artist in prev.collections:
                    artist.remove()
        state['contour'] = ax1.contour(levelset, [0.5], colors='r')
        ax_u.set_data(levelset)
        fig.canvas.draw()
        plt.pause(0.001)

    return callback
def visual_callback_3d(fig=None, plot_each=1):
    """
    Returns a callback that can be passed as the argument `iter_callback`
    of `morphological_geodesic_active_contour` and
    `morphological_chan_vese` for visualizing the evolution
    of the levelsets. Only works for 3D images.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure where results will be drawn. If not given, a new figure
        will be created.
    plot_each : positive integer
        The plot will be updated once every `plot_each` calls to the callback
        function.

    Returns
    -------
    callback : Python function
        A function that receives a levelset and updates the current plot
        accordingly. This can be passed as the `iter_callback` argument of
        `morphological_geodesic_active_contour` and
        `morphological_chan_vese`.

    """
    from mpl_toolkits.mplot3d import Axes3D
    # PyMCubes package is required for `visual_callback_3d`
    try:
        import mcubes
    except ImportError:
        raise ImportError("PyMCubes is required for 3D `visual_callback_3d`")

    # Prepare the visual environment.
    if fig is None:
        fig = plt.figure()
    fig.clf()
    ax = fig.add_subplot(111, projection='3d')
    plt.pause(0.001)

    counter = [-1]
    # Keep a reference to the previous surface so it can be removed before
    # each redraw. (`del ax.collections[0]` fails on matplotlib >= 3.7,
    # where `Axes.collections` is a read-only view.)
    last_surface = [None]

    def callback(levelset):

        counter[0] += 1
        if (counter[0] % plot_each) != 0:
            return

        if last_surface[0] is not None:
            last_surface[0].remove()

        coords, triangles = mcubes.marching_cubes(levelset, 0.5)
        last_surface[0] = ax.plot_trisurf(coords[:, 0], coords[:, 1],
                                          coords[:, 2], triangles=triangles)
        plt.pause(0.1)

    return callback
def rgb2gray(img):
    """Convert an RGB image to gray scale using ITU-R 601 luma weights."""
    red, green, blue = img[..., 0], img[..., 1], img[..., 2]
    return 0.2989 * red + 0.587 * green + 0.114 * blue
def example_nodule():
    """MorphGAC segmentation of a nodule in a mammography image."""
    logging.info('Running: example_nodule (MorphGAC)...')

    # Load the image, keeping a single channel scaled to [0, 1].
    image = imread(PATH_IMG_NODULE)[..., 0] / 255.0

    # Stopping criterion g(I).
    g_image = ms.inverse_gaussian_gradient(image, alpha=1000, sigma=5.48)

    # Initial level-set: a circle centered at (100, 126) with radius 20.
    initial_ls = ms.circle_level_set(image.shape, (100, 126), 20)

    # MorphGAC with an inflating balloon, plotted as it evolves.
    ms.morphological_geodesic_active_contour(
        g_image, iterations=45, init_level_set=initial_ls,
        smoothing=1, threshold=0.31, balloon=1,
        iter_callback=visual_callback_2d(image))
def example_starfish():
    """MorphGAC segmentation of the starfish image."""
    logging.info('Running: example_starfish (MorphGAC)...')

    # Load the color image and build the grayscale version for processing.
    img_color = imread(PATH_IMG_STARFISH) / 255.0
    img_gray = rgb2gray(img_color)

    # Stopping criterion g(I).
    g_img = ms.inverse_gaussian_gradient(img_gray, alpha=1000, sigma=2)

    # Initial level-set: a large circle enclosing the starfish.
    initial_ls = ms.circle_level_set(img_gray.shape, (163, 137), 135)

    # MorphGAC with a shrinking balloon; plot over the color image.
    ms.morphological_geodesic_active_contour(
        g_img, iterations=100, init_level_set=initial_ls,
        smoothing=2, threshold=0.3, balloon=-1,
        iter_callback=visual_callback_2d(img_color))
def example_coins():
    """MorphGAC segmentation of the coins image."""
    logging.info('Running: example_coins (MorphGAC)...')

    # Load the image scaled to [0, 1].
    image = imread(PATH_IMG_COINS) / 255.0

    # Stopping criterion g(I) with default parameters.
    g_image = ms.inverse_gaussian_gradient(image)

    # Manual initialization: a rectangle 10 pixels away from every border.
    initial_ls = np.zeros(image.shape, dtype=np.int8)
    initial_ls[10:-10, 10:-10] = 1

    # MorphGAC with a shrinking balloon.
    ms.morphological_geodesic_active_contour(
        g_image, 230, initial_ls, smoothing=1, threshold=0.69,
        balloon=-1, iter_callback=visual_callback_2d(image))
def example_lakes():
    """MorphACWE segmentation of a lake in the lakes image."""
    logging.info('Running: example_lakes (MorphACWE)...')

    # Load the color image; MorphACWE runs on the grayscale version and
    # does not need a stopping criterion g(I).
    img_color = imread(PATH_IMG_LAKES) / 255.0
    img_gray = rgb2gray(img_color)

    # Initial level-set: a circle inside the lake.
    initial_ls = ms.circle_level_set(img_gray.shape, (80, 170), 25)

    # Morphological Chan-Vese (or ACWE), plotted over the color image.
    ms.morphological_chan_vese(
        img_gray, iterations=200, init_level_set=initial_ls,
        smoothing=3, lambda1=1, lambda2=1,
        iter_callback=visual_callback_2d(img_color))
def example_camera():
    """
    Example with `morphological_chan_vese` with using the default
    initialization of the level-set.
    """
    logging.info('Running: example_camera (MorphACWE)...')

    # Load the image scaled to [0, 1].
    image = imread(PATH_IMG_CAMERA) / 255.0

    # Morphological Chan-Vese (or ACWE) with the default initial level-set.
    ms.morphological_chan_vese(
        image, 35, smoothing=3, lambda1=1, lambda2=1,
        iter_callback=visual_callback_2d(image))
def example_confocal3d():
    """MorphACWE segmentation of a 3D confocal microscopy volume."""
    logging.info('Running: example_confocal3d (MorphACWE)...')

    # Load the volume from a saved numpy array.
    volume = np.load(PATH_ARRAY_CONFOCAL)

    # Initial level-set: a sphere centered at (30, 50, 80) with radius 25.
    initial_ls = ms.circle_level_set(volume.shape, (30, 50, 80), 25)

    # Morphological Chan-Vese (or ACWE) with a 3D visual callback.
    ms.morphological_chan_vese(
        volume, iterations=150, init_level_set=initial_ls,
        smoothing=1, lambda1=1, lambda2=2,
        iter_callback=visual_callback_3d(plot_each=20))
if __name__ == '__main__':
    # Verbose logging so each example announces itself before running.
    logging.basicConfig(level=logging.DEBUG)
    # Run the 2D examples in sequence; each opens its own figure.
    example_nodule()
    example_starfish()
    example_coins()
    example_lakes()
    example_camera()
    # Uncomment the following line to see a 3D example
    # This is skipped by default since mplot3d is VERY slow plotting 3d meshes
    # example_confocal3d()
    logging.info("Done.")
    # Keep the figures open until the user closes them.
    plt.show()
|
pmneila/morphsnakes | examples.py | visual_callback_3d | python | def visual_callback_3d(fig=None, plot_each=1):
from mpl_toolkits.mplot3d import Axes3D
# PyMCubes package is required for `visual_callback_3d`
try:
import mcubes
except ImportError:
raise ImportError("PyMCubes is required for 3D `visual_callback_3d`")
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax = fig.add_subplot(111, projection='3d')
plt.pause(0.001)
counter = [-1]
def callback(levelset):
counter[0] += 1
if (counter[0] % plot_each) != 0:
return
if ax.collections:
del ax.collections[0]
coords, triangles = mcubes.marching_cubes(levelset, 0.5)
ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],
triangles=triangles)
plt.pause(0.1)
return callback | Returns a callback than can be passed as the argument `iter_callback`
of `morphological_geodesic_active_contour` and
`morphological_chan_vese` for visualizing the evolution
of the levelsets. Only works for 3D images.
Parameters
----------
fig : matplotlib.figure.Figure
Figure where results will be drawn. If not given, a new figure
will be created.
plot_each : positive integer
The plot will be updated once every `plot_each` calls to the callback
function.
Returns
-------
callback : Python function
A function that receives a levelset and updates the current plot
accordingly. This can be passed as the `iter_callback` argument of
`morphological_geodesic_active_contour` and
`morphological_chan_vese`. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/examples.py#L73-L129 | null |
import os
import logging
import numpy as np
from imageio import imread
import matplotlib
from matplotlib import pyplot as plt
import morphsnakes as ms
# in case you are running on machine without display, e.g. server
if os.environ.get('DISPLAY', '') == '':
logging.warning('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
PATH_IMG_NODULE = 'images/mama07ORI.bmp'
PATH_IMG_STARFISH = 'images/seastar2.png'
PATH_IMG_LAKES = 'images/lakes3.jpg'
PATH_IMG_CAMERA = 'images/camera.png'
PATH_IMG_COINS = 'images/coins.png'
PATH_ARRAY_CONFOCAL = 'images/confocal.npy'
def visual_callback_2d(background, fig=None):
    """
    Returns a callback that can be passed as the argument `iter_callback`
    of `morphological_geodesic_active_contour` and
    `morphological_chan_vese` for visualizing the evolution
    of the levelsets. Only works for 2D images.

    Parameters
    ----------
    background : (M, N) array
        Image to be plotted as the background of the visual evolution.
    fig : matplotlib.figure.Figure
        Figure where results will be drawn. If not given, a new figure
        will be created.

    Returns
    -------
    callback : Python function
        A function that receives a levelset and updates the current plot
        accordingly. This can be passed as the `iter_callback` argument of
        `morphological_geodesic_active_contour` and
        `morphological_chan_vese`.

    """

    # Prepare the visual environment.
    if fig is None:
        fig = plt.figure()
    fig.clf()
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.imshow(background, cmap=plt.cm.gray)

    ax2 = fig.add_subplot(1, 2, 2)
    ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)
    plt.pause(0.001)

    # Keep a reference to the previous contour artist so it can be removed
    # before each redraw. (`del ax.collections[0]` is rejected by
    # matplotlib >= 3.7, where `Axes.collections` is a read-only view.)
    last_contour = [None]

    def callback(levelset):
        cs = last_contour[0]
        if cs is not None:
            # On matplotlib >= 3.8 a ContourSet is a removable Artist; on
            # older versions its segments live in `cs.collections`.
            if hasattr(cs, 'remove'):
                cs.remove()
            else:
                for coll in cs.collections:
                    coll.remove()
        last_contour[0] = ax1.contour(levelset, [0.5], colors='r')
        ax_u.set_data(levelset)
        fig.canvas.draw()
        plt.pause(0.001)

    return callback
def visual_callback_3d(fig=None, plot_each=1):
    """
    Returns a callback that can be passed as the argument `iter_callback`
    of `morphological_geodesic_active_contour` and
    `morphological_chan_vese` for visualizing the evolution
    of the levelsets. Only works for 3D images.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure where results will be drawn. If not given, a new figure
        will be created.
    plot_each : positive integer
        The plot will be updated once every `plot_each` calls to the callback
        function.

    Returns
    -------
    callback : Python function
        A function that receives a levelset and updates the current plot
        accordingly. This can be passed as the `iter_callback` argument of
        `morphological_geodesic_active_contour` and
        `morphological_chan_vese`.

    """
    from mpl_toolkits.mplot3d import Axes3D
    # PyMCubes package is required for `visual_callback_3d`
    try:
        import mcubes
    except ImportError:
        raise ImportError("PyMCubes is required for 3D `visual_callback_3d`")

    # Prepare the visual environment.
    if fig is None:
        fig = plt.figure()
    fig.clf()
    ax = fig.add_subplot(111, projection='3d')
    plt.pause(0.001)

    counter = [-1]
    # Keep a reference to the previous surface so it can be removed before
    # each redraw. (`del ax.collections[0]` fails on matplotlib >= 3.7,
    # where `Axes.collections` is a read-only view.)
    last_surface = [None]

    def callback(levelset):

        counter[0] += 1
        if (counter[0] % plot_each) != 0:
            return

        if last_surface[0] is not None:
            last_surface[0].remove()

        coords, triangles = mcubes.marching_cubes(levelset, 0.5)
        last_surface[0] = ax.plot_trisurf(coords[:, 0], coords[:, 1],
                                          coords[:, 2], triangles=triangles)
        plt.pause(0.1)

    return callback
def rgb2gray(img):
    """Convert an RGB image to gray scale using ITU-R 601 luma weights."""
    red, green, blue = img[..., 0], img[..., 1], img[..., 2]
    return 0.2989 * red + 0.587 * green + 0.114 * blue
def example_nodule():
logging.info('Running: example_nodule (MorphGAC)...')
# Load the image.
img = imread(PATH_IMG_NODULE)[..., 0] / 255.0
# g(I)
gimg = ms.inverse_gaussian_gradient(img, alpha=1000, sigma=5.48)
# Initialization of the level-set.
init_ls = ms.circle_level_set(img.shape, (100, 126), 20)
# Callback for visual plotting
callback = visual_callback_2d(img)
# MorphGAC.
ms.morphological_geodesic_active_contour(gimg, iterations=45,
init_level_set=init_ls,
smoothing=1, threshold=0.31,
balloon=1, iter_callback=callback)
def example_starfish():
logging.info('Running: example_starfish (MorphGAC)...')
# Load the image.
imgcolor = imread(PATH_IMG_STARFISH) / 255.0
img = rgb2gray(imgcolor)
# g(I)
gimg = ms.inverse_gaussian_gradient(img, alpha=1000, sigma=2)
# Initialization of the level-set.
init_ls = ms.circle_level_set(img.shape, (163, 137), 135)
# Callback for visual plotting
callback = visual_callback_2d(imgcolor)
# MorphGAC.
ms.morphological_geodesic_active_contour(gimg, iterations=100,
init_level_set=init_ls,
smoothing=2, threshold=0.3,
balloon=-1, iter_callback=callback)
def example_coins():
logging.info('Running: example_coins (MorphGAC)...')
# Load the image.
img = imread(PATH_IMG_COINS) / 255.0
# g(I)
gimg = ms.inverse_gaussian_gradient(img)
# Manual initialization of the level set
init_ls = np.zeros(img.shape, dtype=np.int8)
init_ls[10:-10, 10:-10] = 1
# Callback for visual plotting
callback = visual_callback_2d(img)
# MorphGAC.
ms.morphological_geodesic_active_contour(gimg, 230, init_ls,
smoothing=1, threshold=0.69,
balloon=-1, iter_callback=callback)
def example_lakes():
logging.info('Running: example_lakes (MorphACWE)...')
# Load the image.
imgcolor = imread(PATH_IMG_LAKES)/255.0
img = rgb2gray(imgcolor)
# MorphACWE does not need g(I)
# Initialization of the level-set.
init_ls = ms.circle_level_set(img.shape, (80, 170), 25)
# Callback for visual plotting
callback = visual_callback_2d(imgcolor)
# Morphological Chan-Vese (or ACWE)
ms.morphological_chan_vese(img, iterations=200,
init_level_set=init_ls,
smoothing=3, lambda1=1, lambda2=1,
iter_callback=callback)
def example_camera():
"""
Example with `morphological_chan_vese` with using the default
initialization of the level-set.
"""
logging.info('Running: example_camera (MorphACWE)...')
# Load the image.
img = imread(PATH_IMG_CAMERA)/255.0
# Callback for visual plotting
callback = visual_callback_2d(img)
# Morphological Chan-Vese (or ACWE)
ms.morphological_chan_vese(img, 35,
smoothing=3, lambda1=1, lambda2=1,
iter_callback=callback)
def example_confocal3d():
logging.info('Running: example_confocal3d (MorphACWE)...')
# Load the image.
img = np.load(PATH_ARRAY_CONFOCAL)
# Initialization of the level-set.
init_ls = ms.circle_level_set(img.shape, (30, 50, 80), 25)
# Callback for visual plotting
callback = visual_callback_3d(plot_each=20)
# Morphological Chan-Vese (or ACWE)
ms.morphological_chan_vese(img, iterations=150,
init_level_set=init_ls,
smoothing=1, lambda1=1, lambda2=2,
iter_callback=callback)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
example_nodule()
example_starfish()
example_coins()
example_lakes()
example_camera()
# Uncomment the following line to see a 3D example
# This is skipped by default since mplot3d is VERY slow plotting 3d meshes
# example_confocal3d()
logging.info("Done.")
plt.show()
|
pmneila/morphsnakes | examples.py | example_camera | python | def example_camera():
logging.info('Running: example_camera (MorphACWE)...')
# Load the image.
img = imread(PATH_IMG_CAMERA)/255.0
# Callback for visual plotting
callback = visual_callback_2d(img)
# Morphological Chan-Vese (or ACWE)
ms.morphological_chan_vese(img, 35,
smoothing=3, lambda1=1, lambda2=1,
iter_callback=callback) | Example with `morphological_chan_vese` with using the default
initialization of the level-set. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/examples.py#L226-L243 | [
"def visual_callback_2d(background, fig=None):\n \"\"\"\n Returns a callback than can be passed as the argument `iter_callback`\n of `morphological_geodesic_active_contour` and\n `morphological_chan_vese` for visualizing the evolution\n of the levelsets. Only works for 2D images.\n\n Parameters\n ----------\n background : (M, N) array\n Image to be plotted as the background of the visual evolution.\n fig : matplotlib.figure.Figure\n Figure where results will be drawn. If not given, a new figure\n will be created.\n\n Returns\n -------\n callback : Python function\n A function that receives a levelset and updates the current plot\n accordingly. This can be passed as the `iter_callback` argument of\n `morphological_geodesic_active_contour` and\n `morphological_chan_vese`.\n\n \"\"\"\n\n # Prepare the visual environment.\n if fig is None:\n fig = plt.figure()\n fig.clf()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.imshow(background, cmap=plt.cm.gray)\n\n ax2 = fig.add_subplot(1, 2, 2)\n ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)\n plt.pause(0.001)\n\n def callback(levelset):\n\n if ax1.collections:\n del ax1.collections[0]\n ax1.contour(levelset, [0.5], colors='r')\n ax_u.set_data(levelset)\n fig.canvas.draw()\n plt.pause(0.001)\n\n return callback\n",
"def morphological_chan_vese(image, iterations, init_level_set='checkerboard',\n smoothing=1, lambda1=1, lambda2=1,\n iter_callback=lambda x: None):\n \"\"\"Morphological Active Contours without Edges (MorphACWE)\n\n Active contours without edges implemented with morphological operators. It\n can be used to segment objects in images and volumes without well defined\n borders. It is required that the inside of the object looks different on\n average than the outside (i.e., the inner area of the object should be\n darker or lighter than the outer area on average).\n\n Parameters\n ----------\n image : (M, N) or (L, M, N) array\n Grayscale image or volume to be segmented.\n iterations : uint\n Number of iterations to run\n init_level_set : str, (M, N) array, or (L, M, N) array\n Initial level set. If an array is given, it will be binarized and used\n as the initial level set. If a string is given, it defines the method\n to generate a reasonable initial level set with the shape of the\n `image`. Accepted values are 'checkerboard' and 'circle'. See the\n documentation of `checkerboard_level_set` and `circle_level_set`\n respectively for details about how these level sets are created.\n smoothing : uint, optional\n Number of times the smoothing operator is applied per iteration.\n Reasonable values are around 1-4. Larger values lead to smoother\n segmentations.\n lambda1 : float, optional\n Weight parameter for the outer region. If `lambda1` is larger than\n `lambda2`, the outer region will contain a larger range of values than\n the inner region.\n lambda2 : float, optional\n Weight parameter for the inner region. If `lambda2` is larger than\n `lambda1`, the inner region will contain a larger range of values than\n the outer region.\n iter_callback : function, optional\n If given, this function is called once per iteration with the current\n level set as the only argument. 
This is useful for debugging or for\n plotting intermediate results during the evolution.\n\n Returns\n -------\n out : (M, N) or (L, M, N) array\n Final segmentation (i.e., the final level set)\n\n See also\n --------\n circle_level_set, checkerboard_level_set\n\n Notes\n -----\n\n This is a version of the Chan-Vese algorithm that uses morphological\n operators instead of solving a partial differential equation (PDE) for the\n evolution of the contour. The set of morphological operators used in this\n algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE\n (see [1]_). However, morphological operators are do not suffer from the\n numerical stability issues typically found in PDEs (it is not necessary to\n find the right time step for the evolution), and are computationally\n faster.\n\n The algorithm and its theoretical derivation are described in [1]_.\n\n References\n ----------\n .. [1] A Morphological Approach to Curvature-based Evolution of Curves and\n Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE\n Transactions on Pattern Analysis and Machine Intelligence (PAMI),\n 2014, DOI 10.1109/TPAMI.2013.106\n \"\"\"\n\n init_level_set = _init_level_set(init_level_set, image.shape)\n\n _check_input(image, init_level_set)\n\n u = np.int8(init_level_set > 0)\n\n iter_callback(u)\n\n for _ in range(iterations):\n\n # inside = u > 0\n # outside = u <= 0\n c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)\n c1 = (image * u).sum() / float(u.sum() + 1e-8)\n\n # Image attachment\n du = np.gradient(u)\n abs_du = np.abs(du).sum(0)\n aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)\n\n u[aux < 0] = 1\n u[aux > 0] = 0\n\n # Smoothing\n for _ in range(smoothing):\n u = _curvop(u)\n\n iter_callback(u)\n\n return u\n"
] |
import os
import logging
import numpy as np
from imageio import imread
import matplotlib
from matplotlib import pyplot as plt
import morphsnakes as ms
# in case you are running on machine without display, e.g. server
if os.environ.get('DISPLAY', '') == '':
logging.warning('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
PATH_IMG_NODULE = 'images/mama07ORI.bmp'
PATH_IMG_STARFISH = 'images/seastar2.png'
PATH_IMG_LAKES = 'images/lakes3.jpg'
PATH_IMG_CAMERA = 'images/camera.png'
PATH_IMG_COINS = 'images/coins.png'
PATH_ARRAY_CONFOCAL = 'images/confocal.npy'
def visual_callback_2d(background, fig=None):
    """
    Returns a callback that can be passed as the argument `iter_callback`
    of `morphological_geodesic_active_contour` and
    `morphological_chan_vese` for visualizing the evolution
    of the levelsets. Only works for 2D images.

    Parameters
    ----------
    background : (M, N) array
        Image to be plotted as the background of the visual evolution.
    fig : matplotlib.figure.Figure
        Figure where results will be drawn. If not given, a new figure
        will be created.

    Returns
    -------
    callback : Python function
        A function that receives a levelset and updates the current plot
        accordingly. This can be passed as the `iter_callback` argument of
        `morphological_geodesic_active_contour` and
        `morphological_chan_vese`.

    """

    # Prepare the visual environment.
    if fig is None:
        fig = plt.figure()
    fig.clf()
    ax1 = fig.add_subplot(1, 2, 1)
    ax1.imshow(background, cmap=plt.cm.gray)

    ax2 = fig.add_subplot(1, 2, 2)
    ax_u = ax2.imshow(np.zeros_like(background), vmin=0, vmax=1)
    plt.pause(0.001)

    # Keep a reference to the previous contour artist so it can be removed
    # before each redraw. (`del ax.collections[0]` is rejected by
    # matplotlib >= 3.7, where `Axes.collections` is a read-only view.)
    last_contour = [None]

    def callback(levelset):
        cs = last_contour[0]
        if cs is not None:
            # On matplotlib >= 3.8 a ContourSet is a removable Artist; on
            # older versions its segments live in `cs.collections`.
            if hasattr(cs, 'remove'):
                cs.remove()
            else:
                for coll in cs.collections:
                    coll.remove()
        last_contour[0] = ax1.contour(levelset, [0.5], colors='r')
        ax_u.set_data(levelset)
        fig.canvas.draw()
        plt.pause(0.001)

    return callback
def visual_callback_3d(fig=None, plot_each=1):
    """
    Returns a callback that can be passed as the argument `iter_callback`
    of `morphological_geodesic_active_contour` and
    `morphological_chan_vese` for visualizing the evolution
    of the levelsets. Only works for 3D images.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure where results will be drawn. If not given, a new figure
        will be created.
    plot_each : positive integer
        The plot will be updated once every `plot_each` calls to the callback
        function.

    Returns
    -------
    callback : Python function
        A function that receives a levelset and updates the current plot
        accordingly. This can be passed as the `iter_callback` argument of
        `morphological_geodesic_active_contour` and
        `morphological_chan_vese`.

    """
    from mpl_toolkits.mplot3d import Axes3D
    # PyMCubes package is required for `visual_callback_3d`
    try:
        import mcubes
    except ImportError:
        raise ImportError("PyMCubes is required for 3D `visual_callback_3d`")

    # Prepare the visual environment.
    if fig is None:
        fig = plt.figure()
    fig.clf()
    ax = fig.add_subplot(111, projection='3d')
    plt.pause(0.001)

    counter = [-1]
    # Keep a reference to the previous surface so it can be removed before
    # each redraw. (`del ax.collections[0]` fails on matplotlib >= 3.7,
    # where `Axes.collections` is a read-only view.)
    last_surface = [None]

    def callback(levelset):

        counter[0] += 1
        if (counter[0] % plot_each) != 0:
            return

        if last_surface[0] is not None:
            last_surface[0].remove()

        coords, triangles = mcubes.marching_cubes(levelset, 0.5)
        last_surface[0] = ax.plot_trisurf(coords[:, 0], coords[:, 1],
                                          coords[:, 2], triangles=triangles)
        plt.pause(0.1)

    return callback
def rgb2gray(img):
    """Convert an RGB image to gray scale using ITU-R 601 luma weights."""
    red, green, blue = img[..., 0], img[..., 1], img[..., 2]
    return 0.2989 * red + 0.587 * green + 0.114 * blue
def example_nodule():
logging.info('Running: example_nodule (MorphGAC)...')
# Load the image.
img = imread(PATH_IMG_NODULE)[..., 0] / 255.0
# g(I)
gimg = ms.inverse_gaussian_gradient(img, alpha=1000, sigma=5.48)
# Initialization of the level-set.
init_ls = ms.circle_level_set(img.shape, (100, 126), 20)
# Callback for visual plotting
callback = visual_callback_2d(img)
# MorphGAC.
ms.morphological_geodesic_active_contour(gimg, iterations=45,
init_level_set=init_ls,
smoothing=1, threshold=0.31,
balloon=1, iter_callback=callback)
def example_starfish():
logging.info('Running: example_starfish (MorphGAC)...')
# Load the image.
imgcolor = imread(PATH_IMG_STARFISH) / 255.0
img = rgb2gray(imgcolor)
# g(I)
gimg = ms.inverse_gaussian_gradient(img, alpha=1000, sigma=2)
# Initialization of the level-set.
init_ls = ms.circle_level_set(img.shape, (163, 137), 135)
# Callback for visual plotting
callback = visual_callback_2d(imgcolor)
# MorphGAC.
ms.morphological_geodesic_active_contour(gimg, iterations=100,
init_level_set=init_ls,
smoothing=2, threshold=0.3,
balloon=-1, iter_callback=callback)
def example_coins():
logging.info('Running: example_coins (MorphGAC)...')
# Load the image.
img = imread(PATH_IMG_COINS) / 255.0
# g(I)
gimg = ms.inverse_gaussian_gradient(img)
# Manual initialization of the level set
init_ls = np.zeros(img.shape, dtype=np.int8)
init_ls[10:-10, 10:-10] = 1
# Callback for visual plotting
callback = visual_callback_2d(img)
# MorphGAC.
ms.morphological_geodesic_active_contour(gimg, 230, init_ls,
smoothing=1, threshold=0.69,
balloon=-1, iter_callback=callback)
def example_lakes():
logging.info('Running: example_lakes (MorphACWE)...')
# Load the image.
imgcolor = imread(PATH_IMG_LAKES)/255.0
img = rgb2gray(imgcolor)
# MorphACWE does not need g(I)
# Initialization of the level-set.
init_ls = ms.circle_level_set(img.shape, (80, 170), 25)
# Callback for visual plotting
callback = visual_callback_2d(imgcolor)
# Morphological Chan-Vese (or ACWE)
ms.morphological_chan_vese(img, iterations=200,
init_level_set=init_ls,
smoothing=3, lambda1=1, lambda2=1,
iter_callback=callback)
def example_camera():
"""
Example with `morphological_chan_vese` with using the default
initialization of the level-set.
"""
logging.info('Running: example_camera (MorphACWE)...')
# Load the image.
img = imread(PATH_IMG_CAMERA)/255.0
# Callback for visual plotting
callback = visual_callback_2d(img)
# Morphological Chan-Vese (or ACWE)
ms.morphological_chan_vese(img, 35,
smoothing=3, lambda1=1, lambda2=1,
iter_callback=callback)
def example_confocal3d():
logging.info('Running: example_confocal3d (MorphACWE)...')
# Load the image.
img = np.load(PATH_ARRAY_CONFOCAL)
# Initialization of the level-set.
init_ls = ms.circle_level_set(img.shape, (30, 50, 80), 25)
# Callback for visual plotting
callback = visual_callback_3d(plot_each=20)
# Morphological Chan-Vese (or ACWE)
ms.morphological_chan_vese(img, iterations=150,
init_level_set=init_ls,
smoothing=1, lambda1=1, lambda2=2,
iter_callback=callback)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
example_nodule()
example_starfish()
example_coins()
example_lakes()
example_camera()
# Uncomment the following line to see a 3D example
# This is skipped by default since mplot3d is VERY slow plotting 3d meshes
# example_confocal3d()
logging.info("Done.")
plt.show()
|
pmneila/morphsnakes | morphsnakes_v1.py | operator_si | python | def operator_si(u):
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_erosion(u, P_i)
return _aux.max(0) | operator_si operator. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L74-L91 | null | # -*- coding: utf-8 -*-
"""
morphsnakes
===========
This is a Python implementation of the algorithms introduced in the paper
Márquez-Neila, P., Baumela, L., Álvarez, L., "A morphological approach
to curvature-based evolution of curves and surfaces". IEEE Transactions
on Pattern Analysis and Machine Intelligence (PAMI), 2013.
This implementation is intended to be as brief, understandable and self-contained
as possible. It does not include any enhancement to make it fast or efficient.
Any practical implementation of this algorithm should work only over the
neighbor pixels of the 0.5-levelset, not over all the embedding function,
and perhaps should feature multi-threading or GPU capabilities.
The classes MorphGAC and MorphACWE provide most of the functionality of this
module. They implement the Morphological Geodesic Active Contours and the
Morphological Active Contours without Edges, respectively. See the
aforementioned paper for full details.
See test.py for examples of usage.
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
import os
import logging
from itertools import cycle
import matplotlib
# in case you are running on machine without display, e.g. server
if os.environ.get('DISPLAY', '') == '':
logging.warning('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import gaussian_filter, gaussian_gradient_magnitude
class FCycle(object):
    """Round-robin dispatcher over a fixed sequence of callables."""

    def __init__(self, iterable):
        """Call functions from the iterable each time it is called."""
        self.funcs = cycle(iterable)

    def __call__(self, *args, **kwargs):
        # Advance to the next function in the cycle and delegate the call.
        func = next(self.funcs)
        return func(*args, **kwargs)
# operator_si and operator_is operators for 2D and 3D.
# _P2: the four 3x3 line structuring elements (two diagonals, the vertical
# and the horizontal line) used in 2D.
_P2 = [np.eye(3), np.array([[0, 1, 0]] * 3),
       np.flipud(np.eye(3)), np.rot90([[0, 1, 0]] * 3)]
# _P3: the nine 3x3x3 planar structuring elements used in 3D — three
# axis-aligned planes followed by six diagonal planes.
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
# Shared scratch buffer for operator_si/operator_is; reallocated lazily
# whenever the input shape changes.
_aux = np.zeros((0))
def operator_si(u):
    """SI operator: pointwise supremum of the erosions of `u` by each
    line/plane structuring element (`_P2` in 2D, `_P3` in 3D)."""
    global _aux
    # Pick the family of structuring elements matching the dimensionality.
    if np.ndim(u) == 2:
        P = _P2
    elif np.ndim(u) == 3:
        P = _P3
    else:
        raise ValueError("u has an invalid number of dimensions "
                         "(should be 2 or 3)")
    # Lazily (re)allocate the shared scratch buffer when the shape changes.
    if u.shape != _aux.shape[1:]:
        _aux = np.zeros((len(P),) + u.shape)
    # Erode along each element, writing into the scratch buffer in place.
    for _aux_i, P_i in zip(_aux, P):
        _aux_i[:] = binary_erosion(u, P_i)
    # Supremum over the structuring elements.
    return _aux.max(0)
def operator_is(u):
    """IS operator: pointwise infimum of the dilations of `u` by each
    line/plane structuring element (`_P2` in 2D, `_P3` in 3D)."""
    global _aux
    # Pick the family of structuring elements matching the dimensionality.
    if np.ndim(u) == 2:
        P = _P2
    elif np.ndim(u) == 3:
        P = _P3
    else:
        raise ValueError("u has an invalid number of dimensions "
                         "(should be 2 or 3)")
    # Lazily (re)allocate the shared scratch buffer when the shape changes.
    if u.shape != _aux.shape[1:]:
        _aux = np.zeros((len(P),) + u.shape)
    # Dilate along each element, writing into the scratch buffer in place.
    for _aux_i, P_i in zip(_aux, P):
        _aux_i[:] = binary_dilation(u, P_i)
    # Infimum over the structuring elements.
    return _aux.min(0)
# operator_si_o_is operator.
# Compositions SI∘IS and IS∘SI of the two operators above.
operator_si_o_is = lambda u: operator_si(operator_is(u))
operator_os_o_si = lambda u: operator_is(operator_si(u))
# Curvature-smoothing operator: alternates the two compositions on
# successive calls (via FCycle).
curvop = FCycle([operator_si_o_is, operator_os_o_si])
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
    """Stopping criterion g(I) for image borders.

    Returns 1/sqrt(1 + alpha*|grad I|): close to 1 in flat regions and
    small near strong edges, so the snake slows down at borders.
    """
    grad_mag = gaussian_gradient_magnitude(img, sigma, mode='constant')
    return 1.0 / np.sqrt(1.0 + alpha * grad_mag)
def glines(img, sigma=1.0):
    """Stopping criterion for image black lines (Gaussian-smoothed image)."""
    smoothed = gaussian_filter(img, sigma)
    return smoothed
class MorphACWE(object):
    """Morphological ACWE based on the Chan-Vese energy functional."""

    def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
        """Create a Morphological ACWE solver.

        Parameters
        ----------
        data : ndarray
            The image data.
        smoothing : scalar
            The number of repetitions of the smoothing step (the
            curv operator) in each iteration. In other terms,
            this is the strength of the smoothing. This is the
            parameter µ.
        lambda1, lambda2 : scalars
            Relative importance of the inside pixels (lambda1)
            against the outside pixels (lambda2).
        """
        self._u = None
        self.smoothing = smoothing
        self.lambda1 = lambda1
        self.lambda2 = lambda2

        self.data = data

    def set_levelset(self, u):
        # Binarize the given embedding function.
        self._u = np.double(u)
        self._u[u > 0] = 1
        self._u[u <= 0] = 0

    levelset = property(lambda self: self._u,
                        set_levelset,
                        doc="The level set embedding function (u).")

    def step(self):
        """Perform a single step of the morphological Chan-Vese evolution."""
        # Assign attributes to local variables for convenience.
        u = self._u

        if u is None:
            raise ValueError("the levelset function is not set "
                             "(use set_levelset)")

        data = self.data

        # Determine c0 and c1, the mean values outside and inside the
        # contour. The small epsilon avoids a division by zero when one of
        # the regions is empty (same guard used by the v2 implementation).
        inside = (u > 0)
        outside = (u <= 0)
        c0 = data[outside].sum() / (float(outside.sum()) + 1e-8)
        c1 = data[inside].sum() / (float(inside.sum()) + 1e-8)

        # Image attachment: act only near the front (where |grad u| != 0).
        dres = np.array(np.gradient(u))
        abs_dres = np.abs(dres).sum(0)
        aux = abs_dres * (self.lambda1 * (data - c1) ** 2 -
                          self.lambda2 * (data - c0) ** 2)

        res = np.copy(u)
        res[aux < 0] = 1
        res[aux > 0] = 0

        # Smoothing.
        for i in range(self.smoothing):
            res = curvop(res)

        self._u = res

    def run(self, nb_iters):
        """Run nb_iters steps of the morphological Chan-Vese method."""
        for _ in range(nb_iters):
            self.step()
class MorphGAC(object):
    """Morphological GAC based on the Geodesic Active Contours."""

    def __init__(self, data, smoothing=1, threshold=0, balloon=0):
        """Create a Morphological GAC solver.

        Parameters
        ----------
        data : array-like
            The stopping criterion g(I). See functions gborders and glines.
        smoothing : scalar
            The number of repetitions of the smoothing step in each
            iteration. This is the parameter µ.
        threshold : scalar
            The threshold that determines which areas are affected
            by the morphological balloon. This is the parameter θ.
        balloon : scalar
            The strength of the morphological balloon. This is the parameter ν.
        """
        self._u = None
        self._v = balloon
        self._theta = threshold
        self.smoothing = smoothing

        self.set_data(data)

    def set_levelset(self, u):
        # Binarize the given embedding function.
        self._u = np.double(u)
        self._u[u > 0] = 1
        self._u[u <= 0] = 0

    def set_balloon(self, v):
        self._v = v
        self._update_mask()

    def set_threshold(self, theta):
        self._theta = theta
        self._update_mask()

    def set_data(self, data):
        self._data = data
        self._ddata = np.gradient(data)
        self._update_mask()
        # The structure element for binary dilation and erosion.
        self.structure = np.ones((3,) * np.ndim(data))

    def _update_mask(self):
        """Pre-compute masks for speed."""
        self._threshold_mask = self._data > self._theta
        if self._v != 0:
            self._threshold_mask_v = self._data > self._theta / np.abs(self._v)
        else:
            # With no balloon force the mask is never used in `step`; keep
            # a valid (all-False) array and avoid a division by zero, which
            # previously produced a RuntimeWarning and a NaN-poisoned mask
            # for the default balloon=0.
            self._threshold_mask_v = np.zeros_like(self._threshold_mask)

    levelset = property(lambda self: self._u,
                        set_levelset,
                        doc="The level set embedding function (u).")
    data = property(lambda self: self._data,
                    set_data,
                    doc="The data that controls the snake evolution "
                        "(the image or g(I)).")
    balloon = property(lambda self: self._v,
                       set_balloon,
                       doc="The morphological balloon parameter "
                           "(ν (nu, not v)).")
    threshold = property(lambda self: self._theta,
                         set_threshold,
                         doc="The threshold value (θ).")

    def step(self):
        """Perform a single step of the morphological snake evolution."""
        # Assign attributes to local variables for convenience.
        u = self._u
        dgI = self._ddata
        v = self._v

        if u is None:
            raise ValueError("the levelset is not set (use set_levelset)")

        res = np.copy(u)

        # Balloon force: dilate (inflate) or erode (deflate) where g(I)
        # exceeds the balloon-scaled threshold.
        if v > 0:
            aux = binary_dilation(u, self.structure)
        elif v < 0:
            aux = binary_erosion(u, self.structure)
        if v != 0:
            res[self._threshold_mask_v] = aux[self._threshold_mask_v]

        # Image attachment: move the front along the gradient of g(I).
        aux = np.zeros_like(res)
        dres = np.gradient(res)
        for el1, el2 in zip(dgI, dres):
            aux += el1 * el2
        res[aux > 0] = 1
        res[aux < 0] = 0

        # Smoothing.
        for i in range(self.smoothing):
            res = curvop(res)

        self._u = res

    def run(self, iterations):
        """Run several iterations of the morphological snakes method."""
        for _ in range(iterations):
            self.step()
def evolve_visual(msnake, fig=None, levelset=None, num_iters=20, background=None):
"""
Visual evolution of a morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
background : array-like, optional
If given, background will be shown behind the contours instead of
msnake.data.
"""
if levelset is not None:
msnake.levelset = levelset
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
if background is None:
ax1.imshow(msnake.data, cmap=plt.cm.gray)
else:
ax1.imshow(background, cmap=plt.cm.gray)
ax1.contour(msnake.levelset, [0.5], colors='r')
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(msnake.levelset)
plt.pause(0.001)
# Iterate.
for _ in range(num_iters):
# Evolve.
msnake.step()
# Update figure.
del ax1.collections[0]
ax1.contour(msnake.levelset, [0.5], colors='r')
ax_u.set_data(msnake.levelset)
fig.canvas.draw()
#plt.pause(0.001)
# Return the last levelset.
return msnake.levelset
def evolve_visual3d(msnake, fig=None, levelset=None, num_iters=20,
animate_ui=True, animate_delay=250):
"""
Visual evolution of a three-dimensional morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
animate_ui : bool, optional
Show the animation interface
animate_delay : int, optional
The number of delay between frames.
"""
from mayavi import mlab
if levelset is not None:
msnake.levelset = levelset
if fig is None:
fig = mlab.gcf()
mlab.clf()
src = mlab.pipeline.scalar_field(msnake.data)
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
@mlab.animate(ui=animate_ui, delay=animate_delay)
def anim():
for i in range(num_iters):
msnake.step()
cnt.mlab_source.scalars = msnake.levelset
print("Iteration %i/%i..." % (i + 1, num_iters))
yield
anim()
mlab.show()
# Return the last levelset.
return msnake.levelset
|
pmneila/morphsnakes | morphsnakes_v1.py | operator_is | python | def operator_is(u):
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_dilation(u, P_i)
return _aux.min(0) | operator_is operator. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L94-L111 | null | # -*- coding: utf-8 -*-
"""
morphsnakes
===========
This is a Python implementation of the algorithms introduced in the paper
Márquez-Neila, P., Baumela, L., Álvarez, L., "A morphological approach
to curvature-based evolution of curves and surfaces". IEEE Transactions
on Pattern Analysis and Machine Intelligence (PAMI), 2013.
This implementation is intended to be as brief, understandable and self-contained
as possible. It does not include any enhancement to make it fast or efficient.
Any practical implementation of this algorithm should work only over the
neighbor pixels of the 0.5-levelset, not over all the embedding function,
and perhaps should feature multi-threading or GPU capabilities.
The classes MorphGAC and MorphACWE provide most of the functionality of this
module. They implement the Morphological Geodesic Active Contours and the
Morphological Active Contours without Edges, respectively. See the
aforementioned paper for full details.
See test.py for examples of usage.
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
import os
import logging
from itertools import cycle
import matplotlib
# in case you are running on machine without display, e.g. server
if os.environ.get('DISPLAY', '') == '':
logging.warning('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import gaussian_filter, gaussian_gradient_magnitude
class FCycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# operator_si and operator_is operators for 2D and 3D.
_P2 = [np.eye(3), np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)), np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
_aux = np.zeros((0))
def operator_si(u):
"""operator_si operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_erosion(u, P_i)
return _aux.max(0)
def operator_is(u):
"""operator_is operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_dilation(u, P_i)
return _aux.min(0)
# operator_si_o_is operator.
operator_si_o_is = lambda u: operator_si(operator_is(u))
operator_os_o_si = lambda u: operator_is(operator_si(u))
curvop = FCycle([operator_si_o_is, operator_os_o_si])
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
"""Stopping criterion for image borders."""
# The norm of the gradient.
gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')
return 1.0/np.sqrt(1.0 + alpha*gradnorm)
def glines(img, sigma=1.0):
"""Stopping criterion for image black lines."""
return gaussian_filter(img, sigma)
class MorphACWE(object):
"""Morphological ACWE based on the Chan-Vese energy functional."""
def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
"""Create a Morphological ACWE solver.
Parameters
----------
data : ndarray
The image data.
smoothing : scalar
The number of repetitions of the smoothing step (the
curv operator) in each iteration. In other terms,
this is the strength of the smoothing. This is the
parameter µ.
lambda1, lambda2 : scalars
Relative importance of the inside pixels (lambda1)
against the outside pixels (lambda2).
"""
self._u = None
self.smoothing = smoothing
self.lambda1 = lambda1
self.lambda2 = lambda2
self.data = data
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
def step(self):
"""Perform a single step of the morphological Chan-Vese evolution."""
# Assign attributes to local variables for convenience.
u = self._u
if u is None:
raise ValueError("the levelset function is not set "
"(use set_levelset)")
data = self.data
# Determine c0 and c1.
inside = (u > 0)
outside = (u <= 0)
c0 = data[outside].sum() / float(outside.sum())
c1 = data[inside].sum() / float(inside.sum())
# Image attachment.
dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
#aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
aux = abs_dres * (self.lambda1*(data - c1) ** 2 -
self.lambda2*(data - c0) ** 2)
res = np.copy(u)
res[aux < 0] = 1
res[aux > 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, nb_iters):
"""Run several nb_iters of the morphological Chan-Vese method."""
for _ in range(nb_iters):
self.step()
class MorphGAC(object):
"""Morphological GAC based on the Geodesic Active Contours."""
def __init__(self, data, smoothing=1, threshold=0, balloon=0):
"""Create a Morphological GAC solver.
Parameters
----------
data : array-like
The stopping criterion g(I). See functions gborders and glines.
smoothing : scalar
The number of repetitions of the smoothing step in each
iteration. This is the parameter µ.
threshold : scalar
The threshold that determines which areas are affected
by the morphological balloon. This is the parameter θ.
balloon : scalar
The strength of the morphological balloon. This is the parameter ν.
"""
self._u = None
self._v = balloon
self._theta = threshold
self.smoothing = smoothing
self.set_data(data)
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
def set_balloon(self, v):
self._v = v
self._update_mask()
def set_threshold(self, theta):
self._theta = theta
self._update_mask()
def set_data(self, data):
self._data = data
self._ddata = np.gradient(data)
self._update_mask()
# The structure element for binary dilation and erosion.
self.structure = np.ones((3,)*np.ndim(data))
def _update_mask(self):
"""Pre-compute masks for speed."""
self._threshold_mask = self._data > self._theta
self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
data = property(lambda self: self._data,
set_data,
doc="The data that controls the snake evolution "
"(the image or g(I)).")
balloon = property(lambda self: self._v,
set_balloon,
doc="The morphological balloon parameter "
"(ν (nu, not v)).")
threshold = property(lambda self: self._theta,
set_threshold,
doc="The threshold value (θ).")
def step(self):
"""Perform a single step of the morphological snake evolution."""
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError("the levelset is not set (use set_levelset)")
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
if v!= 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological snakes method."""
for _ in range(iterations):
self.step()
def evolve_visual(msnake, fig=None, levelset=None, num_iters=20, background=None):
"""
Visual evolution of a morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
background : array-like, optional
If given, background will be shown behind the contours instead of
msnake.data.
"""
if levelset is not None:
msnake.levelset = levelset
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
if background is None:
ax1.imshow(msnake.data, cmap=plt.cm.gray)
else:
ax1.imshow(background, cmap=plt.cm.gray)
ax1.contour(msnake.levelset, [0.5], colors='r')
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(msnake.levelset)
plt.pause(0.001)
# Iterate.
for _ in range(num_iters):
# Evolve.
msnake.step()
# Update figure.
del ax1.collections[0]
ax1.contour(msnake.levelset, [0.5], colors='r')
ax_u.set_data(msnake.levelset)
fig.canvas.draw()
#plt.pause(0.001)
# Return the last levelset.
return msnake.levelset
def evolve_visual3d(msnake, fig=None, levelset=None, num_iters=20,
animate_ui=True, animate_delay=250):
"""
Visual evolution of a three-dimensional morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
animate_ui : bool, optional
Show the animation interface
animate_delay : int, optional
The number of delay between frames.
"""
from mayavi import mlab
if levelset is not None:
msnake.levelset = levelset
if fig is None:
fig = mlab.gcf()
mlab.clf()
src = mlab.pipeline.scalar_field(msnake.data)
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
@mlab.animate(ui=animate_ui, delay=animate_delay)
def anim():
for i in range(num_iters):
msnake.step()
cnt.mlab_source.scalars = msnake.levelset
print("Iteration %i/%i..." % (i + 1, num_iters))
yield
anim()
mlab.show()
# Return the last levelset.
return msnake.levelset
|
pmneila/morphsnakes | morphsnakes_v1.py | gborders | python | def gborders(img, alpha=1.0, sigma=1.0):
# The norm of the gradient.
gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')
return 1.0/np.sqrt(1.0 + alpha*gradnorm) | Stopping criterion for image borders. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L121-L125 | null | # -*- coding: utf-8 -*-
"""
morphsnakes
===========
This is a Python implementation of the algorithms introduced in the paper
Márquez-Neila, P., Baumela, L., Álvarez, L., "A morphological approach
to curvature-based evolution of curves and surfaces". IEEE Transactions
on Pattern Analysis and Machine Intelligence (PAMI), 2013.
This implementation is intended to be as brief, understandable and self-contained
as possible. It does not include any enhancement to make it fast or efficient.
Any practical implementation of this algorithm should work only over the
neighbor pixels of the 0.5-levelset, not over all the embedding function,
and perhaps should feature multi-threading or GPU capabilities.
The classes MorphGAC and MorphACWE provide most of the functionality of this
module. They implement the Morphological Geodesic Active Contours and the
Morphological Active Contours without Edges, respectively. See the
aforementioned paper for full details.
See test.py for examples of usage.
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
import os
import logging
from itertools import cycle
import matplotlib
# in case you are running on machine without display, e.g. server
if os.environ.get('DISPLAY', '') == '':
logging.warning('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import gaussian_filter, gaussian_gradient_magnitude
class FCycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# operator_si and operator_is operators for 2D and 3D.
_P2 = [np.eye(3), np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)), np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
_aux = np.zeros((0))
def operator_si(u):
"""operator_si operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_erosion(u, P_i)
return _aux.max(0)
def operator_is(u):
"""operator_is operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_dilation(u, P_i)
return _aux.min(0)
# operator_si_o_is operator.
operator_si_o_is = lambda u: operator_si(operator_is(u))
operator_os_o_si = lambda u: operator_is(operator_si(u))
curvop = FCycle([operator_si_o_is, operator_os_o_si])
# Stopping factors (function g(I) in the paper).
def glines(img, sigma=1.0):
"""Stopping criterion for image black lines."""
return gaussian_filter(img, sigma)
class MorphACWE(object):
"""Morphological ACWE based on the Chan-Vese energy functional."""
def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
"""Create a Morphological ACWE solver.
Parameters
----------
data : ndarray
The image data.
smoothing : scalar
The number of repetitions of the smoothing step (the
curv operator) in each iteration. In other terms,
this is the strength of the smoothing. This is the
parameter µ.
lambda1, lambda2 : scalars
Relative importance of the inside pixels (lambda1)
against the outside pixels (lambda2).
"""
self._u = None
self.smoothing = smoothing
self.lambda1 = lambda1
self.lambda2 = lambda2
self.data = data
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
def step(self):
"""Perform a single step of the morphological Chan-Vese evolution."""
# Assign attributes to local variables for convenience.
u = self._u
if u is None:
raise ValueError("the levelset function is not set "
"(use set_levelset)")
data = self.data
# Determine c0 and c1.
inside = (u > 0)
outside = (u <= 0)
c0 = data[outside].sum() / float(outside.sum())
c1 = data[inside].sum() / float(inside.sum())
# Image attachment.
dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
#aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
aux = abs_dres * (self.lambda1*(data - c1) ** 2 -
self.lambda2*(data - c0) ** 2)
res = np.copy(u)
res[aux < 0] = 1
res[aux > 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, nb_iters):
"""Run several nb_iters of the morphological Chan-Vese method."""
for _ in range(nb_iters):
self.step()
class MorphGAC(object):
"""Morphological GAC based on the Geodesic Active Contours."""
def __init__(self, data, smoothing=1, threshold=0, balloon=0):
"""Create a Morphological GAC solver.
Parameters
----------
data : array-like
The stopping criterion g(I). See functions gborders and glines.
smoothing : scalar
The number of repetitions of the smoothing step in each
iteration. This is the parameter µ.
threshold : scalar
The threshold that determines which areas are affected
by the morphological balloon. This is the parameter θ.
balloon : scalar
The strength of the morphological balloon. This is the parameter ν.
"""
self._u = None
self._v = balloon
self._theta = threshold
self.smoothing = smoothing
self.set_data(data)
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
def set_balloon(self, v):
self._v = v
self._update_mask()
def set_threshold(self, theta):
self._theta = theta
self._update_mask()
def set_data(self, data):
self._data = data
self._ddata = np.gradient(data)
self._update_mask()
# The structure element for binary dilation and erosion.
self.structure = np.ones((3,)*np.ndim(data))
def _update_mask(self):
"""Pre-compute masks for speed."""
self._threshold_mask = self._data > self._theta
self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
data = property(lambda self: self._data,
set_data,
doc="The data that controls the snake evolution "
"(the image or g(I)).")
balloon = property(lambda self: self._v,
set_balloon,
doc="The morphological balloon parameter "
"(ν (nu, not v)).")
threshold = property(lambda self: self._theta,
set_threshold,
doc="The threshold value (θ).")
def step(self):
"""Perform a single step of the morphological snake evolution."""
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError("the levelset is not set (use set_levelset)")
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
if v!= 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological snakes method."""
for _ in range(iterations):
self.step()
def evolve_visual(msnake, fig=None, levelset=None, num_iters=20, background=None):
"""
Visual evolution of a morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
background : array-like, optional
If given, background will be shown behind the contours instead of
msnake.data.
"""
if levelset is not None:
msnake.levelset = levelset
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
if background is None:
ax1.imshow(msnake.data, cmap=plt.cm.gray)
else:
ax1.imshow(background, cmap=plt.cm.gray)
ax1.contour(msnake.levelset, [0.5], colors='r')
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(msnake.levelset)
plt.pause(0.001)
# Iterate.
for _ in range(num_iters):
# Evolve.
msnake.step()
# Update figure.
del ax1.collections[0]
ax1.contour(msnake.levelset, [0.5], colors='r')
ax_u.set_data(msnake.levelset)
fig.canvas.draw()
#plt.pause(0.001)
# Return the last levelset.
return msnake.levelset
def evolve_visual3d(msnake, fig=None, levelset=None, num_iters=20,
animate_ui=True, animate_delay=250):
"""
Visual evolution of a three-dimensional morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
animate_ui : bool, optional
Show the animation interface
animate_delay : int, optional
The number of delay between frames.
"""
from mayavi import mlab
if levelset is not None:
msnake.levelset = levelset
if fig is None:
fig = mlab.gcf()
mlab.clf()
src = mlab.pipeline.scalar_field(msnake.data)
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
@mlab.animate(ui=animate_ui, delay=animate_delay)
def anim():
for i in range(num_iters):
msnake.step()
cnt.mlab_source.scalars = msnake.levelset
print("Iteration %i/%i..." % (i + 1, num_iters))
yield
anim()
mlab.show()
# Return the last levelset.
return msnake.levelset
|
pmneila/morphsnakes | morphsnakes_v1.py | evolve_visual | python | def evolve_visual(msnake, fig=None, levelset=None, num_iters=20, background=None):
if levelset is not None:
msnake.levelset = levelset
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
if background is None:
ax1.imshow(msnake.data, cmap=plt.cm.gray)
else:
ax1.imshow(background, cmap=plt.cm.gray)
ax1.contour(msnake.levelset, [0.5], colors='r')
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(msnake.levelset)
plt.pause(0.001)
# Iterate.
for _ in range(num_iters):
# Evolve.
msnake.step()
# Update figure.
del ax1.collections[0]
ax1.contour(msnake.levelset, [0.5], colors='r')
ax_u.set_data(msnake.levelset)
fig.canvas.draw()
#plt.pause(0.001)
# Return the last levelset.
return msnake.levelset | Visual evolution of a morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
background : array-like, optional
If given, background will be shown behind the contours instead of
msnake.data. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L316-L366 | null | # -*- coding: utf-8 -*-
"""
morphsnakes
===========
This is a Python implementation of the algorithms introduced in the paper
Márquez-Neila, P., Baumela, L., Álvarez, L., "A morphological approach
to curvature-based evolution of curves and surfaces". IEEE Transactions
on Pattern Analysis and Machine Intelligence (PAMI), 2013.
This implementation is intended to be as brief, understandable and self-contained
as possible. It does not include any enhancement to make it fast or efficient.
Any practical implementation of this algorithm should work only over the
neighbor pixels of the 0.5-levelset, not over all the embedding function,
and perhaps should feature multi-threading or GPU capabilities.
The classes MorphGAC and MorphACWE provide most of the functionality of this
module. They implement the Morphological Geodesic Active Contours and the
Morphological Active Contours without Edges, respectively. See the
aforementioned paper for full details.
See test.py for examples of usage.
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
import os
import logging
from itertools import cycle
import matplotlib
# in case you are running on machine without display, e.g. server
if os.environ.get('DISPLAY', '') == '':
logging.warning('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import gaussian_filter, gaussian_gradient_magnitude
class FCycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# operator_si and operator_is operators for 2D and 3D.
_P2 = [np.eye(3), np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)), np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
_aux = np.zeros((0))
def operator_si(u):
"""operator_si operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_erosion(u, P_i)
return _aux.max(0)
def operator_is(u):
"""operator_is operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_dilation(u, P_i)
return _aux.min(0)
# operator_si_o_is operator.
operator_si_o_is = lambda u: operator_si(operator_is(u))
operator_os_o_si = lambda u: operator_is(operator_si(u))
curvop = FCycle([operator_si_o_is, operator_os_o_si])
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
"""Stopping criterion for image borders."""
# The norm of the gradient.
gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')
return 1.0/np.sqrt(1.0 + alpha*gradnorm)
def glines(img, sigma=1.0):
"""Stopping criterion for image black lines."""
return gaussian_filter(img, sigma)
class MorphACWE(object):
    """Morphological ACWE based on the Chan-Vese energy functional."""
    def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
        """Create a Morphological ACWE solver.
        Parameters
        ----------
        data : ndarray
            The image data.
        smoothing : scalar
            The number of repetitions of the smoothing step (the
            curv operator) in each iteration. In other terms,
            this is the strength of the smoothing. This is the
            parameter µ.
        lambda1, lambda2 : scalars
            Relative importance of the inside pixels (lambda1)
            against the outside pixels (lambda2).
        """
        # The level-set embedding stays unset (None) until set_levelset()
        # or the `levelset` property is used; step() raises otherwise.
        self._u = None
        self.smoothing = smoothing
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.data = data
    def set_levelset(self, u):
        # Binarize the given level set: strictly positive -> 1, else 0.
        self._u = np.double(u)
        self._u[u>0] = 1
        self._u[u<=0] = 0
    levelset = property(lambda self: self._u,
                        set_levelset,
                        doc="The level set embedding function (u).")
    def step(self):
        """Perform a single step of the morphological Chan-Vese evolution."""
        # Assign attributes to local variables for convenience.
        u = self._u
        if u is None:
            raise ValueError("the levelset function is not set "
                             "(use set_levelset)")
        data = self.data
        # Determine c0 and c1: the mean data values outside and inside
        # the contour. NOTE(review): if either region becomes empty the
        # division is by zero -- confirm callers keep both non-empty.
        inside = (u > 0)
        outside = (u <= 0)
        c0 = data[outside].sum() / float(outside.sum())
        c1 = data[inside].sum() / float(inside.sum())
        # Image attachment: |grad u| restricts the update to the
        # neighborhood of the contour; the sign of the weighted
        # Chan-Vese region terms decides flips below.
        dres = np.array(np.gradient(u))
        abs_dres = np.abs(dres).sum(0)
        #aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
        aux = abs_dres * (self.lambda1*(data - c1) ** 2 -
                          self.lambda2*(data - c0) ** 2)
        res = np.copy(u)
        res[aux < 0] = 1
        res[aux > 0] = 0
        # Smoothing: apply the alternating morphological curvature
        # operator `smoothing` times.
        for i in range(self.smoothing):
            res = curvop(res)
        self._u = res
    def run(self, nb_iters):
        """Run nb_iters iterations of the morphological Chan-Vese method."""
        for _ in range(nb_iters):
            self.step()
class MorphGAC(object):
    """Morphological GAC based on the Geodesic Active Contours."""
    def __init__(self, data, smoothing=1, threshold=0, balloon=0):
        """Create a Morphological GAC solver.
        Parameters
        ----------
        data : array-like
            The stopping criterion g(I). See functions gborders and glines.
        smoothing : scalar
            The number of repetitions of the smoothing step in each
            iteration. This is the parameter µ.
        threshold : scalar
            The threshold that determines which areas are affected
            by the morphological balloon. This is the parameter θ.
        balloon : scalar
            The strength of the morphological balloon. This is the parameter ν.
        """
        self._u = None
        self._v = balloon
        self._theta = threshold
        self.smoothing = smoothing
        # set_data() also precomputes gradients, masks and the
        # structuring element.
        self.set_data(data)
    def set_levelset(self, u):
        # Binarize the given level set: strictly positive -> 1, else 0.
        self._u = np.double(u)
        self._u[u>0] = 1
        self._u[u<=0] = 0
    def set_balloon(self, v):
        self._v = v
        self._update_mask()
    def set_threshold(self, theta):
        self._theta = theta
        self._update_mask()
    def set_data(self, data):
        self._data = data
        # Per-axis gradients of g(I), reused every step().
        self._ddata = np.gradient(data)
        self._update_mask()
        # The structure element for binary dilation and erosion.
        self.structure = np.ones((3,)*np.ndim(data))
    def _update_mask(self):
        """Pre-compute masks for speed.

        NOTE(review): when the balloon force is 0 (the default),
        `self._theta/np.abs(self._v)` divides by zero; the resulting
        mask is unused in step() (guarded by `v != 0`), but the
        division itself may warn -- confirm this is intended.
        """
        self._threshold_mask = self._data > self._theta
        self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
    levelset = property(lambda self: self._u,
                        set_levelset,
                        doc="The level set embedding function (u).")
    data = property(lambda self: self._data,
                    set_data,
                    doc="The data that controls the snake evolution "
                        "(the image or g(I)).")
    balloon = property(lambda self: self._v,
                       set_balloon,
                       doc="The morphological balloon parameter "
                           "(ν (nu, not v)).")
    threshold = property(lambda self: self._theta,
                         set_threshold,
                         doc="The threshold value (θ).")
    def step(self):
        """Perform a single step of the morphological snake evolution."""
        # Assign attributes to local variables for convenience.
        # (gI and theta are bound but not used below.)
        u = self._u
        gI = self._data
        dgI = self._ddata
        theta = self._theta
        v = self._v
        if u is None:
            raise ValueError("the levelset is not set (use set_levelset)")
        res = np.copy(u)
        # Balloon: inflate (v > 0) or deflate (v < 0) wherever the
        # precomputed threshold mask allows it.
        if v > 0:
            aux = binary_dilation(u, self.structure)
        elif v < 0:
            aux = binary_erosion(u, self.structure)
        if v!= 0:
            res[self._threshold_mask_v] = aux[self._threshold_mask_v]
        # Image attachment: sign of <grad g(I), grad u> decides flips.
        aux = np.zeros_like(res)
        dres = np.gradient(res)
        for el1, el2 in zip(dgI, dres):
            aux += el1*el2
        res[aux > 0] = 1
        res[aux < 0] = 0
        # Smoothing: alternating morphological curvature operator.
        for i in range(self.smoothing):
            res = curvop(res)
        self._u = res
    def run(self, iterations):
        """Run several iterations of the morphological snakes method."""
        for _ in range(iterations):
            self.step()
def _remove_contour(contour_set):
    """Remove a contour plot from its axes across matplotlib versions."""
    try:
        # matplotlib >= 3.8: ContourSet is a single removable Artist.
        contour_set.remove()
    except (AttributeError, NotImplementedError):
        # Older matplotlib: remove the individual LineCollections.
        for coll in contour_set.collections:
            coll.remove()


def evolve_visual(msnake, fig=None, levelset=None, num_iters=20, background=None):
    """
    Visual evolution of a morphological snake.
    Parameters
    ----------
    msnake : MorphGAC or MorphACWE instance
        The morphological snake solver.
    fig: object, optional
        Handles to actual figure.
    levelset : array-like, optional
        If given, the levelset of the solver is initialized to this. If not
        given, the evolution will use the levelset already set in msnake.
    num_iters : int, optional
        The number of iterations.
    background : array-like, optional
        If given, background will be shown behind the contours instead of
        msnake.data.
    """
    if levelset is not None:
        msnake.levelset = levelset
    # Prepare the visual environment.
    if fig is None:
        fig = plt.figure()
    fig.clf()
    ax1 = fig.add_subplot(1, 2, 1)
    if background is None:
        ax1.imshow(msnake.data, cmap=plt.cm.gray)
    else:
        ax1.imshow(background, cmap=plt.cm.gray)
    contours = ax1.contour(msnake.levelset, [0.5], colors='r')
    ax2 = fig.add_subplot(1, 2, 2)
    ax_u = ax2.imshow(msnake.levelset)
    plt.pause(0.001)
    # Iterate.
    for _ in range(num_iters):
        # Evolve.
        msnake.step()
        # Update figure. The previous `del ax1.collections[0]` mutated
        # the Axes children list directly, which matplotlib >= 3.7
        # rejects; remove the old contour artist instead.
        _remove_contour(contours)
        contours = ax1.contour(msnake.levelset, [0.5], colors='r')
        ax_u.set_data(msnake.levelset)
        fig.canvas.draw()
        #plt.pause(0.001)
    # Return the last levelset.
    return msnake.levelset
def evolve_visual3d(msnake, fig=None, levelset=None, num_iters=20,
                    animate_ui=True, animate_delay=250):
    """
    Visual evolution of a three-dimensional morphological snake.
    Parameters
    ----------
    msnake : MorphGAC or MorphACWE instance
        The morphological snake solver.
    fig: object, optional
        Handles to actual figure.
    levelset : array-like, optional
        If given, the levelset of the solver is initialized to this. If not
        given, the evolution will use the levelset already set in msnake.
    num_iters : int, optional
        The number of iterations.
    animate_ui : bool, optional
        Show the animation interface.
    animate_delay : int, optional
        The delay between frames, in milliseconds.
    """
    # Imported lazily so that 2-D users do not need mayavi installed.
    from mayavi import mlab
    if levelset is not None:
        msnake.levelset = levelset
    if fig is None:
        fig = mlab.gcf()
        mlab.clf()
    # Scalar field of the data with a slicing widget, plus the 0.5
    # iso-surface of the level set.
    src = mlab.pipeline.scalar_field(msnake.data)
    mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
    cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
    # Generator-based mayavi animation: each `yield` lets the UI redraw
    # between snake steps.
    @mlab.animate(ui=animate_ui, delay=animate_delay)
    def anim():
        for i in range(num_iters):
            msnake.step()
            cnt.mlab_source.scalars = msnake.levelset
            print("Iteration %i/%i..." % (i + 1, num_iters))
            yield
    anim()
    # Blocks until the mayavi window is closed.
    mlab.show()
    # Return the last levelset.
    return msnake.levelset
|
pmneila/morphsnakes | morphsnakes_v1.py | evolve_visual3d | python | def evolve_visual3d(msnake, fig=None, levelset=None, num_iters=20,
animate_ui=True, animate_delay=250):
from mayavi import mlab
if levelset is not None:
msnake.levelset = levelset
if fig is None:
fig = mlab.gcf()
mlab.clf()
src = mlab.pipeline.scalar_field(msnake.data)
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
@mlab.animate(ui=animate_ui, delay=animate_delay)
def anim():
for i in range(num_iters):
msnake.step()
cnt.mlab_source.scalars = msnake.levelset
print("Iteration %i/%i..." % (i + 1, num_iters))
yield
anim()
mlab.show()
# Return the last levelset.
return msnake.levelset | Visual evolution of a three-dimensional morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
animate_ui : bool, optional
Show the animation interface
animate_delay : int, optional
The number of delay between frames. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L369-L414 | null | # -*- coding: utf-8 -*-
"""
morphsnakes
===========
This is a Python implementation of the algorithms introduced in the paper
Márquez-Neila, P., Baumela, L., Álvarez, L., "A morphological approach
to curvature-based evolution of curves and surfaces". IEEE Transactions
on Pattern Analysis and Machine Intelligence (PAMI), 2013.
This implementation is intended to be as brief, understandable and self-contained
as possible. It does not include any enhancement to make it fast or efficient.
Any practical implementation of this algorithm should work only over the
neighbor pixels of the 0.5-levelset, not over all the embedding function,
and perhaps should feature multi-threading or GPU capabilities.
The classes MorphGAC and MorphACWE provide most of the functionality of this
module. They implement the Morphological Geodesic Active Contours and the
Morphological Active Contours without Edges, respectively. See the
aforementioned paper for full details.
See test.py for examples of usage.
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
import os
import logging
from itertools import cycle
import matplotlib
# in case you are running on machine without display, e.g. server
if os.environ.get('DISPLAY', '') == '':
logging.warning('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.ndimage import gaussian_filter, gaussian_gradient_magnitude
class FCycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# operator_si and operator_is operators for 2D and 3D.
_P2 = [np.eye(3), np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)), np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
_aux = np.zeros((0))
def operator_si(u):
"""operator_si operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_erosion(u, P_i)
return _aux.max(0)
def operator_is(u):
"""operator_is operator."""
global _aux
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
if u.shape != _aux.shape[1:]:
_aux = np.zeros((len(P),) + u.shape)
for _aux_i, P_i in zip(_aux, P):
_aux_i[:] = binary_dilation(u, P_i)
return _aux.min(0)
# operator_si_o_is operator.
operator_si_o_is = lambda u: operator_si(operator_is(u))
operator_os_o_si = lambda u: operator_is(operator_si(u))
curvop = FCycle([operator_si_o_is, operator_os_o_si])
# Stopping factors (function g(I) in the paper).
def gborders(img, alpha=1.0, sigma=1.0):
"""Stopping criterion for image borders."""
# The norm of the gradient.
gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')
return 1.0/np.sqrt(1.0 + alpha*gradnorm)
def glines(img, sigma=1.0):
"""Stopping criterion for image black lines."""
return gaussian_filter(img, sigma)
class MorphACWE(object):
"""Morphological ACWE based on the Chan-Vese energy functional."""
def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
"""Create a Morphological ACWE solver.
Parameters
----------
data : ndarray
The image data.
smoothing : scalar
The number of repetitions of the smoothing step (the
curv operator) in each iteration. In other terms,
this is the strength of the smoothing. This is the
parameter µ.
lambda1, lambda2 : scalars
Relative importance of the inside pixels (lambda1)
against the outside pixels (lambda2).
"""
self._u = None
self.smoothing = smoothing
self.lambda1 = lambda1
self.lambda2 = lambda2
self.data = data
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
def step(self):
"""Perform a single step of the morphological Chan-Vese evolution."""
# Assign attributes to local variables for convenience.
u = self._u
if u is None:
raise ValueError("the levelset function is not set "
"(use set_levelset)")
data = self.data
# Determine c0 and c1.
inside = (u > 0)
outside = (u <= 0)
c0 = data[outside].sum() / float(outside.sum())
c1 = data[inside].sum() / float(inside.sum())
# Image attachment.
dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
#aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
aux = abs_dres * (self.lambda1*(data - c1) ** 2 -
self.lambda2*(data - c0) ** 2)
res = np.copy(u)
res[aux < 0] = 1
res[aux > 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, nb_iters):
"""Run several nb_iters of the morphological Chan-Vese method."""
for _ in range(nb_iters):
self.step()
class MorphGAC(object):
"""Morphological GAC based on the Geodesic Active Contours."""
def __init__(self, data, smoothing=1, threshold=0, balloon=0):
"""Create a Morphological GAC solver.
Parameters
----------
data : array-like
The stopping criterion g(I). See functions gborders and glines.
smoothing : scalar
The number of repetitions of the smoothing step in each
iteration. This is the parameter µ.
threshold : scalar
The threshold that determines which areas are affected
by the morphological balloon. This is the parameter θ.
balloon : scalar
The strength of the morphological balloon. This is the parameter ν.
"""
self._u = None
self._v = balloon
self._theta = threshold
self.smoothing = smoothing
self.set_data(data)
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
def set_balloon(self, v):
self._v = v
self._update_mask()
def set_threshold(self, theta):
self._theta = theta
self._update_mask()
def set_data(self, data):
self._data = data
self._ddata = np.gradient(data)
self._update_mask()
# The structure element for binary dilation and erosion.
self.structure = np.ones((3,)*np.ndim(data))
def _update_mask(self):
"""Pre-compute masks for speed."""
self._threshold_mask = self._data > self._theta
self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
data = property(lambda self: self._data,
set_data,
doc="The data that controls the snake evolution "
"(the image or g(I)).")
balloon = property(lambda self: self._v,
set_balloon,
doc="The morphological balloon parameter "
"(ν (nu, not v)).")
threshold = property(lambda self: self._theta,
set_threshold,
doc="The threshold value (θ).")
def step(self):
"""Perform a single step of the morphological snake evolution."""
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError("the levelset is not set (use set_levelset)")
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
if v!= 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological snakes method."""
for _ in range(iterations):
self.step()
def evolve_visual(msnake, fig=None, levelset=None, num_iters=20, background=None):
"""
Visual evolution of a morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
background : array-like, optional
If given, background will be shown behind the contours instead of
msnake.data.
"""
if levelset is not None:
msnake.levelset = levelset
# Prepare the visual environment.
if fig is None:
fig = plt.figure()
fig.clf()
ax1 = fig.add_subplot(1, 2, 1)
if background is None:
ax1.imshow(msnake.data, cmap=plt.cm.gray)
else:
ax1.imshow(background, cmap=plt.cm.gray)
ax1.contour(msnake.levelset, [0.5], colors='r')
ax2 = fig.add_subplot(1, 2, 2)
ax_u = ax2.imshow(msnake.levelset)
plt.pause(0.001)
# Iterate.
for _ in range(num_iters):
# Evolve.
msnake.step()
# Update figure.
del ax1.collections[0]
ax1.contour(msnake.levelset, [0.5], colors='r')
ax_u.set_data(msnake.levelset)
fig.canvas.draw()
#plt.pause(0.001)
# Return the last levelset.
return msnake.levelset
def evolve_visual3d(msnake, fig=None, levelset=None, num_iters=20,
animate_ui=True, animate_delay=250):
"""
Visual evolution of a three-dimensional morphological snake.
Parameters
----------
msnake : MorphGAC or MorphACWE instance
The morphological snake solver.
fig: object, optional
Handles to actual figure.
levelset : array-like, optional
If given, the levelset of the solver is initialized to this. If not
given, the evolution will use the levelset already set in msnake.
num_iters : int, optional
The number of iterations.
animate_ui : bool, optional
Show the animation interface
animate_delay : int, optional
The number of delay between frames.
"""
from mayavi import mlab
if levelset is not None:
msnake.levelset = levelset
if fig is None:
fig = mlab.gcf()
mlab.clf()
src = mlab.pipeline.scalar_field(msnake.data)
mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')
cnt = mlab.contour3d(msnake.levelset, contours=[0.5])
@mlab.animate(ui=animate_ui, delay=animate_delay)
def anim():
for i in range(num_iters):
msnake.step()
cnt.mlab_source.scalars = msnake.levelset
print("Iteration %i/%i..." % (i + 1, num_iters))
yield
anim()
mlab.show()
# Return the last levelset.
return msnake.levelset
|
pmneila/morphsnakes | morphsnakes_v1.py | MorphACWE.step | python | def step(self):
# Assign attributes to local variables for convenience.
u = self._u
if u is None:
raise ValueError("the levelset function is not set "
"(use set_levelset)")
data = self.data
# Determine c0 and c1.
inside = (u > 0)
outside = (u <= 0)
c0 = data[outside].sum() / float(outside.sum())
c1 = data[inside].sum() / float(inside.sum())
# Image attachment.
dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
#aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
aux = abs_dres * (self.lambda1*(data - c1) ** 2 -
self.lambda2*(data - c0) ** 2)
res = np.copy(u)
res[aux < 0] = 1
res[aux > 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res | Perform a single step of the morphological Chan-Vese evolution. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L168-L200 | null | class MorphACWE(object):
"""Morphological ACWE based on the Chan-Vese energy functional."""
def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):
"""Create a Morphological ACWE solver.
Parameters
----------
data : ndarray
The image data.
smoothing : scalar
The number of repetitions of the smoothing step (the
curv operator) in each iteration. In other terms,
this is the strength of the smoothing. This is the
parameter µ.
lambda1, lambda2 : scalars
Relative importance of the inside pixels (lambda1)
against the outside pixels (lambda2).
"""
self._u = None
self.smoothing = smoothing
self.lambda1 = lambda1
self.lambda2 = lambda2
self.data = data
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
def step(self):
"""Perform a single step of the morphological Chan-Vese evolution."""
# Assign attributes to local variables for convenience.
u = self._u
if u is None:
raise ValueError("the levelset function is not set "
"(use set_levelset)")
data = self.data
# Determine c0 and c1.
inside = (u > 0)
outside = (u <= 0)
c0 = data[outside].sum() / float(outside.sum())
c1 = data[inside].sum() / float(inside.sum())
# Image attachment.
dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
#aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)
aux = abs_dres * (self.lambda1*(data - c1) ** 2 -
self.lambda2*(data - c0) ** 2)
res = np.copy(u)
res[aux < 0] = 1
res[aux > 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, nb_iters):
"""Run several nb_iters of the morphological Chan-Vese method."""
for _ in range(nb_iters):
self.step()
|
pmneila/morphsnakes | morphsnakes_v1.py | MorphGAC._update_mask | python | def _update_mask(self):
self._threshold_mask = self._data > self._theta
self._threshold_mask_v = self._data > self._theta/np.abs(self._v) | Pre-compute masks for speed. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L254-L257 | null | class MorphGAC(object):
"""Morphological GAC based on the Geodesic Active Contours."""
def __init__(self, data, smoothing=1, threshold=0, balloon=0):
"""Create a Morphological GAC solver.
Parameters
----------
data : array-like
The stopping criterion g(I). See functions gborders and glines.
smoothing : scalar
The number of repetitions of the smoothing step in each
iteration. This is the parameter µ.
threshold : scalar
The threshold that determines which areas are affected
by the morphological balloon. This is the parameter θ.
balloon : scalar
The strength of the morphological balloon. This is the parameter ν.
"""
self._u = None
self._v = balloon
self._theta = threshold
self.smoothing = smoothing
self.set_data(data)
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
def set_balloon(self, v):
self._v = v
self._update_mask()
def set_threshold(self, theta):
self._theta = theta
self._update_mask()
def set_data(self, data):
self._data = data
self._ddata = np.gradient(data)
self._update_mask()
# The structure element for binary dilation and erosion.
self.structure = np.ones((3,)*np.ndim(data))
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
data = property(lambda self: self._data,
set_data,
doc="The data that controls the snake evolution "
"(the image or g(I)).")
balloon = property(lambda self: self._v,
set_balloon,
doc="The morphological balloon parameter "
"(ν (nu, not v)).")
threshold = property(lambda self: self._theta,
set_threshold,
doc="The threshold value (θ).")
def step(self):
"""Perform a single step of the morphological snake evolution."""
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError("the levelset is not set (use set_levelset)")
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
if v!= 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological snakes method."""
for _ in range(iterations):
self.step()
|
pmneila/morphsnakes | morphsnakes_v1.py | MorphGAC.step | python | def step(self):
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError("the levelset is not set (use set_levelset)")
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
if v!= 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res | Perform a single step of the morphological snake evolution. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L274-L308 | null | class MorphGAC(object):
"""Morphological GAC based on the Geodesic Active Contours."""
def __init__(self, data, smoothing=1, threshold=0, balloon=0):
"""Create a Morphological GAC solver.
Parameters
----------
data : array-like
The stopping criterion g(I). See functions gborders and glines.
smoothing : scalar
The number of repetitions of the smoothing step in each
iteration. This is the parameter µ.
threshold : scalar
The threshold that determines which areas are affected
by the morphological balloon. This is the parameter θ.
balloon : scalar
The strength of the morphological balloon. This is the parameter ν.
"""
self._u = None
self._v = balloon
self._theta = threshold
self.smoothing = smoothing
self.set_data(data)
def set_levelset(self, u):
self._u = np.double(u)
self._u[u>0] = 1
self._u[u<=0] = 0
def set_balloon(self, v):
self._v = v
self._update_mask()
def set_threshold(self, theta):
self._theta = theta
self._update_mask()
def set_data(self, data):
self._data = data
self._ddata = np.gradient(data)
self._update_mask()
# The structure element for binary dilation and erosion.
self.structure = np.ones((3,)*np.ndim(data))
def _update_mask(self):
"""Pre-compute masks for speed."""
self._threshold_mask = self._data > self._theta
self._threshold_mask_v = self._data > self._theta/np.abs(self._v)
levelset = property(lambda self: self._u,
set_levelset,
doc="The level set embedding function (u).")
data = property(lambda self: self._data,
set_data,
doc="The data that controls the snake evolution "
"(the image or g(I)).")
balloon = property(lambda self: self._v,
set_balloon,
doc="The morphological balloon parameter "
"(ν (nu, not v)).")
threshold = property(lambda self: self._theta,
set_threshold,
doc="The threshold value (θ).")
def step(self):
"""Perform a single step of the morphological snake evolution."""
# Assign attributes to local variables for convenience.
u = self._u
gI = self._data
dgI = self._ddata
theta = self._theta
v = self._v
if u is None:
raise ValueError("the levelset is not set (use set_levelset)")
res = np.copy(u)
# Balloon.
if v > 0:
aux = binary_dilation(u, self.structure)
elif v < 0:
aux = binary_erosion(u, self.structure)
if v!= 0:
res[self._threshold_mask_v] = aux[self._threshold_mask_v]
# Image attachment.
aux = np.zeros_like(res)
dres = np.gradient(res)
for el1, el2 in zip(dgI, dres):
aux += el1*el2
res[aux > 0] = 1
res[aux < 0] = 0
# Smoothing.
for i in range(self.smoothing):
res = curvop(res)
self._u = res
def run(self, iterations):
"""Run several iterations of the morphological snakes method."""
for _ in range(iterations):
self.step()
|
pmneila/morphsnakes | morphsnakes.py | sup_inf | python | def sup_inf(u):
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0) | SI operator. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L100-L115 | null | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
# _P2: four 3x3 line-shaped structuring elements -- the main diagonal,
# the central column, the anti-diagonal and the central row.
_P2 = [np.eye(3),
       np.array([[0, 1, 0]] * 3),
       np.flipud(np.eye(3)),
       np.rot90([[0, 1, 0]] * 3)]
# _P3: nine 3x3x3 plane-shaped structuring elements -- the three
# axis-aligned mid-planes, then six diagonal planes through the cube.
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def inf_sup(u):
    """IS operator.

    Dilates `u` with every line/plane structuring element and takes the
    pointwise minimum of the results.
    """
    ndim = np.ndim(u)
    if ndim == 2:
        elements = _P2
    elif ndim == 3:
        elements = _P3
    else:
        raise ValueError("u has an invalid number of dimensions "
                         "(should be 2 or 3)")
    dilations = [ndi.binary_dilation(u, se) for se in elements]
    return np.array(dilations, dtype=np.int8).min(0)
# Curvature-smoothing operator: successive calls alternate between the
# two compositions of sup_inf and inf_sup (see _fcycle).
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)),  # SIoIS
                   lambda u: inf_sup(sup_inf(u))])  # ISoSI
def _check_input(image, init_level_set):
"""Check that shapes of `image` and `init_level_set` match."""
if not image.ndim in [2, 3]:
raise ValueError("`image` must be a 2 or 3-dimensional array.")
if len(image.shape) != len(init_level_set.shape):
raise ValueError("The dimensions of the initial level set do not "
"match the dimensions of the image.")
def _init_level_set(init_level_set, image_shape):
"""Auxiliary function for initializing level sets with a string.
If `init_level_set` is not a string, it is returned as is.
"""
if isinstance(init_level_set, str):
if init_level_set == 'checkerboard':
res = checkerboard_level_set(image_shape)
elif init_level_set == 'circle':
res = circle_level_set(image_shape)
else:
raise ValueError("`init_level_set` not in "
"['checkerboard', 'circle']")
else:
res = init_level_set
return res
def circle_level_set(image_shape, center=None, radius=None):
    """Create a circle level set with binary values.

    Parameters
    ----------
    image_shape : tuple of positive integers
        Shape of the image.
    center : tuple of positive integers, optional
        Coordinates of the center of the circle given in (row, column).
        Defaults to the center of the image.
    radius : float, optional
        Radius of the circle. Defaults to 3/8 of the smallest image
        dimension (i.e. 75% of the smallest half-dimension).

    Returns
    -------
    out : array with shape `image_shape`
        Binary level set of the circle with the given `radius` and
        `center`.

    See also
    --------
    checkerboard_level_set
    """
    if center is None:
        center = tuple(dim // 2 for dim in image_shape)
    if radius is None:
        radius = min(image_shape) * 3.0 / 8.0
    # Signed distance from each grid point to the center.
    coords = np.mgrid[[slice(dim) for dim in image_shape]]
    offsets = (coords.T - center).T
    distance = np.sqrt((offsets ** 2).sum(0))
    # Strictly inside the circle -> 1, on or outside -> 0.
    return np.int8(distance < radius)
def checkerboard_level_set(image_shape, square_size=5):
    """Create a checkerboard level set with binary values.

    Parameters
    ----------
    image_shape : tuple of positive integers
        Shape of the image.
    square_size : int, optional
        Size of the squares of the checkerboard. It defaults to 5.

    Returns
    -------
    out : array with shape `image_shape`
        Binary level set of the checkerboard.

    See also
    --------
    circle_level_set
    """
    # Per-axis square parity: 0/1 alternating every `square_size` pixels.
    grid = np.ogrid[tuple(slice(i) for i in image_shape)]
    parities = [(coords // square_size) & 1 for coords in grid]

    # XOR the per-axis parities with broadcasting. `np.bitwise_xor.reduce`
    # over the raw list would first try to stack the operands into one
    # array, which fails for the open (ogrid) coordinate arrays whose
    # shapes differ (e.g. (r, 1) vs (1, c)); an explicit fold broadcasts
    # them to the full `image_shape` instead.
    checkerboard = parities[0]
    for parity in parities[1:]:
        checkerboard = checkerboard ^ parity
    return np.int8(checkerboard)
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
    """Map an image to an inverse-gradient stopping function in (0, 1].

    The image is smoothed with a Gaussian of width `sigma`, its gradient
    magnitude is computed, and the result is inverted so that flat regions
    map to values close to 1 while strong borders map to values close to
    0. This function (or a user-defined equivalent) is the typical
    preprocessing step before calling
    `morphological_geodesic_active_contour`.

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume.
    alpha : float, optional
        Steepness of the inversion; larger values sharpen the transition
        between flat areas and border areas in the resulting array.
    sigma : float, optional
        Standard deviation of the Gaussian filter applied over the image.

    Returns
    -------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image (or volume) suitable for
        `morphological_geodesic_active_contour`.
    """
    magnitude = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
    return 1.0 / np.sqrt(1.0 + alpha * magnitude)
def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
                            smoothing=1, lambda1=1, lambda2=1,
                            iter_callback=lambda x: None):
    """Morphological Active Contours without Edges (MorphACWE).

    Segment objects whose interior differs on average from the exterior
    (darker or lighter), without requiring well-defined borders. This is
    the morphological formulation of the Chan-Vese algorithm: the contour
    evolves through morphological operators instead of a PDE, which avoids
    the numerical-stability issues of the PDE formulation (no time step to
    tune) and is computationally faster [1]_.

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume to be segmented.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. An array is binarized and used directly. The
        strings 'checkerboard' and 'circle' build a level set with the
        shape of `image`; see `checkerboard_level_set` and
        `circle_level_set` for how these are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4; larger values lead to smoother
        segmentations.
    lambda1 : float, optional
        Weight parameter for the outer region. If `lambda1` is larger than
        `lambda2`, the outer region will contain a larger range of values
        than the inner region.
    lambda2 : float, optional
        Weight parameter for the inner region. If `lambda2` is larger than
        `lambda1`, the inner region will contain a larger range of values
        than the outer region.
    iter_callback : function, optional
        Called once per iteration with the current level set as the only
        argument; useful for debugging or for plotting intermediate
        results.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set).

    See also
    --------
    circle_level_set, checkerboard_level_set

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves
           and Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez.
           In IEEE Transactions on Pattern Analysis and Machine
           Intelligence (PAMI), 2014, DOI 10.1109/TPAMI.2013.106
    """
    level_set = _init_level_set(init_level_set, image.shape)
    _check_input(image, level_set)

    u = np.int8(level_set > 0)
    iter_callback(u)

    for _ in range(iterations):
        # Region averages (u > 0 is "inside"); the small epsilon guards
        # against division by zero when a region is empty.
        outside = 1 - u
        c0 = (image * outside).sum() / float(outside.sum() + 1e-8)
        c1 = (image * u).sum() / float(u.sum() + 1e-8)

        # Image attachment: flip border pixels toward whichever region's
        # average their value is closer to (weighted by lambda1/lambda2).
        gradient = np.gradient(u)
        edge_strength = np.abs(gradient).sum(0)
        balance = edge_strength * (lambda1 * (image - c1) ** 2
                                   - lambda2 * (image - c0) ** 2)
        u[balance < 0] = 1
        u[balance > 0] = 0

        # Curvature smoothing (alternating SIoIS/ISoSI operator).
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
def morphological_geodesic_active_contour(gimage, iterations,
                                          init_level_set='circle', smoothing=1,
                                          threshold='auto', balloon=0,
                                          iter_callback=lambda x: None):
    """Morphological Geodesic Active Contours (MorphGAC).

    Geodesic active contours implemented with morphological operators. It
    can be used to segment objects with visible but noisy, cluttered,
    broken borders.

    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely
        the original image. Instead, this is usually a preprocessed version
        of the original image that enhances and highlights the borders (or
        other structures) of the object to segment.
        `morphological_geodesic_active_contour` will try to stop the
        contour evolution in areas where `gimage` is small. See
        `morphsnakes.inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        `morphological_geodesic_active_contour` might greatly depend on
        this preprocessing.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and
        used as the initial level set. If a string is given, it defines the
        method to generate a reasonable initial level set with the shape of
        the `image`. Accepted values are 'checkerboard' and 'circle'. See
        the documentation of `checkerboard_level_set` and
        `circle_level_set` respectively for details about how these level
        sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float, optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in these
        areas. The default 'auto' sets the threshold to the 40th percentile
        of `gimage`.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to
        push the contour towards a border. A negative value will shrink the
        contour, while a positive value will expand the contour in these
        areas. Setting this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the
        current level set as the only argument. This is useful for
        debugging or for plotting intermediate results during the
        evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See also
    --------
    inverse_gaussian_gradient, circle_level_set, checkerboard_level_set

    Notes
    -----
    This is a version of the Geodesic Active Contours (GAC) algorithm that
    uses morphological operators instead of solving partial differential
    equations (PDEs) for the evolution of the contour. The set of
    morphological operators used in this algorithm are proved to be
    infinitesimally equivalent to the GAC PDEs (see [1]_). However,
    morphological operators do not suffer from the numerical stability
    issues typically found in PDEs (e.g., it is not necessary to find the
    right time step for the evolution), and are computationally faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves
           and Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez.
           In IEEE Transactions on Pattern Analysis and Machine
           Intelligence (PAMI), 2014, DOI 10.1109/TPAMI.2013.106
    """
    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)
    _check_input(image, init_level_set)
    if threshold == 'auto':
        # Default stopping threshold: 40th percentile of the preprocessed
        # image response.
        threshold = np.percentile(image, 40)
    # Full 3**ndim neighbourhood structuring element for the balloon force.
    structure = np.ones((3,) * len(image.shape), dtype=np.int8)
    dimage = np.gradient(image)
    if balloon != 0:
        # The balloon force is applied only where the image response is
        # above the threshold scaled by the balloon strength; elsewhere the
        # contour is considered to have reached a border and is left alone.
        threshold_mask_balloon = image > threshold / np.abs(balloon)
    u = np.int8(init_level_set > 0)
    iter_callback(u)
    for _ in range(iterations):
        # Balloon force: uniformly grow (dilate) or shrink (erode) the
        # region, applied only on the masked pixels.
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]
        # Image attachment: advect the front along the image gradient
        # (per-pixel dot product of the image and level-set gradients).
        # NOTE(review): `aux` inherits `image`'s dtype; with an integer
        # `gimage` the float gradient products would be truncated by the
        # in-place `+=`. Presumably `gimage` is float (e.g. the output of
        # `inverse_gaussian_gradient`) — confirm against callers.
        aux = np.zeros_like(image)
        du = np.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0
        # Curvature smoothing (alternating SIoIS/ISoSI operator).
        for _ in range(smoothing):
            u = _curvop(u)
        iter_callback(u)
    return u
|
pmneila/morphsnakes | morphsnakes.py | inf_sup | python | def inf_sup(u):
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
dilations = []
for P_i in P:
dilations.append(ndi.binary_dilation(u, P_i))
return np.array(dilations, dtype=np.int8).min(0) | IS operator. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L118-L133 | null | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3),
np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)),
np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def sup_inf(u):
"""SI operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0)
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS
lambda u: inf_sup(sup_inf(u))]) # ISoSI
def _check_input(image, init_level_set):
"""Check that shapes of `image` and `init_level_set` match."""
if not image.ndim in [2, 3]:
raise ValueError("`image` must be a 2 or 3-dimensional array.")
if len(image.shape) != len(init_level_set.shape):
raise ValueError("The dimensions of the initial level set do not "
"match the dimensions of the image.")
def _init_level_set(init_level_set, image_shape):
"""Auxiliary function for initializing level sets with a string.
If `init_level_set` is not a string, it is returned as is.
"""
if isinstance(init_level_set, str):
if init_level_set == 'checkerboard':
res = checkerboard_level_set(image_shape)
elif init_level_set == 'circle':
res = circle_level_set(image_shape)
else:
raise ValueError("`init_level_set` not in "
"['checkerboard', 'circle']")
else:
res = init_level_set
return res
def circle_level_set(image_shape, center=None, radius=None):
"""Create a circle level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image
center : tuple of positive integers, optional
Coordinates of the center of the circle given in (row, column). If not
given, it defaults to the center of the image.
radius : float, optional
Radius of the circle. If not given, it is set to the 75% of the
smallest image dimension.
Returns
-------
out : array with shape `image_shape`
Binary level set of the circle with the given `radius` and `center`.
See also
--------
checkerboard_level_set
"""
if center is None:
center = tuple(i // 2 for i in image_shape)
if radius is None:
radius = min(image_shape) * 3.0 / 8.0
grid = np.mgrid[[slice(i) for i in image_shape]]
grid = (grid.T - center).T
phi = radius - np.sqrt(np.sum((grid)**2, 0))
res = np.int8(phi > 0)
return res
def checkerboard_level_set(image_shape, square_size=5):
"""Create a checkerboard level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image.
square_size : int, optional
Size of the squares of the checkerboard. It defaults to 5.
Returns
-------
out : array with shape `image_shape`
Binary level set of the checkerboard.
See also
--------
circle_level_set
"""
grid = np.ogrid[[slice(i) for i in image_shape]]
grid = [(grid_i // square_size) & 1 for grid_i in grid]
checkerboard = np.bitwise_xor.reduce(grid, axis=0)
res = np.int8(checkerboard)
return res
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
"""Inverse of gradient magnitude.
Compute the magnitude of the gradients in the image and then inverts the
result in the range [0, 1]. Flat areas are assigned values close to 1,
while areas close to borders are assigned values close to 0.
This function or a similar one defined by the user should be applied over
the image as a preprocessing step before calling
`morphological_geodesic_active_contour`.
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume.
alpha : float, optional
Controls the steepness of the inversion. A larger value will make the
transition between the flat areas and border areas steeper in the
resulting array.
sigma : float, optional
Standard deviation of the Gaussian filter applied over the image.
Returns
-------
gimage : (M, N) or (L, M, N) array
Preprocessed image (or volume) suitable for
`morphological_geodesic_active_contour`.
"""
gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
smoothing=1, lambda1=1, lambda2=1,
iter_callback=lambda x: None):
"""Morphological Active Contours without Edges (MorphACWE)
Active contours without edges implemented with morphological operators. It
can be used to segment objects in images and volumes without well defined
borders. It is required that the inside of the object looks different on
average than the outside (i.e., the inner area of the object should be
darker or lighter than the outer area on average).
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume to be segmented.
iterations : uint
Number of iterations to run
init_level_set : str, (M, N) array, or (L, M, N) array
Initial level set. If an array is given, it will be binarized and used
as the initial level set. If a string is given, it defines the method
to generate a reasonable initial level set with the shape of the
`image`. Accepted values are 'checkerboard' and 'circle'. See the
documentation of `checkerboard_level_set` and `circle_level_set`
respectively for details about how these level sets are created.
smoothing : uint, optional
Number of times the smoothing operator is applied per iteration.
Reasonable values are around 1-4. Larger values lead to smoother
segmentations.
lambda1 : float, optional
Weight parameter for the outer region. If `lambda1` is larger than
`lambda2`, the outer region will contain a larger range of values than
the inner region.
lambda2 : float, optional
Weight parameter for the inner region. If `lambda2` is larger than
`lambda1`, the inner region will contain a larger range of values than
the outer region.
iter_callback : function, optional
If given, this function is called once per iteration with the current
level set as the only argument. This is useful for debugging or for
plotting intermediate results during the evolution.
Returns
-------
out : (M, N) or (L, M, N) array
Final segmentation (i.e., the final level set)
See also
--------
circle_level_set, checkerboard_level_set
Notes
-----
This is a version of the Chan-Vese algorithm that uses morphological
operators instead of solving a partial differential equation (PDE) for the
evolution of the contour. The set of morphological operators used in this
algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
(see [1]_). However, morphological operators are do not suffer from the
numerical stability issues typically found in PDEs (it is not necessary to
find the right time step for the evolution), and are computationally
faster.
The algorithm and its theoretical derivation are described in [1]_.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
"""
init_level_set = _init_level_set(init_level_set, image.shape)
_check_input(image, init_level_set)
u = np.int8(init_level_set > 0)
iter_callback(u)
for _ in range(iterations):
# inside = u > 0
# outside = u <= 0
c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
c1 = (image * u).sum() / float(u.sum() + 1e-8)
# Image attachment
du = np.gradient(u)
abs_du = np.abs(du).sum(0)
aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)
u[aux < 0] = 1
u[aux > 0] = 0
# Smoothing
for _ in range(smoothing):
u = _curvop(u)
iter_callback(u)
return u
def morphological_geodesic_active_contour(gimage, iterations,
init_level_set='circle', smoothing=1,
threshold='auto', balloon=0,
iter_callback=lambda x: None):
"""Morphological Geodesic Active Contours (MorphGAC).
Geodesic active contours implemented with morphological operators. It can
be used to segment objects with visible but noisy, cluttered, broken
borders.
Parameters
----------
gimage : (M, N) or (L, M, N) array
Preprocessed image or volume to be segmented. This is very rarely the
original image. Instead, this is usually a preprocessed version of the
original image that enhances and highlights the borders (or other
structures) of the object to segment.
`morphological_geodesic_active_contour` will try to stop the contour
evolution in areas where `gimage` is small. See
`morphsnakes.inverse_gaussian_gradient` as an example function to
perform this preprocessing. Note that the quality of
`morphological_geodesic_active_contour` might greatly depend on this
preprocessing.
iterations : uint
Number of iterations to run.
init_level_set : str, (M, N) array, or (L, M, N) array
Initial level set. If an array is given, it will be binarized and used
as the initial level set. If a string is given, it defines the method
to generate a reasonable initial level set with the shape of the
`image`. Accepted values are 'checkerboard' and 'circle'. See the
documentation of `checkerboard_level_set` and `circle_level_set`
respectively for details about how these level sets are created.
smoothing : uint, optional
Number of times the smoothing operator is applied per iteration.
Reasonable values are around 1-4. Larger values lead to smoother
segmentations.
threshold : float, optional
Areas of the image with a value smaller than this threshold will be
considered borders. The evolution of the contour will stop in this
areas.
balloon : float, optional
Balloon force to guide the contour in non-informative areas of the
image, i.e., areas where the gradient of the image is too small to push
the contour towards a border. A negative value will shrink the contour,
while a positive value will expand the contour in these areas. Setting
this to zero will disable the balloon force.
iter_callback : function, optional
If given, this function is called once per iteration with the current
level set as the only argument. This is useful for debugging or for
plotting intermediate results during the evolution.
Returns
-------
out : (M, N) or (L, M, N) array
Final segmentation (i.e., the final level set)
See also
--------
inverse_gaussian_gradient, circle_level_set, checkerboard_level_set
Notes
-----
This is a version of the Geodesic Active Contours (GAC) algorithm that uses
morphological operators instead of solving partial differential equations
(PDEs) for the evolution of the contour. The set of morphological operators
used in this algorithm are proved to be infinitesimally equivalent to the
GAC PDEs (see [1]_). However, morphological operators are do not suffer
from the numerical stability issues typically found in PDEs (e.g., it is
not necessary to find the right time step for the evolution), and are
computationally faster.
The algorithm and its theoretical derivation are described in [1]_.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
"""
image = gimage
init_level_set = _init_level_set(init_level_set, image.shape)
_check_input(image, init_level_set)
if threshold == 'auto':
threshold = np.percentile(image, 40)
structure = np.ones((3,) * len(image.shape), dtype=np.int8)
dimage = np.gradient(image)
# threshold_mask = image > threshold
if balloon != 0:
threshold_mask_balloon = image > threshold / np.abs(balloon)
u = np.int8(init_level_set > 0)
iter_callback(u)
for _ in range(iterations):
# Balloon
if balloon > 0:
aux = ndi.binary_dilation(u, structure)
elif balloon < 0:
aux = ndi.binary_erosion(u, structure)
if balloon != 0:
u[threshold_mask_balloon] = aux[threshold_mask_balloon]
# Image attachment
aux = np.zeros_like(image)
du = np.gradient(u)
for el1, el2 in zip(dimage, du):
aux += el1 * el2
u[aux > 0] = 1
u[aux < 0] = 0
# Smoothing
for _ in range(smoothing):
u = _curvop(u)
iter_callback(u)
return u
|
pmneila/morphsnakes | morphsnakes.py | _check_input | python | def _check_input(image, init_level_set):
if not image.ndim in [2, 3]:
raise ValueError("`image` must be a 2 or 3-dimensional array.")
if len(image.shape) != len(init_level_set.shape):
raise ValueError("The dimensions of the initial level set do not "
"match the dimensions of the image.") | Check that shapes of `image` and `init_level_set` match. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L140-L147 | null | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3),
np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)),
np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def sup_inf(u):
"""SI operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0)
def inf_sup(u):
"""IS operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
dilations = []
for P_i in P:
dilations.append(ndi.binary_dilation(u, P_i))
return np.array(dilations, dtype=np.int8).min(0)
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS
lambda u: inf_sup(sup_inf(u))]) # ISoSI
def _init_level_set(init_level_set, image_shape):
"""Auxiliary function for initializing level sets with a string.
If `init_level_set` is not a string, it is returned as is.
"""
if isinstance(init_level_set, str):
if init_level_set == 'checkerboard':
res = checkerboard_level_set(image_shape)
elif init_level_set == 'circle':
res = circle_level_set(image_shape)
else:
raise ValueError("`init_level_set` not in "
"['checkerboard', 'circle']")
else:
res = init_level_set
return res
def circle_level_set(image_shape, center=None, radius=None):
"""Create a circle level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image
center : tuple of positive integers, optional
Coordinates of the center of the circle given in (row, column). If not
given, it defaults to the center of the image.
radius : float, optional
Radius of the circle. If not given, it is set to the 75% of the
smallest image dimension.
Returns
-------
out : array with shape `image_shape`
Binary level set of the circle with the given `radius` and `center`.
See also
--------
checkerboard_level_set
"""
if center is None:
center = tuple(i // 2 for i in image_shape)
if radius is None:
radius = min(image_shape) * 3.0 / 8.0
grid = np.mgrid[[slice(i) for i in image_shape]]
grid = (grid.T - center).T
phi = radius - np.sqrt(np.sum((grid)**2, 0))
res = np.int8(phi > 0)
return res
def checkerboard_level_set(image_shape, square_size=5):
"""Create a checkerboard level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image.
square_size : int, optional
Size of the squares of the checkerboard. It defaults to 5.
Returns
-------
out : array with shape `image_shape`
Binary level set of the checkerboard.
See also
--------
circle_level_set
"""
grid = np.ogrid[[slice(i) for i in image_shape]]
grid = [(grid_i // square_size) & 1 for grid_i in grid]
checkerboard = np.bitwise_xor.reduce(grid, axis=0)
res = np.int8(checkerboard)
return res
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
    """Map image gradients to an inverted-magnitude stopping function.

    The image (or volume) is smoothed with a Gaussian of width ``sigma``,
    its gradient magnitude is computed, and the result is inverted into
    the range (0, 1]: flat regions map to values close to 1, strong
    borders to values close to 0.  This (or a user-defined equivalent) is
    the expected preprocessing step before calling
    `morphological_geodesic_active_contour`.

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume.
    alpha : float, optional
        Steepness of the inversion; larger values make the transition
        between flat areas and border areas sharper.
    sigma : float, optional
        Standard deviation of the Gaussian filter applied to the image.

    Returns
    -------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image (or volume) suitable for
        `morphological_geodesic_active_contour`.
    """
    magnitude = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
    return np.reciprocal(np.sqrt(1.0 + alpha * magnitude))
def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
                            smoothing=1, lambda1=1, lambda2=1,
                            iter_callback=lambda x: None):
    """Morphological Active Contours without Edges (MorphACWE).

    Segment objects whose interior differs *on average* from the exterior
    (darker or lighter), without requiring well-defined borders.  The
    Chan-Vese evolution is carried out with morphological operators on a
    binary level set instead of solving a PDE; the operators are proved
    infinitesimally equivalent to the Chan-Vese PDE [1]_, do not suffer
    from its numerical stability issues, and are faster.

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume to be segmented.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set.  Arrays are binarized and used directly; the
        strings 'checkerboard' and 'circle' select the generators
        `checkerboard_level_set` and `circle_level_set` (see their
        documentation for details).
    smoothing : uint, optional
        Number of applications of the smoothing operator per iteration.
        Reasonable values are around 1-4; larger values give smoother
        segmentations.
    lambda1 : float, optional
        Weight of the outer region.  If larger than `lambda2`, the outer
        region will contain a larger range of values than the inner one.
    lambda2 : float, optional
        Weight of the inner region.  If larger than `lambda1`, the inner
        region will contain a larger range of values than the outer one.
    iter_callback : function, optional
        Called once per iteration with the current level set as its only
        argument; useful for debugging or for plotting intermediate
        results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set).

    See also
    --------
    circle_level_set, checkerboard_level_set

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves
           and Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez.
           IEEE Transactions on Pattern Analysis and Machine Intelligence
           (PAMI), 2014, DOI 10.1109/TPAMI.2013.106
    """
    levelset = _init_level_set(init_level_set, image.shape)
    _check_input(image, levelset)

    u = np.int8(levelset > 0)
    iter_callback(u)

    for _ in range(iterations):
        # Mean intensity outside (c0) and inside (c1) the current contour;
        # the epsilon keeps the division defined when a region is empty.
        outside = 1 - u
        c0 = (image * outside).sum() / float(outside.sum() + 1e-8)
        c1 = (image * u).sum() / float(u.sum() + 1e-8)

        # Image attachment: |grad u| restricts the force to the contour,
        # and the weighted squared deviations decide which region each
        # contour pixel should join.
        contour = np.abs(np.gradient(u)).sum(0)
        force = contour * (lambda1 * (image - c1)**2
                           - lambda2 * (image - c0)**2)
        u[force < 0] = 1
        u[force > 0] = 0

        # Regularization: alternating SIoIS / ISoSI curvature operators.
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
def morphological_geodesic_active_contour(gimage, iterations,
                                          init_level_set='circle', smoothing=1,
                                          threshold='auto', balloon=0,
                                          iter_callback=lambda x: None):
    """Morphological Geodesic Active Contours (MorphGAC).
    Geodesic active contours implemented with morphological operators. It can
    be used to segment objects with visible but noisy, cluttered, broken
    borders.
    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely the
        original image. Instead, this is usually a preprocessed version of the
        original image that enhances and highlights the borders (or other
        structures) of the object to segment.
        `morphological_geodesic_active_contour` will try to stop the contour
        evolution in areas where `gimage` is small. See
        `morphsnakes.inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        `morphological_geodesic_active_contour` might greatly depend on this
        preprocessing.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float or 'auto', optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in these
        areas. If 'auto' (the default), the threshold is set to the 40th
        percentile of `gimage`.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to push
        the contour towards a border. A negative value will shrink the contour,
        while a positive value will expand the contour in these areas. Setting
        this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.
    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)
    See also
    --------
    inverse_gaussian_gradient, circle_level_set, checkerboard_level_set
    Notes
    -----
    This is a version of the Geodesic Active Contours (GAC) algorithm that uses
    morphological operators instead of solving partial differential equations
    (PDEs) for the evolution of the contour. The set of morphological operators
    used in this algorithm are proved to be infinitesimally equivalent to the
    GAC PDEs (see [1]_). However, morphological operators do not suffer
    from the numerical stability issues typically found in PDEs (e.g., it is
    not necessary to find the right time step for the evolution), and are
    computationally faster.
    The algorithm and its theoretical derivation are described in [1]_.
    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, DOI 10.1109/TPAMI.2013.106
    """
    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)
    _check_input(image, init_level_set)
    if threshold == 'auto':
        # 'auto' picks the 40th percentile of the preprocessed image.
        threshold = np.percentile(image, 40)
    # Full-connectivity structuring element for the balloon step.
    structure = np.ones((3,) * len(image.shape), dtype=np.int8)
    dimage = np.gradient(image)
    if balloon != 0:
        # The balloon force only acts where gimage is large relative to the
        # balloon-scaled threshold, i.e. away from detected borders.
        threshold_mask_balloon = image > threshold / np.abs(balloon)
    u = np.int8(init_level_set > 0)
    iter_callback(u)
    for _ in range(iterations):
        # Balloon force: dilate to inflate (balloon > 0) or erode to
        # deflate (balloon < 0), applied only on the balloon mask.
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]
        # Image attachment: advect the contour along the gradient of gimage
        # (pointwise dot product of grad(gimage) and grad(u)).
        aux = np.zeros_like(image)
        du = np.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0
        # Smoothing: alternating SIoIS / ISoSI curvature operators.
        for _ in range(smoothing):
            u = _curvop(u)
        iter_callback(u)
    return u
|
pmneila/morphsnakes | morphsnakes.py | _init_level_set | python | def _init_level_set(init_level_set, image_shape):
if isinstance(init_level_set, str):
if init_level_set == 'checkerboard':
res = checkerboard_level_set(image_shape)
elif init_level_set == 'circle':
res = circle_level_set(image_shape)
else:
raise ValueError("`init_level_set` not in "
"['checkerboard', 'circle']")
else:
res = init_level_set
return res | Auxiliary function for initializing level sets with a string.
If `init_level_set` is not a string, it is returned as is. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L150-L165 | [
"def circle_level_set(image_shape, center=None, radius=None):\n \"\"\"Create a circle level set with binary values.\n\n Parameters\n ----------\n image_shape : tuple of positive integers\n Shape of the image\n center : tuple of positive integers, optional\n Coordinates of the center of the circle given in (row, column). If not\n given, it defaults to the center of the image.\n radius : float, optional\n Radius of the circle. If not given, it is set to the 75% of the\n smallest image dimension.\n\n Returns\n -------\n out : array with shape `image_shape`\n Binary level set of the circle with the given `radius` and `center`.\n\n See also\n --------\n checkerboard_level_set\n \"\"\"\n\n if center is None:\n center = tuple(i // 2 for i in image_shape)\n\n if radius is None:\n radius = min(image_shape) * 3.0 / 8.0\n\n grid = np.mgrid[[slice(i) for i in image_shape]]\n grid = (grid.T - center).T\n phi = radius - np.sqrt(np.sum((grid)**2, 0))\n res = np.int8(phi > 0)\n return res\n",
"def checkerboard_level_set(image_shape, square_size=5):\n \"\"\"Create a checkerboard level set with binary values.\n\n Parameters\n ----------\n image_shape : tuple of positive integers\n Shape of the image.\n square_size : int, optional\n Size of the squares of the checkerboard. It defaults to 5.\n\n Returns\n -------\n out : array with shape `image_shape`\n Binary level set of the checkerboard.\n\n See also\n --------\n circle_level_set\n \"\"\"\n\n grid = np.ogrid[[slice(i) for i in image_shape]]\n grid = [(grid_i // square_size) & 1 for grid_i in grid]\n\n checkerboard = np.bitwise_xor.reduce(grid, axis=0)\n res = np.int8(checkerboard)\n return res\n"
] | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3),
np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)),
np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def sup_inf(u):
"""SI operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0)
def inf_sup(u):
"""IS operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
dilations = []
for P_i in P:
dilations.append(ndi.binary_dilation(u, P_i))
return np.array(dilations, dtype=np.int8).min(0)
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS
lambda u: inf_sup(sup_inf(u))]) # ISoSI
def _check_input(image, init_level_set):
"""Check that shapes of `image` and `init_level_set` match."""
if not image.ndim in [2, 3]:
raise ValueError("`image` must be a 2 or 3-dimensional array.")
if len(image.shape) != len(init_level_set.shape):
raise ValueError("The dimensions of the initial level set do not "
"match the dimensions of the image.")
def circle_level_set(image_shape, center=None, radius=None):
"""Create a circle level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image
center : tuple of positive integers, optional
Coordinates of the center of the circle given in (row, column). If not
given, it defaults to the center of the image.
radius : float, optional
Radius of the circle. If not given, it is set to the 75% of the
smallest image dimension.
Returns
-------
out : array with shape `image_shape`
Binary level set of the circle with the given `radius` and `center`.
See also
--------
checkerboard_level_set
"""
if center is None:
center = tuple(i // 2 for i in image_shape)
if radius is None:
radius = min(image_shape) * 3.0 / 8.0
grid = np.mgrid[[slice(i) for i in image_shape]]
grid = (grid.T - center).T
phi = radius - np.sqrt(np.sum((grid)**2, 0))
res = np.int8(phi > 0)
return res
def checkerboard_level_set(image_shape, square_size=5):
"""Create a checkerboard level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image.
square_size : int, optional
Size of the squares of the checkerboard. It defaults to 5.
Returns
-------
out : array with shape `image_shape`
Binary level set of the checkerboard.
See also
--------
circle_level_set
"""
grid = np.ogrid[[slice(i) for i in image_shape]]
grid = [(grid_i // square_size) & 1 for grid_i in grid]
checkerboard = np.bitwise_xor.reduce(grid, axis=0)
res = np.int8(checkerboard)
return res
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
"""Inverse of gradient magnitude.
Compute the magnitude of the gradients in the image and then inverts the
result in the range [0, 1]. Flat areas are assigned values close to 1,
while areas close to borders are assigned values close to 0.
This function or a similar one defined by the user should be applied over
the image as a preprocessing step before calling
`morphological_geodesic_active_contour`.
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume.
alpha : float, optional
Controls the steepness of the inversion. A larger value will make the
transition between the flat areas and border areas steeper in the
resulting array.
sigma : float, optional
Standard deviation of the Gaussian filter applied over the image.
Returns
-------
gimage : (M, N) or (L, M, N) array
Preprocessed image (or volume) suitable for
`morphological_geodesic_active_contour`.
"""
gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
smoothing=1, lambda1=1, lambda2=1,
iter_callback=lambda x: None):
"""Morphological Active Contours without Edges (MorphACWE)
Active contours without edges implemented with morphological operators. It
can be used to segment objects in images and volumes without well defined
borders. It is required that the inside of the object looks different on
average than the outside (i.e., the inner area of the object should be
darker or lighter than the outer area on average).
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume to be segmented.
iterations : uint
Number of iterations to run
init_level_set : str, (M, N) array, or (L, M, N) array
Initial level set. If an array is given, it will be binarized and used
as the initial level set. If a string is given, it defines the method
to generate a reasonable initial level set with the shape of the
`image`. Accepted values are 'checkerboard' and 'circle'. See the
documentation of `checkerboard_level_set` and `circle_level_set`
respectively for details about how these level sets are created.
smoothing : uint, optional
Number of times the smoothing operator is applied per iteration.
Reasonable values are around 1-4. Larger values lead to smoother
segmentations.
lambda1 : float, optional
Weight parameter for the outer region. If `lambda1` is larger than
`lambda2`, the outer region will contain a larger range of values than
the inner region.
lambda2 : float, optional
Weight parameter for the inner region. If `lambda2` is larger than
`lambda1`, the inner region will contain a larger range of values than
the outer region.
iter_callback : function, optional
If given, this function is called once per iteration with the current
level set as the only argument. This is useful for debugging or for
plotting intermediate results during the evolution.
Returns
-------
out : (M, N) or (L, M, N) array
Final segmentation (i.e., the final level set)
See also
--------
circle_level_set, checkerboard_level_set
Notes
-----
This is a version of the Chan-Vese algorithm that uses morphological
operators instead of solving a partial differential equation (PDE) for the
evolution of the contour. The set of morphological operators used in this
algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
(see [1]_). However, morphological operators are do not suffer from the
numerical stability issues typically found in PDEs (it is not necessary to
find the right time step for the evolution), and are computationally
faster.
The algorithm and its theoretical derivation are described in [1]_.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
"""
init_level_set = _init_level_set(init_level_set, image.shape)
_check_input(image, init_level_set)
u = np.int8(init_level_set > 0)
iter_callback(u)
for _ in range(iterations):
# inside = u > 0
# outside = u <= 0
c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
c1 = (image * u).sum() / float(u.sum() + 1e-8)
# Image attachment
du = np.gradient(u)
abs_du = np.abs(du).sum(0)
aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)
u[aux < 0] = 1
u[aux > 0] = 0
# Smoothing
for _ in range(smoothing):
u = _curvop(u)
iter_callback(u)
return u
def morphological_geodesic_active_contour(gimage, iterations,
init_level_set='circle', smoothing=1,
threshold='auto', balloon=0,
iter_callback=lambda x: None):
"""Morphological Geodesic Active Contours (MorphGAC).
Geodesic active contours implemented with morphological operators. It can
be used to segment objects with visible but noisy, cluttered, broken
borders.
Parameters
----------
gimage : (M, N) or (L, M, N) array
Preprocessed image or volume to be segmented. This is very rarely the
original image. Instead, this is usually a preprocessed version of the
original image that enhances and highlights the borders (or other
structures) of the object to segment.
`morphological_geodesic_active_contour` will try to stop the contour
evolution in areas where `gimage` is small. See
`morphsnakes.inverse_gaussian_gradient` as an example function to
perform this preprocessing. Note that the quality of
`morphological_geodesic_active_contour` might greatly depend on this
preprocessing.
iterations : uint
Number of iterations to run.
init_level_set : str, (M, N) array, or (L, M, N) array
Initial level set. If an array is given, it will be binarized and used
as the initial level set. If a string is given, it defines the method
to generate a reasonable initial level set with the shape of the
`image`. Accepted values are 'checkerboard' and 'circle'. See the
documentation of `checkerboard_level_set` and `circle_level_set`
respectively for details about how these level sets are created.
smoothing : uint, optional
Number of times the smoothing operator is applied per iteration.
Reasonable values are around 1-4. Larger values lead to smoother
segmentations.
threshold : float, optional
Areas of the image with a value smaller than this threshold will be
considered borders. The evolution of the contour will stop in this
areas.
balloon : float, optional
Balloon force to guide the contour in non-informative areas of the
image, i.e., areas where the gradient of the image is too small to push
the contour towards a border. A negative value will shrink the contour,
while a positive value will expand the contour in these areas. Setting
this to zero will disable the balloon force.
iter_callback : function, optional
If given, this function is called once per iteration with the current
level set as the only argument. This is useful for debugging or for
plotting intermediate results during the evolution.
Returns
-------
out : (M, N) or (L, M, N) array
Final segmentation (i.e., the final level set)
See also
--------
inverse_gaussian_gradient, circle_level_set, checkerboard_level_set
Notes
-----
This is a version of the Geodesic Active Contours (GAC) algorithm that uses
morphological operators instead of solving partial differential equations
(PDEs) for the evolution of the contour. The set of morphological operators
used in this algorithm are proved to be infinitesimally equivalent to the
GAC PDEs (see [1]_). However, morphological operators are do not suffer
from the numerical stability issues typically found in PDEs (e.g., it is
not necessary to find the right time step for the evolution), and are
computationally faster.
The algorithm and its theoretical derivation are described in [1]_.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
"""
image = gimage
init_level_set = _init_level_set(init_level_set, image.shape)
_check_input(image, init_level_set)
if threshold == 'auto':
threshold = np.percentile(image, 40)
structure = np.ones((3,) * len(image.shape), dtype=np.int8)
dimage = np.gradient(image)
# threshold_mask = image > threshold
if balloon != 0:
threshold_mask_balloon = image > threshold / np.abs(balloon)
u = np.int8(init_level_set > 0)
iter_callback(u)
for _ in range(iterations):
# Balloon
if balloon > 0:
aux = ndi.binary_dilation(u, structure)
elif balloon < 0:
aux = ndi.binary_erosion(u, structure)
if balloon != 0:
u[threshold_mask_balloon] = aux[threshold_mask_balloon]
# Image attachment
aux = np.zeros_like(image)
du = np.gradient(u)
for el1, el2 in zip(dimage, du):
aux += el1 * el2
u[aux > 0] = 1
u[aux < 0] = 0
# Smoothing
for _ in range(smoothing):
u = _curvop(u)
iter_callback(u)
return u
|
pmneila/morphsnakes | morphsnakes.py | circle_level_set | python | def circle_level_set(image_shape, center=None, radius=None):
if center is None:
center = tuple(i // 2 for i in image_shape)
if radius is None:
radius = min(image_shape) * 3.0 / 8.0
grid = np.mgrid[[slice(i) for i in image_shape]]
grid = (grid.T - center).T
phi = radius - np.sqrt(np.sum((grid)**2, 0))
res = np.int8(phi > 0)
return res | Create a circle level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image
center : tuple of positive integers, optional
Coordinates of the center of the circle given in (row, column). If not
given, it defaults to the center of the image.
radius : float, optional
Radius of the circle. If not given, it is set to the 75% of the
smallest image dimension.
Returns
-------
out : array with shape `image_shape`
Binary level set of the circle with the given `radius` and `center`.
See also
--------
checkerboard_level_set | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L168-L202 | null | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3),
np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)),
np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def sup_inf(u):
"""SI operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0)
def inf_sup(u):
"""IS operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
dilations = []
for P_i in P:
dilations.append(ndi.binary_dilation(u, P_i))
return np.array(dilations, dtype=np.int8).min(0)
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS
lambda u: inf_sup(sup_inf(u))]) # ISoSI
def _check_input(image, init_level_set):
"""Check that shapes of `image` and `init_level_set` match."""
if not image.ndim in [2, 3]:
raise ValueError("`image` must be a 2 or 3-dimensional array.")
if len(image.shape) != len(init_level_set.shape):
raise ValueError("The dimensions of the initial level set do not "
"match the dimensions of the image.")
def _init_level_set(init_level_set, image_shape):
"""Auxiliary function for initializing level sets with a string.
If `init_level_set` is not a string, it is returned as is.
"""
if isinstance(init_level_set, str):
if init_level_set == 'checkerboard':
res = checkerboard_level_set(image_shape)
elif init_level_set == 'circle':
res = circle_level_set(image_shape)
else:
raise ValueError("`init_level_set` not in "
"['checkerboard', 'circle']")
else:
res = init_level_set
return res
def checkerboard_level_set(image_shape, square_size=5):
    """Create a checkerboard level set with binary values.

    Parameters
    ----------
    image_shape : tuple of positive integers
        Shape of the image.
    square_size : int, optional
        Size of the squares of the checkerboard. It defaults to 5.

    Returns
    -------
    out : array with shape `image_shape`
        Binary level set of the checkerboard.

    See also
    --------
    circle_level_set
    """
    grid = np.ogrid[[slice(i) for i in image_shape]]
    # Parity (0/1) of the square index along each axis.
    grid = [(grid_i // square_size) & 1 for grid_i in grid]
    # XOR the per-axis parities with explicit broadcasting.  The previous
    # `np.bitwise_xor.reduce(grid, axis=0)` required NumPy to build a ragged
    # object array from the differently-shaped ogrid components, which
    # raises a ValueError on NumPy >= 1.24.
    checkerboard = grid[0]
    for grid_i in grid[1:]:
        checkerboard = checkerboard ^ grid_i
    res = np.int8(checkerboard)
    return res
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
    """Inverse of gradient magnitude.

    Computes the Gaussian-smoothed gradient magnitude of `image` and maps
    it into (0, 1]: flat areas come out close to 1, borders close to 0.
    The result is a suitable input for
    `morphological_geodesic_active_contour`.

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume.
    alpha : float, optional
        Steepness of the inversion; larger values make the transition
        between flat and border areas sharper.
    sigma : float, optional
        Standard deviation of the Gaussian filter applied over the image.

    Returns
    -------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image (or volume).
    """
    # 'nearest' boundary handling avoids spurious gradients at the edges.
    magnitude = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
    gimage = 1.0 / np.sqrt(1.0 + alpha * magnitude)
    return gimage
def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
                            smoothing=1, lambda1=1, lambda2=1,
                            iter_callback=lambda x: None):
    """Morphological Active Contours without Edges (MorphACWE)
    Active contours without edges implemented with morphological operators. It
    can be used to segment objects in images and volumes without well defined
    borders. It is required that the inside of the object looks different on
    average than the outside (i.e., the inner area of the object should be
    darker or lighter than the outer area on average).
    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume to be segmented.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    lambda1 : float, optional
        Weight parameter for the outer region. If `lambda1` is larger than
        `lambda2`, the outer region will contain a larger range of values than
        the inner region.
    lambda2 : float, optional
        Weight parameter for the inner region. If `lambda2` is larger than
        `lambda1`, the inner region will contain a larger range of values than
        the outer region.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.
    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)
    See also
    --------
    circle_level_set, checkerboard_level_set
    Notes
    -----
    This is a version of the Chan-Vese algorithm that uses morphological
    operators instead of solving a partial differential equation (PDE) for the
    evolution of the contour. The set of morphological operators used in this
    algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
    (see [1]_). However, morphological operators do not suffer from the
    numerical stability issues typically found in PDEs (it is not necessary to
    find the right time step for the evolution), and are computationally
    faster.
    The algorithm and its theoretical derivation are described in [1]_.
    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, DOI 10.1109/TPAMI.2013.106
    """
    init_level_set = _init_level_set(init_level_set, image.shape)
    _check_input(image, init_level_set)
    # The level set is a binary int8 array: 1 inside the contour, 0 outside.
    u = np.int8(init_level_set > 0)
    iter_callback(u)
    for _ in range(iterations):
        # inside = u > 0
        # outside = u <= 0
        # c0/c1 are the average intensities of the outer/inner regions; the
        # 1e-8 term guards against division by zero when a region is empty.
        c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
        c1 = (image * u).sum() / float(u.sum() + 1e-8)
        # Image attachment
        # abs_du is an L1 approximation of |grad u|, nonzero only near the
        # contour, so only contour pixels are updated below.
        du = np.gradient(u)
        abs_du = np.abs(du).sum(0)
        aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)
        # Flip each contour pixel towards the region whose average its
        # intensity is closer to (weighted by lambda1/lambda2).
        u[aux < 0] = 1
        u[aux > 0] = 0
        # Smoothing
        # _curvop alternates SIoIS and ISoSI, approximating curvature flow.
        for _ in range(smoothing):
            u = _curvop(u)
        iter_callback(u)
    return u
def morphological_geodesic_active_contour(gimage, iterations,
                                          init_level_set='circle', smoothing=1,
                                          threshold='auto', balloon=0,
                                          iter_callback=lambda x: None):
    """Morphological Geodesic Active Contours (MorphGAC).
    Geodesic active contours implemented with morphological operators. It can
    be used to segment objects with visible but noisy, cluttered, broken
    borders.
    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely the
        original image. Instead, this is usually a preprocessed version of the
        original image that enhances and highlights the borders (or other
        structures) of the object to segment.
        `morphological_geodesic_active_contour` will try to stop the contour
        evolution in areas where `gimage` is small. See
        `morphsnakes.inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        `morphological_geodesic_active_contour` might greatly depend on this
        preprocessing.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float, optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in these
        areas.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to push
        the contour towards a border. A negative value will shrink the contour,
        while a positive value will expand the contour in these areas. Setting
        this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.
    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)
    See also
    --------
    inverse_gaussian_gradient, circle_level_set, checkerboard_level_set
    Notes
    -----
    This is a version of the Geodesic Active Contours (GAC) algorithm that uses
    morphological operators instead of solving partial differential equations
    (PDEs) for the evolution of the contour. The set of morphological operators
    used in this algorithm are proved to be infinitesimally equivalent to the
    GAC PDEs (see [1]_). However, morphological operators do not suffer
    from the numerical stability issues typically found in PDEs (e.g., it is
    not necessary to find the right time step for the evolution), and are
    computationally faster.
    The algorithm and its theoretical derivation are described in [1]_.
    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, DOI 10.1109/TPAMI.2013.106
    """
    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)
    _check_input(image, init_level_set)
    if threshold == 'auto':
        # Default stopping threshold: the 40th percentile of the image.
        threshold = np.percentile(image, 40)
    structure = np.ones((3,) * len(image.shape), dtype=np.int8)
    # The image gradient is constant during the evolution; compute it once.
    dimage = np.gradient(image)
    # threshold_mask = image > threshold
    if balloon != 0:
        # The balloon force only acts where the (preprocessed) image value is
        # large enough; dividing by |balloon| relaxes the mask as the force
        # strength grows.
        threshold_mask_balloon = image > threshold / np.abs(balloon)
    # Binary int8 level set: 1 inside the contour, 0 outside.
    u = np.int8(init_level_set > 0)
    iter_callback(u)
    for _ in range(iterations):
        # Balloon
        # Dilation expands the contour (balloon > 0); erosion shrinks it.
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]
        # Image attachment
        # Accumulate grad(gimage) . grad(u); its sign tells on which side of
        # the nearest border each contour pixel lies.
        # NOTE(review): np.zeros_like(image) inherits the dtype of `image`;
        # for integer images the in-place += of float gradients can raise or
        # truncate -- confirm callers pass a float image.
        aux = np.zeros_like(image)
        du = np.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0
        # Smoothing
        # _curvop alternates SIoIS and ISoSI, approximating curvature flow.
        for _ in range(smoothing):
            u = _curvop(u)
        iter_callback(u)
    return u
|
pmneila/morphsnakes | morphsnakes.py | checkerboard_level_set | python | def checkerboard_level_set(image_shape, square_size=5):
grid = np.ogrid[[slice(i) for i in image_shape]]
grid = [(grid_i // square_size) & 1 for grid_i in grid]
checkerboard = np.bitwise_xor.reduce(grid, axis=0)
res = np.int8(checkerboard)
return res | Create a checkerboard level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image.
square_size : int, optional
Size of the squares of the checkerboard. It defaults to 5.
Returns
-------
out : array with shape `image_shape`
Binary level set of the checkerboard.
See also
--------
circle_level_set | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L205-L230 | null | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3),
np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)),
np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def sup_inf(u):
    """SI operator.

    Erode `u` with each line-segment structuring element (`_P2` in 2D,
    `_P3` in 3D) and take the pointwise maximum of the results.
    """
    if np.ndim(u) == 2:
        P = _P2
    elif np.ndim(u) == 3:
        P = _P3
    else:
        raise ValueError("u has an invalid number of dimensions "
                         "(should be 2 or 3)")
    erosions = []
    for P_i in P:
        erosions.append(ndi.binary_erosion(u, P_i))
    # A pixel survives if at least one directional erosion keeps it.
    return np.array(erosions, dtype=np.int8).max(0)
def inf_sup(u):
    """IS operator.

    Dilate `u` with each line-segment structuring element (`_P2` in 2D,
    `_P3` in 3D) and take the pointwise minimum of the results.
    """
    if np.ndim(u) == 2:
        P = _P2
    elif np.ndim(u) == 3:
        P = _P3
    else:
        raise ValueError("u has an invalid number of dimensions "
                         "(should be 2 or 3)")
    dilations = []
    for P_i in P:
        dilations.append(ndi.binary_dilation(u, P_i))
    # A pixel is set only if every directional dilation sets it.
    return np.array(dilations, dtype=np.int8).min(0)
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS
lambda u: inf_sup(sup_inf(u))]) # ISoSI
def _check_input(image, init_level_set):
    """Check that shapes of `image` and `init_level_set` match.

    Raises a ValueError if `image` is not 2- or 3-dimensional, or if the
    level set's dimensionality differs from the image's.
    """
    if not image.ndim in [2, 3]:
        raise ValueError("`image` must be a 2 or 3-dimensional array.")
    if len(image.shape) != len(init_level_set.shape):
        raise ValueError("The dimensions of the initial level set do not "
                         "match the dimensions of the image.")
def _init_level_set(init_level_set, image_shape):
    """Auxiliary function for initializing level sets with a string.
    If `init_level_set` is not a string, it is returned as is.
    """
    if isinstance(init_level_set, str):
        if init_level_set == 'checkerboard':
            res = checkerboard_level_set(image_shape)
        elif init_level_set == 'circle':
            res = circle_level_set(image_shape)
        else:
            raise ValueError("`init_level_set` not in "
                             "['checkerboard', 'circle']")
    else:
        # Array-like input is passed through; callers binarize it later
        # (e.g. `u = np.int8(init_level_set > 0)`).
        res = init_level_set
    return res
def circle_level_set(image_shape, center=None, radius=None):
    """Create a circle level set with binary values.
    Parameters
    ----------
    image_shape : tuple of positive integers
        Shape of the image
    center : tuple of positive integers, optional
        Coordinates of the center of the circle given in (row, column). If not
        given, it defaults to the center of the image.
    radius : float, optional
        Radius of the circle. If not given, it is set to 3/8 of the
        smallest image dimension (i.e., the diameter covers 75% of it).
    Returns
    -------
    out : array with shape `image_shape`
        Binary level set of the circle with the given `radius` and `center`.
    See also
    --------
    checkerboard_level_set
    """
    if center is None:
        center = tuple(i // 2 for i in image_shape)
    if radius is None:
        # Default radius: 3/8 of the smallest dimension.
        radius = min(image_shape) * 3.0 / 8.0
    grid = np.mgrid[[slice(i) for i in image_shape]]
    # Shift coordinates so the origin sits at `center` (the transposes make
    # the subtraction broadcast over the leading axis of `mgrid`).
    grid = (grid.T - center).T
    # phi > 0 inside the circle (distance from center smaller than radius).
    phi = radius - np.sqrt(np.sum((grid)**2, 0))
    res = np.int8(phi > 0)
    return res
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
    """Inverse of gradient magnitude.
    Compute the magnitude of the gradients in the image and then inverts the
    result in the range [0, 1]. Flat areas are assigned values close to 1,
    while areas close to borders are assigned values close to 0.
    This function or a similar one defined by the user should be applied over
    the image as a preprocessing step before calling
    `morphological_geodesic_active_contour`.
    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume.
    alpha : float, optional
        Controls the steepness of the inversion. A larger value will make the
        transition between the flat areas and border areas steeper in the
        resulting array.
    sigma : float, optional
        Standard deviation of the Gaussian filter applied over the image.
    Returns
    -------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image (or volume) suitable for
        `morphological_geodesic_active_contour`.
    """
    # 'nearest' boundary handling avoids spurious gradients at the edges.
    gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
    # Maps gradnorm = 0 -> 1 and large gradients -> values near 0.
    return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
                            smoothing=1, lambda1=1, lambda2=1,
                            iter_callback=lambda x: None):
    """Morphological Active Contours without Edges (MorphACWE)
    Active contours without edges implemented with morphological operators. It
    can be used to segment objects in images and volumes without well defined
    borders. It is required that the inside of the object looks different on
    average than the outside (i.e., the inner area of the object should be
    darker or lighter than the outer area on average).
    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume to be segmented.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    lambda1 : float, optional
        Weight parameter for the outer region. If `lambda1` is larger than
        `lambda2`, the outer region will contain a larger range of values than
        the inner region.
    lambda2 : float, optional
        Weight parameter for the inner region. If `lambda2` is larger than
        `lambda1`, the inner region will contain a larger range of values than
        the outer region.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.
    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)
    See also
    --------
    circle_level_set, checkerboard_level_set
    Notes
    -----
    This is a version of the Chan-Vese algorithm that uses morphological
    operators instead of solving a partial differential equation (PDE) for the
    evolution of the contour. The set of morphological operators used in this
    algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
    (see [1]_). However, morphological operators do not suffer from the
    numerical stability issues typically found in PDEs (it is not necessary to
    find the right time step for the evolution), and are computationally
    faster.
    The algorithm and its theoretical derivation are described in [1]_.
    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, DOI 10.1109/TPAMI.2013.106
    """
    init_level_set = _init_level_set(init_level_set, image.shape)
    _check_input(image, init_level_set)
    # The level set is a binary int8 array: 1 inside the contour, 0 outside.
    u = np.int8(init_level_set > 0)
    iter_callback(u)
    for _ in range(iterations):
        # inside = u > 0
        # outside = u <= 0
        # c0/c1 are the average intensities of the outer/inner regions; the
        # 1e-8 term guards against division by zero when a region is empty.
        c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
        c1 = (image * u).sum() / float(u.sum() + 1e-8)
        # Image attachment
        # abs_du is an L1 approximation of |grad u|, nonzero only near the
        # contour, so only contour pixels are updated below.
        du = np.gradient(u)
        abs_du = np.abs(du).sum(0)
        aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)
        # Flip each contour pixel towards the region whose average its
        # intensity is closer to (weighted by lambda1/lambda2).
        u[aux < 0] = 1
        u[aux > 0] = 0
        # Smoothing
        # _curvop alternates SIoIS and ISoSI, approximating curvature flow.
        for _ in range(smoothing):
            u = _curvop(u)
        iter_callback(u)
    return u
def morphological_geodesic_active_contour(gimage, iterations,
                                          init_level_set='circle', smoothing=1,
                                          threshold='auto', balloon=0,
                                          iter_callback=lambda x: None):
    """Morphological Geodesic Active Contours (MorphGAC).
    Geodesic active contours implemented with morphological operators. It can
    be used to segment objects with visible but noisy, cluttered, broken
    borders.
    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely the
        original image. Instead, this is usually a preprocessed version of the
        original image that enhances and highlights the borders (or other
        structures) of the object to segment.
        `morphological_geodesic_active_contour` will try to stop the contour
        evolution in areas where `gimage` is small. See
        `morphsnakes.inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        `morphological_geodesic_active_contour` might greatly depend on this
        preprocessing.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float, optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in these
        areas.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to push
        the contour towards a border. A negative value will shrink the contour,
        while a positive value will expand the contour in these areas. Setting
        this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.
    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)
    See also
    --------
    inverse_gaussian_gradient, circle_level_set, checkerboard_level_set
    Notes
    -----
    This is a version of the Geodesic Active Contours (GAC) algorithm that uses
    morphological operators instead of solving partial differential equations
    (PDEs) for the evolution of the contour. The set of morphological operators
    used in this algorithm are proved to be infinitesimally equivalent to the
    GAC PDEs (see [1]_). However, morphological operators do not suffer
    from the numerical stability issues typically found in PDEs (e.g., it is
    not necessary to find the right time step for the evolution), and are
    computationally faster.
    The algorithm and its theoretical derivation are described in [1]_.
    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, DOI 10.1109/TPAMI.2013.106
    """
    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)
    _check_input(image, init_level_set)
    if threshold == 'auto':
        # Default stopping threshold: the 40th percentile of the image.
        threshold = np.percentile(image, 40)
    structure = np.ones((3,) * len(image.shape), dtype=np.int8)
    # The image gradient is constant during the evolution; compute it once.
    dimage = np.gradient(image)
    # threshold_mask = image > threshold
    if balloon != 0:
        # The balloon force only acts where the (preprocessed) image value is
        # large enough; dividing by |balloon| relaxes the mask as the force
        # strength grows.
        threshold_mask_balloon = image > threshold / np.abs(balloon)
    # Binary int8 level set: 1 inside the contour, 0 outside.
    u = np.int8(init_level_set > 0)
    iter_callback(u)
    for _ in range(iterations):
        # Balloon
        # Dilation expands the contour (balloon > 0); erosion shrinks it.
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]
        # Image attachment
        # Accumulate grad(gimage) . grad(u); its sign tells on which side of
        # the nearest border each contour pixel lies.
        # NOTE(review): np.zeros_like(image) inherits the dtype of `image`;
        # for integer images the in-place += of float gradients can raise or
        # truncate -- confirm callers pass a float image.
        aux = np.zeros_like(image)
        du = np.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0
        # Smoothing
        # _curvop alternates SIoIS and ISoSI, approximating curvature flow.
        for _ in range(smoothing):
            u = _curvop(u)
        iter_callback(u)
    return u
|
pmneila/morphsnakes | morphsnakes.py | inverse_gaussian_gradient | python | def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
return 1.0 / np.sqrt(1.0 + alpha * gradnorm) | Inverse of gradient magnitude.
Compute the magnitude of the gradients in the image and then inverts the
result in the range [0, 1]. Flat areas are assigned values close to 1,
while areas close to borders are assigned values close to 0.
This function or a similar one defined by the user should be applied over
the image as a preprocessing step before calling
`morphological_geodesic_active_contour`.
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume.
alpha : float, optional
Controls the steepness of the inversion. A larger value will make the
transition between the flat areas and border areas steeper in the
resulting array.
sigma : float, optional
Standard deviation of the Gaussian filter applied over the image.
Returns
-------
gimage : (M, N) or (L, M, N) array
Preprocessed image (or volume) suitable for
`morphological_geodesic_active_contour`. | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L233-L262 | null | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
    # Round-robin dispatcher: cycles endlessly through the given functions,
    # invoking the next one on each call.  Used to alternate the SIoIS and
    # ISoSI smoothing operators in `_curvop`.
    def __init__(self, iterable):
        """Call functions from the iterable each time it is called."""
        self.funcs = cycle(iterable)
    def __call__(self, *args, **kwargs):
        # Advance the infinite cycle and delegate the call to it.
        f = next(self.funcs)
        return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3),
np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)),
np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def sup_inf(u):
    """SI operator.

    Erode `u` with each line-segment structuring element (`_P2` in 2D,
    `_P3` in 3D) and take the pointwise maximum of the results.
    """
    if np.ndim(u) == 2:
        P = _P2
    elif np.ndim(u) == 3:
        P = _P3
    else:
        raise ValueError("u has an invalid number of dimensions "
                         "(should be 2 or 3)")
    erosions = []
    for P_i in P:
        erosions.append(ndi.binary_erosion(u, P_i))
    # A pixel survives if at least one directional erosion keeps it.
    return np.array(erosions, dtype=np.int8).max(0)
def inf_sup(u):
    """IS operator.

    Dilate `u` with each line-segment structuring element (`_P2` in 2D,
    `_P3` in 3D) and take the pointwise minimum of the results.
    """
    if np.ndim(u) == 2:
        P = _P2
    elif np.ndim(u) == 3:
        P = _P3
    else:
        raise ValueError("u has an invalid number of dimensions "
                         "(should be 2 or 3)")
    dilations = []
    for P_i in P:
        dilations.append(ndi.binary_dilation(u, P_i))
    # A pixel is set only if every directional dilation sets it.
    return np.array(dilations, dtype=np.int8).min(0)
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS
lambda u: inf_sup(sup_inf(u))]) # ISoSI
def _check_input(image, init_level_set):
    """Check that shapes of `image` and `init_level_set` match.

    Raises a ValueError if `image` is not 2- or 3-dimensional, or if the
    level set's dimensionality differs from the image's.
    """
    if not image.ndim in [2, 3]:
        raise ValueError("`image` must be a 2 or 3-dimensional array.")
    if len(image.shape) != len(init_level_set.shape):
        raise ValueError("The dimensions of the initial level set do not "
                         "match the dimensions of the image.")
def _init_level_set(init_level_set, image_shape):
    """Auxiliary function for initializing level sets with a string.
    If `init_level_set` is not a string, it is returned as is.
    """
    if isinstance(init_level_set, str):
        if init_level_set == 'checkerboard':
            res = checkerboard_level_set(image_shape)
        elif init_level_set == 'circle':
            res = circle_level_set(image_shape)
        else:
            raise ValueError("`init_level_set` not in "
                             "['checkerboard', 'circle']")
    else:
        # Array-like input is passed through; callers binarize it later
        # (e.g. `u = np.int8(init_level_set > 0)`).
        res = init_level_set
    return res
def circle_level_set(image_shape, center=None, radius=None):
    """Create a binary circle (or ball) level set.

    Parameters
    ----------
    image_shape : tuple of positive integers
        Shape of the image.
    center : tuple of positive integers, optional
        Center of the circle in (row, column) coordinates; defaults to the
        center of the image.
    radius : float, optional
        Radius of the circle; defaults to ``min(image_shape) * 3 / 8``.

    Returns
    -------
    out : array with shape `image_shape`
        int8 array with 1 strictly inside the circle and 0 elsewhere.

    See also
    --------
    checkerboard_level_set
    """
    if center is None:
        center = tuple(dim // 2 for dim in image_shape)
    if radius is None:
        radius = min(image_shape) * 3.0 / 8.0
    coords = np.mgrid[[slice(dim) for dim in image_shape]]
    # Shift the coordinate grid so the origin sits at `center`.
    offsets = (coords.T - center).T
    distance = np.sqrt((offsets ** 2).sum(0))
    # Strict inequality: points exactly on the radius are outside.
    return np.int8(distance < radius)
def checkerboard_level_set(image_shape, square_size=5):
    """Create a checkerboard level set with binary values.

    Parameters
    ----------
    image_shape : tuple of positive integers
        Shape of the image.
    square_size : int, optional
        Size of the squares of the checkerboard. It defaults to 5.

    Returns
    -------
    out : array with shape `image_shape`
        Binary level set of the checkerboard.

    See also
    --------
    circle_level_set
    """
    # Open grid: one broadcastable coordinate array per image dimension.
    grid = np.ogrid[tuple(slice(i) for i in image_shape)]
    # Parity (0/1) of the square index along each axis.
    grid = [(grid_i // square_size) & 1 for grid_i in grid]

    # XOR the per-axis parities pairwise; broadcasting expands the open-grid
    # axes to the full image shape.  `np.bitwise_xor.reduce` on the raw list
    # would first try to stack arrays of different shapes, which modern
    # NumPy rejects as a ragged array.
    checkerboard = grid[0]
    for parity in grid[1:]:
        checkerboard = checkerboard ^ parity

    res = np.int8(checkerboard)
    return res
def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
                            smoothing=1, lambda1=1, lambda2=1,
                            iter_callback=lambda x: None):
    """Morphological Active Contours without Edges (MorphACWE)

    Active contours without edges implemented with morphological operators. It
    can be used to segment objects in images and volumes without well defined
    borders. It is required that the inside of the object looks different on
    average than the outside (i.e., the inner area of the object should be
    darker or lighter than the outer area on average).

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume to be segmented.
    iterations : uint
        Number of iterations to run
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    lambda1 : float, optional
        Weight parameter for the outer region. If `lambda1` is larger than
        `lambda2`, the outer region will contain a larger range of values than
        the inner region.
    lambda2 : float, optional
        Weight parameter for the inner region. If `lambda2` is larger than
        `lambda1`, the inner region will contain a larger range of values than
        the outer region.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See also
    --------
    circle_level_set, checkerboard_level_set

    Notes
    -----
    This is a version of the Chan-Vese algorithm that uses morphological
    operators instead of solving a partial differential equation (PDE) for the
    evolution of the contour. The set of morphological operators used in this
    algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
    (see [1]_). However, morphological operators do not suffer from the
    numerical stability issues typically found in PDEs (it is not necessary to
    find the right time step for the evolution), and are computationally
    faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, DOI 10.1109/TPAMI.2013.106
    """
    # Resolve string presets ('checkerboard'/'circle') into arrays, then
    # validate shapes and binarize to the int8 level set `u` (1 = inside).
    init_level_set = _init_level_set(init_level_set, image.shape)

    _check_input(image, init_level_set)

    u = np.int8(init_level_set > 0)

    iter_callback(u)

    for _ in range(iterations):

        # inside = u > 0
        # outside = u <= 0
        # Region means: c0 over the outside (u == 0), c1 over the inside
        # (u == 1).  The 1e-8 term avoids division by zero when one region
        # is empty.
        c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
        c1 = (image * u).sum() / float(u.sum() + 1e-8)

        # Image attachment: |∇u| is nonzero only near the contour, so only
        # pixels on the boundary can switch regions.  The sign of `aux`
        # decides which region assignment lowers the Chan-Vese energy.
        du = np.gradient(u)
        abs_du = np.abs(du).sum(0)
        aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)

        u[aux < 0] = 1
        u[aux > 0] = 0

        # Smoothing: each call alternates SIoIS / ISoSI (see `_curvop`),
        # approximating a curvature-flow regularization of the contour.
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
def morphological_geodesic_active_contour(gimage, iterations,
                                          init_level_set='circle', smoothing=1,
                                          threshold='auto', balloon=0,
                                          iter_callback=lambda x: None):
    """Morphological Geodesic Active Contours (MorphGAC).

    Geodesic active contours implemented with morphological operators. It can
    be used to segment objects with visible but noisy, cluttered, broken
    borders.

    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely the
        original image. Instead, this is usually a preprocessed version of the
        original image that enhances and highlights the borders (or other
        structures) of the object to segment.
        `morphological_geodesic_active_contour` will try to stop the contour
        evolution in areas where `gimage` is small. See
        `morphsnakes.inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        `morphological_geodesic_active_contour` might greatly depend on this
        preprocessing.
    iterations : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'circle'. See the
        documentation of `checkerboard_level_set` and `circle_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float, optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in this
        areas.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to push
        the contour towards a border. A negative value will shrink the contour,
        while a positive value will expand the contour in these areas. Setting
        this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See also
    --------
    inverse_gaussian_gradient, circle_level_set, checkerboard_level_set

    Notes
    -----
    This is a version of the Geodesic Active Contours (GAC) algorithm that uses
    morphological operators instead of solving partial differential equations
    (PDEs) for the evolution of the contour. The set of morphological operators
    used in this algorithm are proved to be infinitesimally equivalent to the
    GAC PDEs (see [1]_). However, morphological operators do not suffer
    from the numerical stability issues typically found in PDEs (e.g., it is
    not necessary to find the right time step for the evolution), and are
    computationally faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, DOI 10.1109/TPAMI.2013.106
    """
    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)

    _check_input(image, init_level_set)

    if threshold == 'auto':
        # Heuristic stopping threshold: the 40th percentile of the
        # (border-enhanced) image values.
        threshold = np.percentile(image, 40)

    # Full-connectivity structuring element for the balloon dilation/erosion.
    structure = np.ones((3,) * len(image.shape), dtype=np.int8)
    dimage = np.gradient(image)
    # threshold_mask = image > threshold
    # Mask of pixels where the balloon force is allowed to act; only needed
    # (and only defined) when a balloon force is requested.
    if balloon != 0:
        threshold_mask_balloon = image > threshold / np.abs(balloon)

    # Binarize the initial level set (1 = inside the contour).
    u = np.int8(init_level_set > 0)

    iter_callback(u)

    for _ in range(iterations):

        # Balloon: uniformly expand (dilate) or shrink (erode) the contour,
        # but only at pixels selected by `threshold_mask_balloon`.
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]

        # Image attachment: the sign of ∇g·∇u (accumulated per axis into
        # `aux`) tells whether moving the contour along the image gradient
        # attracts it towards the border.
        aux = np.zeros_like(image)
        du = np.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0

        # Smoothing: alternating SIoIS / ISoSI (see `_curvop`) approximates
        # curvature-flow regularization of the contour.
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
|
pmneila/morphsnakes | morphsnakes.py | morphological_chan_vese | python | def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
smoothing=1, lambda1=1, lambda2=1,
iter_callback=lambda x: None):
init_level_set = _init_level_set(init_level_set, image.shape)
_check_input(image, init_level_set)
u = np.int8(init_level_set > 0)
iter_callback(u)
for _ in range(iterations):
# inside = u > 0
# outside = u <= 0
c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
c1 = (image * u).sum() / float(u.sum() + 1e-8)
# Image attachment
du = np.gradient(u)
abs_du = np.abs(du).sum(0)
aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)
u[aux < 0] = 1
u[aux > 0] = 0
# Smoothing
for _ in range(smoothing):
u = _curvop(u)
iter_callback(u)
return u | Morphological Active Contours without Edges (MorphACWE)
Active contours without edges implemented with morphological operators. It
can be used to segment objects in images and volumes without well defined
borders. It is required that the inside of the object looks different on
average than the outside (i.e., the inner area of the object should be
darker or lighter than the outer area on average).
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume to be segmented.
iterations : uint
Number of iterations to run
init_level_set : str, (M, N) array, or (L, M, N) array
Initial level set. If an array is given, it will be binarized and used
as the initial level set. If a string is given, it defines the method
to generate a reasonable initial level set with the shape of the
`image`. Accepted values are 'checkerboard' and 'circle'. See the
documentation of `checkerboard_level_set` and `circle_level_set`
respectively for details about how these level sets are created.
smoothing : uint, optional
Number of times the smoothing operator is applied per iteration.
Reasonable values are around 1-4. Larger values lead to smoother
segmentations.
lambda1 : float, optional
Weight parameter for the outer region. If `lambda1` is larger than
`lambda2`, the outer region will contain a larger range of values than
the inner region.
lambda2 : float, optional
Weight parameter for the inner region. If `lambda2` is larger than
`lambda1`, the inner region will contain a larger range of values than
the outer region.
iter_callback : function, optional
If given, this function is called once per iteration with the current
level set as the only argument. This is useful for debugging or for
plotting intermediate results during the evolution.
Returns
-------
out : (M, N) or (L, M, N) array
Final segmentation (i.e., the final level set)
See also
--------
circle_level_set, checkerboard_level_set
Notes
-----
This is a version of the Chan-Vese algorithm that uses morphological
operators instead of solving a partial differential equation (PDE) for the
evolution of the contour. The set of morphological operators used in this
algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
(see [1]_). However, morphological operators are do not suffer from the
numerical stability issues typically found in PDEs (it is not necessary to
find the right time step for the evolution), and are computationally
faster.
The algorithm and its theoretical derivation are described in [1]_.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106 | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L265-L366 | [
"iter_callback=lambda x: None):\n",
"def _check_input(image, init_level_set):\n \"\"\"Check that shapes of `image` and `init_level_set` match.\"\"\"\n if not image.ndim in [2, 3]:\n raise ValueError(\"`image` must be a 2 or 3-dimensional array.\")\n\n if len(image.shape) != len(init_level_set.shape):\n raise ValueError(\"The dimensions of the initial level set do not \"\n \"match the dimensions of the image.\")\n",
"def _init_level_set(init_level_set, image_shape):\n \"\"\"Auxiliary function for initializing level sets with a string.\n\n If `init_level_set` is not a string, it is returned as is.\n \"\"\"\n if isinstance(init_level_set, str):\n if init_level_set == 'checkerboard':\n res = checkerboard_level_set(image_shape)\n elif init_level_set == 'circle':\n res = circle_level_set(image_shape)\n else:\n raise ValueError(\"`init_level_set` not in \"\n \"['checkerboard', 'circle']\")\n else:\n res = init_level_set\n return res\n",
"def callback(levelset):\n\n if ax1.collections:\n del ax1.collections[0]\n ax1.contour(levelset, [0.5], colors='r')\n ax_u.set_data(levelset)\n fig.canvas.draw()\n plt.pause(0.001)\n",
"def callback(levelset):\n\n counter[0] += 1\n if (counter[0] % plot_each) != 0:\n return\n\n if ax.collections:\n del ax.collections[0]\n\n coords, triangles = mcubes.marching_cubes(levelset, 0.5)\n ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2],\n triangles=triangles)\n plt.pause(0.1)\n",
"def callback(x):\n evolution.append(x.sum())\n"
] | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3),
np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)),
np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def sup_inf(u):
"""SI operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0)
def inf_sup(u):
"""IS operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
dilations = []
for P_i in P:
dilations.append(ndi.binary_dilation(u, P_i))
return np.array(dilations, dtype=np.int8).min(0)
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS
lambda u: inf_sup(sup_inf(u))]) # ISoSI
def _check_input(image, init_level_set):
"""Check that shapes of `image` and `init_level_set` match."""
if not image.ndim in [2, 3]:
raise ValueError("`image` must be a 2 or 3-dimensional array.")
if len(image.shape) != len(init_level_set.shape):
raise ValueError("The dimensions of the initial level set do not "
"match the dimensions of the image.")
def _init_level_set(init_level_set, image_shape):
"""Auxiliary function for initializing level sets with a string.
If `init_level_set` is not a string, it is returned as is.
"""
if isinstance(init_level_set, str):
if init_level_set == 'checkerboard':
res = checkerboard_level_set(image_shape)
elif init_level_set == 'circle':
res = circle_level_set(image_shape)
else:
raise ValueError("`init_level_set` not in "
"['checkerboard', 'circle']")
else:
res = init_level_set
return res
def circle_level_set(image_shape, center=None, radius=None):
"""Create a circle level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image
center : tuple of positive integers, optional
Coordinates of the center of the circle given in (row, column). If not
given, it defaults to the center of the image.
radius : float, optional
Radius of the circle. If not given, it is set to the 75% of the
smallest image dimension.
Returns
-------
out : array with shape `image_shape`
Binary level set of the circle with the given `radius` and `center`.
See also
--------
checkerboard_level_set
"""
if center is None:
center = tuple(i // 2 for i in image_shape)
if radius is None:
radius = min(image_shape) * 3.0 / 8.0
grid = np.mgrid[[slice(i) for i in image_shape]]
grid = (grid.T - center).T
phi = radius - np.sqrt(np.sum((grid)**2, 0))
res = np.int8(phi > 0)
return res
def checkerboard_level_set(image_shape, square_size=5):
"""Create a checkerboard level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image.
square_size : int, optional
Size of the squares of the checkerboard. It defaults to 5.
Returns
-------
out : array with shape `image_shape`
Binary level set of the checkerboard.
See also
--------
circle_level_set
"""
grid = np.ogrid[[slice(i) for i in image_shape]]
grid = [(grid_i // square_size) & 1 for grid_i in grid]
checkerboard = np.bitwise_xor.reduce(grid, axis=0)
res = np.int8(checkerboard)
return res
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
"""Inverse of gradient magnitude.
Compute the magnitude of the gradients in the image and then inverts the
result in the range [0, 1]. Flat areas are assigned values close to 1,
while areas close to borders are assigned values close to 0.
This function or a similar one defined by the user should be applied over
the image as a preprocessing step before calling
`morphological_geodesic_active_contour`.
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume.
alpha : float, optional
Controls the steepness of the inversion. A larger value will make the
transition between the flat areas and border areas steeper in the
resulting array.
sigma : float, optional
Standard deviation of the Gaussian filter applied over the image.
Returns
-------
gimage : (M, N) or (L, M, N) array
Preprocessed image (or volume) suitable for
`morphological_geodesic_active_contour`.
"""
gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
def morphological_geodesic_active_contour(gimage, iterations,
init_level_set='circle', smoothing=1,
threshold='auto', balloon=0,
iter_callback=lambda x: None):
"""Morphological Geodesic Active Contours (MorphGAC).
Geodesic active contours implemented with morphological operators. It can
be used to segment objects with visible but noisy, cluttered, broken
borders.
Parameters
----------
gimage : (M, N) or (L, M, N) array
Preprocessed image or volume to be segmented. This is very rarely the
original image. Instead, this is usually a preprocessed version of the
original image that enhances and highlights the borders (or other
structures) of the object to segment.
`morphological_geodesic_active_contour` will try to stop the contour
evolution in areas where `gimage` is small. See
`morphsnakes.inverse_gaussian_gradient` as an example function to
perform this preprocessing. Note that the quality of
`morphological_geodesic_active_contour` might greatly depend on this
preprocessing.
iterations : uint
Number of iterations to run.
init_level_set : str, (M, N) array, or (L, M, N) array
Initial level set. If an array is given, it will be binarized and used
as the initial level set. If a string is given, it defines the method
to generate a reasonable initial level set with the shape of the
`image`. Accepted values are 'checkerboard' and 'circle'. See the
documentation of `checkerboard_level_set` and `circle_level_set`
respectively for details about how these level sets are created.
smoothing : uint, optional
Number of times the smoothing operator is applied per iteration.
Reasonable values are around 1-4. Larger values lead to smoother
segmentations.
threshold : float, optional
Areas of the image with a value smaller than this threshold will be
considered borders. The evolution of the contour will stop in this
areas.
balloon : float, optional
Balloon force to guide the contour in non-informative areas of the
image, i.e., areas where the gradient of the image is too small to push
the contour towards a border. A negative value will shrink the contour,
while a positive value will expand the contour in these areas. Setting
this to zero will disable the balloon force.
iter_callback : function, optional
If given, this function is called once per iteration with the current
level set as the only argument. This is useful for debugging or for
plotting intermediate results during the evolution.
Returns
-------
out : (M, N) or (L, M, N) array
Final segmentation (i.e., the final level set)
See also
--------
inverse_gaussian_gradient, circle_level_set, checkerboard_level_set
Notes
-----
This is a version of the Geodesic Active Contours (GAC) algorithm that uses
morphological operators instead of solving partial differential equations
(PDEs) for the evolution of the contour. The set of morphological operators
used in this algorithm are proved to be infinitesimally equivalent to the
GAC PDEs (see [1]_). However, morphological operators are do not suffer
from the numerical stability issues typically found in PDEs (e.g., it is
not necessary to find the right time step for the evolution), and are
computationally faster.
The algorithm and its theoretical derivation are described in [1]_.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
"""
image = gimage
init_level_set = _init_level_set(init_level_set, image.shape)
_check_input(image, init_level_set)
if threshold == 'auto':
threshold = np.percentile(image, 40)
structure = np.ones((3,) * len(image.shape), dtype=np.int8)
dimage = np.gradient(image)
# threshold_mask = image > threshold
if balloon != 0:
threshold_mask_balloon = image > threshold / np.abs(balloon)
u = np.int8(init_level_set > 0)
iter_callback(u)
for _ in range(iterations):
# Balloon
if balloon > 0:
aux = ndi.binary_dilation(u, structure)
elif balloon < 0:
aux = ndi.binary_erosion(u, structure)
if balloon != 0:
u[threshold_mask_balloon] = aux[threshold_mask_balloon]
# Image attachment
aux = np.zeros_like(image)
du = np.gradient(u)
for el1, el2 in zip(dimage, du):
aux += el1 * el2
u[aux > 0] = 1
u[aux < 0] = 0
# Smoothing
for _ in range(smoothing):
u = _curvop(u)
iter_callback(u)
return u
|
pmneila/morphsnakes | morphsnakes.py | morphological_geodesic_active_contour | python | def morphological_geodesic_active_contour(gimage, iterations,
init_level_set='circle', smoothing=1,
threshold='auto', balloon=0,
iter_callback=lambda x: None):
image = gimage
init_level_set = _init_level_set(init_level_set, image.shape)
_check_input(image, init_level_set)
if threshold == 'auto':
threshold = np.percentile(image, 40)
structure = np.ones((3,) * len(image.shape), dtype=np.int8)
dimage = np.gradient(image)
# threshold_mask = image > threshold
if balloon != 0:
threshold_mask_balloon = image > threshold / np.abs(balloon)
u = np.int8(init_level_set > 0)
iter_callback(u)
for _ in range(iterations):
# Balloon
if balloon > 0:
aux = ndi.binary_dilation(u, structure)
elif balloon < 0:
aux = ndi.binary_erosion(u, structure)
if balloon != 0:
u[threshold_mask_balloon] = aux[threshold_mask_balloon]
# Image attachment
aux = np.zeros_like(image)
du = np.gradient(u)
for el1, el2 in zip(dimage, du):
aux += el1 * el2
u[aux > 0] = 1
u[aux < 0] = 0
# Smoothing
for _ in range(smoothing):
u = _curvop(u)
iter_callback(u)
return u | Morphological Geodesic Active Contours (MorphGAC).
Geodesic active contours implemented with morphological operators. It can
be used to segment objects with visible but noisy, cluttered, broken
borders.
Parameters
----------
gimage : (M, N) or (L, M, N) array
Preprocessed image or volume to be segmented. This is very rarely the
original image. Instead, this is usually a preprocessed version of the
original image that enhances and highlights the borders (or other
structures) of the object to segment.
`morphological_geodesic_active_contour` will try to stop the contour
evolution in areas where `gimage` is small. See
`morphsnakes.inverse_gaussian_gradient` as an example function to
perform this preprocessing. Note that the quality of
`morphological_geodesic_active_contour` might greatly depend on this
preprocessing.
iterations : uint
Number of iterations to run.
init_level_set : str, (M, N) array, or (L, M, N) array
Initial level set. If an array is given, it will be binarized and used
as the initial level set. If a string is given, it defines the method
to generate a reasonable initial level set with the shape of the
`image`. Accepted values are 'checkerboard' and 'circle'. See the
documentation of `checkerboard_level_set` and `circle_level_set`
respectively for details about how these level sets are created.
smoothing : uint, optional
Number of times the smoothing operator is applied per iteration.
Reasonable values are around 1-4. Larger values lead to smoother
segmentations.
threshold : float, optional
Areas of the image with a value smaller than this threshold will be
considered borders. The evolution of the contour will stop in this
areas.
balloon : float, optional
Balloon force to guide the contour in non-informative areas of the
image, i.e., areas where the gradient of the image is too small to push
the contour towards a border. A negative value will shrink the contour,
while a positive value will expand the contour in these areas. Setting
this to zero will disable the balloon force.
iter_callback : function, optional
If given, this function is called once per iteration with the current
level set as the only argument. This is useful for debugging or for
plotting intermediate results during the evolution.
Returns
-------
out : (M, N) or (L, M, N) array
Final segmentation (i.e., the final level set)
See also
--------
inverse_gaussian_gradient, circle_level_set, checkerboard_level_set
Notes
-----
This is a version of the Geodesic Active Contours (GAC) algorithm that uses
morphological operators instead of solving partial differential equations
(PDEs) for the evolution of the contour. The set of morphological operators
used in this algorithm are proved to be infinitesimally equivalent to the
GAC PDEs (see [1]_). However, morphological operators are do not suffer
from the numerical stability issues typically found in PDEs (e.g., it is
not necessary to find the right time step for the evolution), and are
computationally faster.
The algorithm and its theoretical derivation are described in [1]_.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106 | train | https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes.py#L369-L493 | [
"iter_callback=lambda x: None):\n",
"def _check_input(image, init_level_set):\n \"\"\"Check that shapes of `image` and `init_level_set` match.\"\"\"\n if not image.ndim in [2, 3]:\n raise ValueError(\"`image` must be a 2 or 3-dimensional array.\")\n\n if len(image.shape) != len(init_level_set.shape):\n raise ValueError(\"The dimensions of the initial level set do not \"\n \"match the dimensions of the image.\")\n",
"def _init_level_set(init_level_set, image_shape):\n \"\"\"Auxiliary function for initializing level sets with a string.\n\n If `init_level_set` is not a string, it is returned as is.\n \"\"\"\n if isinstance(init_level_set, str):\n if init_level_set == 'checkerboard':\n res = checkerboard_level_set(image_shape)\n elif init_level_set == 'circle':\n res = circle_level_set(image_shape)\n else:\n raise ValueError(\"`init_level_set` not in \"\n \"['checkerboard', 'circle']\")\n else:\n res = init_level_set\n return res\n",
"def callback(levelset):\n\n if ax1.collections:\n del ax1.collections[0]\n ax1.contour(levelset, [0.5], colors='r')\n ax_u.set_data(levelset)\n fig.canvas.draw()\n plt.pause(0.001)\n"
] | # -*- coding: utf-8 -*-
"""
====================
Morphological Snakes
====================
*Morphological Snakes* [1]_ are a family of methods for image segmentation.
Their behavior is similar to that of active contours (for example, *Geodesic
Active Contours* [2]_ or *Active Contours without Edges* [3]_). However,
*Morphological Snakes* use morphological operators (such as dilation or
erosion) over a binary array instead of solving PDEs over a floating point
array, which is the standard approach for active contours. This makes
*Morphological Snakes* faster and numerically more stable than their
traditional counterpart.
There are two *Morphological Snakes* methods available in this implementation:
*Morphological Geodesic Active Contours* (**MorphGAC**, implemented in the
function ``morphological_geodesic_active_contour``) and *Morphological Active
Contours without Edges* (**MorphACWE**, implemented in the function
``morphological_chan_vese``).
**MorphGAC** is suitable for images with visible contours, even when these
contours might be noisy, cluttered, or partially unclear. It requires, however,
that the image is preprocessed to highlight the contours. This can be done
using the function ``inverse_gaussian_gradient``, although the user might want
to define their own version. The quality of the **MorphGAC** segmentation
depends greatly on this preprocessing step.
On the contrary, **MorphACWE** works well when the pixel values of the inside
and the outside regions of the object to segment have different averages.
Unlike **MorphGAC**, **MorphACWE** does not require that the contours of the
object are well defined, and it works over the original image without any
preceding processing. This makes **MorphACWE** easier to use and tune than
**MorphGAC**.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela and Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
.. [2] Geodesic Active Contours, Vicent Caselles, Ron Kimmel and Guillermo
Sapiro. In International Journal of Computer Vision (IJCV), 1997,
DOI:10.1023/A:1007979827043
.. [3] Active Contours without Edges, Tony Chan and Luminita Vese. In IEEE
Transactions on Image Processing, 2001, DOI:10.1109/83.902291
"""
__author__ = "P. Márquez Neila <p.mneila@upm.es>"
from itertools import cycle
import numpy as np
from scipy import ndimage as ndi
__all__ = ['morphological_chan_vese',
'morphological_geodesic_active_contour',
'inverse_gaussian_gradient',
'circle_level_set',
'checkerboard_level_set'
]
__version__ = (2, 0, 1)
__version_str__ = ".".join(map(str, __version__))
class _fcycle(object):
def __init__(self, iterable):
"""Call functions from the iterable each time it is called."""
self.funcs = cycle(iterable)
def __call__(self, *args, **kwargs):
f = next(self.funcs)
return f(*args, **kwargs)
# SI and IS operators for 2D and 3D.
_P2 = [np.eye(3),
np.array([[0, 1, 0]] * 3),
np.flipud(np.eye(3)),
np.rot90([[0, 1, 0]] * 3)]
_P3 = [np.zeros((3, 3, 3)) for i in range(9)]
_P3[0][:, :, 1] = 1
_P3[1][:, 1, :] = 1
_P3[2][1, :, :] = 1
_P3[3][:, [0, 1, 2], [0, 1, 2]] = 1
_P3[4][:, [0, 1, 2], [2, 1, 0]] = 1
_P3[5][[0, 1, 2], :, [0, 1, 2]] = 1
_P3[6][[0, 1, 2], :, [2, 1, 0]] = 1
_P3[7][[0, 1, 2], [0, 1, 2], :] = 1
_P3[8][[0, 1, 2], [2, 1, 0], :] = 1
def sup_inf(u):
"""SI operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
erosions = []
for P_i in P:
erosions.append(ndi.binary_erosion(u, P_i))
return np.array(erosions, dtype=np.int8).max(0)
def inf_sup(u):
"""IS operator."""
if np.ndim(u) == 2:
P = _P2
elif np.ndim(u) == 3:
P = _P3
else:
raise ValueError("u has an invalid number of dimensions "
"(should be 2 or 3)")
dilations = []
for P_i in P:
dilations.append(ndi.binary_dilation(u, P_i))
return np.array(dilations, dtype=np.int8).min(0)
_curvop = _fcycle([lambda u: sup_inf(inf_sup(u)), # SIoIS
lambda u: inf_sup(sup_inf(u))]) # ISoSI
def _check_input(image, init_level_set):
"""Check that shapes of `image` and `init_level_set` match."""
if not image.ndim in [2, 3]:
raise ValueError("`image` must be a 2 or 3-dimensional array.")
if len(image.shape) != len(init_level_set.shape):
raise ValueError("The dimensions of the initial level set do not "
"match the dimensions of the image.")
def _init_level_set(init_level_set, image_shape):
"""Auxiliary function for initializing level sets with a string.
If `init_level_set` is not a string, it is returned as is.
"""
if isinstance(init_level_set, str):
if init_level_set == 'checkerboard':
res = checkerboard_level_set(image_shape)
elif init_level_set == 'circle':
res = circle_level_set(image_shape)
else:
raise ValueError("`init_level_set` not in "
"['checkerboard', 'circle']")
else:
res = init_level_set
return res
def circle_level_set(image_shape, center=None, radius=None):
"""Create a circle level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image
center : tuple of positive integers, optional
Coordinates of the center of the circle given in (row, column). If not
given, it defaults to the center of the image.
radius : float, optional
Radius of the circle. If not given, it is set to the 75% of the
smallest image dimension.
Returns
-------
out : array with shape `image_shape`
Binary level set of the circle with the given `radius` and `center`.
See also
--------
checkerboard_level_set
"""
if center is None:
center = tuple(i // 2 for i in image_shape)
if radius is None:
radius = min(image_shape) * 3.0 / 8.0
grid = np.mgrid[[slice(i) for i in image_shape]]
grid = (grid.T - center).T
phi = radius - np.sqrt(np.sum((grid)**2, 0))
res = np.int8(phi > 0)
return res
def checkerboard_level_set(image_shape, square_size=5):
"""Create a checkerboard level set with binary values.
Parameters
----------
image_shape : tuple of positive integers
Shape of the image.
square_size : int, optional
Size of the squares of the checkerboard. It defaults to 5.
Returns
-------
out : array with shape `image_shape`
Binary level set of the checkerboard.
See also
--------
circle_level_set
"""
grid = np.ogrid[[slice(i) for i in image_shape]]
grid = [(grid_i // square_size) & 1 for grid_i in grid]
checkerboard = np.bitwise_xor.reduce(grid, axis=0)
res = np.int8(checkerboard)
return res
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
"""Inverse of gradient magnitude.
Compute the magnitude of the gradients in the image and then inverts the
result in the range [0, 1]. Flat areas are assigned values close to 1,
while areas close to borders are assigned values close to 0.
This function or a similar one defined by the user should be applied over
the image as a preprocessing step before calling
`morphological_geodesic_active_contour`.
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume.
alpha : float, optional
Controls the steepness of the inversion. A larger value will make the
transition between the flat areas and border areas steeper in the
resulting array.
sigma : float, optional
Standard deviation of the Gaussian filter applied over the image.
Returns
-------
gimage : (M, N) or (L, M, N) array
Preprocessed image (or volume) suitable for
`morphological_geodesic_active_contour`.
"""
gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
def morphological_chan_vese(image, iterations, init_level_set='checkerboard',
smoothing=1, lambda1=1, lambda2=1,
iter_callback=lambda x: None):
"""Morphological Active Contours without Edges (MorphACWE)
Active contours without edges implemented with morphological operators. It
can be used to segment objects in images and volumes without well defined
borders. It is required that the inside of the object looks different on
average than the outside (i.e., the inner area of the object should be
darker or lighter than the outer area on average).
Parameters
----------
image : (M, N) or (L, M, N) array
Grayscale image or volume to be segmented.
iterations : uint
Number of iterations to run
init_level_set : str, (M, N) array, or (L, M, N) array
Initial level set. If an array is given, it will be binarized and used
as the initial level set. If a string is given, it defines the method
to generate a reasonable initial level set with the shape of the
`image`. Accepted values are 'checkerboard' and 'circle'. See the
documentation of `checkerboard_level_set` and `circle_level_set`
respectively for details about how these level sets are created.
smoothing : uint, optional
Number of times the smoothing operator is applied per iteration.
Reasonable values are around 1-4. Larger values lead to smoother
segmentations.
lambda1 : float, optional
Weight parameter for the outer region. If `lambda1` is larger than
`lambda2`, the outer region will contain a larger range of values than
the inner region.
lambda2 : float, optional
Weight parameter for the inner region. If `lambda2` is larger than
`lambda1`, the inner region will contain a larger range of values than
the outer region.
iter_callback : function, optional
If given, this function is called once per iteration with the current
level set as the only argument. This is useful for debugging or for
plotting intermediate results during the evolution.
Returns
-------
out : (M, N) or (L, M, N) array
Final segmentation (i.e., the final level set)
See also
--------
circle_level_set, checkerboard_level_set
Notes
-----
This is a version of the Chan-Vese algorithm that uses morphological
operators instead of solving a partial differential equation (PDE) for the
evolution of the contour. The set of morphological operators used in this
algorithm are proved to be infinitesimally equivalent to the Chan-Vese PDE
(see [1]_). However, morphological operators are do not suffer from the
numerical stability issues typically found in PDEs (it is not necessary to
find the right time step for the evolution), and are computationally
faster.
The algorithm and its theoretical derivation are described in [1]_.
References
----------
.. [1] A Morphological Approach to Curvature-based Evolution of Curves and
Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
Transactions on Pattern Analysis and Machine Intelligence (PAMI),
2014, DOI 10.1109/TPAMI.2013.106
"""
init_level_set = _init_level_set(init_level_set, image.shape)
_check_input(image, init_level_set)
u = np.int8(init_level_set > 0)
iter_callback(u)
for _ in range(iterations):
# inside = u > 0
# outside = u <= 0
c0 = (image * (1 - u)).sum() / float((1 - u).sum() + 1e-8)
c1 = (image * u).sum() / float(u.sum() + 1e-8)
# Image attachment
du = np.gradient(u)
abs_du = np.abs(du).sum(0)
aux = abs_du * (lambda1 * (image - c1)**2 - lambda2 * (image - c0)**2)
u[aux < 0] = 1
u[aux > 0] = 0
# Smoothing
for _ in range(smoothing):
u = _curvop(u)
iter_callback(u)
return u
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc_base._set_parameters | python | def _set_parameters(self, parameters):
nr_f = self.f.size
# sort out parameters
rho0, m, tau, c = self._sort_parameters(parameters)
newsize = (nr_f, len(m))
# rho0_resized = np.resize(rho0, newsize)
m_resized = np.resize(m, newsize)
tau_resized = np.resize(tau, newsize)
c_resized = np.resize(c, newsize)
omega = np.atleast_2d(2 * np.pi * self.f).T
self.w = np.resize(omega, (len(m), nr_f)).T
self.rho0 = rho0
self.m = m_resized
self.tau = tau_resized
self.c = c_resized
# compute some common terms
self.otc = (self.w * self.tau) ** self.c
self.otc2 = (self.w * self.tau) ** (2 * self.c)
self.ang = self.c * np.pi / 2.0 # rad
self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2 | Sort out the various possible parameter inputs and return a config
object (dict)
We have multiple input formats:
1) a list, tuple, or numpy.ndarray, containing the linear parameters
in the following order:
* for single term: rho0, m1, tau1, c1
* for multiple termss: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...
2) a dictionary with the entries "rho0", "m", "tau", "c"
2b) if the dictionary entries for "m", "tau", and "c" are lists, the
entries correspond to mulitple polarisazion terms | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L49-L88 | [
"def _sort_parameters(self, parameters):\n # type 1\n if isinstance(parameters, (list, tuple, np.ndarray)):\n pars = np.atleast_1d(parameters)\n nr_pars = int((pars.shape[0] - 1) / 3)\n\n rho0 = pars[0]\n m = pars[1:nr_pars + 1]\n tau = pars[nr_pars + 1: 2 * nr_pars + 1]\n c = pars[2 * nr_pars + 1:]\n\n elif isinstance(parameters, dict):\n rho0 = parameters['rho0']\n m = _make_list(parameters['m'])\n tau = _make_list(parameters['tau'])\n c = _make_list(parameters['c'])\n else:\n print(parameters)\n raise Exception('Input format not recognized')\n\n return rho0, m, tau, c\n"
] | class cc_base(object):
""" Base class for Cole-Cole objects (both resistivity and conductivity)
"""
def __init__(self, frequencies):
self.f = frequencies
def _sort_parameters(self, parameters):
# type 1
if isinstance(parameters, (list, tuple, np.ndarray)):
pars = np.atleast_1d(parameters)
nr_pars = int((pars.shape[0] - 1) / 3)
rho0 = pars[0]
m = pars[1:nr_pars + 1]
tau = pars[nr_pars + 1: 2 * nr_pars + 1]
c = pars[2 * nr_pars + 1:]
elif isinstance(parameters, dict):
rho0 = parameters['rho0']
m = _make_list(parameters['m'])
tau = _make_list(parameters['tau'])
c = _make_list(parameters['c'])
else:
print(parameters)
raise Exception('Input format not recognized')
return rho0, m, tau, c
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc.response | python | def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response | r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L93-L116 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n * for single term: rho0, m1, tau1, c1\n * for multiple termss: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...\n\n 2) a dictionary with the entries \"rho0\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n rho0, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # rho0_resized = np.resize(rho0, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.rho0 = rho0\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def dre_drho0(self, pars):
r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives
"""
self._set_parameters(pars)
numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
term = numerator / self.denom
specs = np.sum(term, axis=1)
result = 1 - specs
return result
def dre_dlog10rho0(self, pars):
"""Compute partial derivative of real parts to log10(rho0)
"""
# first call the linear response to set the parameters
linear_response = self.dre_drho0(pars)
result = np.log(10) * self.rho0 * linear_response
return result
def dre_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * (np.cos(self.ang) + self.otc)
result = numerator / self.denom
result *= self.rho0
return result
def dre_dlog10m(self, pars):
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} cos(\frac{c \pi}{2} - m \omega^{2 c} 2 c
\tau^{2c - 1}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega
\tau)^{2 c}} +
\rho_0 \frac{\left[m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \
(self.c - 1) *\
np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\
2 * self.c * self.tau ** (2 * self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dre_dlog10tau(self, pars):
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial c} = \rho_0
\frac{-m ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + m
(\omega\tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + ln(\omega
\tau)(\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) +
(\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c
cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c
\pi}{2} + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\
np.cos(self.ang) +\
self.m * self.otc * (np.pi / 2.0) *\
np.sin(self.ang) -\
2 * self.m * np.log(self.w * self.tau) *\
self.otc2
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_drho0(self, pars):
r"""
:math:`\frac{\partial \hat{\rho}''(\omega)}{\partial \rho_0} = -
\frac{m (\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10rho0(self, pars):
lin_resp = self.dim_drho0(pars)
result = np.log(10) * self.rho0 * lin_resp
return result
def dim_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result
def dim_dlog10m(self, pars):
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
self.c * self.tau ** (self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_dlog10tau(self, pars):
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def Jacobian_re_im(self, pars):
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
"""
partials = []
# partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
# partials.append(self.dre_dlog10tau(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
# partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
# partials.append(self.dim_dlog10tau(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print('SHAPES')
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc.dre_drho0 | python | def dre_drho0(self, pars):
r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives
"""
self._set_parameters(pars)
numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
term = numerator / self.denom
specs = np.sum(term, axis=1)
result = 1 - specs
return result | r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L118-L142 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n * for single term: rho0, m1, tau1, c1\n * for multiple termss: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...\n\n 2) a dictionary with the entries \"rho0\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n rho0, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # rho0_resized = np.resize(rho0, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.rho0 = rho0\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response
def dre_dlog10rho0(self, pars):
"""Compute partial derivative of real parts to log10(rho0)
"""
# first call the linear response to set the parameters
linear_response = self.dre_drho0(pars)
result = np.log(10) * self.rho0 * linear_response
return result
def dre_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * (np.cos(self.ang) + self.otc)
result = numerator / self.denom
result *= self.rho0
return result
def dre_dlog10m(self, pars):
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} cos(\frac{c \pi}{2} - m \omega^{2 c} 2 c
\tau^{2c - 1}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega
\tau)^{2 c}} +
\rho_0 \frac{\left[m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \
(self.c - 1) *\
np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\
2 * self.c * self.tau ** (2 * self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dre_dlog10tau(self, pars):
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial c} = \rho_0
\frac{-m ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + m
(\omega\tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + ln(\omega
\tau)(\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) +
(\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c
cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c
\pi}{2} + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\
np.cos(self.ang) +\
self.m * self.otc * (np.pi / 2.0) *\
np.sin(self.ang) -\
2 * self.m * np.log(self.w * self.tau) *\
self.otc2
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_drho0(self, pars):
r"""
:math:`\frac{\partial \hat{\rho}''(\omega)}{\partial \rho_0} = -
\frac{m (\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10rho0(self, pars):
lin_resp = self.dim_drho0(pars)
result = np.log(10) * self.rho0 * lin_resp
return result
def dim_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result
def dim_dlog10m(self, pars):
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
self.c * self.tau ** (self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_dlog10tau(self, pars):
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def Jacobian_re_im(self, pars):
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
"""
partials = []
# partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
# partials.append(self.dre_dlog10tau(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
# partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
# partials.append(self.dim_dlog10tau(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print('SHAPES')
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc.dre_dlog10rho0 | python | def dre_dlog10rho0(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_drho0(pars)
result = np.log(10) * self.rho0 * linear_response
return result | Compute partial derivative of real parts to log10(rho0) | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L144-L151 | [
"def dre_drho0(self, pars):\n r\"\"\" Compute partial derivative of real parts with respect to\n :math:`\\rho_0`\n\n :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial \\rho_0} = 1 -\n \\frac{m (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^c}{1 + 2\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n\n Note that partial derivatives towards :math:`\\rho_0` are 1D, in\n contrast to the other parameter derivatives, which usually return 2D\n arrays!\n\n Returns\n -------\n dre_drho0: :class:`numpy.ndarray`\n Size N (nr of frequencies) array with the derivatives\n\n \"\"\"\n self._set_parameters(pars)\n numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)\n term = numerator / self.denom\n specs = np.sum(term, axis=1)\n\n result = 1 - specs\n return result\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response
def dre_drho0(self, pars):
r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives
"""
self._set_parameters(pars)
numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
term = numerator / self.denom
specs = np.sum(term, axis=1)
result = 1 - specs
return result
def dre_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * (np.cos(self.ang) + self.otc)
result = numerator / self.denom
result *= self.rho0
return result
def dre_dlog10m(self, pars):
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} cos(\frac{c \pi}{2} - m \omega^{2 c} 2 c
\tau^{2c - 1}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega
\tau)^{2 c}} +
\rho_0 \frac{\left[m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \
(self.c - 1) *\
np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\
2 * self.c * self.tau ** (2 * self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dre_dlog10tau(self, pars):
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial c} = \rho_0
\frac{-m ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + m
(\omega\tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + ln(\omega
\tau)(\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) +
(\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c
cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c
\pi}{2} + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\
np.cos(self.ang) +\
self.m * self.otc * (np.pi / 2.0) *\
np.sin(self.ang) -\
2 * self.m * np.log(self.w * self.tau) *\
self.otc2
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_drho0(self, pars):
r"""
:math:`\frac{\partial \hat{\rho}''(\omega)}{\partial \rho_0} = -
\frac{m (\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10rho0(self, pars):
lin_resp = self.dim_drho0(pars)
result = np.log(10) * self.rho0 * lin_resp
return result
def dim_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result
def dim_dlog10m(self, pars):
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
self.c * self.tau ** (self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_dlog10tau(self, pars):
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def Jacobian_re_im(self, pars):
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
"""
partials = []
# partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
# partials.append(self.dre_dlog10tau(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
# partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
# partials.append(self.dim_dlog10tau(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print('SHAPES')
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc.dre_dm | python | def dre_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * (np.cos(self.ang) + self.otc)
result = numerator / self.denom
result *= self.rho0
return result | r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}` | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L153-L163 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n * for single term: rho0, m1, tau1, c1\n * for multiple termss: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...\n\n 2) a dictionary with the entries \"rho0\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n rho0, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # rho0_resized = np.resize(rho0, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.rho0 = rho0\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response
def dre_drho0(self, pars):
r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives
"""
self._set_parameters(pars)
numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
term = numerator / self.denom
specs = np.sum(term, axis=1)
result = 1 - specs
return result
def dre_dlog10rho0(self, pars):
"""Compute partial derivative of real parts to log10(rho0)
"""
# first call the linear response to set the parameters
linear_response = self.dre_drho0(pars)
result = np.log(10) * self.rho0 * linear_response
return result
def dre_dlog10m(self, pars):
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} cos(\frac{c \pi}{2} - m \omega^{2 c} 2 c
\tau^{2c - 1}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega
\tau)^{2 c}} +
\rho_0 \frac{\left[m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \
(self.c - 1) *\
np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\
2 * self.c * self.tau ** (2 * self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dre_dlog10tau(self, pars):
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial c} = \rho_0
\frac{-m ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + m
(\omega\tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + ln(\omega
\tau)(\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) +
(\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c
cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c
\pi}{2} + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\
np.cos(self.ang) +\
self.m * self.otc * (np.pi / 2.0) *\
np.sin(self.ang) -\
2 * self.m * np.log(self.w * self.tau) *\
self.otc2
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_drho0(self, pars):
r"""
:math:`\frac{\partial \hat{\rho}''(\omega)}{\partial \rho_0} = -
\frac{m (\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10rho0(self, pars):
lin_resp = self.dim_drho0(pars)
result = np.log(10) * self.rho0 * lin_resp
return result
def dim_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result
def dim_dlog10m(self, pars):
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
self.c * self.tau ** (self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_dlog10tau(self, pars):
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def Jacobian_re_im(self, pars):
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
"""
partials = []
# partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
# partials.append(self.dre_dlog10tau(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
# partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
# partials.append(self.dim_dlog10tau(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print('SHAPES')
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc.dim_dm | python | def dim_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result | r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}` | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L257-L267 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n * for single term: rho0, m1, tau1, c1\n * for multiple termss: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...\n\n 2) a dictionary with the entries \"rho0\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n rho0, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # rho0_resized = np.resize(rho0, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.rho0 = rho0\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response
def dre_drho0(self, pars):
r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives
"""
self._set_parameters(pars)
numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
term = numerator / self.denom
specs = np.sum(term, axis=1)
result = 1 - specs
return result
def dre_dlog10rho0(self, pars):
"""Compute partial derivative of real parts to log10(rho0)
"""
# first call the linear response to set the parameters
linear_response = self.dre_drho0(pars)
result = np.log(10) * self.rho0 * linear_response
return result
def dre_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * (np.cos(self.ang) + self.otc)
result = numerator / self.denom
result *= self.rho0
return result
def dre_dlog10m(self, pars):
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} cos(\frac{c \pi}{2} - m \omega^{2 c} 2 c
\tau^{2c - 1}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega
\tau)^{2 c}} +
\rho_0 \frac{\left[m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \
(self.c - 1) *\
np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\
2 * self.c * self.tau ** (2 * self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dre_dlog10tau(self, pars):
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial c} = \rho_0
\frac{-m ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + m
(\omega\tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + ln(\omega
\tau)(\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) +
(\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c
cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c
\pi}{2} + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\
np.cos(self.ang) +\
self.m * self.otc * (np.pi / 2.0) *\
np.sin(self.ang) -\
2 * self.m * np.log(self.w * self.tau) *\
self.otc2
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_drho0(self, pars):
r"""
:math:`\frac{\partial \hat{\rho}''(\omega)}{\partial \rho_0} = -
\frac{m (\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10rho0(self, pars):
lin_resp = self.dim_drho0(pars)
result = np.log(10) * self.rho0 * lin_resp
return result
def dim_dlog10m(self, pars):
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
self.c * self.tau ** (self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_dlog10tau(self, pars):
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def Jacobian_re_im(self, pars):
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
"""
partials = []
# partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
# partials.append(self.dre_dlog10tau(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
# partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
# partials.append(self.dim_dlog10tau(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print('SHAPES')
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc.dim_dtau | python | def dim_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
self.c * self.tau ** (self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result | r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}` | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L274-L299 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n * for single term: rho0, m1, tau1, c1\n * for multiple termss: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...\n\n 2) a dictionary with the entries \"rho0\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n rho0, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # rho0_resized = np.resize(rho0, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.rho0 = rho0\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response
def dre_drho0(self, pars):
r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives
"""
self._set_parameters(pars)
numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
term = numerator / self.denom
specs = np.sum(term, axis=1)
result = 1 - specs
return result
def dre_dlog10rho0(self, pars):
"""Compute partial derivative of real parts to log10(rho0)
"""
# first call the linear response to set the parameters
linear_response = self.dre_drho0(pars)
result = np.log(10) * self.rho0 * linear_response
return result
def dre_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * (np.cos(self.ang) + self.otc)
result = numerator / self.denom
result *= self.rho0
return result
def dre_dlog10m(self, pars):
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} cos(\frac{c \pi}{2} - m \omega^{2 c} 2 c
\tau^{2c - 1}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega
\tau)^{2 c}} +
\rho_0 \frac{\left[m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \
(self.c - 1) *\
np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\
2 * self.c * self.tau ** (2 * self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dre_dlog10tau(self, pars):
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial c} = \rho_0
\frac{-m ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + m
(\omega\tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + ln(\omega
\tau)(\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) +
(\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c
cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c
\pi}{2} + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\
np.cos(self.ang) +\
self.m * self.otc * (np.pi / 2.0) *\
np.sin(self.ang) -\
2 * self.m * np.log(self.w * self.tau) *\
self.otc2
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_drho0(self, pars):
r"""
:math:`\frac{\partial \hat{\rho}''(\omega)}{\partial \rho_0} = -
\frac{m (\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10rho0(self, pars):
lin_resp = self.dim_drho0(pars)
result = np.log(10) * self.rho0 * lin_resp
return result
def dim_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result
def dim_dlog10m(self, pars):
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dlog10tau(self, pars):
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def Jacobian_re_im(self, pars):
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
"""
partials = []
# partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
# partials.append(self.dre_dlog10tau(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
# partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
# partials.append(self.dim_dlog10tau(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print('SHAPES')
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc.dim_dc | python | def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result | r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}` | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L306-L334 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n * for single term: rho0, m1, tau1, c1\n * for multiple termss: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...\n\n 2) a dictionary with the entries \"rho0\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n rho0, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # rho0_resized = np.resize(rho0, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.rho0 = rho0\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response
def dre_drho0(self, pars):
r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives
"""
self._set_parameters(pars)
numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
term = numerator / self.denom
specs = np.sum(term, axis=1)
result = 1 - specs
return result
def dre_dlog10rho0(self, pars):
"""Compute partial derivative of real parts to log10(rho0)
"""
# first call the linear response to set the parameters
linear_response = self.dre_drho0(pars)
result = np.log(10) * self.rho0 * linear_response
return result
def dre_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * (np.cos(self.ang) + self.otc)
result = numerator / self.denom
result *= self.rho0
return result
def dre_dlog10m(self, pars):
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} cos(\frac{c \pi}{2} - m \omega^{2 c} 2 c
\tau^{2c - 1}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega
\tau)^{2 c}} +
\rho_0 \frac{\left[m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \
(self.c - 1) *\
np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\
2 * self.c * self.tau ** (2 * self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dre_dlog10tau(self, pars):
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial c} = \rho_0
\frac{-m ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + m
(\omega\tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + ln(\omega
\tau)(\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) +
(\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c
cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c
\pi}{2} + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\
np.cos(self.ang) +\
self.m * self.otc * (np.pi / 2.0) *\
np.sin(self.ang) -\
2 * self.m * np.log(self.w * self.tau) *\
self.otc2
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_drho0(self, pars):
r"""
:math:`\frac{\partial \hat{\rho}''(\omega)}{\partial \rho_0} = -
\frac{m (\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10rho0(self, pars):
lin_resp = self.dim_drho0(pars)
result = np.log(10) * self.rho0 * lin_resp
return result
def dim_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result
def dim_dlog10m(self, pars):
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
self.c * self.tau ** (self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_dlog10tau(self, pars):
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def Jacobian_re_im(self, pars):
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
"""
partials = []
# partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
# partials.append(self.dre_dlog10tau(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
# partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
# partials.append(self.dim_dlog10tau(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print('SHAPES')
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J
|
m-weigand/sip_models | lib/sip_models/res/cc.py | cc.Jacobian_re_im | python | def Jacobian_re_im(self, pars):
r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars)
"""
partials = []
# partials.append(self.dre_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dre_drho0(pars)[:, np.newaxis])
partials.append(self.dre_dm(pars))
# partials.append(self.dre_dlog10tau(pars))
partials.append(self.dre_dtau(pars))
partials.append(self.dre_dc(pars))
# partials.append(self.dim_dlog10rho0(pars)[:, np.newaxis, :])
partials.append(self.dim_drho0(pars)[:, np.newaxis])
partials.append(self.dim_dm(pars))
# partials.append(self.dim_dlog10tau(pars))
partials.append(self.dim_dtau(pars))
partials.append(self.dim_dc(pars))
print('SHAPES')
for x in partials:
print(x.shape)
J = np.concatenate(partials, axis=1)
return J | r"""
:math:`J`
>>> import sip_models.res.cc as cc
>>> import numpy as np
>>> f = np.logspace(-3, 3, 20)
>>> pars = [100, 0.1, 0.04, 0.8]
>>> obj = cc.cc(f)
>>> J = obj.Jacobian_re_im(pars) | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L336-L367 | [
"def dre_dm(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial m} = - \\rho_0 m\n (\\omega \\tau)^c \\frac{(cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^c)}{1 + 2\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n \"\"\"\n self._set_parameters(pars)\n numerator = -self.otc * (np.cos(self.ang) + self.otc)\n result = numerator / self.denom\n result *= self.rho0\n return result\n",
"def dre_dtau(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial \\tau} = \\rho_0\n \\frac{-m \\omega^c c \\tau^{c-1} cos(\\frac{c \\pi}{2} - m \\omega^{2 c} 2 c\n \\tau^{2c - 1}}{1 + 2 (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega\n \\tau)^{2 c}} +\n \\rho_0 \\frac{\\left[m (\\omega \\tau)^c (cos(\\frac{c \\pi}{2}) + (\\omega\n \\tau)^c) \\right] \\cdot \\left[ 2 \\omega^c c \\tau^{c-1} cos(\\frac{c\n \\pi}{2}) + 2 c \\omega^{2 c} \\tau^{2 c - 1}\\right]}{\\left[1 + 2 (\\omega\n \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}\\right]^2}`\n \"\"\"\n self._set_parameters(pars)\n # term1\n nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \\\n (self.c - 1) *\\\n np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\\\n 2 * self.c * self.tau ** (2 * self.c - 1)\n term1 = nom1 / self.denom\n\n # term2\n nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\\\n (2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *\n np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *\n self.tau ** (2 * self.c - 1))\n term2 = nom2 / self.denom ** 2\n\n result = term1 + term2\n result *= self.rho0\n return result\n",
"def dre_dc(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial c} = \\rho_0\n \\frac{-m ln(\\omega \\tau) (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + m\n (\\omega\\tau)^c \\frac{\\pi}{2} sin(\\frac{c \\pi}{2}) + ln(\\omega\n \\tau)(\\omega \\tau)^c}{1 + 2 (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) +\n (\\omega \\tau)^{2 c}} +\n \\rho_0 \\frac{\\left[-m (\\omega \\tau)^c (cos(\\frac{c \\pi}{2}) + (\\omega\n \\tau)^c) \\right] \\cdot \\left[ -2 ln(\\omega \\tau) (\\omega \\tau)^c\n cos(\\frac{c \\pi}{2}) + 2 (\\omega \\tau)^c \\frac{\\pi}{2} cos(\\frac{c\n \\pi}{2} + 2 ln(\\omega \\tau) (\\omega \\tau)^{2 c}\\right]}{\\left[1 + 2\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}\\right]^2}`\n \"\"\"\n self._set_parameters(pars)\n # term1\n nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\\\n np.cos(self.ang) +\\\n self.m * self.otc * (np.pi / 2.0) *\\\n np.sin(self.ang) -\\\n 2 * self.m * np.log(self.w * self.tau) *\\\n self.otc2\n term1 = nom1 / self.denom\n\n # term2\n nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\\\n (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -\n 2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +\n 2 * np.log(self.w * self.tau) * self.otc2)\n term2 = nom2 / self.denom ** 2\n\n result = term1 + term2\n result *= self.rho0\n return result\n",
"def dim_dm(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho''}(\\omega)}{\\partial m} = - \\rho_0 m\n (\\omega \\tau)^c \\frac{sin(\\frac{c \\pi}{2})}{1 + 2 (\\omega \\tau)^c\n cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n \"\"\"\n self._set_parameters(pars)\n numerator = -self.otc * np.sin(self.ang)\n result = numerator / self.denom\n result *= self.rho0\n return result\n",
"def dim_dtau(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho''}(\\omega)}{\\partial \\tau} = \\rho_0\n \\frac{-m \\omega^c c \\tau^{c-1} sin(\\frac{c \\pi}{2} }{1 + 2 (\\omega\n \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}} +\n \\rho_0 \\frac{\\left[-m (\\omega \\tau)^c sin(\\frac{c \\pi}{2}\n \\right] \\cdot \\left[ 2 \\omega^c c \\tau^{c-1} cos(\\frac{c\n \\pi}{2}) + 2 c \\omega^{2 c} \\tau^{2 c - 1}\\right]}{\\left[1 + 2 (\\omega\n \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}\\right]^2}`\n \"\"\"\n self._set_parameters(pars)\n # term1\n nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\\\n self.c * self.tau ** (self.c - 1)\n term1 = nom1 / self.denom\n\n # term2\n nom2 = (self.m * self.otc * np.sin(self.ang)) *\\\n (2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *\n np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *\n self.tau ** (2 * self.c - 1))\n term2 = nom2 / self.denom ** 2\n\n result = term1 + term2\n result *= self.rho0\n return result\n",
"def dim_dc(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho''}(\\omega)}{\\partial c} = \\rho_0\n \\frac{-m sin(\\frac{c \\pi}{2}) ln(\\omega \\tau)(\\omega \\tau)^c - m\n (\\omega \\tau)^c \\frac{\\pi}{2} cos(\\frac{\\pi}{2}}{1 + 2 (\\omega \\tau)^c\n cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}} + \\rho_0 \\frac{\\left[-m\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) \\right] \\cdot \\left[ -2 ln(\\omega\n \\tau) (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + 2 (\\omega \\tau)^c\n \\frac{\\pi}{2} cos(\\frac{c \\pi}{2}) \\right] + \\left[2 ln(\\omega \\tau)\n (\\omega \\tau)^{2 c}\\right]}{\\left[1 + 2 (\\omega \\tau)^c cos(\\frac{c\n \\pi}{2}) + (\\omega \\tau)^{2 c}\\right]^2}`\n \"\"\"\n self._set_parameters(pars)\n # term1\n nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\\\n np.sin(self.ang)\n nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)\n term1 = (nom1a + nom1b) / self.denom\n\n # term2\n nom2 = (self.m * self.otc * np.sin(self.ang)) *\\\n (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -\n 2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +\n 2 * np.log(self.w * self.tau) * self.otc2)\n term2 = nom2 / self.denom ** 2\n result = term1 + term2\n\n result *= self.rho0\n return result\n",
"def dre_drho0(self, pars):\n r\"\"\" Compute partial derivative of real parts with respect to\n :math:`\\rho_0`\n\n :math:`\\frac{\\partial \\hat{\\rho'}(\\omega)}{\\partial \\rho_0} = 1 -\n \\frac{m (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^c}{1 + 2\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n\n Note that partial derivatives towards :math:`\\rho_0` are 1D, in\n contrast to the other parameter derivatives, which usually return 2D\n arrays!\n\n Returns\n -------\n dre_drho0: :class:`numpy.ndarray`\n Size N (nr of frequencies) array with the derivatives\n\n \"\"\"\n self._set_parameters(pars)\n numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)\n term = numerator / self.denom\n specs = np.sum(term, axis=1)\n\n result = 1 - specs\n return result\n",
"def dim_drho0(self, pars):\n r\"\"\"\n :math:`\\frac{\\partial \\hat{\\rho}''(\\omega)}{\\partial \\rho_0} = -\n \\frac{m (\\omega \\tau)^c sin(\\frac{c \\pi}{2})}{1 + 2\n (\\omega \\tau)^c cos(\\frac{c \\pi}{2}) + (\\omega \\tau)^{2 c}}`\n \"\"\"\n self._set_parameters(pars)\n\n result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,\n axis=1)\n\n return result\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Complex response of the Cole-Cole model::
:math:`\hat{\rho} = \rho_0 \left(1 - \sum_i m_i (1 - \frac{1}{1 + (j
\omega \tau_i)^c_i})\right)`
Parameters
----------
parameters: list or tuple or numpy.ndarray
Cole-Cole model parameters: rho0, m, tau, c (all linear)
Returns
-------
response: :class:`sip_models.sip_response.sip_response`
model response object
"""
# get a config object
self._set_parameters(parameters)
terms = self.m * (1 - (1 / (1 + (1j * self.w * self.tau) ** self.c)))
# sum up terms
specs = np.sum(terms, axis=1)
rcomplex = self.rho0 * (1 - specs)
response = sip_response.sip_response(self.f, rcomplex=rcomplex)
return response
def dre_drho0(self, pars):
r""" Compute partial derivative of real parts with respect to
:math:`\rho_0`
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \rho_0} = 1 -
\frac{m (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^c}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
Note that partial derivatives towards :math:`\rho_0` are 1D, in
contrast to the other parameter derivatives, which usually return 2D
arrays!
Returns
-------
dre_drho0: :class:`numpy.ndarray`
Size N (nr of frequencies) array with the derivatives
"""
self._set_parameters(pars)
numerator = self.m * self.otc * (np.cos(self.ang) + self.otc)
term = numerator / self.denom
specs = np.sum(term, axis=1)
result = 1 - specs
return result
def dre_dlog10rho0(self, pars):
"""Compute partial derivative of real parts to log10(rho0)
"""
# first call the linear response to set the parameters
linear_response = self.dre_drho0(pars)
result = np.log(10) * self.rho0 * linear_response
return result
def dre_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{(cos(\frac{c \pi}{2}) + (\omega \tau)^c)}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * (np.cos(self.ang) + self.otc)
result = numerator / self.denom
result *= self.rho0
return result
def dre_dlog10m(self, pars):
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} cos(\frac{c \pi}{2} - m \omega^{2 c} 2 c
\tau^{2c - 1}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega
\tau)^{2 c}} +
\rho_0 \frac{\left[m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * self.c * self.w ** self.c * self.tau ** \
(self.c - 1) *\
np.cos(self.ang) - self.m * self.w ** (2 * self.c) *\
2 * self.c * self.tau ** (2 * self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = self.m * self.otc * (np.cos(self.ang) + self.otc) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dre_dlog10tau(self, pars):
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho'}(\omega)}{\partial c} = \rho_0
\frac{-m ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + m
(\omega\tau)^c \frac{\pi}{2} sin(\frac{c \pi}{2}) + ln(\omega
\tau)(\omega \tau)^c}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) +
(\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c (cos(\frac{c \pi}{2}) + (\omega
\tau)^c) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c
cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c
\pi}{2} + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.log(self.w * self.tau) * self.otc *\
np.cos(self.ang) +\
self.m * self.otc * (np.pi / 2.0) *\
np.sin(self.ang) -\
2 * self.m * np.log(self.w * self.tau) *\
self.otc2
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * (np.cos(self.ang) + self.otc)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_drho0(self, pars):
r"""
:math:`\frac{\partial \hat{\rho}''(\omega)}{\partial \rho_0} = -
\frac{m (\omega \tau)^c sin(\frac{c \pi}{2})}{1 + 2
(\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10rho0(self, pars):
lin_resp = self.dim_drho0(pars)
result = np.log(10) * self.rho0 * lin_resp
return result
def dim_dm(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial m} = - \rho_0 m
(\omega \tau)^c \frac{sin(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}}`
"""
self._set_parameters(pars)
numerator = -self.otc * np.sin(self.ang)
result = numerator / self.denom
result *= self.rho0
return result
def dim_dlog10m(self, pars):
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial \tau} = \rho_0
\frac{-m \omega^c c \tau^{c-1} sin(\frac{c \pi}{2} }{1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} +
\rho_0 \frac{\left[-m (\omega \tau)^c sin(\frac{c \pi}{2}
\right] \cdot \left[ 2 \omega^c c \tau^{c-1} cos(\frac{c
\pi}{2}) + 2 c \omega^{2 c} \tau^{2 c - 1}\right]}{\left[1 + 2 (\omega
\tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1 = - self.m * np.sin(self.ang) * self.w ** self.c *\
self.c * self.tau ** (self.c - 1)
term1 = nom1 / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * self.w ** self.c * self.c * self.tau ** (self.c - 1) *
np.cos(self.ang) + 2 * self.c * self.w ** (2 * self.c) *
self.tau ** (2 * self.c - 1))
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
def dim_dlog10tau(self, pars):
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0
\frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
(\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c
cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m
(\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega
\tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c
\frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau)
(\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c
\pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
"""
self._set_parameters(pars)
# term1
nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
np.sin(self.ang)
nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
term1 = (nom1a + nom1b) / self.denom
# term2
nom2 = (self.m * self.otc * np.sin(self.ang)) *\
(2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
2 * np.log(self.w * self.tau) * self.otc2)
term2 = nom2 / self.denom ** 2
result = term1 + term2
result *= self.rho0
return result
|
m-weigand/sip_models | lib/sip_models/plot_helper.py | setup | python | def setup():
# Latex support can be activated using an environment variable, otherwise
# the default settings are:
# - for windows: off
# - else: on
use_latex = False
if('DD_USE_LATEX' in os.environ):
if os.environ['DD_USE_LATEX'] == '1':
use_latex = True
else:
if platform.system() == "Windows":
use_latex = False
else:
use_latex = True
already_loaded = 'matplotlib' in sys.modules
# just make sure we can access matplotlib as mpl
import matplotlib as mpl
if not already_loaded:
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn')
# general settings
mpl.rcParams['font.size'] = 7.0
mpl.rcParams['axes.labelsize'] = 7.0
mpl.rcParams['xtick.labelsize'] = 7.0
mpl.rcParams['ytick.labelsize'] = 7.0
mpl.rcParams["lines.linewidth"] = 1.5
mpl.rcParams["lines.markeredgewidth"] = 3.0
mpl.rcParams["lines.markersize"] = 3.0
# mpl.rcParams['font.sans-serif'] = 'Droid Sans'
# mpl.rcParams['font.family'] = 'Open Sans'
# mpl.rcParams['font.weight'] = 400
mpl.rcParams['mathtext.default'] = 'regular'
# mpl.rcParams['font.family'] = 'Droid Sans'
if use_latex:
mpl.rcParams['text.usetex'] = True
mpl.rc(
'text.latex',
preamble=''.join((
# r'\usepackage{droidsans}',
# r'\usepackage[T1]{fontenc} ',
r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}',
r'\renewcommand\familydefault{\sfdefault} ',
# r'\usepackage{mathastext} '
))
)
else:
mpl.rcParams['text.usetex'] = False
import mpl_toolkits.axes_grid1 as axes_grid1
axes_grid1
return plt, mpl | import the matplotlib modules and set the style
Returns
-------
plt: pylab
imported pylab module
mpl: matplotlib module
imported matplotlib module | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/plot_helper.py#L13-L83 | null | # -*- coding: utf-8 -*-
""" Import all necessary matplotlib modules and set default options To use this
module, import it as:
import sip_models.plot_helper
plt, mpl = sip_models.plot_helper.setup()
"""
import os
import sys
import platform
def mpl_get_cb_bound_next_to_plot(ax):
"""
Return the coordinates for a colorbar axes next to the provided axes
object. Take into account the changes of the axes due to aspect ratio
settings.
Parts of this code are taken from the transforms.py file from matplotlib
Important: Use only AFTER fig.subplots_adjust(...)
Use as:
=======
cb_pos = mpl_get_cb_bound_next_to_plot(fig.axes[15])
ax1 = fig.add_axes(cb_pos, frame_on=True)
cmap = mpl.cm.jet_r
norm = mpl.colors.Normalize(vmin=float(23), vmax=float(33))
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm,
orientation='vertical')
cb1.locator = mpl.ticker.FixedLocator([23,28,33])
cb1.update_ticks()
cb1.ax.artists.remove(cb1.outline) # remove framei
"""
position = ax.get_position()
figW, figH = ax.get_figure().get_size_inches()
fig_aspect = figH / figW
box_aspect = ax.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds
ax_size = ax.get_position().bounds
xdiff = (ax_size[2] - pb1[2]) / 2
ydiff = (ax_size[3] - pb1[3]) / 2
# the colorbar is set to 0.01 width
sizes = [ax_size[0] + xdiff + ax_size[2] + 0.01,
ax_size[1] + ydiff, 0.01, pb1[3]]
return sizes
def mpl_get_cb_bound_below_plot(ax):
"""
Return the coordinates for a colorbar axes below the provided axes object.
Take into account the changes of the axes due to aspect ratio settings.
Parts of this code are taken from the transforms.py file from matplotlib
Important: Use only AFTER fig.subplots_adjust(...)
Use as:
=======
"""
position = ax.get_position()
figW, figH = ax.get_figure().get_size_inches()
fig_aspect = figH / figW
box_aspect = ax.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds
ax_size = ax.get_position().bounds
# the colorbar is set to 0.01 width
sizes = [ax_size[0], ax_size[1] - 0.14, pb1[2], 0.03]
return sizes
|
m-weigand/sip_models | lib/sip_models/plot_helper.py | mpl_get_cb_bound_below_plot | python | def mpl_get_cb_bound_below_plot(ax):
position = ax.get_position()
figW, figH = ax.get_figure().get_size_inches()
fig_aspect = figH / figW
box_aspect = ax.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds
ax_size = ax.get_position().bounds
# the colorbar is set to 0.01 width
sizes = [ax_size[0], ax_size[1] - 0.14, pb1[2], 0.03]
return sizes | Return the coordinates for a colorbar axes below the provided axes object.
Take into account the changes of the axes due to aspect ratio settings.
Parts of this code are taken from the transforms.py file from matplotlib
Important: Use only AFTER fig.subplots_adjust(...)
Use as:
======= | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/plot_helper.py#L129-L154 | null | # -*- coding: utf-8 -*-
""" Import all necessary matplotlib modules and set default options To use this
module, import it as:
import sip_models.plot_helper
plt, mpl = sip_models.plot_helper.setup()
"""
import os
import sys
import platform
def setup():
"""import the matplotlib modules and set the style
Returns
-------
plt: pylab
imported pylab module
mpl: matplotlib module
imported matplotlib module
"""
# Latex support can be activated using an environment variable, otherwise
# the default settings are:
# - for windows: off
# - else: on
use_latex = False
if('DD_USE_LATEX' in os.environ):
if os.environ['DD_USE_LATEX'] == '1':
use_latex = True
else:
if platform.system() == "Windows":
use_latex = False
else:
use_latex = True
already_loaded = 'matplotlib' in sys.modules
# just make sure we can access matplotlib as mpl
import matplotlib as mpl
if not already_loaded:
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('seaborn')
# general settings
mpl.rcParams['font.size'] = 7.0
mpl.rcParams['axes.labelsize'] = 7.0
mpl.rcParams['xtick.labelsize'] = 7.0
mpl.rcParams['ytick.labelsize'] = 7.0
mpl.rcParams["lines.linewidth"] = 1.5
mpl.rcParams["lines.markeredgewidth"] = 3.0
mpl.rcParams["lines.markersize"] = 3.0
# mpl.rcParams['font.sans-serif'] = 'Droid Sans'
# mpl.rcParams['font.family'] = 'Open Sans'
# mpl.rcParams['font.weight'] = 400
mpl.rcParams['mathtext.default'] = 'regular'
# mpl.rcParams['font.family'] = 'Droid Sans'
if use_latex:
mpl.rcParams['text.usetex'] = True
mpl.rc(
'text.latex',
preamble=''.join((
# r'\usepackage{droidsans}',
# r'\usepackage[T1]{fontenc} ',
r'\usepackage{sfmath} \renewcommand{\rmfamily}{\sffamily}',
r'\renewcommand\familydefault{\sfdefault} ',
# r'\usepackage{mathastext} '
))
)
else:
mpl.rcParams['text.usetex'] = False
import mpl_toolkits.axes_grid1 as axes_grid1
axes_grid1
return plt, mpl
def mpl_get_cb_bound_next_to_plot(ax):
"""
Return the coordinates for a colorbar axes next to the provided axes
object. Take into account the changes of the axes due to aspect ratio
settings.
Parts of this code are taken from the transforms.py file from matplotlib
Important: Use only AFTER fig.subplots_adjust(...)
Use as:
=======
cb_pos = mpl_get_cb_bound_next_to_plot(fig.axes[15])
ax1 = fig.add_axes(cb_pos, frame_on=True)
cmap = mpl.cm.jet_r
norm = mpl.colors.Normalize(vmin=float(23), vmax=float(33))
cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm,
orientation='vertical')
cb1.locator = mpl.ticker.FixedLocator([23,28,33])
cb1.update_ticks()
cb1.ax.artists.remove(cb1.outline) # remove framei
"""
position = ax.get_position()
figW, figH = ax.get_figure().get_size_inches()
fig_aspect = figH / figW
box_aspect = ax.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds
ax_size = ax.get_position().bounds
xdiff = (ax_size[2] - pb1[2]) / 2
ydiff = (ax_size[3] - pb1[3]) / 2
# the colorbar is set to 0.01 width
sizes = [ax_size[0] + xdiff + ax_size[2] + 0.01,
ax_size[1] + ydiff, 0.01, pb1[3]]
return sizes
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.response | python | def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response | r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L99-L122 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.dre_dsigmai | python | def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result | r"""
:math:Add formula | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L124-L133 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.dre_dm | python | def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result | r"""
:math:Add formula | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L141-L149 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response
def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.dre_dtau | python | def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result | r"""
:math:Add formula | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L157-L177 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response
def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.dre_dc | python | def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result | r"""
:math:Add formula | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L185-L208 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response
def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.dim_dsigmai | python | def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result | r"""
:math:Add formula | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L210-L218 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response
def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.dim_dm | python | def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result | r"""
:math:Add formula | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L226-L234 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response
def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.dim_dtau | python | def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result | r"""
:math:Add formula | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L242-L262 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response
def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
m-weigand/sip_models | lib/sip_models/cond/cc.py | cc.dim_dc | python | def dim_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau)\
* self.otc
num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
term1 = self.sigma0 * (-num1a - num1b) / self.denom
# term 2
num2a = -self.m * self.otc * np.cos(self.ang)
num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
num2d = 2 * np.log(self.w * self.tau) * self.otc2
numerator = num2a * (num2b + num2c) + num2d
term2 = self.sigma0 * numerator / (self.denom ** 2)
result = term1 + term2
return result | r"""
:math:Add formula | train | https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/cond/cc.py#L270-L291 | [
"def _set_parameters(self, parameters):\n \"\"\"Sort out the various possible parameter inputs and return a config\n object (dict)\n\n We have multiple input formats:\n\n 1) a list, tuple, or numpy.ndarray, containing the linear parameters\n in the following order:\n for single term: sigmai, m1, tau1, c1\n for multiple terms: sigmai, m1, m2, ..., tau1, tau2, ..., c1, c2,\n ...\n\n 2) a dictionary with the entries \"sigmai\", \"m\", \"tau\", \"c\"\n\n 2b) if the dictionary entries for \"m\", \"tau\", and \"c\" are lists, the\n entries correspond to mulitple polarisazion terms\n\n \"\"\"\n nr_f = self.f.size\n\n # sort out parameters\n sigmai, m, tau, c = self._sort_parameters(parameters)\n\n newsize = (nr_f, len(m))\n # sigmai_resized = np.resize(sigmai, newsize)\n m_resized = np.resize(m, newsize)\n tau_resized = np.resize(tau, newsize)\n c_resized = np.resize(c, newsize)\n\n omega = np.atleast_2d(2 * np.pi * self.f).T\n self.w = np.resize(omega, (len(m), nr_f)).T\n self.sigmai = sigmai\n self.m = m_resized\n self.tau = tau_resized\n self.c = c_resized\n self.sigma0 = (1 - self.m) * self.sigmai\n\n # compute some common terms\n self.otc = (self.w * self.tau) ** self.c\n self.otc1 = (self.w * self.tau) ** (self.c - 1)\n self.otc2 = (self.w * self.tau) ** (2 * self.c)\n self.ang = self.c * np.pi / 2.0 # rad\n # numerator and denominator\n self.num = 1 + self.otc * np.cos(self.ang)\n self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2\n"
] | class cc(cc_base):
def response(self, parameters):
r"""Return the forward response in base dimensions
:math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac
{m_i}{1 + (j \omega \tau_i)^c_i}\right)`
Parameters
----------
pars:
Returns
-------
response: Nx2 array, first axis denotes frequencies, seconds real and
imaginary parts
"""
# get a config object
self._set_parameters(parameters)
terms = self.m / (1 + (1j * self.w * self.tau) ** self.c)
# sum up terms
specs = np.sum(terms, axis=1)
ccomplex = self.sigmai * (1 - specs)
response = sip_response.sip_response(self.f, ccomplex=ccomplex)
return response
def dre_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.m * self.num / self.denom
specs = np.sum(terms, axis=1)
result = 1 - specs
return result
def dre_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
linear_response = self.dre_dsigmai(pars)
result = np.log(10) * self.sigmai * linear_response
return result
def dre_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
terms = self.num / self.denom
result = - self.sigmai * terms
return result
def dre_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dre_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
term1 = num1/self.denom
# term 2
num2a = self.otc * np.cos(self.ang)
num2b = 1 + num2a
denom2 = self.denom ** 2
term2 = num2b / denom2
# term 3
term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dre_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_response = self.dre_dtau(pars)
result = np.log(10) * self.tau * lin_response
return result
def dre_dc(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
term1 = (num1a + num1b) / self.denom
# term 2
num2 = self.otc * np.sin(self.c / np.pi) * 2
denom2 = self.denom ** 2
term2 = num2 / denom2
# term 3
num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
num3c = 2 * np.log(self.w * self.tau) * self.otc2
term3 = num3a - num3b + num3c
result = self.sigmai * self.m * (term1 + term2 * term3)
return result
def dim_dsigmai(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
axis=1)
return result
def dim_dlog10sigmai(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dsigmai(pars)
result = np.log(10) * self.sigmai * lin_response
return result
def dim_dm(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
num1 = self.otc * np.sin(self.ang)
result = -self.sigmai * num1 / self.denom
return result
def dim_dlog10m(self, pars):
# first call the linear response to set the parameters
lin_response = self.dim_dm(pars)
result = np.log(10) * self.m * lin_response
return result
def dim_dtau(self, pars):
r"""
:math:Add formula
"""
self._set_parameters(pars)
# term 1
num1 = -self.m * (self.w ** self.c) * self.c\
* (self.tau ** (self.c - 1)) * np.sin(self.ang)
term1 = self.sigmai * num1 / self.denom
# term 2
num2a = -self.m * self.otc * np.sin(self.ang)
num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) *\
np.cos(self.ang)
num2c = 2 * self.c * (self.w ** (self.c * 2)) *\
(self.tau ** (2 * self.c - 1))
term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
result = term1 + term2
return result
def dim_dlog10tau(self, pars):
# first call the linear response to set the parameters
lin_resp = self.dim_dtau(pars)
result = np.log(10) * self.tau * lin_resp
return result
def test_derivatives(self):
parameters = {
'sigmai': 0.01,
'm': 0.1,
'tau': 0.04,
'c': 0.8
}
# parameters = {
# 'sigmai': 0.01,
# 'm': (0.15, 0.2),
# 'tau': (0.4, 0.004),
# 'c': (0.5, 0.8),
# }
print(self.dre_dsigmai(parameters))
print(self.dre_dm(parameters))
print(self.dre_dtau(parameters))
print(self.dre_dc(parameters))
print(self.dim_dsigmai(parameters))
print(self.dim_dm(parameters))
print(self.dim_dtau(parameters))
print(self.dim_dc(parameters))
|
prthkms/alex | alex/preprocess.py | QueryMatcher.calculate_inverse_document_frequencies | python | def calculate_inverse_document_frequencies(self):
for doc in self.processed_corpus:
for word in doc:
self.inverse_document_frequencies[word] += 1
for key,value in self.inverse_document_frequencies.iteritems():
idf = log((1.0 * len(self.corpus)) / value)
self.inverse_document_frequencies[key] = idf | Q.calculate_inverse_document_frequencies() -- measures how much
information the term provides, i.e. whether the term is common or
rare across all documents.
This is obtained by dividing the total number of documents
by the number of documents containing the term,
and then taking the logarithm of that quotient. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/preprocess.py#L26-L40 | null | class QueryMatcher(object):
"""This an implementation of tf-idf ranking
(term frequency - inverse document frequency) for information
retreival and text mining.
1. Each sentence in 'corpus.txt' acts as a document,
and the processed words in each sentence act as terms.
2. Frequently occuring stop words are removed.
3. Stemming is done on each word, i.e. reducing inflected or derived
words to their word stem, base or root form.
4. A new user query undergoes tf-idf ranking, and the highest
ranked sentence(document) is picked up and mapped to a category.
"""
def __init__(self):
super(QueryMatcher, self).__init__()
self.initialize()
def calculate_term_frequencies(self):
"""Q.calculate_term_frequencies() -- calculate the number of times
each term t occurs in document d.
"""
for doc in self.processed_corpus:
term_frequency_doc = defaultdict(int)
for word in doc:
term_frequency_doc[word] += 1
for key,value in term_frequency_doc.iteritems():
term_frequency_doc[key] = (1.0 * value) / len(doc)
self.term_frequencies.append(term_frequency_doc)
def initialize(self):
'''
corpus : contains a list of sentences, each of which acts as
a document
category : contains a category of each sentence in the corpus.
stemmer : imported from the nltk library, used for reducing
words to their root form.
'''
self.stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself',
'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those',
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by',
'for', 'with', 'about', 'between', 'into','to', 'during', 'before',
'after', 'above', 'below', 'from', 'up', 'down', 'in', 'on', 'under',
'again', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how',
'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some',
'such', 'nor', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
't', 'can', 'will', 'just', 'don', 'should', 'now']
ALEX_DIR = os.path.join(os.path.expanduser('~'),'alex')
#ALEX_DIR = '/home/pratheek/work/git_repos/alex/alex'
#ALEX_DIR = '/home/chitra/aost/alex/alex'
#ALEX_DIR = '/home/anushree/aost/alex/alex'
self.category = open(os.path.join(ALEX_DIR,'category.txt'))
self.corpus = open(os.path.join(ALEX_DIR,'corpus.txt'))
self.corpus_list = self.corpus.readlines()
self.category_list = self.category.readlines()
self.corpus.seek(0)
self.corpus = self.corpus.read()
self.processed_corpus = []
self.punctuation = [',', '.', '?', '!']
self.stemmer = PorterStemmer()
self.inverse_document_frequencies = defaultdict(float)
self.term_frequencies = []
#--------------------------------------
self.process_corpus()
self.calculate_inverse_document_frequencies()
self.calculate_term_frequencies()
def match_query_to_corpus(self):
"""Q.match_query_to_corpus() -> index -- return the matched corpus
index of the user query
"""
ranking = []
for i,doc in enumerate(self.processed_corpus):
rank = 0.0
for word in self.processed_query:
if word in doc:
rank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word]
ranking.append((rank,i))
matching_corpus_index = 0
max_rank = 0
for rank,index in ranking:
if rank > max_rank:
matching_corpus_index = index
max_rank = rank
return matching_corpus_index
def process_corpus(self):
"""Q.process_corpus() -- processes the queries defined by us,
by tokenizing, stemming, and removing stop words.
"""
for doc in self.corpus_list:
doc = wt(doc)
sentence = []
for word in doc:
if word not in self.stop_words and word not in self.punctuation:
word = self.stemmer.stem(word)
sentence.append(word)
self.processed_corpus.append(sentence)
def process_query(self):
"""Q.process_query() -- processes the user query,
by tokenizing and stemming words.
"""
self.query = wt(self.query)
self.processed_query = []
for word in self.query:
if word not in self.stop_words and word not in self.punctuation:
self.processed_query.append(self.stemmer.stem(word))
def query(self, query):
"""Q.query(query string) -> category string -- return the matched
category for any user query
"""
self.query = query
self.process_query()
matching_corpus_index = self.match_query_to_corpus()
return self.category_list[matching_corpus_index].strip()
|
prthkms/alex | alex/preprocess.py | QueryMatcher.calculate_term_frequencies | python | def calculate_term_frequencies(self):
for doc in self.processed_corpus:
term_frequency_doc = defaultdict(int)
for word in doc:
term_frequency_doc[word] += 1
for key,value in term_frequency_doc.iteritems():
term_frequency_doc[key] = (1.0 * value) / len(doc)
self.term_frequencies.append(term_frequency_doc) | Q.calculate_term_frequencies() -- calculate the number of times
each term t occurs in document d. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/preprocess.py#L42-L53 | null | class QueryMatcher(object):
"""This an implementation of tf-idf ranking
(term frequency - inverse document frequency) for information
retreival and text mining.
1. Each sentence in 'corpus.txt' acts as a document,
and the processed words in each sentence act as terms.
2. Frequently occuring stop words are removed.
3. Stemming is done on each word, i.e. reducing inflected or derived
words to their word stem, base or root form.
4. A new user query undergoes tf-idf ranking, and the highest
ranked sentence(document) is picked up and mapped to a category.
"""
def __init__(self):
super(QueryMatcher, self).__init__()
self.initialize()
def calculate_inverse_document_frequencies(self):
"""Q.calculate_inverse_document_frequencies() -- measures how much
information the term provides, i.e. whether the term is common or
rare across all documents.
This is obtained by dividing the total number of documents
by the number of documents containing the term,
and then taking the logarithm of that quotient.
"""
for doc in self.processed_corpus:
for word in doc:
self.inverse_document_frequencies[word] += 1
for key,value in self.inverse_document_frequencies.iteritems():
idf = log((1.0 * len(self.corpus)) / value)
self.inverse_document_frequencies[key] = idf
def calculate_term_frequencies(self):
"""Q.calculate_term_frequencies() -- calculate the number of times
each term t occurs in document d.
"""
for doc in self.processed_corpus:
term_frequency_doc = defaultdict(int)
for word in doc:
term_frequency_doc[word] += 1
for key,value in term_frequency_doc.iteritems():
term_frequency_doc[key] = (1.0 * value) / len(doc)
self.term_frequencies.append(term_frequency_doc)
def initialize(self):
'''
corpus : contains a list of sentences, each of which acts as
a document
category : contains a category of each sentence in the corpus.
stemmer : imported from the nltk library, used for reducing
words to their root form.
'''
self.stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself',
'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those',
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by',
'for', 'with', 'about', 'between', 'into','to', 'during', 'before',
'after', 'above', 'below', 'from', 'up', 'down', 'in', 'on', 'under',
'again', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how',
'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some',
'such', 'nor', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
't', 'can', 'will', 'just', 'don', 'should', 'now']
ALEX_DIR = os.path.join(os.path.expanduser('~'),'alex')
#ALEX_DIR = '/home/pratheek/work/git_repos/alex/alex'
#ALEX_DIR = '/home/chitra/aost/alex/alex'
#ALEX_DIR = '/home/anushree/aost/alex/alex'
self.category = open(os.path.join(ALEX_DIR,'category.txt'))
self.corpus = open(os.path.join(ALEX_DIR,'corpus.txt'))
self.corpus_list = self.corpus.readlines()
self.category_list = self.category.readlines()
self.corpus.seek(0)
self.corpus = self.corpus.read()
self.processed_corpus = []
self.punctuation = [',', '.', '?', '!']
self.stemmer = PorterStemmer()
self.inverse_document_frequencies = defaultdict(float)
self.term_frequencies = []
#--------------------------------------
self.process_corpus()
self.calculate_inverse_document_frequencies()
self.calculate_term_frequencies()
def match_query_to_corpus(self):
"""Q.match_query_to_corpus() -> index -- return the matched corpus
index of the user query
"""
ranking = []
for i,doc in enumerate(self.processed_corpus):
rank = 0.0
for word in self.processed_query:
if word in doc:
rank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word]
ranking.append((rank,i))
matching_corpus_index = 0
max_rank = 0
for rank,index in ranking:
if rank > max_rank:
matching_corpus_index = index
max_rank = rank
return matching_corpus_index
def process_corpus(self):
"""Q.process_corpus() -- processes the queries defined by us,
by tokenizing, stemming, and removing stop words.
"""
for doc in self.corpus_list:
doc = wt(doc)
sentence = []
for word in doc:
if word not in self.stop_words and word not in self.punctuation:
word = self.stemmer.stem(word)
sentence.append(word)
self.processed_corpus.append(sentence)
def process_query(self):
"""Q.process_query() -- processes the user query,
by tokenizing and stemming words.
"""
self.query = wt(self.query)
self.processed_query = []
for word in self.query:
if word not in self.stop_words and word not in self.punctuation:
self.processed_query.append(self.stemmer.stem(word))
def query(self, query):
"""Q.query(query string) -> category string -- return the matched
category for any user query
"""
self.query = query
self.process_query()
matching_corpus_index = self.match_query_to_corpus()
return self.category_list[matching_corpus_index].strip()
|
prthkms/alex | alex/preprocess.py | QueryMatcher.match_query_to_corpus | python | def match_query_to_corpus(self):
ranking = []
for i,doc in enumerate(self.processed_corpus):
rank = 0.0
for word in self.processed_query:
if word in doc:
rank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word]
ranking.append((rank,i))
matching_corpus_index = 0
max_rank = 0
for rank,index in ranking:
if rank > max_rank:
matching_corpus_index = index
max_rank = rank
return matching_corpus_index | Q.match_query_to_corpus() -> index -- return the matched corpus
index of the user query | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/preprocess.py#L104-L121 | null | class QueryMatcher(object):
"""This an implementation of tf-idf ranking
(term frequency - inverse document frequency) for information
retreival and text mining.
1. Each sentence in 'corpus.txt' acts as a document,
and the processed words in each sentence act as terms.
2. Frequently occuring stop words are removed.
3. Stemming is done on each word, i.e. reducing inflected or derived
words to their word stem, base or root form.
4. A new user query undergoes tf-idf ranking, and the highest
ranked sentence(document) is picked up and mapped to a category.
"""
def __init__(self):
super(QueryMatcher, self).__init__()
self.initialize()
def calculate_inverse_document_frequencies(self):
"""Q.calculate_inverse_document_frequencies() -- measures how much
information the term provides, i.e. whether the term is common or
rare across all documents.
This is obtained by dividing the total number of documents
by the number of documents containing the term,
and then taking the logarithm of that quotient.
"""
for doc in self.processed_corpus:
for word in doc:
self.inverse_document_frequencies[word] += 1
for key,value in self.inverse_document_frequencies.iteritems():
idf = log((1.0 * len(self.corpus)) / value)
self.inverse_document_frequencies[key] = idf
def calculate_term_frequencies(self):
"""Q.calculate_term_frequencies() -- calculate the number of times
each term t occurs in document d.
"""
for doc in self.processed_corpus:
term_frequency_doc = defaultdict(int)
for word in doc:
term_frequency_doc[word] += 1
for key,value in term_frequency_doc.iteritems():
term_frequency_doc[key] = (1.0 * value) / len(doc)
self.term_frequencies.append(term_frequency_doc)
def initialize(self):
'''
corpus : contains a list of sentences, each of which acts as
a document
category : contains a category of each sentence in the corpus.
stemmer : imported from the nltk library, used for reducing
words to their root form.
'''
self.stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself',
'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those',
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by',
'for', 'with', 'about', 'between', 'into','to', 'during', 'before',
'after', 'above', 'below', 'from', 'up', 'down', 'in', 'on', 'under',
'again', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how',
'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some',
'such', 'nor', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
't', 'can', 'will', 'just', 'don', 'should', 'now']
ALEX_DIR = os.path.join(os.path.expanduser('~'),'alex')
#ALEX_DIR = '/home/pratheek/work/git_repos/alex/alex'
#ALEX_DIR = '/home/chitra/aost/alex/alex'
#ALEX_DIR = '/home/anushree/aost/alex/alex'
self.category = open(os.path.join(ALEX_DIR,'category.txt'))
self.corpus = open(os.path.join(ALEX_DIR,'corpus.txt'))
self.corpus_list = self.corpus.readlines()
self.category_list = self.category.readlines()
self.corpus.seek(0)
self.corpus = self.corpus.read()
self.processed_corpus = []
self.punctuation = [',', '.', '?', '!']
self.stemmer = PorterStemmer()
self.inverse_document_frequencies = defaultdict(float)
self.term_frequencies = []
#--------------------------------------
self.process_corpus()
self.calculate_inverse_document_frequencies()
self.calculate_term_frequencies()
def process_corpus(self):
"""Q.process_corpus() -- processes the queries defined by us,
by tokenizing, stemming, and removing stop words.
"""
for doc in self.corpus_list:
doc = wt(doc)
sentence = []
for word in doc:
if word not in self.stop_words and word not in self.punctuation:
word = self.stemmer.stem(word)
sentence.append(word)
self.processed_corpus.append(sentence)
def process_query(self):
"""Q.process_query() -- processes the user query,
by tokenizing and stemming words.
"""
self.query = wt(self.query)
self.processed_query = []
for word in self.query:
if word not in self.stop_words and word not in self.punctuation:
self.processed_query.append(self.stemmer.stem(word))
def query(self, query):
"""Q.query(query string) -> category string -- return the matched
category for any user query
"""
self.query = query
self.process_query()
matching_corpus_index = self.match_query_to_corpus()
return self.category_list[matching_corpus_index].strip()
|
prthkms/alex | alex/preprocess.py | QueryMatcher.process_corpus | python | def process_corpus(self):
for doc in self.corpus_list:
doc = wt(doc)
sentence = []
for word in doc:
if word not in self.stop_words and word not in self.punctuation:
word = self.stemmer.stem(word)
sentence.append(word)
self.processed_corpus.append(sentence) | Q.process_corpus() -- processes the queries defined by us,
by tokenizing, stemming, and removing stop words. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/preprocess.py#L125-L136 | null | class QueryMatcher(object):
"""This an implementation of tf-idf ranking
(term frequency - inverse document frequency) for information
retreival and text mining.
1. Each sentence in 'corpus.txt' acts as a document,
and the processed words in each sentence act as terms.
2. Frequently occuring stop words are removed.
3. Stemming is done on each word, i.e. reducing inflected or derived
words to their word stem, base or root form.
4. A new user query undergoes tf-idf ranking, and the highest
ranked sentence(document) is picked up and mapped to a category.
"""
def __init__(self):
super(QueryMatcher, self).__init__()
self.initialize()
def calculate_inverse_document_frequencies(self):
"""Q.calculate_inverse_document_frequencies() -- measures how much
information the term provides, i.e. whether the term is common or
rare across all documents.
This is obtained by dividing the total number of documents
by the number of documents containing the term,
and then taking the logarithm of that quotient.
"""
for doc in self.processed_corpus:
for word in doc:
self.inverse_document_frequencies[word] += 1
for key,value in self.inverse_document_frequencies.iteritems():
idf = log((1.0 * len(self.corpus)) / value)
self.inverse_document_frequencies[key] = idf
def calculate_term_frequencies(self):
"""Q.calculate_term_frequencies() -- calculate the number of times
each term t occurs in document d.
"""
for doc in self.processed_corpus:
term_frequency_doc = defaultdict(int)
for word in doc:
term_frequency_doc[word] += 1
for key,value in term_frequency_doc.iteritems():
term_frequency_doc[key] = (1.0 * value) / len(doc)
self.term_frequencies.append(term_frequency_doc)
def initialize(self):
'''
corpus : contains a list of sentences, each of which acts as
a document
category : contains a category of each sentence in the corpus.
stemmer : imported from the nltk library, used for reducing
words to their root form.
'''
self.stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself',
'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those',
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by',
'for', 'with', 'about', 'between', 'into','to', 'during', 'before',
'after', 'above', 'below', 'from', 'up', 'down', 'in', 'on', 'under',
'again', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how',
'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some',
'such', 'nor', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
't', 'can', 'will', 'just', 'don', 'should', 'now']
ALEX_DIR = os.path.join(os.path.expanduser('~'),'alex')
#ALEX_DIR = '/home/pratheek/work/git_repos/alex/alex'
#ALEX_DIR = '/home/chitra/aost/alex/alex'
#ALEX_DIR = '/home/anushree/aost/alex/alex'
self.category = open(os.path.join(ALEX_DIR,'category.txt'))
self.corpus = open(os.path.join(ALEX_DIR,'corpus.txt'))
self.corpus_list = self.corpus.readlines()
self.category_list = self.category.readlines()
self.corpus.seek(0)
self.corpus = self.corpus.read()
self.processed_corpus = []
self.punctuation = [',', '.', '?', '!']
self.stemmer = PorterStemmer()
self.inverse_document_frequencies = defaultdict(float)
self.term_frequencies = []
#--------------------------------------
self.process_corpus()
self.calculate_inverse_document_frequencies()
self.calculate_term_frequencies()
def match_query_to_corpus(self):
    """Q.match_query_to_corpus() -> index -- return the matched corpus
    index of the user query

    Each document is scored with the sum of tf * idf over the query
    terms that actually occur in it; the index of the best-scoring
    document is returned.
    """
    # tf-idf score of the processed query against every document.
    scores = [
        sum(self.term_frequencies[i][word]
            * self.inverse_document_frequencies[word]
            for word in self.processed_query if word in doc)
        for i, doc in enumerate(self.processed_corpus)
    ]
    # The first strictly-highest score wins; if no document scores
    # above zero (or the corpus is empty) this falls back to index 0.
    best_index = 0
    best_score = 0
    for i, score in enumerate(scores):
        if score > best_score:
            best_index = i
            best_score = score
    return best_index
def process_query(self):
    """Q.process_query() -- processes the user query,
    by tokenizing and stemming words.

    Rebinds self.query to the token list and stores the stems of the
    informative tokens (stop words and punctuation removed) in
    self.processed_query.
    """
    tokens = wt(self.query)
    self.query = tokens
    self.processed_query = [
        self.stemmer.stem(token)
        for token in tokens
        if token not in self.stop_words and token not in self.punctuation
    ]
def query(self, query):
    """Q.query(query string) -> category string -- return the matched
    category for any user query
    """
    # NOTE(review): this rebinds the instance attribute `self.query`,
    # shadowing this very method -- a second call on the same instance
    # would find a token list where the callable used to be.  Fixing it
    # needs a coordinated attribute rename here AND in process_query
    # (which reads self.query), so it is only flagged here.
    self.query = query
    self.process_query()
    matching_corpus_index = self.match_query_to_corpus()
    return self.category_list[matching_corpus_index].strip()
|
prthkms/alex | alex/preprocess.py | QueryMatcher.process_query | python | def process_query(self):
self.query = wt(self.query)
self.processed_query = []
for word in self.query:
if word not in self.stop_words and word not in self.punctuation:
self.processed_query.append(self.stemmer.stem(word)) | Q.process_query() -- processes the user query,
by tokenizing and stemming words. | train | https://github.com/prthkms/alex/blob/79d3167c877e94cc07db0aab55a35857fac67ef7/alex/preprocess.py#L139-L147 | null | class QueryMatcher(object):
"""This an implementation of tf-idf ranking
(term frequency - inverse document frequency) for information
retreival and text mining.
1. Each sentence in 'corpus.txt' acts as a document,
and the processed words in each sentence act as terms.
2. Frequently occuring stop words are removed.
3. Stemming is done on each word, i.e. reducing inflected or derived
words to their word stem, base or root form.
4. A new user query undergoes tf-idf ranking, and the highest
ranked sentence(document) is picked up and mapped to a category.
"""
def __init__(self):
    # All real setup is delegated to initialize(): it loads the corpus
    # and category files from disk and precomputes the tf/idf tables.
    super(QueryMatcher, self).__init__()
    self.initialize()
def calculate_inverse_document_frequencies(self):
"""Q.calculate_inverse_document_frequencies() -- measures how much
information the term provides, i.e. whether the term is common or
rare across all documents.
This is obtained by dividing the total number of documents
by the number of documents containing the term,
and then taking the logarithm of that quotient.
"""
for doc in self.processed_corpus:
for word in doc:
self.inverse_document_frequencies[word] += 1
for key,value in self.inverse_document_frequencies.iteritems():
idf = log((1.0 * len(self.corpus)) / value)
self.inverse_document_frequencies[key] = idf
def calculate_term_frequencies(self):
"""Q.calculate_term_frequencies() -- calculate the number of times
each term t occurs in document d.
"""
for doc in self.processed_corpus:
term_frequency_doc = defaultdict(int)
for word in doc:
term_frequency_doc[word] += 1
for key,value in term_frequency_doc.iteritems():
term_frequency_doc[key] = (1.0 * value) / len(doc)
self.term_frequencies.append(term_frequency_doc)
def initialize(self):
'''
Load the corpus and its category labels from ~/alex and precompute
the tf-idf model.

Attributes set here:
    stop_words : common English words ignored while matching.
    corpus_list / category_list : parallel lists -- one sentence
        (document) per line, and the category label for that line.
    corpus : rebound from the open file object to the raw corpus
        text returned by read().
    punctuation : tokens dropped alongside stop words.
    stemmer : nltk PorterStemmer, reduces words to their root form.
    inverse_document_frequencies / term_frequencies : tf-idf tables
        filled in by the calculate_* helpers invoked at the end.
'''
self.stop_words = ['i', 'me', 'my', 'myself', 'we', 'our', 'ours',
'ourselves', 'you', 'your', 'yours', 'yourself', 'yourselves',
'he', 'him', 'his', 'himself', 'she', 'her', 'hers', 'herself',
'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those',
'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has',
'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and',
'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by',
'for', 'with', 'about', 'between', 'into','to', 'during', 'before',
'after', 'above', 'below', 'from', 'up', 'down', 'in', 'on', 'under',
'again', 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how',
'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some',
'such', 'nor', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's',
't', 'can', 'will', 'just', 'don', 'should', 'now']
# Data files are expected in the user's home directory under ~/alex.
ALEX_DIR = os.path.join(os.path.expanduser('~'),'alex')
#ALEX_DIR = '/home/pratheek/work/git_repos/alex/alex'
#ALEX_DIR = '/home/chitra/aost/alex/alex'
#ALEX_DIR = '/home/anushree/aost/alex/alex'
# NOTE(review): both files are opened but never closed; `with` blocks
# would be safer.  Left untouched in this documentation-only pass.
self.category = open(os.path.join(ALEX_DIR,'category.txt'))
self.corpus = open(os.path.join(ALEX_DIR,'corpus.txt'))
self.corpus_list = self.corpus.readlines()
self.category_list = self.category.readlines()
# Rewind, then rebind: self.corpus now holds the whole file as one
# string (sibling methods read this attribute, so the rebinding is
# load-bearing).
self.corpus.seek(0)
self.corpus = self.corpus.read()
self.processed_corpus = []
self.punctuation = [',', '.', '?', '!']
self.stemmer = PorterStemmer()
self.inverse_document_frequencies = defaultdict(float)
self.term_frequencies = []
#--------------------------------------
# Build the processed corpus first; both calculate_* helpers read
# self.processed_corpus, so the order below matters.
self.process_corpus()
self.calculate_inverse_document_frequencies()
self.calculate_term_frequencies()
def match_query_to_corpus(self):
"""Q.match_query_to_corpus() -> index -- return the matched corpus
index of the user query
"""
ranking = []
for i,doc in enumerate(self.processed_corpus):
rank = 0.0
for word in self.processed_query:
if word in doc:
rank += self.term_frequencies[i][word] * self.inverse_document_frequencies[word]
ranking.append((rank,i))
matching_corpus_index = 0
max_rank = 0
for rank,index in ranking:
if rank > max_rank:
matching_corpus_index = index
max_rank = rank
return matching_corpus_index
def process_corpus(self):
    """Q.process_corpus() -- processes the queries defined by us,
    by tokenizing, stemming, and removing stop words.
    """
    for raw_line in self.corpus_list:
        tokens = wt(raw_line)
        # Keep only informative tokens, reduced to their stems.
        stems = [self.stemmer.stem(token)
                 for token in tokens
                 if token not in self.stop_words
                 and token not in self.punctuation]
        self.processed_corpus.append(stems)
def query(self, query):
    """Q.query(query string) -> category string -- return the matched
    category for any user query
    """
    # NOTE(review): this rebinds the instance attribute `self.query`,
    # shadowing this very method -- a second call on the same instance
    # would find a token list where the callable used to be.  Fixing it
    # needs a coordinated attribute rename here AND in process_query
    # (which reads self.query), so it is only flagged here.
    self.query = query
    self.process_query()
    matching_corpus_index = self.match_query_to_corpus()
    return self.category_list[matching_corpus_index].strip()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.