repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
sdispater/cleo | cleo/commands/command.py | Command.spin | python | def spin(self, start_message, end_message, fmt=None, interval=100, values=None):
spinner = ProgressIndicator(self.io, fmt, interval, values)
return spinner.auto(start_message, end_message) | Automatically spin a progress indicator. | train | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L290-L296 | null | class Command(BaseCommand):
signature = None
validation = None
TABLE_STYLES = {
"ascii": TableStyle.ascii(),
"borderless": TableStyle.borderless(),
"solid": TableStyle.solid(),
"compact": TableStyle.compact(),
}
def __init__(self):
self._args = Args(ArgsFormat())
self._io = None
self._command = None
super(Command, self).__init__()
doc = self.__doc__ or super(self.__class__, self).__doc__
if doc:
self._parse_doc(doc)
if not self.signature:
parent = super(self.__class__, self)
if hasattr(parent, "signature"):
self.signature = parent.signature
if self.signature:
self._configure_using_fluent_definition()
self._config.set_handler_method("wrap_handle")
@property
def io(self): # type: () -> ConsoleIO
return self._io
def _parse_doc(self, doc):
doc = doc.strip().split("\n", 1)
if len(doc) > 1:
self._config.set_description(doc[0].strip())
self.signature = re.sub(r"\s{2,}", " ", doc[1].strip())
else:
self._config.set_description(doc[0].strip())
def _configure_using_fluent_definition(self):
"""
Configure the console command using a fluent definition.
"""
definition = Parser.parse(self.signature)
self._config.set_name(definition["name"])
for name, flags, description, default in definition["arguments"]:
self._config.add_argument(name, flags, description, default)
for long_name, short_name, flags, description, default in definition["options"]:
self._config.add_option(long_name, short_name, flags, description, default)
def wrap_handle(
self, args, io, command
): # type: (Args, IO, CliKitCommand) -> Optional[int]
self._args = args
self._io = io
self._command = command
return self.handle()
def handle(self): # type: () -> Optional[int]
"""
Executes the command.
"""
raise NotImplementedError()
def call(self, name, args=None): # type: (str, Optional[str]) -> int
"""
Call another command.
"""
if args is None:
args = ""
args = StringArgs(args)
command = self.application.get_command(name)
return command.run(args, self.io)
def call_silent(self, name, args=None): # type: (str, Optional[str]) -> int
"""
Call another command.
"""
if args is None:
args = ""
args = StringArgs(args)
command = self.application.get_command(name)
return command.run(args, NullIO())
def argument(self, key=None):
"""
Get the value of a command argument.
"""
if key is None:
return self._args.arguments()
return self._args.argument(key)
def option(self, key=None):
"""
Get the value of a command option.
"""
if key is None:
return self._args.options()
return self._args.option(key)
def confirm(self, question, default=False, true_answer_regex="(?i)^y"):
"""
Confirm a question with the user.
"""
return self._io.confirm(question, default, true_answer_regex)
def ask(self, question, default=None):
"""
Prompt the user for input.
"""
if isinstance(question, Question):
return self._io.ask_question(question)
return self._io.ask(question, default)
def secret(self, question):
"""
Prompt the user for input but hide the answer from the console.
"""
return self._io.ask_hidden(question)
def choice(self, question, choices, default=None, attempts=None, multiple=False):
"""
Give the user a single choice from an list of answers.
"""
question = ChoiceQuestion(question, choices, default)
question.set_max_attempts(attempts)
question.set_multi_select(multiple)
return self._io.ask_question(question)
def create_question(self, question, type=None, **kwargs):
"""
Returns a Question of specified type.
"""
if not type:
return Question(question, **kwargs)
if type == "choice":
return ChoiceQuestion(question, **kwargs)
if type == "confirmation":
return ConfirmationQuestion(question, **kwargs)
def table(self, header=None, rows=None, style=None):
"""
Return a Table instance.
"""
if style is not None:
style = self.TABLE_STYLES[style]
table = Table(style)
if header:
table.set_header_row(header)
if rows:
table.set_rows(rows)
return table
def render_table(self, headers, rows, style=None):
"""
Format input to textual table.
"""
table = self.table(headers, rows, style)
table.render(self._io)
def write(self, text, style=None):
"""
Writes a string without a new line.
Useful if you want to use overwrite().
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.write(styled)
def line(self, text, style=None, verbosity=None):
"""
Write a string as information output.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.write_line(styled, verbosity)
def line_error(self, text, style=None, verbosity=None):
"""
Write a string as information output to stderr.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.error_line(styled, verbosity)
def info(self, text):
"""
Write a string as information output.
:param text: The line to write
:type text: str
"""
self.line(text, "info")
def comment(self, text):
"""
Write a string as comment output.
:param text: The line to write
:type text: str
"""
self.line(text, "comment")
def question(self, text):
"""
Write a string as question output.
:param text: The line to write
:type text: str
"""
self.line(text, "question")
def progress_bar(self, max=0):
"""
Creates a new progress bar
:param max: The maximum number of steps
:type max: int
:rtype: ProgressBar
"""
return self._io.progress_bar(max)
def progress_indicator(self, fmt=None, interval=100, values=None):
"""
Creates a new progress indicator.
"""
return ProgressIndicator(self.io, fmt, interval, values)
def add_style(self, name, fg=None, bg=None, options=None):
"""
Adds a new style
"""
style = Style(name)
if fg is not None:
style.fg(fg)
if bg is not None:
style.bg(bg)
if options is not None:
if "bold" in options:
style.bold()
if "underline" in options:
style.underlined()
self._io.output.formatter.add_style(style)
self._io.error_output.formatter.add_style(style)
def overwrite(self, text, size=None):
"""
Overwrites the current line.
It will not add a new line so use line('')
if necessary.
"""
self._io.overwrite(text, size=size)
|
sdispater/cleo | cleo/commands/command.py | Command.add_style | python | def add_style(self, name, fg=None, bg=None, options=None):
style = Style(name)
if fg is not None:
style.fg(fg)
if bg is not None:
style.bg(bg)
if options is not None:
if "bold" in options:
style.bold()
if "underline" in options:
style.underlined()
self._io.output.formatter.add_style(style)
self._io.error_output.formatter.add_style(style) | Adds a new style | train | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L298-L317 | null | class Command(BaseCommand):
signature = None
validation = None
TABLE_STYLES = {
"ascii": TableStyle.ascii(),
"borderless": TableStyle.borderless(),
"solid": TableStyle.solid(),
"compact": TableStyle.compact(),
}
def __init__(self):
self._args = Args(ArgsFormat())
self._io = None
self._command = None
super(Command, self).__init__()
doc = self.__doc__ or super(self.__class__, self).__doc__
if doc:
self._parse_doc(doc)
if not self.signature:
parent = super(self.__class__, self)
if hasattr(parent, "signature"):
self.signature = parent.signature
if self.signature:
self._configure_using_fluent_definition()
self._config.set_handler_method("wrap_handle")
@property
def io(self): # type: () -> ConsoleIO
return self._io
def _parse_doc(self, doc):
doc = doc.strip().split("\n", 1)
if len(doc) > 1:
self._config.set_description(doc[0].strip())
self.signature = re.sub(r"\s{2,}", " ", doc[1].strip())
else:
self._config.set_description(doc[0].strip())
def _configure_using_fluent_definition(self):
"""
Configure the console command using a fluent definition.
"""
definition = Parser.parse(self.signature)
self._config.set_name(definition["name"])
for name, flags, description, default in definition["arguments"]:
self._config.add_argument(name, flags, description, default)
for long_name, short_name, flags, description, default in definition["options"]:
self._config.add_option(long_name, short_name, flags, description, default)
def wrap_handle(
self, args, io, command
): # type: (Args, IO, CliKitCommand) -> Optional[int]
self._args = args
self._io = io
self._command = command
return self.handle()
def handle(self): # type: () -> Optional[int]
"""
Executes the command.
"""
raise NotImplementedError()
def call(self, name, args=None): # type: (str, Optional[str]) -> int
"""
Call another command.
"""
if args is None:
args = ""
args = StringArgs(args)
command = self.application.get_command(name)
return command.run(args, self.io)
def call_silent(self, name, args=None): # type: (str, Optional[str]) -> int
"""
Call another command.
"""
if args is None:
args = ""
args = StringArgs(args)
command = self.application.get_command(name)
return command.run(args, NullIO())
def argument(self, key=None):
"""
Get the value of a command argument.
"""
if key is None:
return self._args.arguments()
return self._args.argument(key)
def option(self, key=None):
"""
Get the value of a command option.
"""
if key is None:
return self._args.options()
return self._args.option(key)
def confirm(self, question, default=False, true_answer_regex="(?i)^y"):
"""
Confirm a question with the user.
"""
return self._io.confirm(question, default, true_answer_regex)
def ask(self, question, default=None):
"""
Prompt the user for input.
"""
if isinstance(question, Question):
return self._io.ask_question(question)
return self._io.ask(question, default)
def secret(self, question):
"""
Prompt the user for input but hide the answer from the console.
"""
return self._io.ask_hidden(question)
def choice(self, question, choices, default=None, attempts=None, multiple=False):
"""
Give the user a single choice from an list of answers.
"""
question = ChoiceQuestion(question, choices, default)
question.set_max_attempts(attempts)
question.set_multi_select(multiple)
return self._io.ask_question(question)
def create_question(self, question, type=None, **kwargs):
"""
Returns a Question of specified type.
"""
if not type:
return Question(question, **kwargs)
if type == "choice":
return ChoiceQuestion(question, **kwargs)
if type == "confirmation":
return ConfirmationQuestion(question, **kwargs)
def table(self, header=None, rows=None, style=None):
"""
Return a Table instance.
"""
if style is not None:
style = self.TABLE_STYLES[style]
table = Table(style)
if header:
table.set_header_row(header)
if rows:
table.set_rows(rows)
return table
def render_table(self, headers, rows, style=None):
"""
Format input to textual table.
"""
table = self.table(headers, rows, style)
table.render(self._io)
def write(self, text, style=None):
"""
Writes a string without a new line.
Useful if you want to use overwrite().
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.write(styled)
def line(self, text, style=None, verbosity=None):
"""
Write a string as information output.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.write_line(styled, verbosity)
def line_error(self, text, style=None, verbosity=None):
"""
Write a string as information output to stderr.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.error_line(styled, verbosity)
def info(self, text):
"""
Write a string as information output.
:param text: The line to write
:type text: str
"""
self.line(text, "info")
def comment(self, text):
"""
Write a string as comment output.
:param text: The line to write
:type text: str
"""
self.line(text, "comment")
def question(self, text):
"""
Write a string as question output.
:param text: The line to write
:type text: str
"""
self.line(text, "question")
def progress_bar(self, max=0):
"""
Creates a new progress bar
:param max: The maximum number of steps
:type max: int
:rtype: ProgressBar
"""
return self._io.progress_bar(max)
def progress_indicator(self, fmt=None, interval=100, values=None):
"""
Creates a new progress indicator.
"""
return ProgressIndicator(self.io, fmt, interval, values)
def spin(self, start_message, end_message, fmt=None, interval=100, values=None):
"""
Automatically spin a progress indicator.
"""
spinner = ProgressIndicator(self.io, fmt, interval, values)
return spinner.auto(start_message, end_message)
def overwrite(self, text, size=None):
"""
Overwrites the current line.
It will not add a new line so use line('')
if necessary.
"""
self._io.overwrite(text, size=size)
|
sdispater/cleo | cleo/commands/command.py | Command.overwrite | python | def overwrite(self, text, size=None):
self._io.overwrite(text, size=size) | Overwrites the current line.
It will not add a new line so use line('')
if necessary. | train | https://github.com/sdispater/cleo/blob/cf44ac2eba2d6435516501e47e5521ee2da9115a/cleo/commands/command.py#L319-L326 | null | class Command(BaseCommand):
signature = None
validation = None
TABLE_STYLES = {
"ascii": TableStyle.ascii(),
"borderless": TableStyle.borderless(),
"solid": TableStyle.solid(),
"compact": TableStyle.compact(),
}
def __init__(self):
self._args = Args(ArgsFormat())
self._io = None
self._command = None
super(Command, self).__init__()
doc = self.__doc__ or super(self.__class__, self).__doc__
if doc:
self._parse_doc(doc)
if not self.signature:
parent = super(self.__class__, self)
if hasattr(parent, "signature"):
self.signature = parent.signature
if self.signature:
self._configure_using_fluent_definition()
self._config.set_handler_method("wrap_handle")
@property
def io(self): # type: () -> ConsoleIO
return self._io
def _parse_doc(self, doc):
doc = doc.strip().split("\n", 1)
if len(doc) > 1:
self._config.set_description(doc[0].strip())
self.signature = re.sub(r"\s{2,}", " ", doc[1].strip())
else:
self._config.set_description(doc[0].strip())
def _configure_using_fluent_definition(self):
"""
Configure the console command using a fluent definition.
"""
definition = Parser.parse(self.signature)
self._config.set_name(definition["name"])
for name, flags, description, default in definition["arguments"]:
self._config.add_argument(name, flags, description, default)
for long_name, short_name, flags, description, default in definition["options"]:
self._config.add_option(long_name, short_name, flags, description, default)
def wrap_handle(
self, args, io, command
): # type: (Args, IO, CliKitCommand) -> Optional[int]
self._args = args
self._io = io
self._command = command
return self.handle()
def handle(self): # type: () -> Optional[int]
"""
Executes the command.
"""
raise NotImplementedError()
def call(self, name, args=None): # type: (str, Optional[str]) -> int
"""
Call another command.
"""
if args is None:
args = ""
args = StringArgs(args)
command = self.application.get_command(name)
return command.run(args, self.io)
def call_silent(self, name, args=None): # type: (str, Optional[str]) -> int
"""
Call another command.
"""
if args is None:
args = ""
args = StringArgs(args)
command = self.application.get_command(name)
return command.run(args, NullIO())
def argument(self, key=None):
"""
Get the value of a command argument.
"""
if key is None:
return self._args.arguments()
return self._args.argument(key)
def option(self, key=None):
"""
Get the value of a command option.
"""
if key is None:
return self._args.options()
return self._args.option(key)
def confirm(self, question, default=False, true_answer_regex="(?i)^y"):
"""
Confirm a question with the user.
"""
return self._io.confirm(question, default, true_answer_regex)
def ask(self, question, default=None):
"""
Prompt the user for input.
"""
if isinstance(question, Question):
return self._io.ask_question(question)
return self._io.ask(question, default)
def secret(self, question):
"""
Prompt the user for input but hide the answer from the console.
"""
return self._io.ask_hidden(question)
def choice(self, question, choices, default=None, attempts=None, multiple=False):
"""
Give the user a single choice from an list of answers.
"""
question = ChoiceQuestion(question, choices, default)
question.set_max_attempts(attempts)
question.set_multi_select(multiple)
return self._io.ask_question(question)
def create_question(self, question, type=None, **kwargs):
"""
Returns a Question of specified type.
"""
if not type:
return Question(question, **kwargs)
if type == "choice":
return ChoiceQuestion(question, **kwargs)
if type == "confirmation":
return ConfirmationQuestion(question, **kwargs)
def table(self, header=None, rows=None, style=None):
"""
Return a Table instance.
"""
if style is not None:
style = self.TABLE_STYLES[style]
table = Table(style)
if header:
table.set_header_row(header)
if rows:
table.set_rows(rows)
return table
def render_table(self, headers, rows, style=None):
"""
Format input to textual table.
"""
table = self.table(headers, rows, style)
table.render(self._io)
def write(self, text, style=None):
"""
Writes a string without a new line.
Useful if you want to use overwrite().
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.write(styled)
def line(self, text, style=None, verbosity=None):
"""
Write a string as information output.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.write_line(styled, verbosity)
def line_error(self, text, style=None, verbosity=None):
"""
Write a string as information output to stderr.
"""
if style:
styled = "<%s>%s</>" % (style, text)
else:
styled = text
self._io.error_line(styled, verbosity)
def info(self, text):
"""
Write a string as information output.
:param text: The line to write
:type text: str
"""
self.line(text, "info")
def comment(self, text):
"""
Write a string as comment output.
:param text: The line to write
:type text: str
"""
self.line(text, "comment")
def question(self, text):
"""
Write a string as question output.
:param text: The line to write
:type text: str
"""
self.line(text, "question")
def progress_bar(self, max=0):
"""
Creates a new progress bar
:param max: The maximum number of steps
:type max: int
:rtype: ProgressBar
"""
return self._io.progress_bar(max)
def progress_indicator(self, fmt=None, interval=100, values=None):
"""
Creates a new progress indicator.
"""
return ProgressIndicator(self.io, fmt, interval, values)
def spin(self, start_message, end_message, fmt=None, interval=100, values=None):
"""
Automatically spin a progress indicator.
"""
spinner = ProgressIndicator(self.io, fmt, interval, values)
return spinner.auto(start_message, end_message)
def add_style(self, name, fg=None, bg=None, options=None):
"""
Adds a new style
"""
style = Style(name)
if fg is not None:
style.fg(fg)
if bg is not None:
style.bg(bg)
if options is not None:
if "bold" in options:
style.bold()
if "underline" in options:
style.underlined()
self._io.output.formatter.add_style(style)
self._io.error_output.formatter.add_style(style)
|
ECRL/ecabc | ecabc/abc.py | ABC.add_argument | python | def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value | Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L96-L112 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
    '''Update the best score and values if the given score is better than
    the current best score

    Args:
        score (float): new score to evaluate
        values (list): new value ranges to evaluate
        error (float): new fitness function return value to evaluate

    Returns:
        bool: True if new score is better, False otherwise
    '''
    if self._minimize:
        # NOTE(review): when minimizing, a HIGHER derived score is treated
        # as better -- presumably the score is an inverse transform of the
        # raw error (so maximizing it minimizes error); confirm against
        # EmployerBee.get_score() before relying on this direction.
        if self._best_score is None or score > self._best_score:
            self._best_score = score
            # copy() so later in-place mutation of the bee's values does
            # not silently alter the recorded best position
            self._best_values = values.copy()
            self._best_error = error
            self._logger.log(
                'debug',
                'New best food source memorized: {}'.format(
                    self._best_error
                )
            )
            return True
    # mirror branch: when not minimizing, a LOWER derived score wins
    # (the `elif not self._minimize` is equivalent to a plain `else`)
    elif not self._minimize:
        if self._best_score is None or score < self._best_score:
            self._best_score = score
            self._best_values = values.copy()
            self._best_error = error
            self._logger.log(
                'debug',
                'New best food source memorized: {}'.format(
                    self._best_error
                )
            )
            return True
    return False
def __gen_random_values(self):
    '''Draw one random value per tunable variable, uniformly within the
    configured bounds.

    Returns:
        list: random values, one per tunable variable

    Raises:
        RuntimeError: if value ranges are unset, or if a range declares a
            type other than 'int' or 'float'
    '''
    if self._value_ranges is None:
        self._logger.log(
            'crit',
            'Must set the type/range of possible values'
        )
        raise RuntimeError("Must set the type/range of possible values")
    generated = []
    for entry in self._value_ranges:
        value_type, bounds = entry[0], entry[1]
        if value_type == 'int':
            generated.append(randint(bounds[0], bounds[1]))
        elif value_type == 'float':
            generated.append(np.random.uniform(bounds[0], bounds[1]))
        else:
            self._logger.log(
                'crit',
                'Value type must be either an `int` or a `float`'
            )
            raise RuntimeError(
                'Value type must be either an `int` or a `float`'
            )
    return generated
def __verify_ready(self, creating=False):
    '''Sanity checks run before colony operations to avoid confusing
    failures mid-iteration.

    Args:
        creating (bool): True if currently creating employer bees, False
            for checking all other operations

    Raises:
        RuntimeWarning: if no value ranges are configured, or if no
            employers exist outside of employer creation
    '''
    if not self._value_ranges:
        self._logger.log(
            'crit',
            'Attribute value_ranges must have at least one value'
        )
        raise RuntimeWarning(
            'Attribute value_ranges must have at least one value'
        )
    if creating is False and not self._employers:
        self._logger.log('crit', 'Need to create employers')
        raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
    '''Import colony settings from a JSON file (the counterpart of
    save_settings). If the file does not exist, logs an error and keeps
    the current settings.

    Args:
        filename (string): name of the file to import from
    '''
    if not os.path.isfile(filename):
        self._logger.log(
            'error',
            'File: {} not found, continuing with default settings'.format(
                filename
            )
        )
    else:
        with open(filename, 'r') as jsonFile:
            data = json.load(jsonFile)
        self._value_ranges = data['valueRanges']
        self._best_values = []
        for index, value in enumerate(data['best_values']):
            # save_settings stringifies every best value; restore each
            # according to its declared type. BUGFIX: compare the type
            # tag (element [0] of the range entry), not the whole entry,
            # which previously made every value load as float.
            if self._value_ranges[index][0] == 'int':
                self._best_values.append(int(value))
            else:
                self._best_values.append(float(value))
        self.minimize = data['minimize']
        self.num_employers = data['num_employers']
        self._best_score = float(data['best_score'])
        self.limit = data['limit']
        # best_error is written by save_settings but was never read back;
        # .get() keeps files saved before this field existed loadable
        self._best_error = data.get('best_error')
def save_settings(self, filename):
    '''Save settings to a JSON file

    Args:
        filename (string): name of the file to save to
    '''
    # best values and score are stringified so arbitrary numeric types
    # serialize; import_settings converts them back on load
    settings = {
        'valueRanges': self._value_ranges,
        'best_values': [str(value) for value in self._best_values],
        'minimize': self._minimize,
        'num_employers': self._num_employers,
        'best_score': str(self._best_score),
        'limit': self._limit,
        'best_error': self._best_error,
    }
    with open(filename, 'w') as outfile:
        json.dump(settings, outfile, indent=4, sort_keys=True)
def __getstate__(self):
    '''Pickling support for multiprocessing: drop the logger and the
    process pool, neither of which can be serialized.
    '''
    picklable = dict(self.__dict__)
    picklable.pop('_logger')
    picklable.pop('_pool')
    return picklable
|
ECRL/ecabc | ecabc/abc.py | ABC.add_value | python | def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
) | Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L114-L138 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Args:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocessing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.args | python | def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args)) | Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L147-L154 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.minimize | python | def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize)) | Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L165-L175 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.num_employers | python | def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit)) | Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L184-L202 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
    '''Shifts a random value for a supplied bee with in accordance with
    another random bee's value

    Args:
        bee (EmployerBee): supplied bee to merge

    Returns:
        tuple: (score of new position, values of new position, fitness
        function return value of new position)
    '''
    # pick one tunable dimension and one *other* employer at random
    random_dimension = randint(0, len(self._value_ranges) - 1)
    second_bee = randint(0, self._num_employers - 1)
    while (bee.id == self._employers[second_bee].id):
        second_bee = randint(0, self._num_employers - 1)
    # work on a copy so the caller decides whether to accept the move
    new_bee = deepcopy(bee)
    new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
        new_bee.values[random_dimension],
        self._employers[second_bee].values[random_dimension],
        self._value_ranges[random_dimension]
    )
    # NOTE(review): get_score appears to also record the raw fitness
    # result in new_bee.error (returned below) -- confirm against the
    # EmployerBee implementation
    fitness_score = new_bee.get_score(self._fitness_fxn(
        new_bee.values,
        **self._args
    ))
    return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
    '''Accept a candidate position for *bee* when it scores at least as
    well as the bee's current position; otherwise record a failed trial

    Args:
        bee (EmployerBee): bee to move
        new_values (tuple): (new score, new values, new fitness function
            return value)
    '''
    candidate_score = np.nan_to_num(new_values[0])
    if bee.score > candidate_score:
        # candidate is worse: stay put and count the failure
        bee.failed_trials += 1
    else:
        # candidate is at least as good: adopt it and reset the counter
        bee.values = new_values[1]
        bee.score = candidate_score
        bee.error = new_values[2]
        bee.failed_trials = 0
        self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
    '''Memorize *score*/*values*/*error* as the new best food source if
    *score* beats the current best

    When minimizing the fitness function's return value, a higher
    derived score wins; otherwise a lower derived score wins.

    Args:
        score (float): new score to evaluate
        values (list): new value ranges to evaluate
        error (float): new fitness function return value to evaluate

    Returns:
        bool: True if new score is better, False otherwise
    '''
    if self._best_score is None:
        improved = True
    elif self._minimize:
        improved = score > self._best_score
    else:
        improved = score < self._best_score
    if not improved:
        return False
    self._best_score = score
    self._best_values = values.copy()
    self._best_error = error
    self._logger.log(
        'debug',
        'New best food source memorized: {}'.format(
            self._best_error
        )
    )
    return True
def __gen_random_values(self):
    '''Generate one random value per tunable variable, drawn uniformly
    from the configured ranges

    Returns:
        list: random values, one per tunable variable

    Raises:
        RuntimeError: if value ranges are unset, or a range declares a
            type other than 'int' or 'float'
    '''
    if self._value_ranges is None:
        self._logger.log(
            'crit',
            'Must set the type/range of possible values'
        )
        raise RuntimeError("Must set the type/range of possible values")
    values = []
    for value_type, (low, high) in self._value_ranges:
        if value_type == 'int':
            values.append(randint(low, high))
        elif value_type == 'float':
            values.append(np.random.uniform(low, high))
        else:
            self._logger.log(
                'crit',
                'Value type must be either an `int` or a `float`'
            )
            raise RuntimeError(
                'Value type must be either an `int` or a `float`'
            )
    return values
def __verify_ready(self, creating=False):
    '''Sanity-check colony state before running a phase

    Args:
        creating (bool): True if currently creating employer bees (the
            employer list is allowed to be empty), False for checking
            all other operations

    Raises:
        RuntimeWarning: if no value ranges are configured, or employers
            are required but none exist yet
    '''
    if not len(self._value_ranges):
        self._logger.log(
            'crit',
            'Attribute value_ranges must have at least one value'
        )
        raise RuntimeWarning(
            'Attribute value_ranges must have at least one value'
        )
    if len(self._employers) == 0 and creating is False:
        self._logger.log('crit', 'Need to create employers')
        raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
    '''Import settings from a JSON file (as written by save_settings)

    Args:
        filename (string): name of the file to import from
    '''
    if not os.path.isfile(filename):
        self._logger.log(
            'error',
            'File: {} not found, continuing with default settings'.format(
                filename
            )
        )
    else:
        with open(filename, 'r') as jsonFile:
            data = json.load(jsonFile)
        self._value_ranges = data['valueRanges']
        # best values were serialized as strings; coerce each back to
        # the type declared for its variable
        self._best_values = []
        for index, value in enumerate(data['best_values']):
            # BUG FIX: each value-range entry is a (type, (min, max))
            # pair, so the type tag is element 0; the original compared
            # the whole pair to 'int' (always False), silently coercing
            # every integer value to float
            if self._value_ranges[index][0] == 'int':
                self._best_values.append(int(value))
            else:
                self._best_values.append(float(value))
        self.minimize = data['minimize']
        self.num_employers = data['num_employers']
        self._best_score = float(data['best_score'])
        self.limit = data['limit']
def save_settings(self, filename):
    '''Save settings to a JSON file

    Best values and the best score are serialized as strings so that
    import_settings can restore them with explicit type coercion.

    Args:
        filename (string): name of the file to save to
    '''
    data = {
        'valueRanges': self._value_ranges,
        'best_values': [str(value) for value in self._best_values],
        'minimize': self._minimize,
        'num_employers': self._num_employers,
        'best_score': str(self._best_score),
        'limit': self._limit,
        'best_error': self._best_error,
    }
    with open(filename, 'w') as outfile:
        json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
    '''Pickle support for multiprocessing: drop the logger and the
    process pool, neither of which can be serialized
    '''
    picklable = dict(self.__dict__)
    picklable.pop('_logger')
    picklable.pop('_pool')
    return picklable
|
ECRL/ecabc | ecabc/abc.py | ABC.value_ranges | python | def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will be between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
)) | Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will be between "min_val" and
"max_val" | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L211-L224 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=None,
             print_level='info', file_logging='disable', args=None,
             processes=4):
    '''ABC object: manages employer and onlooker bees to optimize a set
    of generic values for a user-supplied fitness function. Handles data
    transfer and manipulation between bees.

    Args:
        fitness_fxn (callable): fitness function supplied by the user;
            should accept a tuple of tunable ints/floats, and optionally
            additional user-defined arguments (kwargs)
        num_employers (int): number of employer bees the colony utilizes
        value_ranges (list): each element defines a tunable variable in
            the form "(type ('int' or 'float'), (min_val, max_val))";
            initial, random values for each bee will be between
            "min_val" and "max_val"
        print_level (string): console logging level: "debug", "info",
            "warn", "crit", "error", "disable"
        file_logging (string): file logging level: "debug", "info",
            "warn", "crit", "error", "disable"
        args (dict): additional user-defined arguments to pass to
            fitness function; these are not tuned
        processes (int): number of concurrent processes the algorithm
            will utilize via multiprocessing.Pool

    Raises:
        ValueError: if *fitness_fxn* is not callable
    '''
    # validate before allocating the logger/pool so a bad fitness_fxn
    # does not leak a live process pool
    if not callable(fitness_fxn):
        raise ValueError('submitted *fitness_fxn* is not callable')
    # BUG FIX: the original used mutable default arguments ([] and {});
    # every ABC instance then shared -- and mutated, via add_value /
    # add_argument -- the very same list and dict objects
    if value_ranges is None:
        value_ranges = []
    if args is None:
        args = {}
    self._logger = ColorLogger(
        stream_level=print_level,
        file_level=file_logging
    )
    self._value_ranges = value_ranges
    if num_employers < 2:
        self._logger.log(
            'warn',
            'Two employers are needed: setting to two'
        )
        num_employers = 2
    self._num_employers = num_employers
    self._best_values = []
    self._best_score = None
    self._best_error = None
    self._minimize = True
    self._fitness_fxn = fitness_fxn
    self.__onlooker = OnlookerBee()
    self._limit = num_employers * len(value_ranges)
    self._employers = []
    self._args = args
    self._total_score = 0
    self._cycle_number = 0
    self._processes = processes
    if self._processes > 1:
        self._pool = multiprocessing.Pool(self._processes)
    else:
        self._pool = None
def add_argument(self, arg_name, arg_value):
    '''Register one extra keyword argument to forward to the fitness
    function; this argument/value is not tuned

    Args:
        arg_name (string): name/dictionary key of argument
        arg_value (any): dictionary value of argument
    '''
    if self._employers:
        self._logger.log(
            'warn',
            'Adding an argument after the employers have been created'
        )
    if self._args is None:
        self._args = {}
    self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
    '''Append one tunable value definition (the fitness function must be
    configured to handle it) and refresh the abandonment limit

    Args:
        value_type (string): type of the value, 'int' or 'float'
        value_min (int or float): minimum bound for the value
        value_max (int or float): maximum bound for the value

    Returns:
        None
    '''
    if self._employers:
        self._logger.log(
            'warn',
            'Adding a value after employers have been created'
        )
    self._value_ranges.append((value_type, (value_min, value_max)))
    # the limit scales with employers x tunable values
    self._limit = self._num_employers * len(self._value_ranges)
    self._logger.log(
        'debug',
        'Limit set to {}'.format(self._limit)
    )
@property
def args(self):
    '''Arguments that will be passed to the fitness function at runtime'''
    return self._args

@args.setter
def args(self, args):
    '''Set additional arguments to be passed to the fitness function
    (replaces any previously registered arguments wholesale)

    Args:
        args (dict): additional arguments
    '''
    self._args = args
    self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
    '''If True, minimizes fitness function return value rather than
    derived score
    '''
    return self._minimize

@minimize.setter
def minimize(self, minimize):
    '''Configures the ABC to minimize fitness function return value or
    derived score

    Args:
        minimize (bool): if True, minimizes fitness function return
            value; if False, minimizes derived score
    '''
    self._minimize = minimize
    self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
    '''Number of employer bees present in the ABC'''
    return self._num_employers

@num_employers.setter
def num_employers(self, num_employers):
    '''Sets the number of employer bees; at least two are required

    Also recomputes the abandonment limit, which scales with the number
    of employers times the number of tunable values.

    Args:
        num_employers (int): number of employer bees
    '''
    if num_employers < 2:
        self._logger.log(
            'warn',
            'Two employers are needed: setting to two'
        )
        num_employers = 2
    self._num_employers = num_employers
    self._logger.log('debug', 'Number of employers set to {}'.format(
        num_employers
    ))
    self._limit = num_employers * len(self._value_ranges)
    self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
    '''Value types, min/max values for tunable parameters'''
    return self._value_ranges
@value_ranges.setter
@property
def best_performer(self):
    '''tuple: best performing (score, values, error) memorized so far'''
    return (self._best_score, self._best_values, self._best_error)

@property
def best_employers(self):
    '''list: best performing employer bees, as tracked by the onlooker'''
    return self.__onlooker.best_employers
@property
def limit(self):
    '''Maximum number of cycles a bee is allowed to stay at its current
    food source before abandoning it (moving to a randomly generated one)
    '''
    return self._limit

@limit.setter
def limit(self, limit):
    '''Set the maximum number of cycles a bee is allowed to stay at its
    current food source before abandoning it (moving to a randomly
    generated one); by default, this is set to the number of employers
    times the number of tunable values

    Args:
        limit (int): maximum number of cycles
    '''
    self._limit = limit
@property
def processes(self):
    '''How many concurrent processes the ABC will utilize for fitness
    function evaluation via multiprocessing.Pool
    '''
    return self._processes

@processes.setter
def processes(self, processes):
    '''Set the number of concurrent processes the ABC will utilize for
    fitness function evaluation; if <= 1, single process is used

    Args:
        processes (int): number of concurrent processes
    '''
    # close any pool that exists under the *old* configuration
    if self._processes > 1:
        self._pool.close()
        self._pool.join()
    # BUG FIX: the original never stored the new value (so
    # self._processes went stale everywhere it is consulted), and it
    # branched on the OLD count when deciding whether to build the new
    # pool -- e.g. going from 1 to 8 processes left the pool as None
    self._processes = processes
    if processes > 1:
        self._pool = multiprocessing.Pool(processes)
    else:
        self._pool = None
    self._logger.log('debug', 'Number of processes set to {}'.format(
        processes
    ))
def infer_process_count(self):
    '''Set the number of concurrent processes to the detected number of
    CPU cores, falling back to 4 if detection is unsupported
    '''
    try:
        self.processes = multiprocessing.cpu_count()
    except NotImplementedError:
        # BUG FIX: the original message read "Could infer CPU count" --
        # the opposite of what happened
        self._logger.log(
            'error',
            'Could not infer CPU count, setting number of processes back to 4'
        )
        self.processes = 4
def create_employers(self):
    '''Generate employer bees and score their initial random positions.
    This should be called directly after the ABC is initialized.

    In multiprocess mode the fitness evaluations are submitted
    asynchronously and the results are collected afterwards.
    '''
    self.__verify_ready(True)
    async_employers = []  # bees whose fitness is still an AsyncResult
    for i in range(self._num_employers):
        employer = EmployerBee(self.__gen_random_values())
        if self._processes <= 1:
            employer.error = self._fitness_fxn(
                employer.values, **self._args
            )
            employer.score = employer.get_score()
            if np.isnan(employer.score):
                self._logger.log('warn', 'NaN bee score: {}, {}'.format(
                    employer.id, employer.score
                ))
            self._logger.log('debug', 'Bee number {} created'.format(
                i + 1
            ))
            self.__update(employer.score, employer.values, employer.error)
        else:
            employer.error = self._pool.apply_async(
                self._fitness_fxn,
                [employer.values],
                self._args
            )
            async_employers.append(employer)
        self._employers.append(employer)
    for idx, employer in enumerate(async_employers):
        # resolve the deferred fitness result, then score as above
        employer.error = employer.error.get()
        employer.score = employer.get_score()
        if np.isnan(employer.score):
            self._logger.log('warn', 'NaN bee score: {}, {}'.format(
                employer.id, employer.score
            ))
        # BUG FIX: the original logged `i + 1` here -- the final value
        # of the creation loop variable -- instead of this bee's index;
        # it also wrapped this body in a no-op `try/except ... raise e`
        self._logger.log('debug', 'Bee number {} created'.format(
            idx + 1
        ))
        self.__update(employer.score, employer.values, employer.error)
    self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
    '''Runs a single iteration of the ABC; employer phase -> probability
    calculation -> onlooker phase -> check positions
    '''
    self._employer_phase()
    self._calc_probability()
    self._onlooker_phase()
    self._check_positions()
def _employer_phase(self):
    '''Each employer bee attempts a merge with another random bee; the
    mutated position is adopted only if it scores at least as well

    Serial mode applies each move immediately; multiprocess mode submits
    all merges first, then applies the results in order.
    '''
    self._logger.log('debug', 'Employer bee phase')
    pending = []  # (bee, AsyncResult) pairs; multiprocess mode only
    for employer in self._employers:
        if self._processes <= 1:
            self._move_bee(employer, self._merge_bee(employer))
        else:
            task = self._pool.apply_async(self._merge_bee, [employer])
            pending.append((employer, task))
    for employer, task in pending:
        self._move_bee(employer, task.get())
def _calc_probability(self):
    '''Determines the probability that each bee will be chosen during the
    onlooker phase; also determines if a new best-performing bee is found

    Side effects: refreshes self._total_score and every employer's
    .probability, and may update the memorized best food source.
    '''
    self._logger.log('debug', 'Calculating bee probabilities')
    self.__verify_ready()
    self._total_score = 0
    for employer in self._employers:
        self._total_score += employer.score
        # track the colony-wide best while accumulating the total
        if self.__update(employer.score, employer.values, employer.error):
            self._logger.log(
                'info',
                'Update to best performer -'
                ' error: {} | score: {} | values: {}'.format(
                    employer.error,
                    employer.score,
                    employer.values
                )
            )
    for employer in self._employers:
        # each bee's probability is its share of the total score
        employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
    '''Well-performing bees (chosen probabilistically based on fitness
    score) have a value merged with a second random bee

    Serial mode merges/moves each chosen bee immediately; multiprocess
    mode submits all merges first, then applies the results in order.
    '''
    self.__verify_ready()
    self._logger.log('debug', 'Onlooker bee phase')
    modified = []  # (bee, AsyncResult) pairs; used in multiprocess mode only
    for _ in self._employers:
        # fitness-proportionate selection: probabilities were populated
        # by _calc_probability and sum to 1 across all employers
        chosen_bee = np.random.choice(
            self._employers,
            p=[e.probability for e in self._employers]
        )
        if self._processes <= 1:
            new_values = self._merge_bee(chosen_bee)
            self._move_bee(chosen_bee, new_values)
            self.__update(
                chosen_bee.score,
                chosen_bee.values,
                chosen_bee.error
            )
        else:
            modified.append((
                chosen_bee,
                self._pool.apply_async(self._merge_bee, [chosen_bee])
            ))
    for pair in modified:
        # .get() blocks until the worker finishes the merge
        self._move_bee(pair[0], pair[1].get())
        self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
    '''Checks each bee to see if it abandons its current food source (has
    not found a better one in self._limit iterations); if abandoning, it
    becomes a scout and generates a new, random food source

    At most one bee (the one with the most failed trials) scouts per call.
    '''
    self.__verify_ready()
    max_trials = 0
    scout = None
    # find the bee with the most consecutive failed trials
    for bee in self._employers:
        if bee.failed_trials >= max_trials:
            max_trials = bee.failed_trials
            scout = bee
    if scout is not None and scout.failed_trials > self._limit:
        # BUG FIX: the message advertises the limit, but the original
        # formatted scout.failed_trials into the second placeholder
        self._logger.log(
            'debug',
            'Sending scout (error of {} with limit of {})'.format(
                scout.error, self._limit
            )
        )
        scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
    '''Shifts a random value for a supplied bee with in accordance with
    another random bee's value

    Args:
        bee (EmployerBee): supplied bee to merge

    Returns:
        tuple: (score of new position, values of new position, fitness
        function return value of new position)
    '''
    # pick one tunable dimension and one *other* employer at random
    random_dimension = randint(0, len(self._value_ranges) - 1)
    second_bee = randint(0, self._num_employers - 1)
    while (bee.id == self._employers[second_bee].id):
        second_bee = randint(0, self._num_employers - 1)
    # work on a copy so the caller decides whether to accept the move
    new_bee = deepcopy(bee)
    new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
        new_bee.values[random_dimension],
        self._employers[second_bee].values[random_dimension],
        self._value_ranges[random_dimension]
    )
    # NOTE(review): get_score appears to also record the raw fitness
    # result in new_bee.error (returned below) -- confirm against the
    # EmployerBee implementation
    fitness_score = new_bee.get_score(self._fitness_fxn(
        new_bee.values,
        **self._args
    ))
    return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
    '''Accept a candidate position for *bee* when it scores at least as
    well as the bee's current position; otherwise record a failed trial

    Args:
        bee (EmployerBee): bee to move
        new_values (tuple): (new score, new values, new fitness function
            return value)
    '''
    candidate_score = np.nan_to_num(new_values[0])
    if bee.score > candidate_score:
        # candidate is worse: stay put and count the failure
        bee.failed_trials += 1
    else:
        # candidate is at least as good: adopt it and reset the counter
        bee.values = new_values[1]
        bee.score = candidate_score
        bee.error = new_values[2]
        bee.failed_trials = 0
        self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
    '''Memorize *score*/*values*/*error* as the new best food source if
    *score* beats the current best

    When minimizing the fitness function's return value, a higher
    derived score wins; otherwise a lower derived score wins.

    Args:
        score (float): new score to evaluate
        values (list): new value ranges to evaluate
        error (float): new fitness function return value to evaluate

    Returns:
        bool: True if new score is better, False otherwise
    '''
    if self._best_score is None:
        improved = True
    elif self._minimize:
        improved = score > self._best_score
    else:
        improved = score < self._best_score
    if not improved:
        return False
    self._best_score = score
    self._best_values = values.copy()
    self._best_error = error
    self._logger.log(
        'debug',
        'New best food source memorized: {}'.format(
            self._best_error
        )
    )
    return True
def __gen_random_values(self):
    '''Generate one random value per tunable variable, drawn uniformly
    from the configured ranges

    Returns:
        list: random values, one per tunable variable

    Raises:
        RuntimeError: if value ranges are unset, or a range declares a
            type other than 'int' or 'float'
    '''
    if self._value_ranges is None:
        self._logger.log(
            'crit',
            'Must set the type/range of possible values'
        )
        raise RuntimeError("Must set the type/range of possible values")
    values = []
    for value_type, (low, high) in self._value_ranges:
        if value_type == 'int':
            values.append(randint(low, high))
        elif value_type == 'float':
            values.append(np.random.uniform(low, high))
        else:
            self._logger.log(
                'crit',
                'Value type must be either an `int` or a `float`'
            )
            raise RuntimeError(
                'Value type must be either an `int` or a `float`'
            )
    return values
def __verify_ready(self, creating=False):
    '''Sanity-check colony state before running a phase

    Args:
        creating (bool): True if currently creating employer bees (the
            employer list is allowed to be empty), False for checking
            all other operations

    Raises:
        RuntimeWarning: if no value ranges are configured, or employers
            are required but none exist yet
    '''
    if not len(self._value_ranges):
        self._logger.log(
            'crit',
            'Attribute value_ranges must have at least one value'
        )
        raise RuntimeWarning(
            'Attribute value_ranges must have at least one value'
        )
    if len(self._employers) == 0 and creating is False:
        self._logger.log('crit', 'Need to create employers')
        raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
    '''Import settings from a JSON file (as written by save_settings)

    Args:
        filename (string): name of the file to import from
    '''
    if not os.path.isfile(filename):
        self._logger.log(
            'error',
            'File: {} not found, continuing with default settings'.format(
                filename
            )
        )
    else:
        with open(filename, 'r') as jsonFile:
            data = json.load(jsonFile)
        self._value_ranges = data['valueRanges']
        # best values were serialized as strings; coerce each back to
        # the type declared for its variable
        self._best_values = []
        for index, value in enumerate(data['best_values']):
            # BUG FIX: each value-range entry is a (type, (min, max))
            # pair, so the type tag is element 0; the original compared
            # the whole pair to 'int' (always False), silently coercing
            # every integer value to float
            if self._value_ranges[index][0] == 'int':
                self._best_values.append(int(value))
            else:
                self._best_values.append(float(value))
        self.minimize = data['minimize']
        self.num_employers = data['num_employers']
        self._best_score = float(data['best_score'])
        self.limit = data['limit']
def save_settings(self, filename):
    '''Save settings to a JSON file

    Best values and the best score are serialized as strings so that
    import_settings can restore them with explicit type coercion.

    Args:
        filename (string): name of the file to save to
    '''
    data = {
        'valueRanges': self._value_ranges,
        'best_values': [str(value) for value in self._best_values],
        'minimize': self._minimize,
        'num_employers': self._num_employers,
        'best_score': str(self._best_score),
        'limit': self._limit,
        'best_error': self._best_error,
    }
    with open(filename, 'w') as outfile:
        json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
    '''Pickle support for multiprocessing: drop the logger and the
    process pool, neither of which can be serialized
    '''
    picklable = dict(self.__dict__)
    picklable.pop('_logger')
    picklable.pop('_pool')
    return picklable
|
ECRL/ecabc | ecabc/abc.py | ABC.processes | python | def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
)) | Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L268-L284 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=None,
             print_level='info', file_logging='disable', args=None,
             processes=4):
    '''ABC object: manages employer and onlooker bees to optimize a set
    of generic values for a user-supplied fitness function. Handles data
    transfer and manipulation between bees.

    Args:
        fitness_fxn (callable): fitness function supplied by the user;
            should accept a tuple of tunable ints/floats, and optionally
            additional user-defined arguments (kwargs)
        num_employers (int): number of employer bees the colony utilizes
        value_ranges (list): each element defines a tunable variable in
            the form "(type ('int' or 'float'), (min_val, max_val))";
            initial, random values for each bee will be between
            "min_val" and "max_val"
        print_level (string): console logging level: "debug", "info",
            "warn", "crit", "error", "disable"
        file_logging (string): file logging level: "debug", "info",
            "warn", "crit", "error", "disable"
        args (dict): additional user-defined arguments to pass to
            fitness function; these are not tuned
        processes (int): number of concurrent processes the algorithm
            will utilize via multiprocessing.Pool

    Raises:
        ValueError: if *fitness_fxn* is not callable
    '''
    # validate before allocating the logger/pool so a bad fitness_fxn
    # does not leak a live process pool
    if not callable(fitness_fxn):
        raise ValueError('submitted *fitness_fxn* is not callable')
    # BUG FIX: the original used mutable default arguments ([] and {});
    # every ABC instance then shared -- and mutated, via add_value /
    # add_argument -- the very same list and dict objects
    if value_ranges is None:
        value_ranges = []
    if args is None:
        args = {}
    self._logger = ColorLogger(
        stream_level=print_level,
        file_level=file_logging
    )
    self._value_ranges = value_ranges
    if num_employers < 2:
        self._logger.log(
            'warn',
            'Two employers are needed: setting to two'
        )
        num_employers = 2
    self._num_employers = num_employers
    self._best_values = []
    self._best_score = None
    self._best_error = None
    self._minimize = True
    self._fitness_fxn = fitness_fxn
    self.__onlooker = OnlookerBee()
    self._limit = num_employers * len(value_ranges)
    self._employers = []
    self._args = args
    self._total_score = 0
    self._cycle_number = 0
    self._processes = processes
    if self._processes > 1:
        self._pool = multiprocessing.Pool(self._processes)
    else:
        self._pool = None
def add_argument(self, arg_name, arg_value):
    '''Register one extra keyword argument to forward to the fitness
    function; this argument/value is not tuned

    Args:
        arg_name (string): name/dictionary key of argument
        arg_value (any): dictionary value of argument
    '''
    if self._employers:
        self._logger.log(
            'warn',
            'Adding an argument after the employers have been created'
        )
    if self._args is None:
        self._args = {}
    self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
    '''Append one tunable value definition (the fitness function must be
    configured to handle it) and refresh the abandonment limit

    Args:
        value_type (string): type of the value, 'int' or 'float'
        value_min (int or float): minimum bound for the value
        value_max (int or float): maximum bound for the value

    Returns:
        None
    '''
    if self._employers:
        self._logger.log(
            'warn',
            'Adding a value after employers have been created'
        )
    self._value_ranges.append((value_type, (value_min, value_max)))
    # the limit scales with employers x tunable values
    self._limit = self._num_employers * len(self._value_ranges)
    self._logger.log(
        'debug',
        'Limit set to {}'.format(self._limit)
    )
@property
def args(self):
    '''Arguments that will be passed to the fitness function at runtime'''
    return self._args

@args.setter
def args(self, args):
    '''Set additional arguments to be passed to the fitness function
    (replaces any previously registered arguments wholesale)

    Args:
        args (dict): additional arguments
    '''
    self._args = args
    self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
    '''If True, minimizes fitness function return value rather than
    derived score
    '''
    return self._minimize

@minimize.setter
def minimize(self, minimize):
    '''Configures the ABC to minimize fitness function return value or
    derived score

    Args:
        minimize (bool): if True, minimizes fitness function return
            value; if False, minimizes derived score
    '''
    self._minimize = minimize
    self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
    '''Number of employer bees present in the ABC'''
    return self._num_employers

@num_employers.setter
def num_employers(self, num_employers):
    '''Sets the number of employer bees; at least two are required

    Also recomputes the abandonment limit, which scales with the number
    of employers times the number of tunable values.

    Args:
        num_employers (int): number of employer bees
    '''
    if num_employers < 2:
        self._logger.log(
            'warn',
            'Two employers are needed: setting to two'
        )
        num_employers = 2
    self._num_employers = num_employers
    self._logger.log('debug', 'Number of employers set to {}'.format(
        num_employers
    ))
    self._limit = num_employers * len(self._value_ranges)
    self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
    '''Value types, min/max values for tunable parameters'''
    return self._value_ranges

@value_ranges.setter
def value_ranges(self, value_ranges):
    '''Set the types, min/max values for tunable parameters

    Args:
        value_ranges (list): each element defines a tunable variable in
            the form "(type ('int' or 'float'), (min_val, max_val))";
            initial, random values for each bee will be between
            "min_val" and "max_val"
    '''
    self._value_ranges = value_ranges
    self._logger.log('debug', 'Value ranges set to {}'.format(
        value_ranges
    ))
@property
def best_performer(self):
    '''tuple: best performing (score, values, error) memorized so far'''
    return (self._best_score, self._best_values, self._best_error)

@property
def best_employers(self):
    '''list: best performing employer bees, as tracked by the onlooker'''
    return self.__onlooker.best_employers
@property
def limit(self):
    '''Maximum number of cycles a bee is allowed to stay at its current
    food source before abandoning it (moving to a randomly generated one)
    '''
    return self._limit

@limit.setter
def limit(self, limit):
    '''Set the maximum number of cycles a bee is allowed to stay at its
    current food source before abandoning it (moving to a randomly
    generated one); by default, this is set to the number of employers
    times the number of tunable values

    Args:
        limit (int): maximum number of cycles
    '''
    self._limit = limit
@property
def processes(self):
    '''How many concurrent processes the ABC will utilize for fitness
    function evaluation via multiprocessing.Pool
    '''
    return self._processes
@processes.setter
def infer_process_count(self):
    '''Set the number of concurrent processes to the detected number of
    CPU cores, falling back to 4 if detection is unsupported
    '''
    try:
        self.processes = multiprocessing.cpu_count()
    except NotImplementedError:
        # BUG FIX: the original message read "Could infer CPU count" --
        # the opposite of what happened
        self._logger.log(
            'error',
            'Could not infer CPU count, setting number of processes back to 4'
        )
        self.processes = 4
def create_employers(self):
    '''Generate employer bees and score their initial random positions.
    This should be called directly after the ABC is initialized.

    In multiprocess mode the fitness evaluations are submitted
    asynchronously and the results are collected afterwards.
    '''
    self.__verify_ready(True)
    async_employers = []  # bees whose fitness is still an AsyncResult
    for i in range(self._num_employers):
        employer = EmployerBee(self.__gen_random_values())
        if self._processes <= 1:
            employer.error = self._fitness_fxn(
                employer.values, **self._args
            )
            employer.score = employer.get_score()
            if np.isnan(employer.score):
                self._logger.log('warn', 'NaN bee score: {}, {}'.format(
                    employer.id, employer.score
                ))
            self._logger.log('debug', 'Bee number {} created'.format(
                i + 1
            ))
            self.__update(employer.score, employer.values, employer.error)
        else:
            employer.error = self._pool.apply_async(
                self._fitness_fxn,
                [employer.values],
                self._args
            )
            async_employers.append(employer)
        self._employers.append(employer)
    for idx, employer in enumerate(async_employers):
        # resolve the deferred fitness result, then score as above
        employer.error = employer.error.get()
        employer.score = employer.get_score()
        if np.isnan(employer.score):
            self._logger.log('warn', 'NaN bee score: {}, {}'.format(
                employer.id, employer.score
            ))
        # BUG FIX: the original logged `i + 1` here -- the final value
        # of the creation loop variable -- instead of this bee's index;
        # it also wrapped this body in a no-op `try/except ... raise e`
        self._logger.log('debug', 'Bee number {} created'.format(
            idx + 1
        ))
        self.__update(employer.score, employer.values, employer.error)
    self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
    '''Runs a single iteration of the ABC; employer phase -> probability
    calculation -> onlooker phase -> check positions
    '''
    self._employer_phase()
    self._calc_probability()
    self._onlooker_phase()
    self._check_positions()
def _employer_phase(self):
    '''Each employer bee attempts a merge with another random bee; the
    mutated position is adopted only if it scores at least as well

    Serial mode applies each move immediately; multiprocess mode submits
    all merges first, then applies the results in order.
    '''
    self._logger.log('debug', 'Employer bee phase')
    pending = []  # (bee, AsyncResult) pairs; multiprocess mode only
    for employer in self._employers:
        if self._processes <= 1:
            self._move_bee(employer, self._merge_bee(employer))
        else:
            task = self._pool.apply_async(self._merge_bee, [employer])
            pending.append((employer, task))
    for employer, task in pending:
        self._move_bee(employer, task.get())
def _calc_probability(self):
    '''Determines the probability that each bee will be chosen during the
    onlooker phase; also determines if a new best-performing bee is found

    Side effects: refreshes self._total_score and every employer's
    .probability, and may update the memorized best food source.
    '''
    self._logger.log('debug', 'Calculating bee probabilities')
    self.__verify_ready()
    self._total_score = 0
    for employer in self._employers:
        self._total_score += employer.score
        # track the colony-wide best while accumulating the total
        if self.__update(employer.score, employer.values, employer.error):
            self._logger.log(
                'info',
                'Update to best performer -'
                ' error: {} | score: {} | values: {}'.format(
                    employer.error,
                    employer.score,
                    employer.values
                )
            )
    for employer in self._employers:
        # each bee's probability is its share of the total score
        employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
    '''Well-performing bees (chosen probabilistically based on fitness
    score) have a value merged with a second random bee

    Serial mode merges/moves each chosen bee immediately; multiprocess
    mode submits all merges first, then applies the results in order.
    '''
    self.__verify_ready()
    self._logger.log('debug', 'Onlooker bee phase')
    modified = []  # (bee, AsyncResult) pairs; used in multiprocess mode only
    for _ in self._employers:
        # fitness-proportionate selection: probabilities were populated
        # by _calc_probability and sum to 1 across all employers
        chosen_bee = np.random.choice(
            self._employers,
            p=[e.probability for e in self._employers]
        )
        if self._processes <= 1:
            new_values = self._merge_bee(chosen_bee)
            self._move_bee(chosen_bee, new_values)
            self.__update(
                chosen_bee.score,
                chosen_bee.values,
                chosen_bee.error
            )
        else:
            modified.append((
                chosen_bee,
                self._pool.apply_async(self._merge_bee, [chosen_bee])
            ))
    for pair in modified:
        # .get() blocks until the worker finishes the merge
        self._move_bee(pair[0], pair[1].get())
        self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
    '''Checks each bee to see if it abandons its current food source (has
    not found a better one in self._limit iterations); if abandoning, it
    becomes a scout and generates a new, random food source

    At most one bee (the one with the most failed trials) scouts per call.
    '''
    self.__verify_ready()
    max_trials = 0
    scout = None
    # find the bee with the most consecutive failed trials
    for bee in self._employers:
        if bee.failed_trials >= max_trials:
            max_trials = bee.failed_trials
            scout = bee
    if scout is not None and scout.failed_trials > self._limit:
        # BUG FIX: the message advertises the limit, but the original
        # formatted scout.failed_trials into the second placeholder
        self._logger.log(
            'debug',
            'Sending scout (error of {} with limit of {})'.format(
                scout.error, self._limit
            )
        )
        scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.infer_process_count | python | def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4 | Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L286-L298 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
    @property
    def args(self):
        '''dict: extra keyword arguments forwarded to the fitness function at
        runtime (not tuned by the colony)'''
        return self._args
    @args.setter
    def args(self, args):
        '''Replace (not merge) the additional arguments passed to the fitness
        function.

        Args:
            args (dict): additional arguments
        '''
        self._args = args
        self._logger.log('debug', 'Args set to {}'.format(args))
    @property
    def minimize(self):
        '''bool: if True, minimizes fitness function return value rather than
        derived score
        '''
        return self._minimize
    @minimize.setter
    def minimize(self, minimize):
        '''Configures the ABC to minimize fitness function return value or
        derived score.

        NOTE(review): internally `__update` treats a *higher* derived score
        as better when this is True — presumably because the score is an
        inverse transform of the error; confirm against EmployerBee.get_score.

        Args:
            minimize (bool): if True, minimizes fitness function return value;
                if False, minimizes derived score
        '''
        self._minimize = minimize
        self._logger.log('debug', 'Minimize set to {}'.format(minimize))
    @property
    def num_employers(self):
        '''int: number of employer bees present in the ABC'''
        return self._num_employers
    @num_employers.setter
    def num_employers(self, num_employers):
        '''Sets the number of employer bees; at least two are required
        (values below two are clamped with a warning). Also recomputes the
        abandonment limit from the current number of tunable values.

        Args:
            num_employers (int): number of employer bees
        '''
        if num_employers < 2:
            self._logger.log(
                'warn',
                'Two employers are needed: setting to two'
            )
            num_employers = 2
        self._num_employers = num_employers
        self._logger.log('debug', 'Number of employers set to {}'.format(
            num_employers
        ))
        # Keep the abandonment limit in sync with the new colony size
        self._limit = num_employers * len(self._value_ranges)
        self._logger.log('debug', 'Limit set to {}'.format(self._limit))
    @property
    def value_ranges(self):
        '''list: value types, min/max values for tunable parameters'''
        return self._value_ranges
    @value_ranges.setter
    def value_ranges(self, value_ranges):
        '''Set the types, min/max values for tunable parameters.

        NOTE(review): unlike the num_employers setter and add_value, this
        setter does not recompute self._limit — confirm whether that is
        intentional.

        Args:
            value_ranges (list): each element defines a tunable variable in
                the form "(type ('int' or 'float'), (min_val, max_val))";
                initial, random values for each bee will between "min_val" and
                "max_val"
        '''
        self._value_ranges = value_ranges
        self._logger.log('debug', 'Value ranges set to {}'.format(
            value_ranges
        ))
    @property
    def best_performer(self):
        '''tuple: best performing result so far as (score, values, error)'''
        return (self._best_score, self._best_values, self._best_error)
    @property
    def best_employers(self):
        '''list: best performing employer bees, as tracked by the onlooker
        (delegates to OnlookerBee.best_employers, defined elsewhere)'''
        return self.__onlooker.best_employers
    @property
    def limit(self):
        '''int: maximum number of cycles a bee is allowed to stay at its
        current food source before abandoning it (moving to a randomly
        generated one)
        '''
        return self._limit
    @limit.setter
    def limit(self, limit):
        '''Set the maximum number of cycles a bee is allowed to stay at its
        current food source before abandoning it (moving to a randomly
        generated one); by default, this is set to the number of employers
        times the number of tunable values.

        Args:
            limit (int): maximum number of cycles
        '''
        self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
    def _calc_probability(self):
        '''Determines the probability that each bee will be chosen during the
        onlooker phase; also determines if a new best-performing bee is found.

        Side effects: updates self._total_score, possibly the best-performer
        fields (via __update), and each employer's selection probability
        (via EmployerBee.calculate_probability, defined elsewhere).
        '''
        self._logger.log('debug', 'Calculating bee probabilities')
        self.__verify_ready()
        # Accumulate the colony-wide score; used to normalize each bee's
        # selection probability below
        self._total_score = 0
        for employer in self._employers:
            self._total_score += employer.score
            # __update returns True only when this bee beats the current best
            if self.__update(employer.score, employer.values, employer.error):
                self._logger.log(
                    'info',
                    'Update to best performer -'
                    ' error: {} | score: {} | values: {}'.format(
                        employer.error,
                        employer.score,
                        employer.values
                    )
                )
        # Second pass: total score is now final, so probabilities can be set
        for employer in self._employers:
            employer.calculate_probability(self._total_score)
    def _onlooker_phase(self):
        '''Well-performing bees (chosen probabilistically based on fitness
        score) have a value merged with a second random bee.

        Requires _calc_probability to have run first so that each employer's
        `probability` attribute sums to a valid distribution for
        np.random.choice.
        '''
        self.__verify_ready()
        self._logger.log('debug', 'Onlooker bee phase')
        modified = []
        # One onlooker draw per employer; the same bee may be drawn more
        # than once
        for _ in self._employers:
            chosen_bee = np.random.choice(
                self._employers,
                p=[e.probability for e in self._employers]
            )
            if self._processes <= 1:
                new_values = self._merge_bee(chosen_bee)
                self._move_bee(chosen_bee, new_values)
                self.__update(
                    chosen_bee.score,
                    chosen_bee.values,
                    chosen_bee.error
                )
            else:
                # Queue the merge; the move/update happens after collection
                modified.append((
                    chosen_bee,
                    self._pool.apply_async(self._merge_bee, [chosen_bee])
                ))
        for pair in modified:
            self._move_bee(pair[0], pair[1].get())
            self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
    def _merge_bee(self, bee):
        '''Shifts a random value for a supplied bee with in accordance with
        another random bee's value.

        The supplied bee itself is not mutated; a deep copy carries the
        candidate position.

        Args:
            bee (EmployerBee): supplied bee to merge

        Returns:
            tuple: (score of new position, values of new position, fitness
                function return value of new position)
        '''
        # Pick one dimension to perturb and a distinct partner bee
        random_dimension = randint(0, len(self._value_ranges) - 1)
        second_bee = randint(0, self._num_employers - 1)
        while (bee.id == self._employers[second_bee].id):
            second_bee = randint(0, self._num_employers - 1)
        # Deep copy so the candidate never aliases the original bee's values
        new_bee = deepcopy(bee)
        new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
            new_bee.values[random_dimension],
            self._employers[second_bee].values[random_dimension],
            self._value_ranges[random_dimension]
        )
        # NOTE(review): presumably get_score(error) also stores the new error
        # on new_bee, since new_bee.error is returned below — confirm against
        # EmployerBee.get_score
        fitness_score = new_bee.get_score(self._fitness_fxn(
            new_bee.values,
            **self._args
        ))
        return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.create_employers | python | def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete') | Generate employer bees. This should be called directly after the
ABC is initialized. | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L300-L344 | [
"def idealDayTest(values, args=None): # Fitness function that will be passed to the abc\n temperature = values[0] + values[1] # Calcuate the day's temperature\n humidity = values[2] * values[3] # Calculate the day's humidity\n\n cost_temperature = abs(70 - temperature) # Check how close th... | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.run_iteration | python | def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions() | Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L346-L354 | [
"def _employer_phase(self):\n '''Iterates through the employer bees and merges each with another\n random bee (one value is moved in accordance with the second bee's\n value); if the mutation performs better, the bee is moved to the new\n position\n '''\n\n self._logger.log('debug', 'Employer bee ... | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC._employer_phase | python | def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get()) | Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L356-L375 | [
"def _merge_bee(self, bee):\n '''Shifts a random value for a supplied bee with in accordance with\n another random bee's value\n\n Args:\n bee (EmployerBee): supplied bee to merge\n\n Returns:\n tuple: (score of new position, values of new position, fitness\n function return val... | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC._calc_probability | python | def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score) | Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L377-L398 | [
"def __update(self, score, values, error):\n '''Update the best score and values if the given score is better than\n the current best score\n\n Args:\n score (float): new score to evaluate\n values (list): new value ranges to evaluate\n error (float): new fitness function return value ... | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC._onlooker_phase | python | def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error) | Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L400-L428 | [
"def _merge_bee(self, bee):\n '''Shifts a random value for a supplied bee with in accordance with\n another random bee's value\n\n Args:\n bee (EmployerBee): supplied bee to merge\n\n Returns:\n tuple: (score of new position, values of new position, fitness\n function return val... | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC._check_positions | python | def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values() | Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L430-L450 | [
"def __gen_random_values(self):\n '''Generate random values based on supplied value ranges\n\n Returns:\n list: random values, one per tunable variable\n '''\n\n values = []\n if self._value_ranges is None:\n self._logger.log(\n 'crit',\n 'Must set the type/range o... | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC._merge_bee | python | def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error) | Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position) | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L452-L478 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC._move_bee | python | def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position') | Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value) | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L480-L498 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.__update | python | def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False | Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L500-L537 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
    def _employer_phase(self):
        '''Iterates through the employer bees and merges each with another
        random bee (one value is moved in accordance with the second bee's
        value); if the mutation performs better, the bee is moved to the new
        position
        '''
        self._logger.log('debug', 'Employer bee phase')
        modified = []
        for bee in self._employers:
            if self._processes <= 1:
                # single-process: merge and (possibly) move immediately
                new_values = self._merge_bee(bee)
                self._move_bee(bee, new_values)
            else:
                # multiprocess: dispatch the merge asynchronously, keeping
                # (bee, AsyncResult) pairs so results map back to their bees
                modified.append((
                    bee,
                    self._pool.apply_async(self._merge_bee, [bee])
                ))
        # collect async results; .get() blocks until each merge finishes
        for pair in modified:
            self._move_bee(pair[0], pair[1].get())
    def _calc_probability(self):
        '''Determines the probability that each bee will be chosen during the
        onlooker phase; also determines if a new best-performing bee is found
        '''
        self._logger.log('debug', 'Calculating bee probabilities')
        self.__verify_ready()
        # total of all employer scores; denominator for selection probability
        self._total_score = 0
        for employer in self._employers:
            self._total_score += employer.score
            # __update returns True when this bee beats the best-so-far
            if self.__update(employer.score, employer.values, employer.error):
                self._logger.log(
                    'info',
                    'Update to best performer -'
                    ' error: {} | score: {} | values: {}'.format(
                        employer.error,
                        employer.score,
                        employer.values
                    )
                )
        # second pass: probabilities require the finalized total score
        for employer in self._employers:
            employer.calculate_probability(self._total_score)
    def _onlooker_phase(self):
        '''Well-performing bees (chosen probabilistically based on fitness
        score) have a value merged with a second random bee
        '''
        self.__verify_ready()
        self._logger.log('debug', 'Onlooker bee phase')
        modified = []
        # draw one onlooker pick per employer, weighted by each employer's
        # selection probability (set during _calc_probability)
        for _ in self._employers:
            chosen_bee = np.random.choice(
                self._employers,
                p=[e.probability for e in self._employers]
            )
            if self._processes <= 1:
                # single-process: merge, move, and record best immediately
                new_values = self._merge_bee(chosen_bee)
                self._move_bee(chosen_bee, new_values)
                self.__update(
                    chosen_bee.score,
                    chosen_bee.values,
                    chosen_bee.error
                )
            else:
                # multiprocess: queue the merge; results collected below
                modified.append((
                    chosen_bee,
                    self._pool.apply_async(self._merge_bee, [chosen_bee])
                ))
        for pair in modified:
            self._move_bee(pair[0], pair[1].get())
            self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
    def _merge_bee(self, bee):
        '''Shifts a random value for a supplied bee in accordance with
        another random bee's value

        Args:
            bee (EmployerBee): supplied bee to merge

        Returns:
            tuple: (score of new position, values of new position, fitness
            function return value of new position)
        '''
        # choose one dimension to perturb and a distinct partner bee
        random_dimension = randint(0, len(self._value_ranges) - 1)
        second_bee = randint(0, self._num_employers - 1)
        while (bee.id == self._employers[second_bee].id):
            second_bee = randint(0, self._num_employers - 1)
        # work on a copy so the original bee is untouched until _move_bee
        new_bee = deepcopy(bee)
        new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
            new_bee.values[random_dimension],
            self._employers[second_bee].values[random_dimension],
            self._value_ranges[random_dimension]
        )
        # NOTE(review): presumably get_score(fitness) also stores the raw
        # fitness on new_bee.error — confirm; otherwise the returned error
        # is stale (deep-copied from the pre-merge bee)
        fitness_score = new_bee.get_score(self._fitness_fxn(
            new_bee.values,
            **self._args
        ))
        return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.__gen_random_values | python | def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values | Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L539-L567 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.__verify_ready | python | def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers') | Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L569-L588 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit']
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.import_settings | python | def import_settings(self, filename):
'''Import settings from a JSON file
Args:
filename (string): name of the file to import from
'''
if not os.path.isfile(filename):
self._logger.log(
'error',
'File: {} not found, continuing with default settings'.format(
filename
)
)
else:
with open(filename, 'r') as jsonFile:
data = json.load(jsonFile)
self._value_ranges = data['valueRanges']
self._best_values = data['best_values']
self._best_values = []
for index, value in enumerate(data['best_values']):
if self._value_ranges[index] == 'int':
self._best_values.append(int(value))
else:
self._best_values.append(float(value))
self.minimize = data['minimize']
self.num_employers = data['num_employers']
self._best_score = float(data['best_score'])
self.limit = data['limit'] | Import settings from a JSON file
Args:
filename (string): name of the file to import from | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L590-L618 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
'''How many concurrent processes the ABC will utililze for fitness
function evaluation via multiprocessing.Pool
'''
return self._processes
@processes.setter
def processes(self, processes):
'''Set the number of concurrent processes the ABC will utilize for
fitness function evaluation; if <= 1, single process is used
Args:
processes (int): number of concurrent processes
'''
if self._processes > 1:
self._pool.close()
self._pool.join()
self._pool = multiprocessing.Pool(processes)
else:
self._pool = None
self._logger.log('debug', 'Number of processes set to {}'.format(
processes
))
def infer_process_count(self):
'''Infers the number of CPU cores in the current system, sets the
number of concurrent processes accordingly
'''
try:
self.processes = multiprocessing.cpu_count()
except NotImplementedError:
self._logger.log(
'error',
'Could infer CPU count, setting number of processes back to 4'
)
self.processes = 4
def create_employers(self):
'''Generate employer bees. This should be called directly after the
ABC is initialized.
'''
self.__verify_ready(True)
employers = []
for i in range(self._num_employers):
employer = EmployerBee(self.__gen_random_values())
if self._processes <= 1:
employer.error = self._fitness_fxn(
employer.values, **self._args
)
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
else:
employer.error = self._pool.apply_async(
self._fitness_fxn,
[employer.values],
self._args
)
employers.append(employer)
self._employers.append(employer)
for idx, employer in enumerate(employers):
try:
employer.error = employer.error.get()
employer.score = employer.get_score()
if np.isnan(employer.score):
self._logger.log('warn', 'NaN bee score: {}, {}'.format(
employer.id, employer.score
))
self._logger.log('debug', 'Bee number {} created'.format(
i + 1
))
self.__update(employer.score, employer.values, employer.error)
except Exception as e:
raise e
self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
'''Runs a single iteration of the ABC; employer phase -> probability
calculation -> onlooker phase -> check positions
'''
self._employer_phase()
self._calc_probability()
self._onlooker_phase()
self._check_positions()
def _employer_phase(self):
'''Iterates through the employer bees and merges each with another
random bee (one value is moved in accordance with the second bee's
value); if the mutation performs better, the bee is moved to the new
position
'''
self._logger.log('debug', 'Employer bee phase')
modified = []
for bee in self._employers:
if self._processes <= 1:
new_values = self._merge_bee(bee)
self._move_bee(bee, new_values)
else:
modified.append((
bee,
self._pool.apply_async(self._merge_bee, [bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
def _calc_probability(self):
'''Determines the probability that each bee will be chosen during the
onlooker phase; also determines if a new best-performing bee is found
'''
self._logger.log('debug', 'Calculating bee probabilities')
self.__verify_ready()
self._total_score = 0
for employer in self._employers:
self._total_score += employer.score
if self.__update(employer.score, employer.values, employer.error):
self._logger.log(
'info',
'Update to best performer -'
' error: {} | score: {} | values: {}'.format(
employer.error,
employer.score,
employer.values
)
)
for employer in self._employers:
employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
'''Well-performing bees (chosen probabilistically based on fitness
score) have a value merged with a second random bee
'''
self.__verify_ready()
self._logger.log('debug', 'Onlooker bee phase')
modified = []
for _ in self._employers:
chosen_bee = np.random.choice(
self._employers,
p=[e.probability for e in self._employers]
)
if self._processes <= 1:
new_values = self._merge_bee(chosen_bee)
self._move_bee(chosen_bee, new_values)
self.__update(
chosen_bee.score,
chosen_bee.values,
chosen_bee.error
)
else:
modified.append((
chosen_bee,
self._pool.apply_async(self._merge_bee, [chosen_bee])
))
for pair in modified:
self._move_bee(pair[0], pair[1].get())
self.__update(pair[0].score, pair[0].values, pair[0].error)
def _check_positions(self):
'''Checks each bee to see if it abandons its current food source (has
not found a better one in self._limit iterations); if abandoning, it
becomes a scout and generates a new, random food source
'''
self.__verify_ready()
max_trials = 0
scout = None
for bee in self._employers:
if (bee.failed_trials >= max_trials):
max_trials = bee.failed_trials
scout = bee
if scout is not None and scout.failed_trials > self._limit:
self._logger.log(
'debug',
'Sending scout (error of {} with limit of {})'.format(
scout.error, scout.failed_trials
)
)
scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
'''Shifts a random value for a supplied bee with in accordance with
another random bee's value
Args:
bee (EmployerBee): supplied bee to merge
Returns:
tuple: (score of new position, values of new position, fitness
function return value of new position)
'''
random_dimension = randint(0, len(self._value_ranges) - 1)
second_bee = randint(0, self._num_employers - 1)
while (bee.id == self._employers[second_bee].id):
second_bee = randint(0, self._num_employers - 1)
new_bee = deepcopy(bee)
new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
new_bee.values[random_dimension],
self._employers[second_bee].values[random_dimension],
self._value_ranges[random_dimension]
)
fitness_score = new_bee.get_score(self._fitness_fxn(
new_bee.values,
**self._args
))
return (fitness_score, new_bee.values, new_bee.error)
def _move_bee(self, bee, new_values):
'''Moves a bee to a new position if new fitness score is better than
the bee's current fitness score
Args:
bee (EmployerBee): bee to move
new_values (tuple): (new score, new values, new fitness function
return value)
'''
score = np.nan_to_num(new_values[0])
if bee.score > score:
bee.failed_trials += 1
else:
bee.values = new_values[1]
bee.score = score
bee.error = new_values[2]
bee.failed_trials = 0
self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
'''Update the best score and values if the given score is better than
the current best score
Args:
score (float): new score to evaluate
values (list): new value ranges to evaluate
error (float): new fitness function return value to evaluate
Returns:
bool: True if new score is better, False otherwise
'''
if self._minimize:
if self._best_score is None or score > self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
elif not self._minimize:
if self._best_score is None or score < self._best_score:
self._best_score = score
self._best_values = values.copy()
self._best_error = error
self._logger.log(
'debug',
'New best food source memorized: {}'.format(
self._best_error
)
)
return True
return False
def __gen_random_values(self):
'''Generate random values based on supplied value ranges
Returns:
list: random values, one per tunable variable
'''
values = []
if self._value_ranges is None:
self._logger.log(
'crit',
'Must set the type/range of possible values'
)
raise RuntimeError("Must set the type/range of possible values")
else:
for t in self._value_ranges:
if t[0] == 'int':
values.append(randint(t[1][0], t[1][1]))
elif t[0] == 'float':
values.append(np.random.uniform(t[1][0], t[1][1]))
else:
self._logger.log(
'crit',
'Value type must be either an `int` or a `float`'
)
raise RuntimeError(
'Value type must be either an `int` or a `float`'
)
return values
def __verify_ready(self, creating=False):
'''Some cleanup, ensures that everything is set up properly to avoid
random errors during execution
Args:
creating (bool): True if currently creating employer bees, False
for checking all other operations
'''
if len(self._value_ranges) == 0:
self._logger.log(
'crit',
'Attribute value_ranges must have at least one value'
)
raise RuntimeWarning(
'Attribute value_ranges must have at least one value'
)
if len(self._employers) == 0 and creating is False:
self._logger.log('crit', 'Need to create employers')
raise RuntimeWarning('Need to create employers')
def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
def __getstate__(self):
'''Returns appropriate dictionary for correctly pickling the ABC
object in case of multiprocssing
'''
state = self.__dict__.copy()
del state['_logger']
del state['_pool']
return state
|
ECRL/ecabc | ecabc/abc.py | ABC.save_settings | python | def save_settings(self, filename):
'''Save settings to a JSON file
Arge:
filename (string): name of the file to save to
'''
data = dict()
data['valueRanges'] = self._value_ranges
data['best_values'] = [str(value) for value in self._best_values]
data['minimize'] = self._minimize
data['num_employers'] = self._num_employers
data['best_score'] = str(self._best_score)
data['limit'] = self._limit
data['best_error'] = self._best_error
with open(filename, 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True) | Save settings to a JSON file
Arge:
filename (string): name of the file to save to | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L620-L636 | null | class ABC:
def __init__(self, fitness_fxn, num_employers=50, value_ranges=[],
print_level='info', file_logging='disable', args={},
processes=4):
'''ABC object: manages employer and onlooker bees to optimize a set
of generic values for a user-supplied fitness function. Handles data
transfer and manipulation between bees.
Args:
fitness_fxn (callable): fitness function supplied by the user;
should accept a tuple of tunable ints/floats, and optionally
additional user-defined arguments (kwargs)
num_employers (int): number of employer bees the colony utilizes
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
print_level (string): console logging level: "debug", "info",
"warn", "crit", "error", "disable"
file_logging (string): file logging level: "debug", "info", "warn",
"crit", "error", "disable"
args (dict): additional user-defined arguments to pass to fitness
function; these are not tuned
processes (int): number of concurrent processes the algorithm will
utililze via multiprocessing.Pool
Return:
None
'''
self._logger = ColorLogger(
stream_level=print_level,
file_level=file_logging
)
self._value_ranges = value_ranges
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._best_values = []
self._best_score = None
self._best_error = None
self._minimize = True
self._fitness_fxn = fitness_fxn
self.__onlooker = OnlookerBee()
self._limit = num_employers*len(value_ranges)
self._employers = []
self._args = args
self._total_score = 0
self._cycle_number = 0
self._processes = processes
if self._processes > 1:
self._pool = multiprocessing.Pool(self._processes)
else:
self._pool = None
if not callable(self._fitness_fxn):
raise ValueError('submitted *fitness_fxn* is not callable')
def add_argument(self, arg_name, arg_value):
'''Add an additional argument to be passed to the fitness function
via additional arguments dictionary; this argument/value is not tuned
Args:
arg_name (string): name/dictionary key of argument
arg_value (any): dictionary value of argument
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding an argument after the employers have been created'
)
if self._args is None:
self._args = {}
self._args[arg_name] = arg_value
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
)
@property
def args(self):
'''Arguments that will be passed to the fitness function at runtime'''
return self._args
@args.setter
def args(self, args):
'''Set additional arguments to be passed to the fitness function
Args:
args (dict): additional arguments
'''
self._args = args
self._logger.log('debug', 'Args set to {}'.format(args))
@property
def minimize(self):
'''If True, minimizes fitness function return value rather than
derived score
'''
return self._minimize
@minimize.setter
def minimize(self, minimize):
'''Configures the ABC to minimize fitness function return value or
derived score
Args:
minimize (bool): if True, minimizes fitness function return value;
if False, minimizes derived score
'''
self._minimize = minimize
self._logger.log('debug', 'Minimize set to {}'.format(minimize))
@property
def num_employers(self):
'''Number of employer bees present in the ABC'''
return self._num_employers
@num_employers.setter
def num_employers(self, num_employers):
'''Sets the number of employer bees; at least two are required
Args:
num_employers (int): number of employer bees
'''
if num_employers < 2:
self._logger.log(
'warn',
'Two employers are needed: setting to two'
)
num_employers = 2
self._num_employers = num_employers
self._logger.log('debug', 'Number of employers set to {}'.format(
num_employers
))
self._limit = num_employers * len(self._value_ranges)
self._logger.log('debug', 'Limit set to {}'.format(self._limit))
@property
def value_ranges(self):
'''Value types, min/max values for tunable parameters'''
return self._value_ranges
@value_ranges.setter
def value_ranges(self, value_ranges):
'''Set the types, min/max values for tunable parameters
Args:
value_ranges (list): each element defines a tunable variable in
the form "(type ('int' or 'float'), (min_val, max_val))";
initial, random values for each bee will between "min_val" and
"max_val"
'''
self._value_ranges = value_ranges
self._logger.log('debug', 'Value ranges set to {}'.format(
value_ranges
))
@property
def best_performer(self):
'''Return the best performing values: (score, values, error)'''
return (self._best_score, self._best_values, self._best_error)
@property
def best_employers(self):
'''Return a list of best performing employer bees'''
return self.__onlooker.best_employers
@property
def limit(self):
'''Maximum number of cycles a bee is allowed to stay at its current
food source before abandoning it (moving to a randomly generated one)
'''
return self._limit
@limit.setter
def limit(self, limit):
'''Set the maximum number of cycles a bee is allowed to stay at its
current food source before abandoning it (moving to a randomly
generated one); by default, this is set to the number of employers
times the number of tunable values
Args:
limit (int): maximum number of cycles
'''
self._limit = limit
@property
def processes(self):
    """Concurrent worker processes used for fitness evaluation via
    multiprocessing.Pool.
    """
    return self._processes
@processes.setter
def processes(self, processes):
    """Set the number of concurrent worker processes.

    Any existing pool is closed and joined first.  A new pool is only
    created when more than one process is requested; otherwise fitness
    evaluation runs serially.

    Args:
        processes (int): number of concurrent processes; <= 1 disables
            multiprocessing
    """
    # Tear down the previous pool, if any, before replacing it.
    if getattr(self, '_pool', None) is not None:
        self._pool.close()
        self._pool.join()
    # Bug fix: record the new count and key pool creation off the NEW
    # value; the original tested the stale self._processes and never
    # updated it, so the getter reported the old count forever and the
    # pool decision lagged one assignment behind.
    self._processes = processes
    if processes > 1:
        self._pool = multiprocessing.Pool(processes)
    else:
        self._pool = None
    self._logger.log('debug', 'Number of processes set to {}'.format(
        processes
    ))
def infer_process_count(self):
    """Infer the number of CPU cores on this system and set the number
    of concurrent processes accordingly; falls back to 4 when the
    count cannot be determined.
    """
    try:
        self.processes = multiprocessing.cpu_count()
    except NotImplementedError:
        # Typo fix: message previously read 'Could infer CPU count'.
        self._logger.log(
            'error',
            'Could not infer CPU count, setting number of processes back to 4'
        )
        self.processes = 4
def create_employers(self):
    """Generate the employer bee population.

    Should be called directly after the ABC is initialized.  With a
    multiprocessing pool, fitness evaluations are dispatched
    asynchronously and the results are collected in a second pass.
    """
    self.__verify_ready(True)
    employers = []
    for i in range(self._num_employers):
        employer = EmployerBee(self.__gen_random_values())
        if self._processes <= 1:
            # Serial path: evaluate the fitness function inline.
            employer.error = self._fitness_fxn(
                employer.values, **self._args
            )
            employer.score = employer.get_score()
            if np.isnan(employer.score):
                self._logger.log('warn', 'NaN bee score: {}, {}'.format(
                    employer.id, employer.score
                ))
            self._logger.log('debug', 'Bee number {} created'.format(
                i + 1
            ))
            self.__update(employer.score, employer.values, employer.error)
        else:
            # Parallel path: queue the evaluation; gathered below.
            employer.error = self._pool.apply_async(
                self._fitness_fxn,
                [employer.values],
                self._args
            )
            employers.append(employer)
        self._employers.append(employer)
    for idx, employer in enumerate(employers):
        try:
            employer.error = employer.error.get()
            employer.score = employer.get_score()
            if np.isnan(employer.score):
                self._logger.log('warn', 'NaN bee score: {}, {}'.format(
                    employer.id, employer.score
                ))
            # Bug fix: log THIS loop's index (idx); the original reused
            # the stale counter `i` left over from the creation loop.
            self._logger.log('debug', 'Bee number {} created'.format(
                idx + 1
            ))
            self.__update(employer.score, employer.values, employer.error)
        except Exception as e:
            raise e
    self._logger.log('debug', 'Employer creation complete')
def run_iteration(self):
    """Run one full ABC cycle: employer phase, probability calculation,
    onlooker phase, then position (abandonment) checks.
    """
    for phase in (
        self._employer_phase,
        self._calc_probability,
        self._onlooker_phase,
        self._check_positions,
    ):
        phase()
def _employer_phase(self):
    """Merge every employer bee with a random partner; a bee is moved
    when the mutated position scores at least as well as its current one.
    """
    self._logger.log('debug', 'Employer bee phase')
    if self._processes <= 1:
        for bee in self._employers:
            self._move_bee(bee, self._merge_bee(bee))
    else:
        pending = [
            (bee, self._pool.apply_async(self._merge_bee, [bee]))
            for bee in self._employers
        ]
        for bee, result in pending:
            self._move_bee(bee, result.get())
def _calc_probability(self):
    """Compute each bee's selection probability for the onlooker phase
    and memorize any new best performer along the way.
    """
    self._logger.log('debug', 'Calculating bee probabilities')
    self.__verify_ready()
    self._total_score = sum(e.score for e in self._employers)
    for employer in self._employers:
        if self.__update(employer.score, employer.values, employer.error):
            self._logger.log(
                'info',
                'Update to best performer -'
                ' error: {} | score: {} | values: {}'.format(
                    employer.error,
                    employer.score,
                    employer.values
                )
            )
    for employer in self._employers:
        employer.calculate_probability(self._total_score)
def _onlooker_phase(self):
    """Pick well-performing bees (probabilistically, by fitness) and
    merge each pick with a random partner, moving it when better.
    """
    self.__verify_ready()
    self._logger.log('debug', 'Onlooker bee phase')
    pending = []
    for _ in self._employers:
        chosen = np.random.choice(
            self._employers,
            p=[e.probability for e in self._employers]
        )
        if self._processes <= 1:
            self._move_bee(chosen, self._merge_bee(chosen))
            self.__update(chosen.score, chosen.values, chosen.error)
        else:
            pending.append(
                (chosen, self._pool.apply_async(self._merge_bee, [chosen]))
            )
    for chosen, result in pending:
        self._move_bee(chosen, result.get())
        self.__update(chosen.score, chosen.values, chosen.error)
def _check_positions(self):
    """Find the bee with the most failed trials and, if it exceeds the
    abandonment limit, turn it into a scout at a random new position.
    """
    self.__verify_ready()
    max_trials = 0
    scout = None
    for bee in self._employers:
        if bee.failed_trials >= max_trials:
            max_trials = bee.failed_trials
            scout = bee
    if scout is not None and scout.failed_trials > self._limit:
        # Bug fix: the original message printed scout.error and
        # scout.failed_trials but labeled them "error" and "limit";
        # report the failed-trial count against the actual limit.
        self._logger.log(
            'debug',
            'Sending scout (failed trials of {} with limit of {})'.format(
                scout.failed_trials, self._limit
            )
        )
        scout.values = self.__gen_random_values()
def _merge_bee(self, bee):
    """Mutate one random dimension of *bee* toward another random
    employer's value and evaluate the resulting position.

    Args:
        bee (EmployerBee): bee to merge

    Returns:
        tuple: (score, values, fitness-function return) for the new
        position
    """
    dimension = randint(0, len(self._value_ranges) - 1)
    partner_idx = randint(0, self._num_employers - 1)
    # Re-draw until the partner is a different bee.
    while bee.id == self._employers[partner_idx].id:
        partner_idx = randint(0, self._num_employers - 1)
    candidate = deepcopy(bee)
    candidate.values[dimension] = self.__onlooker.calculate_positions(
        candidate.values[dimension],
        self._employers[partner_idx].values[dimension],
        self._value_ranges[dimension]
    )
    score = candidate.get_score(self._fitness_fxn(
        candidate.values,
        **self._args
    ))
    return (score, candidate.values, candidate.error)
def _move_bee(self, bee, new_values):
    """Relocate *bee* to the candidate position when it scores at least
    as well; otherwise record a failed trial.

    Args:
        bee (EmployerBee): bee to move
        new_values (tuple): (score, values, fitness-function return)
    """
    candidate_score = np.nan_to_num(new_values[0])
    if bee.score > candidate_score:
        bee.failed_trials += 1
        return
    bee.score = candidate_score
    bee.values = new_values[1]
    bee.error = new_values[2]
    bee.failed_trials = 0
    self._logger.log('debug', 'Bee assigned to new merged position')
def __update(self, score, values, error):
    """Memorize (score, values, error) when it beats the current best.

    For minimization a higher fitness score (i.e. lower error) wins;
    for maximization a lower fitness score (higher raw error) wins.

    Returns:
        bool: True when the best performer was replaced
    """
    if self._best_score is None:
        improved = True
    elif self._minimize:
        improved = score > self._best_score
    else:
        improved = score < self._best_score
    if not improved:
        return False
    self._best_score = score
    self._best_values = values.copy()
    self._best_error = error
    self._logger.log(
        'debug',
        'New best food source memorized: {}'.format(self._best_error)
    )
    return True
def __gen_random_values(self):
    """Draw one random value per tunable variable from its range.

    Returns:
        list: random values, one per tunable variable

    Raises:
        RuntimeError: if value ranges are unset or a type is unknown
    """
    if self._value_ranges is None:
        self._logger.log(
            'crit',
            'Must set the type/range of possible values'
        )
        raise RuntimeError("Must set the type/range of possible values")
    values = []
    for entry in self._value_ranges:
        vtype, bounds = entry[0], entry[1]
        if vtype == 'int':
            values.append(randint(bounds[0], bounds[1]))
        elif vtype == 'float':
            values.append(np.random.uniform(bounds[0], bounds[1]))
        else:
            self._logger.log(
                'crit',
                'Value type must be either an `int` or a `float`'
            )
            raise RuntimeError(
                'Value type must be either an `int` or a `float`'
            )
    return values
def __verify_ready(self, creating=False):
    """Sanity-check configuration before running an ABC phase.

    Args:
        creating (bool): True while employer bees are being created,
            which relaxes the non-empty-population requirement
    """
    if len(self._value_ranges) == 0:
        self._logger.log(
            'crit',
            'Attribute value_ranges must have at least one value'
        )
        raise RuntimeWarning(
            'Attribute value_ranges must have at least one value'
        )
    if len(self._employers) == 0 and creating is False:
        self._logger.log('crit', 'Need to create employers')
        raise RuntimeWarning('Need to create employers')
def import_settings(self, filename):
    """Import colony settings from a JSON file.

    Missing files are logged and ignored, keeping default settings.

    Args:
        filename (string): name of the file to import from
    """
    if not os.path.isfile(filename):
        self._logger.log(
            'error',
            'File: {} not found, continuing with default settings'.format(
                filename
            )
        )
        return
    with open(filename, 'r') as jsonFile:
        data = json.load(jsonFile)
    self._value_ranges = data['valueRanges']
    # Bug fix: the original compared the whole (type, bounds) entry to
    # 'int' (always False, so every value was coerced to float) and
    # performed a dead assignment of the raw list beforehand.
    self._best_values = []
    for index, value in enumerate(data['best_values']):
        if self._value_ranges[index][0] == 'int':
            self._best_values.append(int(value))
        else:
            self._best_values.append(float(value))
    self.minimize = data['minimize']
    self.num_employers = data['num_employers']
    self._best_score = float(data['best_score'])
    self.limit = data['limit']
def __getstate__(self):
    """Return a picklable dict of the ABC state for multiprocessing,
    dropping the unpicklable logger and process pool.
    """
    state = dict(self.__dict__)
    state.pop('_logger')
    state.pop('_pool')
    return state
|
def get_score(self, error=None):
    """Derive a fitness score from a fitness-function return value.

    Args:
        error (float): value returned by the fitness function; when
            given, it is stored on the bee before scoring

    Returns:
        float: 1 / (error + 1) for non-negative error, otherwise
        1 + |error|
    """
    if error is not None:
        self.error = error
    if self.error >= 0:
        return 1 / (self.error + 1)
    return 1 + abs(self.error)
function
Args:
error (float): value returned by the fitness function
Returns:
float: derived fitness score | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/bees.py#L40-L56 | null | class EmployerBee:
def __init__(self, values=None):
    """EmployerBee object: stores individual employer bee information
    such as ID, position (current values), fitness function return
    value, fitness score, and probability of being chosen during the
    onlooker phase.

    Args:
        values (list): position/current values for the bee; defaults
            to an empty list
    """
    # Bug fix: the original used a mutable default argument
    # (values=[]), which every default-constructed bee shared and
    # could mutate in place.
    self.values = [] if values is None else values
    self.score = None
    self.probability = 0
    self.failed_trials = 0
    self.id = uuid.uuid4()
    self.error = None
def calculate_probability(self, fitness_total):
    """Set this bee's chance of selection in the onlooker phase.

    Args:
        fitness_total (float): sum of fitness scores across all bees
    """
    self.probability = self.score / fitness_total
|
def calculate_positions(self, first_bee_val, second_bee_val, value_range):
    """Blend two bee values into a new, range-clamped position.

    Args:
        first_bee_val (int or float): value from the first bee
        second_bee_val (int or float): value from the second bee
        value_range (tuple): "(value type, (min_val, max_val))" for
            the given value

    Returns:
        int or float: new value
    """
    shifted = first_bee_val + np.random.uniform(-1, 1) \
        * (first_bee_val - second_bee_val)
    if value_range[0] == 'int':
        shifted = int(shifted)
    upper, lower = value_range[1][1], value_range[1][0]
    if shifted > upper:
        shifted = upper
    if shifted < lower:
        shifted = lower
    return shifted
Args:
first_bee_val (int or float): value from the first bee
second_bee_val (int or float): value from the second bee
value_ranges (tuple): "(value type, (min_val, max_val))" for the
given value
Returns:
int or float: new value | train | https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/bees.py#L78-L100 | null | class OnlookerBee:
def __init__(self):
'''OnlookerBee object: stores best-performing bees, function for
calculating merged position of two bees
'''
self.best_employers = []
|
def main(self, *args, **kwargs):
    """Actor main loop: wait forever for messages, dispatching each to
    the handler chosen by get_handler; ``stop`` runs on the way out.
    """
    self.start(*args, **kwargs)
    try:
        while True:
            body, message = yield self.receive()
            self.get_handler(message)(body, message)
    finally:
        self.stop(*args, **kwargs)
"def get_handler(self, message):\n if message.properties.get('reply_to'):\n handler = self.handle_call\n else:\n handler = self.handle_cast\n return handler()\n",
"def start(self, *args, **kwargs):\n \"\"\"Override to be notified when the server starts.\"\"\"\n pass\n",
class Server(Actor):
    """An actor which responds to the call protocol by looking for the
    specified method and calling it.

    Also, Server provides start and stop methods which can be
    overridden to customize setup.
    """

    def get_handler(self, message):
        """Return the bound handler for *message*: ``handle_call``
        when a reply is expected, ``handle_cast`` otherwise.
        """
        if message.properties.get('reply_to'):
            handler = self.handle_call
        else:
            handler = self.handle_cast
        # Bug fix: return the handler itself.  The original returned
        # handler(), which invoked it with no arguments and raised
        # TypeError, since main() calls the result as
        # handler(body, message).
        return handler

    def start(self, *args, **kwargs):
        """Override to be notified when the server starts."""
        pass

    def stop(self, *args, **kwargs):
        """Override to be notified when the server stops."""
        pass
|
def send(self, method, args={}, to=None, nowait=False, **kwargs):
    """Call *method* on the agent listening to routing key *to*.

    See call_or_cast for the full list of supported arguments.  When
    *nowait* is false (the default) this blocks and returns the reply
    value.
    """
    routing_key = self.routing_key if to is None else to
    result = self.call_or_cast(method, args, routing_key=routing_key,
                               nowait=nowait, **kwargs)
    if not nowait:
        return result.get()
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
| train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L259-L275 | [
"def call_or_cast(self, method, args={}, nowait=False, **kwargs):\n \"\"\"Apply remote `method` asynchronously or synchronously depending\n on the value of `nowait`.\n\n :param method: The name of the remote method to perform.\n :param args: Dictionary of arguments for the method.\n :keyword nowait: ... | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
"""Set up identity, exchanges/queues, logging and actor state.

All arguments are optional: `id` defaults to a fresh uuid and `name`
to the class name.  NOTE(review): extra **kwargs are accepted but
silently ignored — confirm that is intentional.
"""
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
# An explicit group exchange overrides the default scatter exchange.
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
# Include a shortened actor id when running under an agent.
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
"""Bind this actor's inbox (queue or exchange, chosen by inbox_type)
to an external source exchange, declaring the source first.
"""
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
"""Undo a binding previously created by _add_binding."""
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
"""Return the bind callable for *type*: the direct queue's bind_to
for DIRECT, otherwise the matching exchange's bind_to.

Raises:
    ValueError: if *type* is not one of this actor's supported types.
"""
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
"""Return the unbind callable for *type*.

NOTE(review): unlike get_binder, unsupported types are not rejected
here — confirm whether a ValueError guard is intended.
"""
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
"""Ask the remote actor (via a direct call) to bind its inbox to
*source*; the actual binding happens in _add_binding on its side.
"""
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
"""Ask the remote actor (via a direct call) to unbind its inbox from
*source*; the actual unbinding happens in _remove_binding.
"""
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
    """Hook for actor-specific initialization; the default does
    nothing.
    """
    pass
def construct_state(self):
    """Instantiate and return this actor's state class."""
    return self.state()
def on_agent_ready(self):
"""Hook called when the owning agent becomes ready; default no-op."""
pass
def contribute_to_object(self, obj, map):
"""Copy each map entry onto *obj* and return it.

NOTE(review): setattr_default presumably skips attributes that obj
already defines — confirm against cell's utils.
"""
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
"""Give the state object access to actor internals.

If the state class defines contribute_to_state, delegate to it;
otherwise inject a default attribute set (actor, agent, connection,
log, error types and binding helpers).
"""
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.

See :meth:`call_or_cast` for a full list of supported
arguments.

If the keyword argument `nowait` is false (default) it
will block and return the reply.

NOTE(review): unlike send(), this returns the pending result object
without calling .get() — confirm whether blocking was intended.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.

if nowait is False, returns generator to iterate over the results;
when nowait is true nothing is returned.

:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.

**Examples**

``scatter`` is a generator (if nowait is False)::

>>> res = scatter()
>>> res.next() # one event consumed, or timed out.

>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass

See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
    """Apply remote *method* synchronously or asynchronously.

    When *nowait* is true the message is cast (fire-and-forget and
    nothing is returned); otherwise it is sent as a call and the
    pending result is returned.  All keyword arguments are forwarded;
    see ``call``/``cast`` for the supported options (retry,
    retry_policy, timeout, limit, callback, plus arbitrary message
    properties for kombu's Producer.publish).
    """
    dispatch = self.cast if nowait else self.call
    return dispatch(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' of type fanout, used to
broadcast commands to every agent of this actor type; auto-deleted
when unused.
"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to
fanout. The exchange is used for sending in a round-robin style;
auto-deleted when unused.
"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct, used for
messages addressed to a specific actor; auto-deleted when unused.
"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
    """One queue (built by its factory) per supported calling type."""
    return [self.type_to_queue[each]() for each in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance; named after the
actor id and auto-deleted when unused.
"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type; one queue per actor instance so each
instance sees every broadcast.
"""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type; the queue name is shared so deliveries
rotate among instances.
"""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
"""Auto-delete queue named after *ticket* on the reply exchange;
expires after reply_expires seconds (x-expires is in milliseconds).
"""
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor,
consuming all of its queues with on_message as the callback;
no_ack defaults to this actor's no_ack attribute.
"""
kwargs.setdefault('no_ack', self.no_ack)
# Note: the Consumer invoked below is the module-level kombu
# Consumer, not this (shadowing) method.
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
    """Fire-and-forget *method* on this actor's outbox exchange."""
    return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
         retry_policy=None, type=None, exchange=None, **props):
    """Send a message to the actor, discarding any reply.

    Args:
        method: remote method name placed in the message body.
        args: dictionary of arguments for the method.
        declare: extra entities to declare alongside the exchange.
        retry: retry publishing on connection failure (defaults to
            self.retry).
        retry_policy: overrides merged into self.retry_policy.
        type: one of self.types; defaults to direct addressing.
        **props: additional message properties for Producer.publish.

    Raises:
        ValueError: if *type* is not supported by this actor.
    """
    retry = self.retry if retry is None else retry
    body = {'class': self.name, 'method': method, 'args': args}
    _retry_policy = self.retry_policy
    if retry_policy:  # merge default and custom policies.
        _retry_policy = dict(_retry_policy, **retry_policy)
    if type and type not in self.types:
        raise ValueError('Unsupported type: {0}'.format(type))
    elif not type:
        type = ACTOR_TYPE.DIRECT
    props.setdefault('routing_key', self.routing_key)
    props.setdefault('serializer', self.serializer)
    exchange = exchange or self.type_to_exchange[type]()
    declare = (maybe_list(declare) or []) + [exchange]
    with producers[self._connection].acquire(block=True) as producer:
        # Bug fix: publish with the MERGED policy; the original passed
        # the raw retry_policy argument and left _retry_policy unused,
        # so the class defaults and any overrides were ignored.
        return producer.publish(body, exchange=exchange, declare=declare,
                                retry=retry, retry_policy=_retry_policy,
                                **props)
def call(self, method, args={}, retry=False, retry_policy=None,
         ticket=None, **props):
    """Send *method* to this actor and return an AsyncResult for the
    reply, declaring a per-ticket reply queue along the way.

    NOTE(review): retry/retry_policy are accepted but not forwarded to
    cast() — confirm whether that is intentional.
    """
    ticket = ticket or uuid()
    reply_queue = self.get_reply_queue(ticket)
    self.cast(method, args, declare=[reply_queue], reply_to=ticket, **props)
    return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
    """Dispatch a cast message; no reply is produced."""
    self._DISPATCH(body)
def handle_call(self, body, message):
    """Dispatch a call message and reply with the result, unless the
    handler raised Next to delegate to another agent.
    """
    try:
        result = self._DISPATCH(body,
                                ticket=message.properties['reply_to'])
    except self.Next:
        # Deliberately silent: another agent will handle the call.
        pass
    else:
        self.reply(message, result)
def reply(self, req, body, **props):
"""Publish *body* back to the requester's reply queue, mirroring the
request's serializer and correlation id.
"""
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
# Reply in the same serialization format the request used.
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
    """Kombu consumer callback: forward the message to the agent."""
    self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.

This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.

Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.

Messages are acked only after the handler succeeds, except that
SystemExit/KeyboardInterrupt also ack before propagating.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
"""Iterate over replies arriving on the ticket's reply queue.

Defaults: timeout falls back to default_timeout; with an agent and
no explicit limit, the agent's scatter limit is used; without a
limit, ignore_timeout defaults to False (timeouts raise).
"""
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
    """Resolve *name* to a callable on self.state.

    An empty/missing name resolves to default_receive.  Unknown,
    non-callable, or underscore-prefixed names raise KeyError so that
    special methods can never be dispatched remotely.
    """
    try:
        if not name:
            method = self.default_receive
        else:
            method = getattr(self.state, name)
    except AttributeError:
        raise KeyError(name)
    # Bug fix: guard the startswith check — the original evaluated
    # name.startswith('_') even when name was None, raising
    # AttributeError instead of the documented KeyError.
    if not callable(method) or (name and name.startswith('_')):
        raise KeyError(name)
    return method
def default_receive(self, msg_body):
    """Fallback handler when no method name is given; subclasses
    override this.  The base implementation ignores the message.
    """
    pass
def _DISPATCH(self, body, ticket=None):
    """Dispatch message to the appropriate method in :attr:`state`,
    handle possible exceptions, and return a reply-ready mapping.

    Method names starting with underscore are never dispatched.  On
    success the reply is {'ok': return_value, **default_fields}; on
    failure it is {'nok': [repr exc, traceback], **default_fields}.
    SystemExit and KeyboardInterrupt are not handled and propagate,
    as does Next (to allow delegation).

    :raises KeyError: if the method specified is unknown or is a
        special method (name starting with underscore).
    """
    if ticket:
        sticket = '%s' % (shortuuid(ticket), )
    else:
        # Consistency fix: the class attribute is named ticket_count;
        # the original referenced self.ticket_counter, which does not
        # exist and raised AttributeError for unticketed dispatches.
        ticket = sticket = str(next(self.ticket_count))
    try:
        method, args = itemgetter('method', 'args')(body)
        self.log.info('#%s --> %s',
                      sticket, self._reprcall(method, args))
        act = self.lookup_action(method)
        r = {'ok': act(args or {})}
        self.log.info('#%s <-- %s', sticket, reprkwargs(r))
    except self.Next:
        raise
    except Exception as exc:
        einfo = sys.exc_info()
        r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
        self.log.error('#%s <-- nok=%r', sticket, exc)
    return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
    """Render an exc_info triple as a formatted traceback string."""
    return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
"""Readable 'ActorName.method(**args)' representation for logging."""
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
    """New instance of this actor class bound to *connection* (and an
    optional agent), sharing id, name and exchange.
    """
    return self.__class__(connection, self.id,
                          self.name, self.exchange, agent=agent)
def is_bound(self):
    """True when the actor holds a connection."""
    return self.connection is not None
def __copy__(self):
    """Shallow copy built from the pickle-style __reduce__ recipe."""
    cls, args = self.__reduce__()
    return cls(*args)
def __reduce__(self):
    """Pickle recipe: rebuild via class(connection, id, name, exchange)."""
    return (self.__class__, (self.connection, self.id,
                             self.name, self.exchange))
@property
def outbox(self):
    """Exchange used for emitted (forwarded) messages."""
    return self.outbox_exchange
def _inbox_rr(self):
    """Round-robin exchange, created on first use and cached."""
    if not self._rr_exchange:
        self._rr_exchange = self.get_rr_exchange()
    return self._rr_exchange
@property
def inbox_rr(self):
    """Exchange receiving round-robin commands for this actor type."""
    return self._inbox_rr()
def _inbox_direct(self):
    """Direct inbox is simply this actor's own exchange."""
    return self.exchange
@property
def inbox_direct(self):
    """Exchange receiving messages addressed directly to this actor."""
    return self._inbox_direct()
def _inbox_scatter(self):
    """Scatter (broadcast) exchange, created on first use and cached."""
    if not self._scatter_exchange:
        self._scatter_exchange = self.get_scatter_exchange()
    return self._scatter_exchange
@property
def inbox_scatter(self):
    """Exchange receiving broadcast commands for this actor type."""
    return self._inbox_scatter()
@property
def _connection(self):
    """Bound connection; raises NotBoundError when the actor is
    unbound.
    """
    if not self.is_bound():
        raise self.NotBoundError('Actor is not bound to any connection.')
    return self.connection
@cached_property
def _default_fields(self):
"""BUILTIN_FIELDS merged with per-class default_fields (computed
once and cached on the instance).
"""
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
    """Explicit default routing key when configured, else the actor
    id.
    """
    return self.default_routing_key or self.id
|
celery/cell | cell/actors.py | Actor.throw | python | def throw(self, method, args={}, nowait=False, **kwargs):
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r | Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L277-L290 | [
"def call_or_cast(self, method, args={}, nowait=False, **kwargs):\n \"\"\"Apply remote `method` asynchronously or synchronously depending\n on the value of `nowait`.\n\n :param method: The name of the remote method to perform.\n :param args: Dictionary of arguments for the method.\n :keyword nowait: ... | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/actors.py | Actor.scatter | python | def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs) | Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L292-L320 | [
"def call_or_cast(self, method, args={}, nowait=False, **kwargs):\n \"\"\"Apply remote `method` asynchronously or synchronously depending\n on the value of `nowait`.\n\n :param method: The name of the remote method to perform.\n :param args: Dictionary of arguments for the method.\n :keyword nowait: ... | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
    # Consume replies published for ``ticket`` from its reply queue,
    # filling in default timeout/limit/ignore_timeout behaviour.
    kwargs.setdefault('timeout', self.default_timeout)
    if 'limit' not in kwargs and self.agent:
        kwargs['limit'] = self.agent.get_default_scatter_limit()
    # Without a reply limit we must eventually time out rather than
    # wait forever.
    if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
        kwargs.setdefault('ignore_timeout', False)
    return collect_replies(conn, channel, self.get_reply_queue(ticket),
                           *args, **kwargs)

def lookup_action(self, name):
    """Resolve *name* to a callable on :attr:`state`.

    An empty name resolves to :meth:`default_receive`.

    :raises KeyError: if the method is unknown, not callable,
        or private (name starting with underscore).
    """
    try:
        if not name:
            method = self.default_receive
        else:
            method = getattr(self.state, name)
    except AttributeError:
        raise KeyError(name)
    # NOTE(review): if ``name`` is None (default_receive path) the
    # ``startswith`` call below raises AttributeError rather than
    # KeyError -- confirm callers never pass None, or guard it.
    if not callable(method) or name.startswith('_'):
        raise KeyError(method)
    return method

def default_receive(self, msg_body):
    """Fallback handler when a message names no method.

    Override in the derived classes.
    """
    pass
def _DISPATCH(self, body, ticket=None):
    """Dispatch message to the appropriate method
    in :attr:`state`, handle possible exceptions,
    and return a response suitable to be used in a reply.

    To protect from calling special methods it does not dispatch
    method names starting with underscore (``_``).

    This returns the return value or exception error
    with defaults fields in a suitable format to be used
    as a reply.

    The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
    will not be handled, and will propagate.

    In the case of a successful call the return value will
    be::

        {'ok': return_value, **default_fields}

    If the method raised an exception the return value
    will be::

        {'nok': [repr exc, str traceback], **default_fields}

    :raises KeyError: if the method specified is unknown
        or is a special method (name starting with underscore).
    """
    if ticket:
        sticket = '%s' % (shortuuid(ticket), )
    else:
        # NOTE(review): the class attribute is ``ticket_count`` --
        # ``ticket_counter`` is not defined anywhere in this class and
        # this line would raise AttributeError when no ticket is given.
        ticket = sticket = str(next(self.ticket_counter))
    try:
        method, args = itemgetter('method', 'args')(body)
        self.log.info('#%s --> %s',
                      sticket, self._reprcall(method, args))
        act = self.lookup_action(method)
        r = {'ok': act(args or {})}
        self.log.info('#%s <-- %s', sticket, reprkwargs(r))
    except self.Next:
        # Delegate to another agent; the caller must not reply.
        raise
    except Exception as exc:
        einfo = sys.exc_info()
        r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
        self.log.error('#%s <-- nok=%r', sticket, exc)
    return dict(self._default_fields, **r)

def _get_traceback(self, exc_info):
    """Format an ``exc_info`` triple as one traceback string."""
    return ''.join(traceback.format_exception(*exc_info))

def _reprcall(self, method, args):
    # Human-readable ``ActorName.method(...)`` used in log lines.
    return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
    """Return a new copy of this actor bound to *connection*."""
    return self.__class__(connection, self.id,
                          self.name, self.exchange, agent=agent)

def is_bound(self):
    """Return True if the actor has a broker connection."""
    return self.connection is not None

def __copy__(self):
    cls, args = self.__reduce__()
    return cls(*args)

def __reduce__(self):
    # Pickle/copy support; the bound agent is deliberately excluded
    # from the reduced state.
    return (self.__class__, (self.connection, self.id,
                             self.name, self.exchange))

@property
def outbox(self):
    # Alias for the outbox (topic) exchange.
    return self.outbox_exchange

def _inbox_rr(self):
    # Lazily create and cache the round-robin fanout exchange.
    if not self._rr_exchange:
        self._rr_exchange = self.get_rr_exchange()
    return self._rr_exchange

@property
def inbox_rr(self):
    return self._inbox_rr()

def _inbox_direct(self):
    return self.exchange

@property
def inbox_direct(self):
    return self._inbox_direct()

def _inbox_scatter(self):
    # Lazily create and cache the scatter (fanout) exchange.
    if not self._scatter_exchange:
        self._scatter_exchange = self.get_scatter_exchange()
    return self._scatter_exchange

@property
def inbox_scatter(self):
    return self._inbox_scatter()

@property
def _connection(self):
    # Connection accessor that fails loudly when unbound.
    if not self.is_bound():
        raise self.NotBoundError('Actor is not bound to any connection.')
    return self.connection

@cached_property
def _default_fields(self):
    return dict(BUILTIN_FIELDS, **self.default_fields)

@property
def routing_key(self):
    # An explicit default routing key wins; otherwise route by id.
    if self.default_routing_key:
        return self.default_routing_key
    else:
        return self.id
|
celery/cell | cell/actors.py | Actor.call_or_cast | python | def call_or_cast(self, method, args={}, nowait=False, **kwargs):
return (nowait and self.cast or self.call)(method, args, **kwargs) | Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L322-L347 | null | class Actor:
# Class-level aliases so subclasses/users can reference these through
# the actor class itself.
AsyncResult = AsyncResult

Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError

#: Actor name.
#: Defaults to the defined class name.
name = None

#: Default exchange(direct) used for messages to this actor.
exchange = None

#: Default routing key used if no ``to`` argument passed.
default_routing_key = None

#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'

#: Set to True to disable acks.
no_ack = False

#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#:     Send the message directly to an agent by exact routing key.
#: * round-robin
#:     Send the message to an agent by round-robin.
#: * scatter
#:     Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)

#: Default serializer used to send messages and reply messages.
serializer = 'json'

#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0

#: Time in seconds as a float which after replies expires.
reply_expires = 100.0

#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')

#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None

#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None

#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None

#: Should we retry publishing messages by default?
#: Default: NO
retry = None

#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40

#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
                'interval_start': 0,
                'interval_max': 1,
                'interval_step': 0.2}

#: returns the next anonymous ticket number
#: used for identifying related logs.
#: NOTE(review): ``_DISPATCH`` references ``self.ticket_counter``;
#: these two names must be reconciled.
ticket_count = count(1)

#: Additional fields added to reply messages by default.
default_fields = {}

#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
                ACTOR_TYPE.RR: '__rr__',
                ACTOR_TYPE.SCATTER: '__scatter__'}

meta = {}
consumer = None

class state:
    """Placeholder class for actor's supported methods."""
    pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
             logger=None, agent=None, outbox_exchange=None,
             group_exchange=None, **kwargs):
    """Set up identity, exchanges/queues, logging and actor state.

    :keyword connection: broker connection this actor is bound to
        (``None`` leaves the actor unbound).
    :keyword id: unique id of this actor instance (generated if omitted).
    :keyword group_exchange: name overriding the scatter (fanout)
        exchange for this instance.
    """
    self.connection = connection
    self.id = id or uuid()
    self.name = name or self.name or self.__class__.__name__
    self.outbox_exchange = outbox_exchange or self.outbox_exchange
    self.agent = agent
    if self.default_fields is None:
        self.default_fields = {}

    # - setup exchanges and queues
    self.exchange = exchange or self.get_direct_exchange()
    if group_exchange:
        self._scatter_exchange = Exchange(
            group_exchange, 'fanout', auto_delete=True)
    # Factories, not instances: queues/exchanges are created lazily
    # per calling type.
    typemap = {
        ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
        ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
        ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
    }
    self.type_to_queue = {k: v[0] for k, v in items(typemap)}
    self.type_to_exchange = {k: v[1] for k, v in items(typemap)}

    if not self.outbox_exchange:
        self.outbox_exchange = Exchange(
            'cl.%s.output' % self.name, type='topic',
        )

    # - setup logging
    logger_name = self.name
    if self.agent:
        logger_name = '%s#%s' % (self.name, shortuuid(self.id))
    self.log = Log('!<%s>' % logger_name, logger=logger)
    self.state = self.contribute_to_state(self.construct_state())

    # actor specific initialization.
    self.construct()
def _add_binding(self, source, routing_key='',
                 inbox_type=ACTOR_TYPE.DIRECT):
    """Bind this actor's inbox for *inbox_type* to a source exchange.

    :param source: dict of :class:`kombu.Exchange` keyword arguments
        describing the exchange to bind to.
    """
    exchange = Exchange(**source)
    bind = self.get_binder(inbox_type)
    maybe_declare(exchange, self.connection.default_channel)
    bind(exchange=exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
                    inbox_type=ACTOR_TYPE.DIRECT):
    """Undo a binding previously created by :meth:`_add_binding`."""
    exchange = Exchange(**source)
    unbind = self.get_unbinder(inbox_type)
    unbind(exchange=exchange, routing_key=routing_key)
def get_binder(self, type):
    """Return the ``bind_to`` callable of the inbox entity for calling
    type *type*, binding and declaring the entity as a side effect.

    :raises ValueError: if *type* is not one of :attr:`types`.
    """
    if type == ACTOR_TYPE.DIRECT:
        entity = self.type_to_queue[type]()
    elif type in self.types:
        entity = self.type_to_exchange[type]()
    else:
        raise ValueError('Unsupported type: {0}'.format(type))
    binder = entity.bind_to
    # @TODO: Declare probably should not happened here
    entity.maybe_bind(self.connection.default_channel)
    maybe_declare(entity, entity.channel)
    return binder
def get_unbinder(self, type):
    """Return the unbind callable of the inbox entity for calling type
    *type* (``unbind_from`` for queues, ``exchange_unbind`` for
    exchanges), binding the entity to the default channel first."""
    if type == ACTOR_TYPE.DIRECT:
        entity = self.type_to_queue[type]()
        unbinder = entity.unbind_from
    else:
        entity = self.type_to_exchange[type]()
        unbinder = entity.exchange_unbind
    entity = entity.maybe_bind(self.connection.default_channel)
    # @TODO: Declare probably should not happened here
    return unbinder
def add_binding(self, source, routing_key='',
                inbox_type=ACTOR_TYPE.DIRECT):
    """Ask the remote actor (direct call) to bind its inbox to *source*."""
    args = {'source': source.as_dict(),
            'routing_key': routing_key,
            'inbox_type': inbox_type}
    self.call('add_binding', args, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
                   inbox_type=ACTOR_TYPE.DIRECT):
    """Ask the remote actor (direct call) to remove an inbox binding."""
    args = {'source': source.as_dict(),
            'routing_key': routing_key,
            'inbox_type': inbox_type}
    self.call('remove_binding', args, type=ACTOR_TYPE.DIRECT)
def construct(self):
    """Actor specific initialization (template hook, default no-op)."""
    pass

def construct_state(self):
    """Instantiates the state class of this actor."""
    return self.state()

def on_agent_ready(self):
    # Hook called by the owning agent once it is ready; default no-op.
    pass
def contribute_to_object(self, obj, map):
    """Set every ``attr: value`` pair from *map* on *obj* (without
    overwriting existing attributes) and return *obj*."""
    for attr_name, default in items(map):
        setattr_default(obj, attr_name, default)
    return obj
def contribute_to_state(self, state):
    """Decorate *state* with the default actor attributes, unless it
    provides its own ``contribute_to_state`` hook -- in which case the
    hook decides."""
    defaults = {
        'actor': self,
        'agent': self.agent,
        'connection': self.connection,
        'log': self.log,
        'Next': self.Next,
        'NoRouteError': self.NoRouteError,
        'NoReplyError': self.NoReplyError,
        'add_binding': self._add_binding,
        'remove_binding': self._remove_binding,
    }
    try:
        contribute = state.contribute_to_state
    except AttributeError:
        return self.contribute_to_object(state, defaults)
    return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
    """Call method on agent listening to ``routing_key``.

    See :meth:`call_or_cast` for a full list of supported
    arguments.

    If the keyword argument `nowait` is false (default) it
    will block and return the reply.
    """
    if to is None:
        to = self.routing_key
    r = self.call_or_cast(method, args, routing_key=to,
                          nowait=nowait, **kwargs)
    if not nowait:
        return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
    """Call method on one of the agents in round robin.

    See :meth:`call_or_cast` for a full list of supported
    arguments.

    If the keyword argument `nowait` is false (default) it
    will block and return the reply.
    """
    result = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
                               nowait=nowait, **kwargs)
    if nowait:
        return None
    return result
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
    """Broadcast method to all agents.

    If nowait is False, returns generator to iterate over the results.

    :keyword limit: Limit number of reads from the queue.
        Unlimited by default.
    :keyword timeout: the timeout (in float seconds) waiting for replies.
        Default is :attr:`default_timeout`.

    **Examples**

    ``scatter`` is a generator (if nowait is False)::

        >>> res = scatter()
        >>> res.next()  # one event consumed, or timed out.

        >>> res = scatter(limit=2):
        >>> for i in res:  # two events consumed or timeout
        >>>     pass

    See :meth:`call_or_cast` for a full list of supported
    arguments.
    """
    timeout = timeout if timeout is not None else self.default_timeout
    r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
                          nowait=nowait, timeout=timeout, **kwargs)
    if not nowait:
        # NOTE(review): ``**kwargs`` is forwarded both to the publish
        # and to ``gather`` -- confirm ``gather`` tolerates
        # publish-only keywords.
        return r.gather(timeout=timeout, **kwargs)
def get_scatter_exchange(self):
    """Returns a :class:`kombu.Exchange` for type fanout."""
    return Exchange('cl.scatter.%s' % self.name, 'fanout',
                    auto_delete=True)

def get_rr_exchange(self):
    """Returns a :class:`kombu.Exchange` instance with type set to
    fanout.  The exchange is used for sending in a round-robin style."""
    return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)

def get_direct_exchange(self):
    """Returns a :class:`kombu.Exchange` with type direct."""
    return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
    """Return the inbox queue for each calling type this actor supports."""
    return [self.type_to_queue[calling_type]()
            for calling_type in self.types]
def get_direct_queue(self):
    """Returns a :class:`kombu.Queue` instance to be used to listen
    for messages sent to this specific Actor instance."""
    return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
                 auto_delete=True)

def get_scatter_queue(self):
    """Returns a :class:`kombu.Queue` instance for receiving broadcast
    commands for this actor type."""
    return Queue('%s.%s.scatter' % (self.name, self.id),
                 self.inbox_scatter, auto_delete=True)

def get_rr_queue(self):
    """Returns a :class:`kombu.Queue` instance for receiving round-robin
    commands for this actor type."""
    return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
                 auto_delete=True)

def get_reply_queue(self, ticket):
    # Reply queue named after the ticket; ``x-expires`` makes the broker
    # delete abandoned reply queues automatically.
    return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
                 queue_arguments={
                     'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
    """Returns a :class:`kombu.Consumer` instance for this Actor,
    consuming from every supported inbox queue."""
    kwargs.setdefault('no_ack', self.no_ack)
    return Consumer(channel, self.get_queues(),
                    callbacks=[self.on_message], **kwargs)

def emit(self, method, args={}, retry=None):
    # Fire-and-forget publish to this actor's outbox (topic) exchange.
    return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
         retry_policy=None, type=None, exchange=None, **props):
    """Send message to actor, discarding any reply.

    :param method: Name of the remote method to invoke.
    :param args: Dictionary of arguments for the method.
    :keyword declare: Additional entities to declare before publishing.
    :keyword retry: Retry publishing on connection failure
        (defaults to :attr:`retry`).
    :keyword retry_policy: Overrides merged into :attr:`retry_policy`.
    :keyword type: Calling type; must be one of :attr:`types`
        (defaults to direct).
    :keyword exchange: Override the destination exchange.
    :keyword \*\*props: Extra message properties,
        see :meth:`kombu.Producer.publish`.

    :raises ValueError: if `type` is given but unsupported.
    """
    retry = self.retry if retry is None else retry
    body = {'class': self.name, 'method': method, 'args': args}

    _retry_policy = self.retry_policy
    if retry_policy:  # merge default and custom policies.
        _retry_policy = dict(_retry_policy, **retry_policy)

    if type and type not in self.types:
        raise ValueError('Unsupported type: {0}'.format(type))
    elif not type:
        type = ACTOR_TYPE.DIRECT

    props.setdefault('routing_key', self.routing_key)
    props.setdefault('serializer', self.serializer)
    exchange = exchange or self.type_to_exchange[type]()
    declare = (maybe_list(declare) or []) + [exchange]
    with producers[self._connection].acquire(block=True) as producer:
        # BUG FIX: the merged ``_retry_policy`` was computed but the
        # unmerged ``retry_policy`` argument was forwarded, silently
        # discarding both the class default and the merge.
        return producer.publish(body, exchange=exchange, declare=declare,
                                retry=retry, retry_policy=_retry_policy,
                                **props)
def call(self, method, args={}, retry=False, retry_policy=None,
         ticket=None, **props):
    """Send message to the same actor and return :class:`AsyncResult`."""
    # NOTE: ``retry``/``retry_policy`` are accepted for interface
    # symmetry with :meth:`cast` but are not forwarded here.
    ticket = ticket or uuid()
    reply_queue = self.get_reply_queue(ticket)
    self.cast(method, args, declare=[reply_queue],
              reply_to=ticket, **props)
    return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
    """Handle cast message (fire-and-forget; the result is discarded)."""
    self._DISPATCH(body)

def handle_call(self, body, message):
    """Handle call message: dispatch and publish the result back to
    the caller's reply queue named by the ``reply_to`` property."""
    try:
        r = self._DISPATCH(body, ticket=message.properties['reply_to'])
    except self.Next:
        # don't reply, delegate to other agents.
        pass
    else:
        self.reply(message, r)

def reply(self, req, body, **props):
    # Publish ``body`` to the reply exchange, routed by the requester's
    # ``reply_to`` ticket, using the same serializer the request used.
    with producers[self._connection].acquire(block=True) as producer:
        content_type = req.content_type
        serializer = serialization.registry.type_to_name[content_type]
        return producer.publish(
            body,
            declare=[self.reply_exchange],
            routing_key=req.properties['reply_to'],
            correlation_id=req.properties.get('correlation_id'),
            serializer=serializer,
            **props
        )

def on_message(self, body, message):
    # Consumer callback: delegate processing to the owning agent.
    self.agent.process_message(self, body, message)
def _on_message(self, body, message):
    """What to do when a message is received.

    This is a kombu consumer callback taking the standard
    ``body`` and ``message`` arguments.

    Note that if the properties of the message contains
    a value for ``reply_to`` then a proper implementation
    is expected to send a reply.
    """
    # Requests carrying ``reply_to`` expect an answer; all others
    # are treated as fire-and-forget casts.
    if message.properties.get('reply_to'):
        handler = self.handle_call
    else:
        handler = self.handle_cast

    def handle():
        # Do not ack the message if an exceptional error occurs,
        # but do ack the message if SystemExit or KeyboardInterrupt
        # is raised, as this is probably intended.
        try:
            handler(body, message)
        except Exception:
            raise
        except BaseException:
            message.ack()
            raise
        else:
            message.ack()
    handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
    """Iterate over replies published for *ticket*, applying default
    timeout/limit/ignore_timeout behaviour."""
    kwargs.setdefault('timeout', self.default_timeout)
    if self.agent and 'limit' not in kwargs:
        kwargs['limit'] = self.agent.get_default_scatter_limit()
    # With no reply limit we must eventually time out instead of
    # blocking forever.
    if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
        kwargs['ignore_timeout'] = False
    reply_queue = self.get_reply_queue(ticket)
    return collect_replies(conn, channel, reply_queue, *args, **kwargs)
def lookup_action(self, name):
    """Resolve *name* to a callable on :attr:`state`.

    An empty or missing name resolves to :meth:`default_receive`.

    :raises KeyError: if the method is unknown, not callable,
        or is a special method (name starting with underscore).
    """
    try:
        if not name:
            method = self.default_receive
        else:
            method = getattr(self.state, name)
    except AttributeError:
        raise KeyError(name)
    # BUG FIX: ``name`` may be None here (default_receive path);
    # guard the ``startswith`` check so that case cannot raise
    # AttributeError instead of the documented KeyError.
    if not callable(method) or (name and name.startswith('_')):
        raise KeyError(method)
    return method
def default_receive(self, msg_body):
    """Fallback handler used when a message names no method.

    Override in the derived classes.
    """
    pass
def _DISPATCH(self, body, ticket=None):
    """Dispatch message to the appropriate method
    in :attr:`state`, handle possible exceptions,
    and return a response suitable to be used in a reply.

    To protect from calling special methods it does not dispatch
    method names starting with underscore (``_``).

    This returns the return value or exception error
    with defaults fields in a suitable format to be used
    as a reply.

    The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
    will not be handled, and will propagate.

    In the case of a successful call the return value will
    be::

        {'ok': return_value, **default_fields}

    If the method raised an exception the return value
    will be::

        {'nok': [repr exc, str traceback], **default_fields}

    :raises KeyError: if the method specified is unknown
        or is a special method (name starting with underscore).
    """
    if ticket:
        sticket = '%s' % (shortuuid(ticket), )
    else:
        # BUG FIX: the class attribute is ``ticket_count``;
        # ``self.ticket_counter`` does not exist and raised
        # AttributeError whenever no ticket was supplied.
        ticket = sticket = str(next(self.ticket_count))
    try:
        method, args = itemgetter('method', 'args')(body)
        self.log.info('#%s --> %s',
                      sticket, self._reprcall(method, args))
        act = self.lookup_action(method)
        r = {'ok': act(args or {})}
        self.log.info('#%s <-- %s', sticket, reprkwargs(r))
    except self.Next:
        # Delegate to another agent; the caller must not reply.
        raise
    except Exception as exc:
        einfo = sys.exc_info()
        r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
        self.log.error('#%s <-- nok=%r', sticket, exc)
    return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
    """Format an ``exc_info`` triple as one traceback string."""
    return ''.join(traceback.format_exception(*exc_info))

def _reprcall(self, method, args):
    # Human-readable ``ActorName.method(...)`` used in log lines.
    return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
    """Return a new copy of this actor bound to *connection*."""
    return self.__class__(connection, self.id,
                          self.name, self.exchange, agent=agent)

def is_bound(self):
    """Return True if the actor has a broker connection."""
    return self.connection is not None

def __copy__(self):
    cls, args = self.__reduce__()
    return cls(*args)

def __reduce__(self):
    # Pickle/copy support; the bound agent is deliberately excluded
    # from the reduced state.
    return (self.__class__, (self.connection, self.id,
                             self.name, self.exchange))
@property
def outbox(self):
    # Alias for the outbox (topic) exchange.
    return self.outbox_exchange

def _inbox_rr(self):
    # Lazily create and cache the round-robin fanout exchange.
    if not self._rr_exchange:
        self._rr_exchange = self.get_rr_exchange()
    return self._rr_exchange

@property
def inbox_rr(self):
    return self._inbox_rr()

def _inbox_direct(self):
    return self.exchange

@property
def inbox_direct(self):
    return self._inbox_direct()

def _inbox_scatter(self):
    # Lazily create and cache the scatter (fanout) exchange.
    if not self._scatter_exchange:
        self._scatter_exchange = self.get_scatter_exchange()
    return self._scatter_exchange

@property
def inbox_scatter(self):
    return self._inbox_scatter()

@property
def _connection(self):
    # Connection accessor that fails loudly when unbound.
    if not self.is_bound():
        raise self.NotBoundError('Actor is not bound to any connection.')
    return self.connection

@cached_property
def _default_fields(self):
    return dict(BUILTIN_FIELDS, **self.default_fields)

@property
def routing_key(self):
    # An explicit default routing key wins; otherwise route by id.
    if self.default_routing_key:
        return self.default_routing_key
    else:
        return self.id
|
celery/cell | cell/actors.py | Actor.get_direct_queue | python | def get_direct_queue(self):
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True) | Returns a :class: `kombu.Queue` instance to be used to listen
for messages sent to this specific Actor instance | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L366-L370 | null | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
    """Apply remote `method` asynchronously or synchronously depending
    on the value of `nowait`.

    :param method: The name of the remote method to perform.
    :param args: Dictionary of arguments for the method.
    :keyword nowait: If false the call will block until the result
        is available and return it (default), if true the call will be
        non-blocking and no result will be returned.
    :keyword retry: If set to true then message sending will be retried
        in the event of connection failures. Default is decided by the
        :attr:`retry` attributed.
    :keyword retry_policy: Override retry policies.
        See :attr:`retry_policy`.  This must be a dictionary, and keys
        will be merged with the default retry policy.
    :keyword timeout: Timeout to wait for replies in seconds as a float
        (**only relevant in blocking mode**).
    :keyword limit: Limit number of replies to wait for
        (**only relevant in blocking mode**).
    :keyword callback: If provided, this callback will be called for
        every reply received (**only relevant in blocking mode**).
    :keyword \*\*props: Additional message properties.
        See :meth:`kombu.Producer.publish`.
    """
    # IDIOM FIX: explicit conditional instead of the legacy
    # ``cond and a or b`` trick, which silently selects the wrong
    # branch whenever the first operand is falsy.
    handler = self.cast if nowait else self.call
    return handler(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
         retry_policy=None, type=None, exchange=None, **props):
    """Send message to actor, discarding any reply.

    :param method: Name of the remote method to invoke.
    :param args: Dictionary of arguments for the method.
    :keyword declare: Additional entities to declare before publishing.
    :keyword retry: Retry publishing on connection failure
        (defaults to :attr:`retry`).
    :keyword retry_policy: Overrides merged into :attr:`retry_policy`.
    :keyword type: Calling type; must be one of :attr:`types`
        (defaults to direct).
    :keyword exchange: Override the destination exchange.
    :keyword \*\*props: Extra message properties,
        see :meth:`kombu.Producer.publish`.

    :raises ValueError: if `type` is given but unsupported.
    """
    retry = self.retry if retry is None else retry
    body = {'class': self.name, 'method': method, 'args': args}

    _retry_policy = self.retry_policy
    if retry_policy:  # merge default and custom policies.
        _retry_policy = dict(_retry_policy, **retry_policy)

    if type and type not in self.types:
        raise ValueError('Unsupported type: {0}'.format(type))
    elif not type:
        type = ACTOR_TYPE.DIRECT

    props.setdefault('routing_key', self.routing_key)
    props.setdefault('serializer', self.serializer)
    exchange = exchange or self.type_to_exchange[type]()
    declare = (maybe_list(declare) or []) + [exchange]
    with producers[self._connection].acquire(block=True) as producer:
        # BUG FIX: the merged ``_retry_policy`` was computed but the
        # unmerged ``retry_policy`` argument was forwarded, silently
        # discarding both the class default and the merge.
        return producer.publish(body, exchange=exchange, declare=declare,
                                retry=retry, retry_policy=_retry_policy,
                                **props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/actors.py | Actor.get_scatter_queue | python | def get_scatter_queue(self):
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True) | Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L372-L376 | null | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/actors.py | Actor.get_rr_queue | python | def get_rr_queue(self):
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True) | Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L378-L382 | null | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/actors.py | Actor.Consumer | python | def Consumer(self, channel, **kwargs):
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs) | Returns a :class:`kombu.Consumer` instance for this Actor | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L389-L393 | [
"def get_queues(self):\n return [self.type_to_queue[type]() for type in self.types]\n"
] | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/actors.py | Actor.cast | python | def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props) | Send message to actor. Discarding replies. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L398-L420 | null | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
    """Decorate ``state`` with convenience attributes for this actor.

    If the state object defines a ``contribute_to_state`` hook, that
    hook is invoked with the actor and its return value is used
    instead of the default attribute set.
    """
    try:
        contribute = state.contribute_to_state
    except AttributeError:
        pass
    else:
        return contribute(self)
    # No custom hook: install the default set of state attributes.
    defaults = {
        'actor': self,
        'agent': self.agent,
        'connection': self.connection,
        'log': self.log,
        'Next': self.Next,
        'NoRouteError': self.NoRouteError,
        'NoReplyError': self.NoReplyError,
        'add_binding': self._add_binding,
        'remove_binding': self._remove_binding,
    }
    return self.contribute_to_object(state, defaults)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
    """Call ``method`` on the agent listening to routing key ``to``.

    Defaults to this actor's own :attr:`routing_key` when ``to``
    is not given.  See :meth:`call_or_cast` for a full list of
    supported arguments.

    If the keyword argument `nowait` is false (default) it
    will block and return the reply.
    """
    if to is None:
        to = self.routing_key
    r = self.call_or_cast(method, args, routing_key=to,
                          nowait=nowait, **kwargs)
    if not nowait:
        return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
    """Call ``method`` on one of the agents, chosen round-robin.

    See :meth:`call_or_cast` for a full list of supported
    arguments.

    If the keyword argument `nowait` is false (default) it
    will block and return the result object.

    NOTE(review): unlike :meth:`send`, the blocking branch returns
    the result object itself rather than calling ``.get()`` on it --
    confirm this asymmetry is intended.
    """
    r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
                          nowait=nowait, **kwargs)
    if not nowait:
        return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
    """Broadcast ``method`` to all agents of this actor type.

    If nowait is False, returns a generator to iterate over
    the results.

    :keyword limit: Limit number of reads from the queue.
        Unlimited by default.
    :keyword timeout: the timeout (in float seconds) waiting for
        replies.  Default is :attr:`default_timeout`.

    **Examples**

    ``scatter`` is a generator (if nowait is False)::

        >>> res = scatter()
        >>> res.next()  # one event consumed, or timed out.

        >>> res = scatter(limit=2)
        >>> for i in res:  # two events consumed or timeout
        ...     pass

    See :meth:`call_or_cast` for a full list of supported
    arguments.
    """
    timeout = timeout if timeout is not None else self.default_timeout
    r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
                          nowait=nowait, timeout=timeout, **kwargs)
    if not nowait:
        # ``gather`` lazily consumes replies from the reply queue.
        return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
    """Apply remote ``method`` asynchronously or synchronously
    depending on the value of ``nowait``.

    :param method: The name of the remote method to perform.
    :param args: Dictionary of arguments for the method.
    :keyword nowait: If false the call will block until the result
        is available and return it (default), if true the call will
        be non-blocking and no result will be returned.
    :keyword retry: If true, sending is retried on connection
        failures; default decided by the :attr:`retry` attribute.
    :keyword retry_policy: Override retry policies (merged with the
        default :attr:`retry_policy` dictionary).
    :keyword timeout: Timeout in float seconds waiting for replies
        (blocking mode only).
    :keyword limit: Limit number of replies to wait for
        (blocking mode only).
    :keyword callback: Called for every reply received
        (blocking mode only).

    Additional message properties are forwarded to
    :meth:`kombu.Producer.publish`.
    """
    dispatch = self.cast if nowait else self.call
    return dispatch(method, args, **kwargs)
def get_scatter_exchange(self):
    """Return the fanout :class:`kombu.Exchange` used for broadcasts."""
    name = 'cl.scatter.%s' % self.name
    return Exchange(name, 'fanout', auto_delete=True)

def get_rr_exchange(self):
    """Return the fanout :class:`kombu.Exchange` used for round-robin
    style delivery."""
    name = 'cl.rr.%s' % self.name
    return Exchange(name, 'fanout', auto_delete=True)

def get_direct_exchange(self):
    """Return the direct :class:`kombu.Exchange` of this actor."""
    name = 'cl.%s' % self.name
    return Exchange(name, 'direct', auto_delete=True)
def get_queues(self):
    """Instantiate one inbox queue per supported calling type."""
    # ``qtype`` avoids shadowing the ``type`` builtin.
    return [self.type_to_queue[qtype]() for qtype in self.types]
def get_direct_queue(self):
    """Return the :class:`kombu.Queue` used to receive messages sent
    to this specific actor instance (the queue is named by the
    actor id)."""
    return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
                 auto_delete=True)

def get_scatter_queue(self):
    """Return the :class:`kombu.Queue` receiving broadcast commands
    for this actor type."""
    return Queue('%s.%s.scatter' % (self.name, self.id),
                 self.inbox_scatter, auto_delete=True)

def get_rr_queue(self):
    """Return the :class:`kombu.Queue` receiving round-robin commands
    for this actor type."""
    return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
                 auto_delete=True)

def get_reply_queue(self, ticket):
    """Return a short-lived reply queue for ``ticket``.

    The queue expires after :attr:`reply_expires` seconds
    (``x-expires`` is specified in milliseconds).
    """
    return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
                 queue_arguments={
                     'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
    """Return a :class:`kombu.Consumer` consuming all of this actor's
    inbox queues, dispatching each message to :meth:`on_message`."""
    # Honour the actor-level ack policy unless explicitly overridden.
    kwargs.setdefault('no_ack', self.no_ack)
    return Consumer(channel, self.get_queues(),
                    callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
    """Cast ``method`` to the outbox exchange so any actor bound to
    this actor's outbox receives it (fire-and-forget)."""
    return self.cast(method, args, retry=retry, exchange=self.outbox)
def call(self, method, args={}, retry=False, retry_policy=None,
         ticket=None, **props):
    """Send message to the same actor and return :class:`AsyncResult`.

    A reply queue keyed by ``ticket`` is declared so the remote side
    can publish its answer back to us.
    """
    ticket = ticket or uuid()
    reply_queue = self.get_reply_queue(ticket)
    self.cast(method, args, declare=[reply_queue], reply_to=ticket, **props)
    return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
    """Dispatch a cast (fire-and-forget) message; no reply is sent."""
    self._DISPATCH(body)

def handle_call(self, body, message):
    """Dispatch a call message and publish the reply.

    A :exc:`Next` raised by the handler suppresses the reply so that
    another agent may service the request instead.
    """
    ticket = message.properties['reply_to']
    try:
        result = self._DISPATCH(body, ticket=ticket)
    except self.Next:
        # don't reply; delegate to other agents.
        return
    self.reply(message, result)
def reply(self, req, body, **props):
    """Publish ``body`` as the reply to request message ``req``.

    The reply is serialized with the same format the request used
    and routed to the requester's ``reply_to`` queue.
    """
    with producers[self._connection].acquire(block=True) as producer:
        content_type = req.content_type
        # Mirror the serializer of the incoming request.
        serializer = serialization.registry.type_to_name[content_type]
        return producer.publish(
            body,
            declare=[self.reply_exchange],
            routing_key=req.properties['reply_to'],
            correlation_id=req.properties.get('correlation_id'),
            serializer=serializer,
            **props
        )
def on_message(self, body, message):
    """Consumer callback: forward the raw message to the agent."""
    agent = self.agent
    agent.process_message(self, body, message)
def _on_message(self, body, message):
    """What to do when a message is received.

    This is a kombu consumer callback taking the standard
    ``body`` and ``message`` arguments.

    Note that if the properties of the message contains
    a value for ``reply_to`` then a proper implementation
    is expected to send a reply.
    """
    # A ``reply_to`` property marks a call (reply expected);
    # everything else is a fire-and-forget cast.
    if message.properties.get('reply_to'):
        handler = self.handle_call
    else:
        handler = self.handle_cast

    def handle():
        # Do not ack the message if an exceptional error occurs,
        # but do ack the message if SystemExit or KeyboardInterrupt
        # is raised, as this is probably intended.
        try:
            handler(body, message)
        except Exception:
            # Left unacked so the broker may redeliver the message.
            raise
        except BaseException:
            # SystemExit / KeyboardInterrupt: ack before shutting down.
            message.ack()
            raise
        else:
            message.ack()
    handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
    """Yield replies arriving on the reply queue of ``ticket``."""
    kwargs.setdefault('timeout', self.default_timeout)
    # The agent may cap how many scatter replies we wait for.
    if 'limit' not in kwargs and self.agent:
        kwargs['limit'] = self.agent.get_default_scatter_limit()
    # Without a limit, timing out is the normal way to stop collecting.
    # NOTE(review): setdefault is redundant under the membership guard.
    if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
        kwargs.setdefault('ignore_timeout', False)
    return collect_replies(conn, channel, self.get_reply_queue(ticket),
                           *args, **kwargs)
def lookup_action(self, name):
    """Resolve ``name`` to a callable on :attr:`state`.

    An empty name resolves to :meth:`default_receive`.

    :raises KeyError: if the method is unknown, not callable, or
        private (name starting with an underscore).
    """
    try:
        if not name:
            action = self.default_receive
        else:
            action = getattr(self.state, name)
    except AttributeError:
        raise KeyError(name)
    if not callable(action) or name.startswith('_'):
        raise KeyError(action)
    return action
def default_receive(self, msg_body):
    """Fallback handler used when a message names no method.

    Override in the derived classes.
    """
    pass
def _DISPATCH(self, body, ticket=None):
    """Dispatch message to the appropriate method
    in :attr:`state`, handle possible exceptions,
    and return a response suitable to be used in a reply.

    To protect from calling special methods it does not dispatch
    method names starting with underscore (``_``).

    This returns the return value or exception error
    with defaults fields in a suitable format to be used
    as a reply.

    The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
    will not be handled, and will propagate.

    In the case of a successful call the return value will
    be::

        {'ok': return_value, **default_fields}

    If the method raised an exception the return value
    will be::

        {'nok': [repr exc, str traceback], **default_fields}

    :raises KeyError: if the method specified is unknown
        or is a special method (name starting with underscore).
    """
    if ticket:
        sticket = '%s' % (shortuuid(ticket), )
    else:
        # BUGFIX: the class attribute is ``ticket_count``; the previous
        # ``self.ticket_counter`` raised AttributeError for anonymous
        # (cast) dispatches.
        ticket = sticket = str(next(self.ticket_count))
    try:
        method, args = itemgetter('method', 'args')(body)
        self.log.info('#%s --> %s',
                      sticket, self._reprcall(method, args))
        act = self.lookup_action(method)
        r = {'ok': act(args or {})}
        self.log.info('#%s <-- %s', sticket, reprkwargs(r))
    except self.Next:
        # Delegation requested: let another agent handle the call.
        raise
    except Exception as exc:
        einfo = sys.exc_info()
        r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
        self.log.error('#%s <-- nok=%r', sticket, exc)
    return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
    """Render an ``exc_info`` triple as a single traceback string."""
    lines = traceback.format_exception(*exc_info)
    return ''.join(lines)
def _reprcall(self, method, args):
    """Format ``method``/``args`` as ``ActorName.method(...)`` for logging."""
    return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
    """Return a copy of this actor bound to ``connection``."""
    cls = self.__class__
    return cls(connection, self.id, self.name, self.exchange, agent=agent)

def is_bound(self):
    """True if this actor has been bound to a connection."""
    return self.connection is not None
def __copy__(self):
    """Shallow copy by replaying the pickle reduction."""
    cls, ctor_args = self.__reduce__()
    return cls(*ctor_args)

def __reduce__(self):
    """Support pickling: rebuild from connection, id, name, exchange."""
    ctor_args = (self.connection, self.id, self.name, self.exchange)
    return (self.__class__, ctor_args)
@property
def outbox(self):
    """Exchange other actors bind to in order to receive our emits."""
    return self.outbox_exchange

def _inbox_rr(self):
    # Lazily created round-robin exchange, cached on the instance.
    if not self._rr_exchange:
        self._rr_exchange = self.get_rr_exchange()
    return self._rr_exchange

@property
def inbox_rr(self):
    """Round-robin inbox exchange (created on first access)."""
    return self._inbox_rr()

def _inbox_direct(self):
    # The direct inbox is simply the actor's main exchange.
    return self.exchange

@property
def inbox_direct(self):
    """Direct inbox exchange."""
    return self._inbox_direct()

def _inbox_scatter(self):
    # Lazily created scatter (fanout) exchange, cached on the instance.
    if not self._scatter_exchange:
        self._scatter_exchange = self.get_scatter_exchange()
    return self._scatter_exchange

@property
def inbox_scatter(self):
    """Broadcast inbox exchange (created on first access)."""
    return self._inbox_scatter()

@property
def _connection(self):
    # Connection accessor that fails loudly when the actor is unbound.
    if not self.is_bound():
        raise self.NotBoundError('Actor is not bound to any connection.')
    return self.connection

@cached_property
def _default_fields(self):
    # Built-in reply fields merged with per-actor extras; computed once.
    return dict(BUILTIN_FIELDS, **self.default_fields)

@property
def routing_key(self):
    """Routing key for direct sends: the explicit default, else the id."""
    if self.default_routing_key:
        return self.default_routing_key
    else:
        return self.id
|
celery/cell | cell/actors.py | Actor.call | python | def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self) | Send message to the same actor and return :class:`AsyncResult`. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L422-L428 | [
"def get_reply_queue(self, ticket):\n return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,\n queue_arguments={\n 'x-expires': int(self.reply_expires * 1000)})\n",
"def cast(self, method, args={}, declare=None, retry=None,\n retry_policy=None, type=None... | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
         retry_policy=None, type=None, exchange=None, **props):
    """Send message to actor, discarding any reply.

    :param method: name of the remote method to invoke.
    :param args: dictionary of arguments for the method.
    :keyword declare: extra entities to declare before publishing.
    :keyword retry: retry publishing on connection failure
        (defaults to :attr:`retry`).
    :keyword retry_policy: overrides merged into :attr:`retry_policy`.
    :keyword type: calling type, one of :attr:`types`
        (defaults to direct).
    :keyword exchange: explicit destination exchange.
    :raises ValueError: if ``type`` is unsupported.
    """
    retry = self.retry if retry is None else retry
    body = {'class': self.name, 'method': method, 'args': args}
    _retry_policy = self.retry_policy
    if retry_policy:  # merge default and custom policies.
        _retry_policy = dict(_retry_policy, **retry_policy)
    if type and type not in self.types:
        raise ValueError('Unsupported type: {0}'.format(type))
    elif not type:
        type = ACTOR_TYPE.DIRECT
    props.setdefault('routing_key', self.routing_key)
    props.setdefault('serializer', self.serializer)
    exchange = exchange or self.type_to_exchange[type]()
    declare = (maybe_list(declare) or []) + [exchange]
    with producers[self._connection].acquire(block=True) as producer:
        # BUGFIX: publish with the *merged* policy; previously the raw
        # ``retry_policy`` argument was passed and the merge above was
        # dead code, so the actor's defaults were never applied.
        return producer.publish(body, exchange=exchange, declare=declare,
                                retry=retry, retry_policy=_retry_policy,
                                **props)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/actors.py | Actor.handle_call | python | def handle_call(self, body, message):
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r) | Handle call message. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L434-L442 | [
"def reply(self, req, body, **props):\n with producers[self._connection].acquire(block=True) as producer:\n content_type = req.content_type\n serializer = serialization.registry.type_to_name[content_type]\n return producer.publish(\n body,\n declare=[self.reply_exchange... | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used for identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/actors.py | Actor._on_message | python | def _on_message(self, body, message):
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle() | What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L460-L489 | [
"def handle():\n # Do not ack the message if an exceptional error occurs,\n # but do ack the message if SystemExit or KeyboardInterrupt\n # is raised, as this is probably intended.\n try:\n handler(body, message)\n except Exception:\n raise\n except BaseException:\n message.ac... | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _DISPATCH(self, body, ticket=None):
"""Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore).
"""
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r)
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/actors.py | Actor._DISPATCH | python | def _DISPATCH(self, body, ticket=None):
if ticket:
sticket = '%s' % (shortuuid(ticket), )
else:
ticket = sticket = str(next(self.ticket_counter))
try:
method, args = itemgetter('method', 'args')(body)
self.log.info('#%s --> %s',
sticket, self._reprcall(method, args))
act = self.lookup_action(method)
r = {'ok': act(args or {})}
self.log.info('#%s <-- %s', sticket, reprkwargs(r))
except self.Next:
raise
except Exception as exc:
einfo = sys.exc_info()
r = {'nok': [safe_repr(exc), self._get_traceback(einfo)]}
self.log.error('#%s <-- nok=%r', sticket, exc)
return dict(self._default_fields, **r) | Dispatch message to the appropriate method
in :attr:`state`, handle possible exceptions,
and return a response suitable to be used in a reply.
To protect from calling special methods it does not dispatch
method names starting with underscore (``_``).
This returns the return value or exception error
with defaults fields in a suitable format to be used
as a reply.
The exceptions :exc:`SystemExit` and :exc:`KeyboardInterrupt`
will not be handled, and will propagate.
In the case of a successful call the return value will
be::
{'ok': return_value, **default_fields}
If the method raised an exception the return value
will be::
{'nok': [repr exc, str traceback], **default_fields}
:raises KeyError: if the method specified is unknown
or is a special method (name starting with underscore). | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/actors.py#L518-L564 | [
"def shortuuid(u):\n if '-' in u:\n return u[:u.index('-')]\n return abbr(u, 16)\n",
"def lookup_action(self, name):\n try:\n if not name:\n method = self.default_receive\n else:\n method = getattr(self.state, name)\n except AttributeError:\n raise Key... | class Actor:
AsyncResult = AsyncResult
Error = exceptions.CellError
Next = exceptions.Next
NoReplyError = exceptions.NoReplyError
NoRouteError = exceptions.NoRouteError
NotBoundError = exceptions.NotBoundError
#: Actor name.
#: Defaults to the defined class name.
name = None
#: Default exchange(direct) used for messages to this actor.
exchange = None
#: Default routing key used if no ``to`` argument passed.
default_routing_key = None
#: Delivery mode: persistent or transient. Default is persistent.
delivery_mode = 'persistent'
#: Set to True to disable acks.
no_ack = False
#: List of calling types this actor should handle.
#: Valid types are:
#:
#: * direct
#: Send the message directly to an agent by exact routing key.
#: * round-robin
#: Send the message to an agent by round-robin.
#: * scatter
#: Send the message to all of the agents (broadcast).
types = (ACTOR_TYPE.DIRECT, ACTOR_TYPE.SCATTER, ACTOR_TYPE.RR)
#: Default serializer used to send messages and reply messages.
serializer = 'json'
#: Default timeout in seconds as a float which after
#: we give up waiting for replies.
default_timeout = 5.0
#: Time in seconds as a float which after replies expires.
reply_expires = 100.0
#: Exchange used for replies.
reply_exchange = Exchange('cl.reply', 'direct')
#: Exchange used for forwarding/binding with other actors.
outbox_exchange = None
#: Exchange used for receiving broadcast commands for this actor type.
_scatter_exchange = None
#: Exchange used for round-robin commands for this actor type.
_rr_exchange = None
#: Should we retry publishing messages by default?
#: Default: NO
retry = None
#: time-to-live for the actor before becoming Idle
ttl = 20
idle = 40
#: Default policy used when retrying publishing messages.
#: see :meth:`kombu.BrokerConnection.ensure` for a list
#: of supported keys.
retry_policy = {'max_retries': 100,
'interval_start': 0,
'interval_max': 1,
'interval_step': 0.2}
#: returns the next anonymous ticket number
#: used fo+r identifying related logs.
ticket_count = count(1)
#: Additional fields added to reply messages by default.
default_fields = {}
#: Map of calling types and their special routing keys.
type_to_rkey = {'rr': '__rr__',
ACTOR_TYPE.RR: '__rr__',
ACTOR_TYPE.SCATTER: '__scatter__'}
meta = {}
consumer = None
class state:
"""Placeholder class for actor's supported methods."""
pass
def __init__(self, connection=None, id=None, name=None, exchange=None,
logger=None, agent=None, outbox_exchange=None,
group_exchange=None, **kwargs):
self.connection = connection
self.id = id or uuid()
self.name = name or self.name or self.__class__.__name__
self.outbox_exchange = outbox_exchange or self.outbox_exchange
self.agent = agent
if self.default_fields is None:
self.default_fields = {}
# - setup exchanges and queues
self.exchange = exchange or self.get_direct_exchange()
if group_exchange:
self._scatter_exchange = Exchange(
group_exchange, 'fanout', auto_delete=True)
typemap = {
ACTOR_TYPE.DIRECT: [self.get_direct_queue, self._inbox_direct],
ACTOR_TYPE.RR: [self.get_rr_queue, self._inbox_rr],
ACTOR_TYPE.SCATTER: [self.get_scatter_queue, self._inbox_scatter]
}
self.type_to_queue = {k: v[0] for k, v in items(typemap)}
self.type_to_exchange = {k: v[1] for k, v in items(typemap)}
if not self.outbox_exchange:
self.outbox_exchange = Exchange(
'cl.%s.output' % self.name, type='topic',
)
# - setup logging
logger_name = self.name
if self.agent:
logger_name = '%s#%s' % (self.name, shortuuid(self.id))
self.log = Log('!<%s>' % logger_name, logger=logger)
self.state = self.contribute_to_state(self.construct_state())
# actor specific initialization.
self.construct()
def _add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
binder = self.get_binder(inbox_type)
maybe_declare(source_exchange, self.connection.default_channel)
binder(exchange=source_exchange, routing_key=routing_key)
def _remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
source_exchange = Exchange(**source)
unbinder = self.get_unbinder(inbox_type)
unbinder(exchange=source_exchange, routing_key=routing_key)
def get_binder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
elif type in self.types:
entity = self.type_to_exchange[type]()
else:
raise ValueError('Unsupported type: {0}'.format(type))
binder = entity.bind_to
# @TODO: Declare probably should not happened here
entity.maybe_bind(self.connection.default_channel)
maybe_declare(entity, entity.channel)
return binder
def get_unbinder(self, type):
if type == ACTOR_TYPE.DIRECT:
entity = self.type_to_queue[type]()
unbinder = entity.unbind_from
else:
entity = self.type_to_exchange[type]()
unbinder = entity.exchange_unbind
entity = entity.maybe_bind(self.connection.default_channel)
# @TODO: Declare probably should not happened here
return unbinder
def add_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('add_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def remove_binding(self, source, routing_key='',
inbox_type=ACTOR_TYPE.DIRECT):
self.call('remove_binding', {
'source': source.as_dict(),
'routing_key': routing_key,
'inbox_type': inbox_type,
}, type=ACTOR_TYPE.DIRECT)
def construct(self):
"""Actor specific initialization."""
pass
def construct_state(self):
"""Instantiates the state class of this actor."""
return self.state()
def on_agent_ready(self):
pass
def contribute_to_object(self, obj, map):
for attr, value in items(map):
setattr_default(obj, attr, value)
return obj
def contribute_to_state(self, state):
try:
contribute = state.contribute_to_state
except AttributeError:
# set default state attributes.
return self.contribute_to_object(state, {
'actor': self,
'agent': self.agent,
'connection': self.connection,
'log': self.log,
'Next': self.Next,
'NoRouteError': self.NoRouteError,
'NoReplyError': self.NoReplyError,
'add_binding': self._add_binding,
'remove_binding': self._remove_binding,
})
else:
return contribute(self)
def send(self, method, args={}, to=None, nowait=False, **kwargs):
"""Call method on agent listening to ``routing_key``.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
j
"""
if to is None:
to = self.routing_key
r = self.call_or_cast(method, args, routing_key=to,
nowait=nowait, **kwargs)
if not nowait:
return r.get()
def throw(self, method, args={}, nowait=False, **kwargs):
"""Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
"""
r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
nowait=nowait, **kwargs)
if not nowait:
return r
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
"""Broadcast method to all agents.
if nowait is False, returns generator to iterate over the results.
:keyword limit: Limit number of reads from the queue.
Unlimited by default.
:keyword timeout: the timeout (in float seconds) waiting for replies.
Default is :attr:`default_timeout`.
**Examples**
``scatter`` is a generator (if nowait is False)::
>>> res = scatter()
>>> res.next() # one event consumed, or timed out.
>>> res = scatter(limit=2):
>>> for i in res: # two events consumed or timeout
>>> pass
See :meth:`call_or_cast` for a full list of supported
arguments.
"""
timeout = timeout if timeout is not None else self.default_timeout
r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
nowait=nowait, timeout=timeout, **kwargs)
if not nowait:
return r.gather(timeout=timeout, **kwargs)
def call_or_cast(self, method, args={}, nowait=False, **kwargs):
"""Apply remote `method` asynchronously or synchronously depending
on the value of `nowait`.
:param method: The name of the remote method to perform.
:param args: Dictionary of arguments for the method.
:keyword nowait: If false the call will block until the result
is available and return it (default), if true the call will be
non-blocking and no result will be returned.
:keyword retry: If set to true then message sending will be retried
in the event of connection failures. Default is decided by the
:attr:`retry` attributed.
:keyword retry_policy: Override retry policies.
See :attr:`retry_policy`. This must be a dictionary, and keys will
be merged with the default retry policy.
:keyword timeout: Timeout to wait for replies in seconds as a float
(**only relevant in blocking mode**).
:keyword limit: Limit number of replies to wait for
(**only relevant in blocking mode**).
:keyword callback: If provided, this callback will be called for every
reply received (**only relevant in blocking mode**).
:keyword \*\*props: Additional message properties.
See :meth:`kombu.Producer.publish`.
"""
return (nowait and self.cast or self.call)(method, args, **kwargs)
def get_scatter_exchange(self):
"""Returns a :class:'kombu.Exchange' for type fanout"""
return Exchange('cl.scatter.%s' % self.name, 'fanout',
auto_delete=True)
def get_rr_exchange(self):
"""Returns a :class:'kombu.Exchange' instance with type set to fanout.
The exchange is used for sending in a round-robin style"""
return Exchange('cl.rr.%s' % self.name, 'fanout', auto_delete=True)
def get_direct_exchange(self):
"""Returns a :class:'kombu.Exchange' with type direct"""
return Exchange('cl.%s' % self.name, 'direct', auto_delete=True)
def get_queues(self):
return [self.type_to_queue[type]() for type in self.types]
def get_direct_queue(self):
"""Returns a :class: `kombu.Queue` instance to be used to listen
for messages send to this specific Actor instance"""
return Queue(self.id, self.inbox_direct, routing_key=self.routing_key,
auto_delete=True)
def get_scatter_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving broadcast
commands for this actor type."""
return Queue('%s.%s.scatter' % (self.name, self.id),
self.inbox_scatter, auto_delete=True)
def get_rr_queue(self):
"""Returns a :class: `kombu.Queue` instance for receiving round-robin
commands for this actor type."""
return Queue(self.inbox_rr.name + '.rr', self.inbox_rr,
auto_delete=True)
def get_reply_queue(self, ticket):
return Queue(ticket, self.reply_exchange, ticket, auto_delete=True,
queue_arguments={
'x-expires': int(self.reply_expires * 1000)})
def Consumer(self, channel, **kwargs):
"""Returns a :class:`kombu.Consumer` instance for this Actor"""
kwargs.setdefault('no_ack', self.no_ack)
return Consumer(channel, self.get_queues(),
callbacks=[self.on_message], **kwargs)
def emit(self, method, args={}, retry=None):
return self.cast(method, args, retry=retry, exchange=self.outbox)
def cast(self, method, args={}, declare=None, retry=None,
retry_policy=None, type=None, exchange=None, **props):
"""Send message to actor. Discarding replies."""
retry = self.retry if retry is None else retry
body = {'class': self.name, 'method': method, 'args': args}
_retry_policy = self.retry_policy
if retry_policy: # merge default and custom policies.
_retry_policy = dict(_retry_policy, **retry_policy)
if type and type not in self.types:
raise ValueError('Unsupported type: {0}'.format(type))
elif not type:
type = ACTOR_TYPE.DIRECT
props.setdefault('routing_key', self.routing_key)
props.setdefault('serializer', self.serializer)
exchange = exchange or self.type_to_exchange[type]()
declare = (maybe_list(declare) or []) + [exchange]
with producers[self._connection].acquire(block=True) as producer:
return producer.publish(body, exchange=exchange, declare=declare,
retry=retry, retry_policy=retry_policy,
**props)
def call(self, method, args={}, retry=False, retry_policy=None,
ticket=None, **props):
"""Send message to the same actor and return :class:`AsyncResult`."""
ticket = ticket or uuid()
reply_q = self.get_reply_queue(ticket)
self.cast(method, args, declare=[reply_q], reply_to=ticket, **props)
return self.AsyncResult(ticket, self)
def handle_cast(self, body, message):
"""Handle cast message."""
self._DISPATCH(body)
def handle_call(self, body, message):
"""Handle call message."""
try:
r = self._DISPATCH(body, ticket=message.properties['reply_to'])
except self.Next:
# don't reply, delegate to other agents.
pass
else:
self.reply(message, r)
def reply(self, req, body, **props):
with producers[self._connection].acquire(block=True) as producer:
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
return producer.publish(
body,
declare=[self.reply_exchange],
routing_key=req.properties['reply_to'],
correlation_id=req.properties.get('correlation_id'),
serializer=serializer,
**props
)
def on_message(self, body, message):
self.agent.process_message(self, body, message)
def _on_message(self, body, message):
"""What to do when a message is received.
This is a kombu consumer callback taking the standard
``body`` and ``message`` arguments.
Note that if the properties of the message contains
a value for ``reply_to`` then a proper implementation
is expected to send a reply.
"""
if message.properties.get('reply_to'):
handler = self.handle_call
else:
handler = self.handle_cast
def handle():
# Do not ack the message if an exceptional error occurs,
# but do ack the message if SystemExit or KeyboardInterrupt
# is raised, as this is probably intended.
try:
handler(body, message)
except Exception:
raise
except BaseException:
message.ack()
raise
else:
message.ack()
handle()
def _collect_replies(self, conn, channel, ticket, *args, **kwargs):
kwargs.setdefault('timeout', self.default_timeout)
if 'limit' not in kwargs and self.agent:
kwargs['limit'] = self.agent.get_default_scatter_limit()
if 'ignore_timeout' not in kwargs and not kwargs.get('limit', None):
kwargs.setdefault('ignore_timeout', False)
return collect_replies(conn, channel, self.get_reply_queue(ticket),
*args, **kwargs)
def lookup_action(self, name):
try:
if not name:
method = self.default_receive
else:
method = getattr(self.state, name)
except AttributeError:
raise KeyError(name)
if not callable(method) or name.startswith('_'):
raise KeyError(method)
return method
def default_receive(self, msg_body):
"""Override in the derived classes."""
pass
def _get_traceback(self, exc_info):
return ''.join(traceback.format_exception(*exc_info))
def _reprcall(self, method, args):
return '%s.%s' % (self.name, reprcall(method, (), args))
def bind(self, connection, agent=None):
return self.__class__(connection, self.id,
self.name, self.exchange, agent=agent)
def is_bound(self):
return self.connection is not None
def __copy__(self):
cls, args = self.__reduce__()
return cls(*args)
def __reduce__(self):
return (self.__class__, (self.connection, self.id,
self.name, self.exchange))
@property
def outbox(self):
return self.outbox_exchange
def _inbox_rr(self):
if not self._rr_exchange:
self._rr_exchange = self.get_rr_exchange()
return self._rr_exchange
@property
def inbox_rr(self):
return self._inbox_rr()
def _inbox_direct(self):
return self.exchange
@property
def inbox_direct(self):
return self._inbox_direct()
def _inbox_scatter(self):
if not self._scatter_exchange:
self._scatter_exchange = self.get_scatter_exchange()
return self._scatter_exchange
@property
def inbox_scatter(self):
return self._inbox_scatter()
@property
def _connection(self):
if not self.is_bound():
raise self.NotBoundError('Actor is not bound to any connection.')
return self.connection
@cached_property
def _default_fields(self):
return dict(BUILTIN_FIELDS, **self.default_fields)
@property
def routing_key(self):
if self.default_routing_key:
return self.default_routing_key
else:
return self.id
|
celery/cell | cell/bin/base.py | Command.handle_argv | python | def handle_argv(self, prog_name, argv):
options, args = self.parse_options(prog_name, argv)
return self.run(*args, **vars(options)) | Parses command line arguments from ``argv`` and dispatches
to :meth:`run`.
:param prog_name: The program name (``argv[0]``).
:param argv: Command arguments. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/bin/base.py#L44-L53 | [
"def run(self, *args, **options):\n raise NotImplementedError('subclass responsibility')\n",
"def parse_options(self, prog_name, arguments):\n \"\"\"Parse the available options.\"\"\"\n # Don't want to load configuration to just print the version,\n # so we handle --version manually here.\n if '--v... | class Command(object):
Parser = optparse.OptionParser
args = ''
version = __version__
option_list = ()
prog_name = None
def run(self, *args, **options):
raise NotImplementedError('subclass responsibility')
def execute_from_commandline(self, argv=None):
"""Execute application from command line.
:keyword argv: The list of command line arguments.
Defaults to ``sys.argv``.
"""
if argv is None:
argv = list(sys.argv)
self.prog_name = os.path.basename(argv[0])
return self.handle_argv(self.prog_name, argv[1:])
def usage(self):
"""Returns the command-line usage string for this app."""
return '%%prog [options] %s' % (self.args, )
def get_options(self):
"""Get supported command line options."""
return self.option_list
def exit(self, v=0):
sys.exit(v)
def exit_status(self, msg, status=0, fh=sys.stderr):
fh.write('%s\n' % (msg, ))
self.exit(status)
def exit_usage(self, msg):
sys.stderr.write('ERROR: %s\n\n' % (msg, ))
self.exit_status('Usage: %s' % (
self.usage().replace('%prog', self.prog_name), ))
def parse_options(self, prog_name, arguments):
"""Parse the available options."""
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = self.create_parser(prog_name)
options, args = parser.parse_args(arguments)
return options, args
def create_parser(self, prog_name):
return self.Parser(prog=prog_name,
usage=self.usage(),
version=self.version,
option_list=self.get_options())
|
celery/cell | cell/bin/base.py | Command.parse_options | python | def parse_options(self, prog_name, arguments):
# Don't want to load configuration to just print the version,
# so we handle --version manually here.
if '--version' in arguments:
self.exit_status(self.version, fh=sys.stdout)
parser = self.create_parser(prog_name)
options, args = parser.parse_args(arguments)
return options, args | Parse the available options. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/bin/base.py#L67-L75 | [
"def exit_status(self, msg, status=0, fh=sys.stderr):\n fh.write('%s\\n' % (msg, ))\n self.exit(status)\n",
"def create_parser(self, prog_name):\n return self.Parser(prog=prog_name,\n usage=self.usage(),\n version=self.version,\n option_li... | class Command(object):
Parser = optparse.OptionParser
args = ''
version = __version__
option_list = ()
prog_name = None
def run(self, *args, **options):
raise NotImplementedError('subclass responsibility')
def execute_from_commandline(self, argv=None):
"""Execute application from command line.
:keyword argv: The list of command line arguments.
Defaults to ``sys.argv``.
"""
if argv is None:
argv = list(sys.argv)
self.prog_name = os.path.basename(argv[0])
return self.handle_argv(self.prog_name, argv[1:])
def usage(self):
"""Returns the command-line usage string for this app."""
return '%%prog [options] %s' % (self.args, )
def get_options(self):
"""Get supported command line options."""
return self.option_list
def handle_argv(self, prog_name, argv):
"""Parses command line arguments from ``argv`` and dispatches
to :meth:`run`.
:param prog_name: The program name (``argv[0]``).
:param argv: Command arguments.
"""
options, args = self.parse_options(prog_name, argv)
return self.run(*args, **vars(options))
def exit(self, v=0):
sys.exit(v)
def exit_status(self, msg, status=0, fh=sys.stderr):
fh.write('%s\n' % (msg, ))
self.exit(status)
def exit_usage(self, msg):
sys.stderr.write('ERROR: %s\n\n' % (msg, ))
self.exit_status('Usage: %s' % (
self.usage().replace('%prog', self.prog_name), ))
def create_parser(self, prog_name):
return self.Parser(prog=prog_name,
usage=self.usage(),
version=self.version,
option_list=self.get_options())
|
celery/cell | docs/_ext/literals_to_xrefs.py | colorize | python | def colorize(text='', opts=(), **kwargs):
color_names = ('black', 'red', 'green', 'yellow',
'blue', 'magenta', 'cyan', 'white')
foreground = dict([(color_names[x], '3%s' % x) for x in range(8)])
background = dict([(color_names[x], '4%s' % x) for x in range(8)])
RESET = '0'
opt_dict = {'bold': '1',
'underscore': '4',
'blink': '5',
'reverse': '7',
'conceal': '8'}
text = str(text)
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in kwargs.items():
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = text + '\x1b[%sm' % RESET
return ('\x1b[%sm' % ';'.join(code_list)) + text | Returns your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Returns the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print colorize('first line', fg='red', opts=('noreset',))
print 'this should be red too'
print colorize('and so should this')
print 'this should not be red' | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/docs/_ext/literals_to_xrefs.py#L118-L173 | null | """
Runs through a reST file looking for old-style literals, and helps replace them
with new-style references.
"""
import re
import sys
import shelve
try:
input = input
except NameError:
input = raw_input # noqa
refre = re.compile(r'``([^`\s]+?)``')
ROLES = (
'attr',
'class',
"djadmin",
'data',
'exc',
'file',
'func',
'lookup',
'meth',
'mod',
"djadminopt",
"ref",
"setting",
"term",
"tfilter",
"ttag",
# special
"skip",
)
ALWAYS_SKIP = [
"NULL",
"True",
"False",
]
def fixliterals(fname):
data = open(fname).read()
last = 0
new = []
storage = shelve.open("/tmp/literals_to_xref.shelve")
lastvalues = storage.get("lastvalues", {})
for m in refre.finditer(data):
new.append(data[last:m.start()])
last = m.end()
line_start = data.rfind("\n", 0, m.start())
line_end = data.find("\n", m.end())
prev_start = data.rfind("\n", 0, line_start)
next_end = data.find("\n", line_end + 1)
# Skip always-skip stuff
if m.group(1) in ALWAYS_SKIP:
new.append(m.group(0))
continue
# skip when the next line is a title
next_line = data[m.end():next_end].strip()
if next_line[0] in "!-/:-@[-`{-~" and \
all(c == next_line[0] for c in next_line):
new.append(m.group(0))
continue
sys.stdout.write("\n" + "-" * 80 + "\n")
sys.stdout.write(data[prev_start + 1:m.start()])
sys.stdout.write(colorize(m.group(0), fg="red"))
sys.stdout.write(data[m.end():next_end])
sys.stdout.write("\n\n")
replace_type = None
while replace_type is None:
replace_type = input(
colorize("Replace role: ", fg="yellow")).strip().lower()
if replace_type and replace_type not in ROLES:
replace_type = None
if replace_type == "":
new.append(m.group(0))
continue
if replace_type == "skip":
new.append(m.group(0))
ALWAYS_SKIP.append(m.group(1))
continue
default = lastvalues.get(m.group(1), m.group(1))
if default.endswith("()") and \
replace_type in ("class", "func", "meth"):
default = default[:-2]
replace_value = input(
colorize("Text <target> [", fg="yellow") +
default + colorize("]: ", fg="yellow"),
).strip()
if not replace_value:
replace_value = default
new.append(":%s:`%s`" % (replace_type, replace_value))
lastvalues[m.group(1)] = replace_value
new.append(data[last:])
open(fname, "w").write("".join(new))
storage["lastvalues"] = lastvalues
storage.close()
if __name__ == '__main__':
try:
fixliterals(sys.argv[1])
except (KeyboardInterrupt, SystemExit):
print
|
celery/cell | cell/results.py | AsyncResult.get | python | def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs)) | What kind of arguments should be pass here | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L30-L33 | [
"def _first(self, replies):\n if replies is not None:\n replies = list(replies)\n if replies:\n return replies[0]\n raise self.NoReplyError('No reply received within time constraint')\n",
"def gather(self, propagate=True, **kwargs):\n # mock collect_replies.\n # check to_pytho... | class AsyncResult:
Error = CellError
NoReplyError = NoReplyError
def __init__(self, ticket, actor):
self.ticket = ticket
self.actor = actor
self._result = None
def _first(self, replies):
if replies is not None:
replies = list(replies)
if replies:
return replies[0]
raise self.NoReplyError('No reply received within time constraint')
def result(self, **kwargs):
if not self._result:
self._result = self.get(**kwargs)
return self._result
def gather(self, propagate=True, **kwargs):
# mock collect_replies.
# check to_python is invoked for every result
# check collect_replies is called with teh exact parameters
# test collect_replies separately
connection = self.actor.connection
gather = self._gather
with producers[connection].acquire(block=True) as producer:
for r in gather(producer.connection, producer.channel, self.ticket,
propagate=propagate, **kwargs):
yield r
def _gather(self, *args, **kwargs):
"""Generator over the results
"""
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs))
def to_python(self, reply, propagate=True):
"""Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate - Propagate exceptions raised instead of returning
a result representation of the error.
"""
try:
return reply['ok']
except KeyError:
error = self.Error(*reply.get('nok') or ())
if propagate:
raise error
return error
|
celery/cell | cell/results.py | AsyncResult._gather | python | def _gather(self, *args, **kwargs):
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs)) | Generator over the results | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L47-L52 | null | class AsyncResult:
Error = CellError
NoReplyError = NoReplyError
def __init__(self, ticket, actor):
self.ticket = ticket
self.actor = actor
self._result = None
def _first(self, replies):
if replies is not None:
replies = list(replies)
if replies:
return replies[0]
raise self.NoReplyError('No reply received within time constraint')
def result(self, **kwargs):
if not self._result:
self._result = self.get(**kwargs)
return self._result
def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs))
def gather(self, propagate=True, **kwargs):
# mock collect_replies.
# check to_python is invoked for every result
# check collect_replies is called with teh exact parameters
# test collect_replies separately
connection = self.actor.connection
gather = self._gather
with producers[connection].acquire(block=True) as producer:
for r in gather(producer.connection, producer.channel, self.ticket,
propagate=propagate, **kwargs):
yield r
def to_python(self, reply, propagate=True):
"""Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate - Propagate exceptions raised instead of returning
a result representation of the error.
"""
try:
return reply['ok']
except KeyError:
error = self.Error(*reply.get('nok') or ())
if propagate:
raise error
return error
|
celery/cell | cell/results.py | AsyncResult.to_python | python | def to_python(self, reply, propagate=True):
try:
return reply['ok']
except KeyError:
error = self.Error(*reply.get('nok') or ())
if propagate:
raise error
return error | Extracts the value out of the reply message.
:param reply: In the case of a successful call the reply message
will be::
{'ok': return_value, **default_fields}
Therefore the method returns: return_value, **default_fields
If the method raises an exception the reply message
will be::
{'nok': [repr exc, str traceback], **default_fields}
:keyword propagate - Propagate exceptions raised instead of returning
a result representation of the error. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/results.py#L54-L79 | null | class AsyncResult:
Error = CellError
NoReplyError = NoReplyError
def __init__(self, ticket, actor):
self.ticket = ticket
self.actor = actor
self._result = None
def _first(self, replies):
if replies is not None:
replies = list(replies)
if replies:
return replies[0]
raise self.NoReplyError('No reply received within time constraint')
def result(self, **kwargs):
if not self._result:
self._result = self.get(**kwargs)
return self._result
def get(self, **kwargs):
"What kind of arguments should be pass here"
kwargs.setdefault('limit', 1)
return self._first(self.gather(**kwargs))
def gather(self, propagate=True, **kwargs):
# mock collect_replies.
# check to_python is invoked for every result
# check collect_replies is called with teh exact parameters
# test collect_replies separately
connection = self.actor.connection
gather = self._gather
with producers[connection].acquire(block=True) as producer:
for r in gather(producer.connection, producer.channel, self.ticket,
propagate=propagate, **kwargs):
yield r
def _gather(self, *args, **kwargs):
"""Generator over the results
"""
propagate = kwargs.pop('propagate', True)
return (self.to_python(reply, propagate=propagate)
for reply in self.actor._collect_replies(*args, **kwargs))
|
celery/cell | cell/agents.py | dAgent.spawn | python | def spawn(self, cls, kwargs={}, nowait=False):
actor_id = uuid()
if str(qualname(cls)) == '__builtin__.unicode':
name = cls
else:
name = qualname(cls)
res = self.call('spawn', {'cls': name, 'id': actor_id,
'kwargs': kwargs},
type=ACTOR_TYPE.RR, nowait=nowait)
return ActorProxy(name, actor_id, res, agent=self,
connection=self.connection, **kwargs) | Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to True (default) the call waits for the
result of spawning the actor. if False, the spawning
is asynchronous.
:returns :class:`~.cell.actors.ActorProxy`:,
holding the id of the spawned actor. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L99-L128 | [
"def qualname(obj): # noqa\n if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):\n obj = obj.__class__\n return '%s.%s' % (obj.__module__, obj.__name__)\n",
"def call(self, method, args={}, retry=False, retry_policy=None,\n ticket=None, **props):\n \"\"\"Send message to the sam... | class dAgent(Actor):
types = (ACTOR_TYPE.RR, ACTOR_TYPE.SCATTER, ACTOR_TYPE.DIRECT)
MAX_ACTORS = 2
class state:
def __init__(self):
self.registry = {}
def _start_actor_consumer(self, actor):
actor.consumer = actor.Consumer(self.connection.channel())
actor.consumer.consume()
self.registry[actor.id] = actor
actor.agent = weakref.proxy(self.agent)
actor.on_agent_ready()
def spawn(self, cls, id, kwargs={}):
"""Add actor to the registry and start the actor's main method."""
try:
actor = symbol_by_name(cls)(
connection=self.connection, id=id, **kwargs)
if actor.id in self.registry:
warn('Actor id %r already exists', actor.id)
self._start_actor_consumer(actor)
debug('Actor registered: %s', cls)
return actor.id
except Exception as exc:
error('Cannot start actor: %r', exc, exc_info=True)
def stop_all(self):
self.agent.shutdown()
def reset(self):
debug('Resetting active actors')
for actor in values(self.registry):
if actor.consumer:
ignore_errors(self.connection, actor.consumer.cancel)
actor.connection = self.connection
self._start_actor_consumer(actor)
def kill(self, actor_id):
if actor_id not in self.registry:
raise Actor.Next()
else:
actor = self.registry.pop(actor_id)
if actor.consumer and actor.consumer.channel:
ignore_errors(self.connection, actor.consumer.cancel)
def select(self, cls):
for key, val in items(self.registry):
if qualname(val.__class__) == cls:
return key
# delegate to next agent.
raise Actor.Next()
def _shutdown(self, cancel=True, close=True, clear=True):
try:
for actor in values(self.registry):
if actor and actor.consumer:
if cancel:
ignore_errors(self.connection,
actor.consumer.cancel)
if close and actor.consumer.channel:
ignore_errors(self.connection,
actor.consumer.channel.close)
finally:
if clear:
self.registry.clear()
def __init__(self, connection, id=None):
self.registry = {}
Actor.__init__(self, connection=connection, id=id, agent=self)
def spawn_group(self, group, cls, n=1, nowait=False):
return self.spawn(
group, {'act_type': qualname(cls), 'number': n}, nowait)
def select(self, cls, **kwargs):
"""Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
"""
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self,
connection=self.connection, **kwargs)
def kill(self, actor_id, nowait=False):
return self.scatter('kill', {'actor_id': actor_id},
nowait=nowait)
# ------------------------------------------------------------
# Control methods. To be invoked locally
# ------------------------------------------------------------
def start(self):
debug('Starting agent %s', self.id)
consumer = self.Consumer(self.connection.channel())
consumer.consume()
self.state.reset()
def stop(self):
debug('Stopping agent %s', self.id)
self.state._shutdown(clear=False)
def shutdown(self):
debug('Shutdown agent %s', self.id)
self.state._shutdown(cancel=False)
def process_message(self, actor, body, message):
"""Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving of a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`.
"""
if actor is not self and self.is_green():
self.pool.spawn_n(actor._on_message, body, message)
else:
if not self.is_green() and message.properties.get('reply_to'):
warn('Starting a blocking call (%s) on actor (%s) '
'when greenlets are disabled.',
itemgetter('method')(body), actor.__class__)
actor._on_message(body, message)
def is_green(self):
return self.pool is not None and self.pool.is_green
def get_default_scatter_limit(self):
return None
|
celery/cell | cell/agents.py | dAgent.select | python | def select(self, cls, **kwargs):
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self,
connection=self.connection, **kwargs) | Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L130-L139 | [
"def qualname(obj): # noqa\n if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):\n obj = obj.__class__\n return '%s.%s' % (obj.__module__, obj.__name__)\n",
"def first_reply(replies, key):\n try:\n return next(replies)\n except StopIteration:\n raise KeyError(key)\n",
... | class dAgent(Actor):
types = (ACTOR_TYPE.RR, ACTOR_TYPE.SCATTER, ACTOR_TYPE.DIRECT)
MAX_ACTORS = 2
class state:
def __init__(self):
self.registry = {}
def _start_actor_consumer(self, actor):
actor.consumer = actor.Consumer(self.connection.channel())
actor.consumer.consume()
self.registry[actor.id] = actor
actor.agent = weakref.proxy(self.agent)
actor.on_agent_ready()
def spawn(self, cls, id, kwargs={}):
"""Add actor to the registry and start the actor's main method."""
try:
actor = symbol_by_name(cls)(
connection=self.connection, id=id, **kwargs)
if actor.id in self.registry:
warn('Actor id %r already exists', actor.id)
self._start_actor_consumer(actor)
debug('Actor registered: %s', cls)
return actor.id
except Exception as exc:
error('Cannot start actor: %r', exc, exc_info=True)
def stop_all(self):
self.agent.shutdown()
def reset(self):
debug('Resetting active actors')
for actor in values(self.registry):
if actor.consumer:
ignore_errors(self.connection, actor.consumer.cancel)
actor.connection = self.connection
self._start_actor_consumer(actor)
def kill(self, actor_id):
if actor_id not in self.registry:
raise Actor.Next()
else:
actor = self.registry.pop(actor_id)
if actor.consumer and actor.consumer.channel:
ignore_errors(self.connection, actor.consumer.cancel)
def select(self, cls):
for key, val in items(self.registry):
if qualname(val.__class__) == cls:
return key
# delegate to next agent.
raise Actor.Next()
def _shutdown(self, cancel=True, close=True, clear=True):
try:
for actor in values(self.registry):
if actor and actor.consumer:
if cancel:
ignore_errors(self.connection,
actor.consumer.cancel)
if close and actor.consumer.channel:
ignore_errors(self.connection,
actor.consumer.channel.close)
finally:
if clear:
self.registry.clear()
def __init__(self, connection, id=None):
self.registry = {}
Actor.__init__(self, connection=connection, id=id, agent=self)
def spawn_group(self, group, cls, n=1, nowait=False):
return self.spawn(
group, {'act_type': qualname(cls), 'number': n}, nowait)
def spawn(self, cls, kwargs={}, nowait=False):
"""Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to True (default) the call waits for the
result of spawning the actor. if False, the spawning
is asynchronous.
:returns :class:`~.cell.actors.ActorProxy`:,
holding the id of the spawned actor.
"""
actor_id = uuid()
if str(qualname(cls)) == '__builtin__.unicode':
name = cls
else:
name = qualname(cls)
res = self.call('spawn', {'cls': name, 'id': actor_id,
'kwargs': kwargs},
type=ACTOR_TYPE.RR, nowait=nowait)
return ActorProxy(name, actor_id, res, agent=self,
connection=self.connection, **kwargs)
def kill(self, actor_id, nowait=False):
return self.scatter('kill', {'actor_id': actor_id},
nowait=nowait)
# ------------------------------------------------------------
# Control methods. To be invoked locally
# ------------------------------------------------------------
def start(self):
debug('Starting agent %s', self.id)
consumer = self.Consumer(self.connection.channel())
consumer.consume()
self.state.reset()
def stop(self):
debug('Stopping agent %s', self.id)
self.state._shutdown(clear=False)
def shutdown(self):
debug('Shutdown agent %s', self.id)
self.state._shutdown(cancel=False)
def process_message(self, actor, body, message):
"""Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving of a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`.
"""
if actor is not self and self.is_green():
self.pool.spawn_n(actor._on_message, body, message)
else:
if not self.is_green() and message.properties.get('reply_to'):
warn('Starting a blocking call (%s) on actor (%s) '
'when greenlets are disabled.',
itemgetter('method')(body), actor.__class__)
actor._on_message(body, message)
def is_green(self):
return self.pool is not None and self.pool.is_green
def get_default_scatter_limit(self):
return None
|
celery/cell | cell/agents.py | dAgent.process_message | python | def process_message(self, actor, body, message):
if actor is not self and self.is_green():
self.pool.spawn_n(actor._on_message, body, message)
else:
if not self.is_green() and message.properties.get('reply_to'):
warn('Starting a blocking call (%s) on actor (%s) '
'when greenlets are disabled.',
itemgetter('method')(body), actor.__class__)
actor._on_message(body, message) | Process actor message depending depending on the the worker settings.
If greenlets are enabled in the worker, the actor message is processed
in a greenlet from the greenlet pool,
Otherwise, the message is processed by the same thread.
The method is invoked from the callback `cell.actors.Actor.on_message`
upon receiving of a message.
:keyword actor: instance of :class:`Actor` or its derivative.
The actor instance to process the message.
For the full list of arguments see
:meth:`cell.actors.Actor._on_message`. | train | https://github.com/celery/cell/blob/c7f9b3a0c11ae3429eacb4114279cf2614e94a48/cell/agents.py#L164-L187 | [
"def _on_message(self, body, message):\n \"\"\"What to do when a message is received.\n\n This is a kombu consumer callback taking the standard\n ``body`` and ``message`` arguments.\n\n Note that if the properties of the message contains\n a value for ``reply_to`` then a proper implementation\n is... | class dAgent(Actor):
types = (ACTOR_TYPE.RR, ACTOR_TYPE.SCATTER, ACTOR_TYPE.DIRECT)
MAX_ACTORS = 2
class state:
def __init__(self):
self.registry = {}
def _start_actor_consumer(self, actor):
actor.consumer = actor.Consumer(self.connection.channel())
actor.consumer.consume()
self.registry[actor.id] = actor
actor.agent = weakref.proxy(self.agent)
actor.on_agent_ready()
def spawn(self, cls, id, kwargs={}):
"""Add actor to the registry and start the actor's main method."""
try:
actor = symbol_by_name(cls)(
connection=self.connection, id=id, **kwargs)
if actor.id in self.registry:
warn('Actor id %r already exists', actor.id)
self._start_actor_consumer(actor)
debug('Actor registered: %s', cls)
return actor.id
except Exception as exc:
error('Cannot start actor: %r', exc, exc_info=True)
def stop_all(self):
self.agent.shutdown()
def reset(self):
debug('Resetting active actors')
for actor in values(self.registry):
if actor.consumer:
ignore_errors(self.connection, actor.consumer.cancel)
actor.connection = self.connection
self._start_actor_consumer(actor)
def kill(self, actor_id):
if actor_id not in self.registry:
raise Actor.Next()
else:
actor = self.registry.pop(actor_id)
if actor.consumer and actor.consumer.channel:
ignore_errors(self.connection, actor.consumer.cancel)
def select(self, cls):
for key, val in items(self.registry):
if qualname(val.__class__) == cls:
return key
# delegate to next agent.
raise Actor.Next()
def _shutdown(self, cancel=True, close=True, clear=True):
try:
for actor in values(self.registry):
if actor and actor.consumer:
if cancel:
ignore_errors(self.connection,
actor.consumer.cancel)
if close and actor.consumer.channel:
ignore_errors(self.connection,
actor.consumer.channel.close)
finally:
if clear:
self.registry.clear()
def __init__(self, connection, id=None):
self.registry = {}
Actor.__init__(self, connection=connection, id=id, agent=self)
def spawn_group(self, group, cls, n=1, nowait=False):
return self.spawn(
group, {'act_type': qualname(cls), 'number': n}, nowait)
def spawn(self, cls, kwargs={}, nowait=False):
"""Spawn a new actor on a celery worker by sending
a remote command to the worker.
:param cls: the name of the :class:`~.cell.actors.Actor` class or its
derivative.
:keyword kwargs: The keyword arguments to pass on to
actor __init__ (a :class:`dict`)
:keyword nowait: If set to True (default) the call waits for the
result of spawning the actor. if False, the spawning
is asynchronous.
:returns :class:`~.cell.actors.ActorProxy`:,
holding the id of the spawned actor.
"""
actor_id = uuid()
if str(qualname(cls)) == '__builtin__.unicode':
name = cls
else:
name = qualname(cls)
res = self.call('spawn', {'cls': name, 'id': actor_id,
'kwargs': kwargs},
type=ACTOR_TYPE.RR, nowait=nowait)
return ActorProxy(name, actor_id, res, agent=self,
connection=self.connection, **kwargs)
def select(self, cls, **kwargs):
"""Get the id of already spawned actor
:keyword actor: the name of the :class:`Actor` class
"""
name = qualname(cls)
id = first_reply(
self.scatter('select', {'cls': name}, limit=1), cls)
return ActorProxy(name, id, agent=self,
connection=self.connection, **kwargs)
def kill(self, actor_id, nowait=False):
return self.scatter('kill', {'actor_id': actor_id},
nowait=nowait)
# ------------------------------------------------------------
# Control methods. To be invoked locally
# ------------------------------------------------------------
def start(self):
debug('Starting agent %s', self.id)
consumer = self.Consumer(self.connection.channel())
consumer.consume()
self.state.reset()
def stop(self):
debug('Stopping agent %s', self.id)
self.state._shutdown(clear=False)
def shutdown(self):
debug('Shutdown agent %s', self.id)
self.state._shutdown(cancel=False)
def is_green(self):
return self.pool is not None and self.pool.is_green
def get_default_scatter_limit(self):
return None
|
dr-leo/pandaSDMX | pandasdmx/utils/anynamedtuple.py | namedtuple | python | def namedtuple(typename, field_names, verbose=False, rename=False):
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split()
field_names = list(map(str, field_names))
typename = str(typename)
for name in [typename] + field_names:
if type(name) != str:
raise TypeError('Type names and field names must be strings')
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
if not _isidentifier(typename):
raise ValueError('Type names must be valid '
'identifiers: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
arg_names = ['_' + str(i) for i in range(len(field_names))]
# Fill-in the class template
class_definition = _class_template.format(
typename=typename,
field_names=tuple(field_names),
num_fields=len(field_names),
arg_list=repr(tuple(arg_names)).replace("'", "")[1:-1],
repr_fmt=', '.join(_repr_template.format(name=name)
for name in field_names),
field_defs='\n'.join(_field_template.format(index=index, name=name)
for index, name in enumerate(field_names) if _isidentifier(name))
)
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__='namedtuple_%s' % typename)
exec(class_definition, namespace)
result = namespace[typename]
result._source = class_definition
if verbose:
print(result._source)
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(
1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result | Returns a new subclass of tuple with named fields.
This is a patched version of collections.namedtuple from the stdlib.
Unlike the latter, it accepts non-identifier strings as field names.
All values are accessible through dict syntax. Fields whose names are
identifiers are also accessible via attribute syntax as in ordinary namedtuples, alongside traditional
indexing. This feature is needed as SDMX allows field names
to contain '-'.
>>> Point = namedtuple('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessable by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22) | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/utils/anynamedtuple.py#L89-L172 | [
"def _isidentifier3(name):\n return name.isidentifier()\n",
"def _isidentifier2(name):\n if (name[0].isdigit()\n or [c for c in name if not (c.isalnum() or c == '_')]):\n return False\n else:\n return True\n"
] | import sys as _sys
from operator import itemgetter as _itemgetter, eq as _eq
from keyword import iskeyword as _iskeyword
# 2to3 helpers
def _isidentifier3(name):
return name.isidentifier()
def _isidentifier2(name):
if (name[0].isdigit()
or [c for c in name if not (c.isalnum() or c == '_')]):
return False
else:
return True
if _sys.version.startswith('3'):
_isidentifier = _isidentifier3
else:
_isidentifier = _isidentifier2
##########################################################################
# namedtuple
##########################################################################
_class_template = """\
from pandasdmx.utils import str_type
from operator import itemgetter as _itemgetter
from collections import OrderedDict
class {typename}(tuple):
'{typename}({arg_list})'
__slots__ = ()
_fields = {field_names!r}
def __new__(_cls, {arg_list}):
'Create new instance of {typename}({arg_list})'
return tuple.__new__(_cls, ({arg_list}))
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new {typename} object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != {num_fields:d}:
raise TypeError('Expected {num_fields:d} arguments, got %d' % len(result))
return result
def _replace(_self, **kwds):
'Return a new {typename} object replacing specified fields with new values'
result = _self._make(map(kwds.pop, {field_names!r}, _self))
if kwds:
raise ValueError('Got unexpected field names: %r' % list(kwds))
return result
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '({repr_fmt})' % self
def _asdict(self):
'Return a new OrderedDict which maps field names to their values.'
return OrderedDict(zip(self._fields, self))
def __getnewargs__(self):
'Return self as a plain tuple. Used by copy and pickle.'
return tuple(self)
def __getitem__(self, key):
if isinstance(key, str_type):
return super({typename}, self).__getitem__(self._fields.index(key))
else:
return super({typename}, self).__getitem__(key)
{field_defs}
"""
_repr_template = '{name}=%r'
_field_template = '''\
{name} = property(_itemgetter({index:d}), doc='Alias for field number {index:d}')
'''
|
dr-leo/pandaSDMX | pandasdmx/reader/sdmxjson.py | Reader.write_source | python | def write_source(self, filename):
'''
Save source to file by calling `write` on the root element.
'''
with open(filename, 'w') as fp:
return json.dump(self.message._elem, fp, indent=4, sort_keys=True) | Save source to file by calling `write` on the root element. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/reader/sdmxjson.py#L86-L91 | null | class Reader(BaseReader):
"""
Read SDMXJSON 2.1 and expose it as instances from pandasdmx.model
"""
def read_as_str(self, name, sdmxobj, first_only=True):
result = self._paths[name](sdmxobj._elem)
if result:
if first_only:
return result[0].value
else:
return [r.value for r in result]
def initialize(self, source):
tree = json.load(source)
# pre-fetch some structures for efficient use in series and obs
a = tree['structure']['attributes']
self._dataset_attrib = a['dataSet']
self._series_attrib = a['series']
self._obs_attrib = a['observation']
d = tree['structure']['dimensions']
self._dataset_dim = d.get('dataSet', [])
self._series_dim = d['series']
self._obs_dim = d['observation']
self._dataset_dim_key = {dim['keyPosition']: dim['id']
for dim in self._dataset_dim}
self._dataset_dim_values = {dim['keyPosition']: dim['values'][0]['id']
for dim in self._dataset_dim}
if self._series_dim:
self._key_len = len(self._dataset_dim) + len(self._series_dim)
# Map keyPositions of dimensions at series level to dimension IDs, like with dataset-level dims above.
# In case of cross-sectional dataset, the only dimension at series level has no
# keyPosition, eg. TIME_PERIOD. Instead, the keyPosition of the dim at observation
# is used to fill the gap.
self._series_dim_key = {dim.get('keyPosition',
self._obs_dim[0].get('keyPosition')):
dim['id'] for dim in self._series_dim}
self.SeriesKeyTuple = namedtuple_factory('SeriesKeyTuple',
(self._dataset_dim_key.get(i) or self._series_dim_key.get(i)
for i in range(self._key_len)))
else:
# Dataset must be flat
self._key_len = len(self._dataset_dim) + len(self._obs_dim)
self.obs_attr_id = [d['id'] for d in self._obs_attrib]
# init message instance
cls = model.DataMessage
self.message = cls(self, tree)
return self.message
# flag to prevent multiple compiling. See BaseReader.__init__
_compiled = False
def write_source(self, filename):
'''
Save source to file by calling `write` on the root element.
'''
with open(filename, 'w') as fp:
return json.dump(self.message._elem, fp, indent=4, sort_keys=True)
_paths = {
# 'footer_text': 'com:Text/text()',
# 'footer_code': '@code',
# 'footer_severity': '@severity',
# 'dataflow_from_msg': 'mes:Structures/str:Dataflows',
# 'constraint_attachment': 'str:ConstraintAttachment',
# 'include': '@include',
# 'id': '@id',
# 'urn': '@urn',
# 'url': '@url',
# 'uri': '@uri',
# 'agencyID': '@agencyID',
# 'maintainable_parent_id': '@maintainableParentID',
# 'value': 'com:Value/text()',
'headerID': '$.header.id',
'header_prepared': '$.header.prepared',
'header_sender': '$.header.sender.*',
# 'header_receiver': 'mes:Receiver/@*',
# 'assignment_status': '@assignmentStatus',
# 'error': 'mes:error/@*',
# 'ref_version': '@version',
# 'concept_id': 'str:ConceptIdentity',
# 'position': '@position',
# 'isfinal': '@isfinal',
# 'ref_package': '@package',
# 'ref_class': '@class',
# 'ref_target': 'str:Target',
# 'ref_source': 'str:Source',
# 'ref_structure': 'str:Structure',
# 'annotationtype': 'com:AnnotationType/text()',
# 'generic_obs_path': 'gen:Obs',
# 'obs_key_id_path': 'gen:ObsKey/gen:Value/@id',
# 'obs_key_values_path': 'gen:ObsKey/gen:Value/@value',
# 'series_key_values_path': 'gen:SeriesKey/gen:Value/@value',
# 'series_key_id_path': 'gen:SeriesKey/gen:Value/@id',
# 'generic_series_dim_path': 'gen:ObsDimension/@value',
# 'group_key_values_path': 'gen:GroupKey/gen:Value/@value',
# 'group_key_id_path': 'gen:GroupKey/gen:Value/@id',
# 'obs_value_path': 'gen:ObsValue/@value',
# 'attr_id_path': 'gen:Attributes/gen:Value/@id',
# 'attr_values_path': 'gen:Attributes/gen:Value/@value',
# model.Code: 'str:Code',
# model.Categorisation: 'str:Categorisation',
# model.CategoryScheme: 'mes:Structures/str:CategorySchemes/str:CategoryScheme',
# model.DataStructureDefinition: 'mes:Structures/str:DataStructures/str:DataStructure',
# model.DataflowDefinition: 'str:Dataflow',
# model.ConceptScheme: 'mes:Structures/str:Concepts/str:ConceptScheme',
# model.ContentConstraint: 'mes:Structures/str:Constraints/str:ContentConstraint',
# model.Concept: 'str:Concept',
# model.Codelist: 'mes:Structures/str:Codelists/str:Codelist',
# model.Categorisations: 'mes:Structures/str:Categorisations',
model.Footer: 'footer.message',
# model.Category: 'str:Category',
# model.DimensionDescriptor: 'str:DataStructureComponents/str:DimensionList',
# model.Dimension: 'str:Dimension',
# model.TimeDimension: 'str:TimeDimension',
# model.MeasureDimension: 'str:MeasureDimension',
# model.MeasureDescriptor: 'str:DataStructureComponents/str:MeasureList',
# model.PrimaryMeasure: 'str:PrimaryMeasure',
# model.AttributeDescriptor: 'str:DataStructureComponents/str:AttributeList',
# model.DataAttribute: 'str:Attribute',
# model.CubeRegion: 'str:CubeRegion',
# model.KeyValue: 'com:KeyValue',
# model.Ref: 'Ref',
model.Header: '$.header',
# model.Annotation: 'com:Annotations/com:Annotation',
# model.Group: 'gen:Group',
# model.Series: 'gen:Series',
model.DataSet: '$.dataSets[0]',
# 'int_str_names': './*[local-name() = $name]/@xml:lang',
# model.Representation: 'str:LocalRepresentation',
# 'int_str_values': './*[local-name() = $name]/text()',
# 'enumeration': 'str:Enumeration',
# 'texttype': 'str:TextFormat/@textType',
# 'maxlength': 'str:TextFormat/@maxLength',
# # need this? It is just a non-offset Ref
# 'attr_relationship': '*/Ref/@id',
}
@classmethod
def _compile_paths(cls):
for key, path in cls._paths.items():
cls._paths[key] = XPath(path)
def international_str(self, name, sdmxobj):
'''
return DictLike of xml:lang attributes. If node has no attributes,
assume that language is 'en'.
'''
# Get language tokens like 'en', 'fr'...
elem_attrib = self._paths['int_str_names'](sdmxobj._elem, name=name)
values = self._paths['int_str_values'](sdmxobj._elem, name=name)
# Unilingual strings have no attributes. Assume 'en' instead.
if not elem_attrib:
elem_attrib = ['en']
return DictLike(zip(elem_attrib, values))
def header_error(self, sdmxobj):
try:
return DictLike(sdmxobj._elem.Error.attrib)
except AttributeError:
return None
def dim_at_obs(self, sdmxobj):
if len(self._obs_dim) > 1:
return 'AllDimensions'
else:
return self._obs_dim[0]['id']
def structured_by(self, sdmxobj):
return None # complete this
# Types for generic observations
_ObsTuple = namedtuple_factory(
'GenericObservation', ('key', 'value', 'attrib'))
_SeriesObsTuple = namedtuple_factory(
'SeriesObservation', ('dim', 'value', 'attrib'))
# Operators
getitem0 = itemgetter(0)
getitem_key = itemgetter('_key')
def iter_generic_obs(self, sdmxobj, with_value, with_attributes):
# Make type namedtuple for obs_key. It must be
# merged with any dimension values at dataset level maintaining the
# key position order.
# Note that the measure dimension (such as TIME_PERIOD) has no key position.
# We fill this gap by injecting the highest key position.
_obs_dim_key = {dim.get('keyPosition', self._key_len - 1): dim['id']
for dim in self._obs_dim}
_GenericObsKey = namedtuple_factory('GenericObservationKey',
(self._dataset_dim_key.get(d)
or _obs_dim_key.get(d)
for d in range(self._key_len)))
obs_l = sorted(sdmxobj._elem.value['observations'].items(),
key=self.getitem0)
for dim, value in obs_l:
# Construct the key for this observation
key_idx = [int(i) for i in dim.split(':')]
obs_key_values = [d['values'][i]['id'] for i, d in
zip(key_idx, self._obs_dim)]
obs_key = _GenericObsKey._make(self._dataset_dim_values.get(d)
or obs_key_values.pop(0)
for d in range(self._key_len))
# Read the value
obs_value = value[0] if with_value else None
# Read any attributes
if with_attributes and len(value) > 1:
obs_attr_idx = value[1:]
obs_attr_raw = [(d['id'],
d['values'][i].get('id'))
for i, d in zip(obs_attr_idx, self._obs_attrib)]
if obs_attr_raw:
obs_attr_id, obs_attr_values = zip(*obs_attr_raw)
obs_attr_type = namedtuple_factory(
'ObsAttributes', obs_attr_id)
obs_attr = obs_attr_type(*obs_attr_values)
else:
obs_attr = None
else:
obs_attr = None
yield self._SeriesObsTuple(obs_key, obs_value, obs_attr)
def generic_series(self, sdmxobj):
for key, series in sdmxobj._elem.value['series'].items():
series['_key'] = key
for series in sorted(sdmxobj._elem.value['series'].values(), key=self.getitem_key):
yield model.Series(self, series, dataset=sdmxobj)
def generic_groups(self, sdmxobj):
return []
def series_key(self, sdmxobj):
key_idx = [int(i) for i in sdmxobj._elem['_key'].split(':')]
series_key_values = [d['values'][i]['id'] for i, d in
zip(key_idx, self._series_dim)]
full_key_values = [self._dataset_dim_values.get(d)
or series_key_values.pop(0)
for d in range(self._key_len)]
return self.SeriesKeyTuple._make(full_key_values)
def group_key(self, sdmxobj):
group_key_id = self._paths['group_key_id_path'](sdmxobj._elem)
group_key_values = self._paths[
'group_key_values_path'](sdmxobj._elem)
GroupKeyTuple = namedtuple_factory('GroupKey', group_key_id)
return GroupKeyTuple._make(group_key_values)
def dataset_attrib(self, sdmxobj):
value_idx = sdmxobj._elem.value.get('attributes')
if value_idx:
attrib_list = [(a['id'],
a['values'][i].get('id', a['values'][i]['name']))
for i, a in zip(value_idx, self._dataset_attrib) if i is not None]
attrib_ids, attrib_values = zip(*attrib_list)
return namedtuple_factory('Attrib', attrib_ids)(*attrib_values)
def series_attrib(self, sdmxobj):
value_idx = sdmxobj._elem.get('attributes')
if value_idx:
attrib_list = [(a['id'],
a['values'][i].get('id', a['values'][i]['name']))
for i, a in zip(value_idx, self._series_attrib) if i is not None]
attrib_ids, attrib_values = zip(*attrib_list)
return namedtuple_factory('Attrib', attrib_ids)(*attrib_values)
    def iter_generic_series_obs(self, sdmxobj, with_value, with_attributes,
                                reverse_obs=False):
        """Yield a SeriesObservation namedtuple per observation of a series.

        Observations are sorted by their raw dimension-index key; set
        ``reverse_obs`` to iterate in descending order. ``with_value`` and
        ``with_attributes`` control whether value/attributes are extracted;
        otherwise the corresponding fields are None.
        """
        obs_l = sorted(sdmxobj._elem['observations'].items(),
                       key=self.getitem0, reverse=reverse_obs)
        for obs in obs_l:
            # value for dim at obs, e.g. '2014' for time series.
            # This method is only called when each obs has exactly one
            # dimension, so that dimension is at index 0.
            obs_dim_value = self._obs_dim[0]['values'][int(obs[0])]['id']
            obs_value = obs[1][0] if with_value else None
            if with_attributes and len(obs[1]) > 1:
                # Remaining items are indices into self._obs_attrib; a None
                # index means the attribute is not set for this observation.
                obs_attr_idx = obs[1][1:]
                obs_attr_raw = [(d['id'],
                                 d['values'][i].get('id'))
                                for i, d in zip(obs_attr_idx, self._obs_attrib) if i is not None]
                if obs_attr_raw:
                    obs_attr_id, obs_attr_values = zip(*obs_attr_raw)
                    obs_attr_type = namedtuple_factory(
                        'ObsAttributes', obs_attr_id)
                    obs_attr = obs_attr_type(*obs_attr_values)
                else:
                    obs_attr = None
            else:
                obs_attr = None
            yield self._SeriesObsTuple(obs_dim_value, obs_value, obs_attr)
|
dr-leo/pandaSDMX | pandasdmx/reader/sdmxjson.py | Reader.international_str | python | def international_str(self, name, sdmxobj):
'''
return DictLike of xml:lang attributes. If node has no attributes,
assume that language is 'en'.
'''
# Get language tokens like 'en', 'fr'...
elem_attrib = self._paths['int_str_names'](sdmxobj._elem, name=name)
values = self._paths['int_str_values'](sdmxobj._elem, name=name)
# Unilingual strings have no attributes. Assume 'en' instead.
if not elem_attrib:
elem_attrib = ['en']
return DictLike(zip(elem_attrib, values)) | return DictLike of xml:lang attributes. If node has no attributes,
assume that language is 'en'. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/reader/sdmxjson.py#L177-L188 | null | class Reader(BaseReader):
"""
Read SDMXJSON 2.1 and expose it as instances from pandasdmx.model
"""
def read_as_str(self, name, sdmxobj, first_only=True):
result = self._paths[name](sdmxobj._elem)
if result:
if first_only:
return result[0].value
else:
return [r.value for r in result]
    def initialize(self, source):
        """Parse the JSON message from ``source`` and pre-compute lookup
        structures used by the series/observation iterators.

        Returns the :class:`model.DataMessage` wrapping the parsed tree.
        """
        tree = json.load(source)
        # pre-fetch some structures for efficient use in series and obs
        a = tree['structure']['attributes']
        self._dataset_attrib = a['dataSet']
        self._series_attrib = a['series']
        self._obs_attrib = a['observation']
        d = tree['structure']['dimensions']
        self._dataset_dim = d.get('dataSet', [])
        self._series_dim = d['series']
        self._obs_dim = d['observation']
        # Map each dataset-level dimension's keyPosition to its id, and to its
        # single (fixed) value, for fast merging into series/obs keys.
        self._dataset_dim_key = {dim['keyPosition']: dim['id']
                                 for dim in self._dataset_dim}
        self._dataset_dim_values = {dim['keyPosition']: dim['values'][0]['id']
                                    for dim in self._dataset_dim}
        if self._series_dim:
            self._key_len = len(self._dataset_dim) + len(self._series_dim)
            # Map keyPositions of dimensions at series level to dimension IDs, like with dataset-level dims above.
            # In case of cross-sectional dataset, the only dimension at series level has no
            # keyPosition, eg. TIME_PERIOD. Instead, the keyPosition of the dim at observation
            # is used to fill the gap.
            self._series_dim_key = {dim.get('keyPosition',
                                            self._obs_dim[0].get('keyPosition')):
                                    dim['id'] for dim in self._series_dim}
            self.SeriesKeyTuple = namedtuple_factory('SeriesKeyTuple',
                                                     (self._dataset_dim_key.get(i) or self._series_dim_key.get(i)
                                                      for i in range(self._key_len)))
        else:
            # Dataset must be flat
            self._key_len = len(self._dataset_dim) + len(self._obs_dim)
        self.obs_attr_id = [d['id'] for d in self._obs_attrib]
        # init message instance
        cls = model.DataMessage
        self.message = cls(self, tree)
        return self.message
# flag to prevent multiple compiling. See BaseReader.__init__
_compiled = False
def write_source(self, filename):
'''
Save source to file by calling `write` on the root element.
'''
with open(filename, 'w') as fp:
return json.dump(self.message._elem, fp, indent=4, sort_keys=True)
_paths = {
# 'footer_text': 'com:Text/text()',
# 'footer_code': '@code',
# 'footer_severity': '@severity',
# 'dataflow_from_msg': 'mes:Structures/str:Dataflows',
# 'constraint_attachment': 'str:ConstraintAttachment',
# 'include': '@include',
# 'id': '@id',
# 'urn': '@urn',
# 'url': '@url',
# 'uri': '@uri',
# 'agencyID': '@agencyID',
# 'maintainable_parent_id': '@maintainableParentID',
# 'value': 'com:Value/text()',
'headerID': '$.header.id',
'header_prepared': '$.header.prepared',
'header_sender': '$.header.sender.*',
# 'header_receiver': 'mes:Receiver/@*',
# 'assignment_status': '@assignmentStatus',
# 'error': 'mes:error/@*',
# 'ref_version': '@version',
# 'concept_id': 'str:ConceptIdentity',
# 'position': '@position',
# 'isfinal': '@isfinal',
# 'ref_package': '@package',
# 'ref_class': '@class',
# 'ref_target': 'str:Target',
# 'ref_source': 'str:Source',
# 'ref_structure': 'str:Structure',
# 'annotationtype': 'com:AnnotationType/text()',
# 'generic_obs_path': 'gen:Obs',
# 'obs_key_id_path': 'gen:ObsKey/gen:Value/@id',
# 'obs_key_values_path': 'gen:ObsKey/gen:Value/@value',
# 'series_key_values_path': 'gen:SeriesKey/gen:Value/@value',
# 'series_key_id_path': 'gen:SeriesKey/gen:Value/@id',
# 'generic_series_dim_path': 'gen:ObsDimension/@value',
# 'group_key_values_path': 'gen:GroupKey/gen:Value/@value',
# 'group_key_id_path': 'gen:GroupKey/gen:Value/@id',
# 'obs_value_path': 'gen:ObsValue/@value',
# 'attr_id_path': 'gen:Attributes/gen:Value/@id',
# 'attr_values_path': 'gen:Attributes/gen:Value/@value',
# model.Code: 'str:Code',
# model.Categorisation: 'str:Categorisation',
# model.CategoryScheme: 'mes:Structures/str:CategorySchemes/str:CategoryScheme',
# model.DataStructureDefinition: 'mes:Structures/str:DataStructures/str:DataStructure',
# model.DataflowDefinition: 'str:Dataflow',
# model.ConceptScheme: 'mes:Structures/str:Concepts/str:ConceptScheme',
# model.ContentConstraint: 'mes:Structures/str:Constraints/str:ContentConstraint',
# model.Concept: 'str:Concept',
# model.Codelist: 'mes:Structures/str:Codelists/str:Codelist',
# model.Categorisations: 'mes:Structures/str:Categorisations',
model.Footer: 'footer.message',
# model.Category: 'str:Category',
# model.DimensionDescriptor: 'str:DataStructureComponents/str:DimensionList',
# model.Dimension: 'str:Dimension',
# model.TimeDimension: 'str:TimeDimension',
# model.MeasureDimension: 'str:MeasureDimension',
# model.MeasureDescriptor: 'str:DataStructureComponents/str:MeasureList',
# model.PrimaryMeasure: 'str:PrimaryMeasure',
# model.AttributeDescriptor: 'str:DataStructureComponents/str:AttributeList',
# model.DataAttribute: 'str:Attribute',
# model.CubeRegion: 'str:CubeRegion',
# model.KeyValue: 'com:KeyValue',
# model.Ref: 'Ref',
model.Header: '$.header',
# model.Annotation: 'com:Annotations/com:Annotation',
# model.Group: 'gen:Group',
# model.Series: 'gen:Series',
model.DataSet: '$.dataSets[0]',
# 'int_str_names': './*[local-name() = $name]/@xml:lang',
# model.Representation: 'str:LocalRepresentation',
# 'int_str_values': './*[local-name() = $name]/text()',
# 'enumeration': 'str:Enumeration',
# 'texttype': 'str:TextFormat/@textType',
# 'maxlength': 'str:TextFormat/@maxLength',
# # need this? It is just a non-offset Ref
# 'attr_relationship': '*/Ref/@id',
}
    @classmethod
    def _compile_paths(cls):
        # One-off setup: replace every raw path string in cls._paths with its
        # compiled XPath object, in place. Rebinding values of existing keys
        # while iterating items() is safe because no keys are added or removed.
        for key, path in cls._paths.items():
            cls._paths[key] = XPath(path)
def international_str(self, name, sdmxobj):
'''
return DictLike of xml:lang attributes. If node has no attributes,
assume that language is 'en'.
'''
# Get language tokens like 'en', 'fr'...
elem_attrib = self._paths['int_str_names'](sdmxobj._elem, name=name)
values = self._paths['int_str_values'](sdmxobj._elem, name=name)
# Unilingual strings have no attributes. Assume 'en' instead.
if not elem_attrib:
elem_attrib = ['en']
return DictLike(zip(elem_attrib, values))
def header_error(self, sdmxobj):
try:
return DictLike(sdmxobj._elem.Error.attrib)
except AttributeError:
return None
def dim_at_obs(self, sdmxobj):
if len(self._obs_dim) > 1:
return 'AllDimensions'
else:
return self._obs_dim[0]['id']
    def structured_by(self, sdmxobj):
        # TODO: not implemented for SDMXJSON yet (original author's note was
        # "complete this"). Callers currently always receive None.
        return None
# Types for generic observations
_ObsTuple = namedtuple_factory(
'GenericObservation', ('key', 'value', 'attrib'))
_SeriesObsTuple = namedtuple_factory(
'SeriesObservation', ('dim', 'value', 'attrib'))
# Operators
getitem0 = itemgetter(0)
getitem_key = itemgetter('_key')
def iter_generic_obs(self, sdmxobj, with_value, with_attributes):
# Make type namedtuple for obs_key. It must be
# merged with any dimension values at dataset level maintaining the
# key position order.
# Note that the measure dimension (such as TIME_PERIOD) has no key position.
# We fill this gap by injecting the highest key position.
_obs_dim_key = {dim.get('keyPosition', self._key_len - 1): dim['id']
for dim in self._obs_dim}
_GenericObsKey = namedtuple_factory('GenericObservationKey',
(self._dataset_dim_key.get(d)
or _obs_dim_key.get(d)
for d in range(self._key_len)))
obs_l = sorted(sdmxobj._elem.value['observations'].items(),
key=self.getitem0)
for dim, value in obs_l:
# Construct the key for this observation
key_idx = [int(i) for i in dim.split(':')]
obs_key_values = [d['values'][i]['id'] for i, d in
zip(key_idx, self._obs_dim)]
obs_key = _GenericObsKey._make(self._dataset_dim_values.get(d)
or obs_key_values.pop(0)
for d in range(self._key_len))
# Read the value
obs_value = value[0] if with_value else None
# Read any attributes
if with_attributes and len(value) > 1:
obs_attr_idx = value[1:]
obs_attr_raw = [(d['id'],
d['values'][i].get('id'))
for i, d in zip(obs_attr_idx, self._obs_attrib)]
if obs_attr_raw:
obs_attr_id, obs_attr_values = zip(*obs_attr_raw)
obs_attr_type = namedtuple_factory(
'ObsAttributes', obs_attr_id)
obs_attr = obs_attr_type(*obs_attr_values)
else:
obs_attr = None
else:
obs_attr = None
yield self._SeriesObsTuple(obs_key, obs_value, obs_attr)
def generic_series(self, sdmxobj):
for key, series in sdmxobj._elem.value['series'].items():
series['_key'] = key
for series in sorted(sdmxobj._elem.value['series'].values(), key=self.getitem_key):
yield model.Series(self, series, dataset=sdmxobj)
def generic_groups(self, sdmxobj):
return []
def series_key(self, sdmxobj):
key_idx = [int(i) for i in sdmxobj._elem['_key'].split(':')]
series_key_values = [d['values'][i]['id'] for i, d in
zip(key_idx, self._series_dim)]
full_key_values = [self._dataset_dim_values.get(d)
or series_key_values.pop(0)
for d in range(self._key_len)]
return self.SeriesKeyTuple._make(full_key_values)
def group_key(self, sdmxobj):
group_key_id = self._paths['group_key_id_path'](sdmxobj._elem)
group_key_values = self._paths[
'group_key_values_path'](sdmxobj._elem)
GroupKeyTuple = namedtuple_factory('GroupKey', group_key_id)
return GroupKeyTuple._make(group_key_values)
def dataset_attrib(self, sdmxobj):
value_idx = sdmxobj._elem.value.get('attributes')
if value_idx:
attrib_list = [(a['id'],
a['values'][i].get('id', a['values'][i]['name']))
for i, a in zip(value_idx, self._dataset_attrib) if i is not None]
attrib_ids, attrib_values = zip(*attrib_list)
return namedtuple_factory('Attrib', attrib_ids)(*attrib_values)
def series_attrib(self, sdmxobj):
value_idx = sdmxobj._elem.get('attributes')
if value_idx:
attrib_list = [(a['id'],
a['values'][i].get('id', a['values'][i]['name']))
for i, a in zip(value_idx, self._series_attrib) if i is not None]
attrib_ids, attrib_values = zip(*attrib_list)
return namedtuple_factory('Attrib', attrib_ids)(*attrib_values)
def iter_generic_series_obs(self, sdmxobj, with_value, with_attributes,
reverse_obs=False):
obs_l = sorted(sdmxobj._elem['observations'].items(),
key=self.getitem0, reverse=reverse_obs)
for obs in obs_l:
# value for dim at obs, e.g. '2014' for time series.
# As this method is called only when each obs has but one dimension, we
# it is at index 0.
obs_dim_value = self._obs_dim[0]['values'][int(obs[0])]['id']
obs_value = obs[1][0] if with_value else None
if with_attributes and len(obs[1]) > 1:
obs_attr_idx = obs[1][1:]
obs_attr_raw = [(d['id'],
d['values'][i].get('id'))
for i, d in zip(obs_attr_idx, self._obs_attrib) if i is not None]
if obs_attr_raw:
obs_attr_id, obs_attr_values = zip(*obs_attr_raw)
obs_attr_type = namedtuple_factory(
'ObsAttributes', obs_attr_id)
obs_attr = obs_attr_type(*obs_attr_values)
else:
obs_attr = None
else:
obs_attr = None
yield self._SeriesObsTuple(obs_dim_value, obs_value, obs_attr)
|
dr-leo/pandaSDMX | pandasdmx/remote.py | REST.get | python | def get(self, url, fromfile=None, params={}, headers={}):
'''Get SDMX message from REST service or local file
Args:
url(str): URL of the REST service without the query part
If None, fromfile must be set. Default is None
params(dict): will be appended as query part to the URL after a '?'
fromfile(str): path to SDMX file containing an SDMX message.
It will be passed on to the
reader for parsing.
headers(dict): http headers. Overwrite instance-wide headers.
Default is {}
Returns:
tuple: three objects:
0. file-like object containing the SDMX message
1. the complete URL, if any, including the query part
constructed from params
2. the status code
Raises:
HTTPError if SDMX service responded with
status code 401. Otherwise, the status code
is returned
'''
if fromfile:
try:
# Load data from local file
# json files must be opened in text mode, all others in binary as
# they may be zip files or xml.
if fromfile.endswith('.json'):
mode_str = 'r'
else:
mode_str = 'rb'
source = open(fromfile, mode_str)
except TypeError:
# so fromfile must be file-like
source = fromfile
final_url = resp_headers = status_code = None
else:
source, final_url, resp_headers, status_code = self.request(
url, params=params, headers=headers)
return source, final_url, resp_headers, status_code | Get SDMX message from REST service or local file
Args:
url(str): URL of the REST service without the query part
If None, fromfile must be set. Default is None
params(dict): will be appended as query part to the URL after a '?'
fromfile(str): path to SDMX file containing an SDMX message.
It will be passed on to the
reader for parsing.
headers(dict): http headers. Overwrite instance-wide headers.
Default is {}
Returns:
tuple: three objects:
0. file-like object containing the SDMX message
1. the complete URL, if any, including the query part
constructed from params
2. the status code
Raises:
HTTPError if SDMX service responded with
status code 401. Otherwise, the status code
is returned | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/remote.py#L55-L99 | [
"def request(self, url, params={}, headers={}):\n \"\"\"\n Retrieve SDMX messages.\n If needed, override in subclasses to support other data providers.\n\n :param url: The URL of the message.\n :type url: str\n :return: the xml data as file-like object\n \"\"\"\n # Generate current config. M... | class REST:
"""
Query SDMX resources via REST or from a file
The constructor accepts arbitrary keyword arguments that will be passed
to the requests.get function on each call. This makes the REST class somewhat similar to a requests.Session. E.g., proxies or
authorisation data needs only be provided once. The keyword arguments are
stored in self.config. Modify this dict to issue the next 'get' request with
changed arguments.
"""
max_size = 2 ** 24
'''upper bound for in-memory temp file. Larger files will be spooled from disc'''
def __init__(self, cache, http_cfg):
default_cfg = dict(stream=True, timeout=30.1)
for it in default_cfg.items():
http_cfg.setdefault(*it)
self.config = DictLike(http_cfg)
if cache:
requests_cache.install_cache(**cache)
    def get(self, url, fromfile=None, params={}, headers={}):
        '''Get an SDMX message from a REST service or a local file.

        Args:
            url(str): URL of the REST service without the query part.
                Ignored when `fromfile` is given.
            params(dict): will be appended as query part to the URL after a '?'
            fromfile(str or file-like): path to, or file-like object
                containing, an SDMX message; it will be passed on to the
                reader for parsing. '.json' files are opened in text mode,
                all others in binary mode (they may be zip or xml).
            headers(dict): http headers. Overwrite instance-wide headers.
                Default is {}

        Returns:
            tuple: four objects:

                0. file-like object containing the SDMX message
                1. the complete URL, if any, including the query part
                   constructed from params (None when reading from a file)
                2. the response headers (None when reading from a file)
                3. the status code (None when reading from a file)

        Raises:
            requests.HTTPError: if the SDMX service responded with a 4xx
                client-error status code (see :meth:`request`).
        '''
        if fromfile:
            try:
                # Load data from local file
                # json files must be opened in text mode, all others in binary as
                # they may be zip files or xml.
                if fromfile.endswith('.json'):
                    mode_str = 'r'
                else:
                    mode_str = 'rb'
                source = open(fromfile, mode_str)
            except TypeError:
                # .endswith failed, so fromfile must already be file-like.
                source = fromfile
            final_url = resp_headers = status_code = None
        else:
            source, final_url, resp_headers, status_code = self.request(
                url, params=params, headers=headers)
        return source, final_url, resp_headers, status_code
    def request(self, url, params={}, headers={}):
        """
        Retrieve SDMX messages.
        If needed, override in subclasses to support other data providers.

        :param url: The URL of the message.
        :type url: str
        :param params: query parameters appended to the URL
        :param headers: per-call http headers merged over self.config's
        :return: 4-tuple (file-like object with the message body or None,
            final URL, response headers, int status code). For non-OK,
            non-4xx responses (e.g. 5xx) the body is None and the status
            code is returned rather than raised.
        :raises requests.HTTPError: for 4xx status codes.
        """
        # Generate current config. Merge in any given headers
        cur_config = self.config.copy()
        if 'headers' in cur_config:
            cur_config['headers'] = cur_config['headers'].copy()
            cur_config['headers'].update(headers)
        else:
            cur_config['headers'] = headers
        with closing(requests.get(url, params=params, **cur_config)) as response:
            if response.status_code == requests.codes.OK:
                # Prepare the temp file. xml content will be
                # stored in a binary file, json in a textfile.
                if (response.headers.get('Content-Type')
                        and ('json' in response.headers['Content-Type'])):
                    enc, fmode = response.encoding, 'w+t'
                else:
                    enc, fmode = None, 'w+b'
                # Create temp file ensuring 2to3 compatibility
                if str_type == str:  # we are on py3
                    source = STF(
                        max_size=self.max_size, mode=fmode, encoding=enc)
                else:
                    # On py27 we must omit the 'encoding' kwarg
                    source = STF(max_size=self.max_size, mode=fmode)
                for c in response.iter_content(chunk_size=1000000,
                                               decode_unicode=bool(enc)):
                    source.write(c)
            else:
                source = None
            code = int(response.status_code)
            if 400 <= code <= 499:
                # raise_for_status() itself raises HTTPError for this range,
                # so the outer `raise` never receives a value; the expression
                # form is redundant but harmless.
                raise response.raise_for_status()
        return source, response.url, response.headers, code
|
dr-leo/pandaSDMX | pandasdmx/remote.py | REST.request | python | def request(self, url, params={}, headers={}):
"""
Retrieve SDMX messages.
If needed, override in subclasses to support other data providers.
:param url: The URL of the message.
:type url: str
:return: the xml data as file-like object
"""
# Generate current config. Merge in any given headers
cur_config = self.config.copy()
if 'headers' in cur_config:
cur_config['headers'] = cur_config['headers'].copy()
cur_config['headers'].update(headers)
else:
cur_config['headers'] = headers
with closing(requests.get(url, params=params, **cur_config)) as response:
if response.status_code == requests.codes.OK:
# Prepare the temp file. xml content will be
# stored in a binary file, json in a textfile.
if (response.headers.get('Content-Type')
and ('json' in response.headers['Content-Type'])):
enc, fmode = response.encoding, 'w+t'
else:
enc, fmode = None, 'w+b'
# Create temp file ensuring 2to3 compatibility
if str_type == str: # we are on py3
source = STF(
max_size=self.max_size, mode=fmode, encoding=enc)
else:
# On py27 we must omit the 'encoding' kwarg
source = STF(max_size=self.max_size, mode=fmode)
for c in response.iter_content(chunk_size=1000000,
decode_unicode=bool(enc)):
source.write(c)
else:
source = None
code = int(response.status_code)
if 400 <= code <= 499:
raise response.raise_for_status()
return source, response.url, response.headers, code | Retrieve SDMX messages.
If needed, override in subclasses to support other data providers.
:param url: The URL of the message.
:type url: str
:return: the xml data as file-like object | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/remote.py#L101-L143 | null | class REST:
"""
Query SDMX resources via REST or from a file
The constructor accepts arbitrary keyword arguments that will be passed
to the requests.get function on each call. This makes the REST class somewhat similar to a requests.Session. E.g., proxies or
authorisation data needs only be provided once. The keyword arguments are
stored in self.config. Modify this dict to issue the next 'get' request with
changed arguments.
"""
max_size = 2 ** 24
'''upper bound for in-memory temp file. Larger files will be spooled from disc'''
def __init__(self, cache, http_cfg):
default_cfg = dict(stream=True, timeout=30.1)
for it in default_cfg.items():
http_cfg.setdefault(*it)
self.config = DictLike(http_cfg)
if cache:
requests_cache.install_cache(**cache)
def get(self, url, fromfile=None, params={}, headers={}):
'''Get SDMX message from REST service or local file
Args:
url(str): URL of the REST service without the query part
If None, fromfile must be set. Default is None
params(dict): will be appended as query part to the URL after a '?'
fromfile(str): path to SDMX file containing an SDMX message.
It will be passed on to the
reader for parsing.
headers(dict): http headers. Overwrite instance-wide headers.
Default is {}
Returns:
tuple: three objects:
0. file-like object containing the SDMX message
1. the complete URL, if any, including the query part
constructed from params
2. the status code
Raises:
HTTPError if SDMX service responded with
status code 401. Otherwise, the status code
is returned
'''
if fromfile:
try:
# Load data from local file
# json files must be opened in text mode, all others in binary as
# they may be zip files or xml.
if fromfile.endswith('.json'):
mode_str = 'r'
else:
mode_str = 'rb'
source = open(fromfile, mode_str)
except TypeError:
# so fromfile must be file-like
source = fromfile
final_url = resp_headers = status_code = None
else:
source, final_url, resp_headers, status_code = self.request(
url, params=params, headers=headers)
return source, final_url, resp_headers, status_code
def request(self, url, params={}, headers={}):
"""
Retrieve SDMX messages.
If needed, override in subclasses to support other data providers.
:param url: The URL of the message.
:type url: str
:return: the xml data as file-like object
"""
# Generate current config. Merge in any given headers
cur_config = self.config.copy()
if 'headers' in cur_config:
cur_config['headers'] = cur_config['headers'].copy()
cur_config['headers'].update(headers)
else:
cur_config['headers'] = headers
with closing(requests.get(url, params=params, **cur_config)) as response:
if response.status_code == requests.codes.OK:
# Prepare the temp file. xml content will be
# stored in a binary file, json in a textfile.
if (response.headers.get('Content-Type')
and ('json' in response.headers['Content-Type'])):
enc, fmode = response.encoding, 'w+t'
else:
enc, fmode = None, 'w+b'
# Create temp file ensuring 2to3 compatibility
if str_type == str: # we are on py3
source = STF(
max_size=self.max_size, mode=fmode, encoding=enc)
else:
# On py27 we must omit the 'encoding' kwarg
source = STF(max_size=self.max_size, mode=fmode)
for c in response.iter_content(chunk_size=1000000,
decode_unicode=bool(enc)):
source.write(c)
else:
source = None
code = int(response.status_code)
if 400 <= code <= 499:
raise response.raise_for_status()
return source, response.url, response.headers, code
|
dr-leo/pandaSDMX | pandasdmx/__init__.py | odo_register | python | def odo_register():
'''
Enable conversion of .sdmx files with odo (http://odo.readthedocs.org).
Adds conversion from sdmx to PD.DataFrame to odo graph.
Note that native discovery of sdmx files is not yet supported. odo will thus
convert to PD.DataFrame
and discover the data shape from there.
'''
logger.info('Registering with odo...')
import odo
from odo.utils import keywords
import pandas as PD
from toolz import keyfilter
import toolz.curried.operator as op
class PandaSDMX(object):
def __init__(self, uri):
self.uri = uri
@odo.resource.register(r'.*\.sdmx')
def resource_sdmx(uri, **kwargs):
return PandaSDMX(uri)
@odo.discover.register(PandaSDMX)
def _(sdmx):
return odo.discover(Request().get(fromfile=sdmx.uri).write())
@odo.convert.register(PD.DataFrame, PandaSDMX)
def convert_sdmx(sdmx, **kwargs):
write = Request().get(fromfile=sdmx.uri).write
return write(**keyfilter(op.contains(keywords(write)), kwargs))
logger.info('odo registration complete.') | Enable conversion of .sdmx files with odo (http://odo.readthedocs.org).
Adds conversion from sdmx to PD.DataFrame to odo graph.
Note that native discovery of sdmx files is not yet supported. odo will thus
convert to PD.DataFrame
and discover the data shape from there. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/__init__.py#L38-L70 | null | # encoding: utf-8
# pandaSDMX is licensed under the Apache 2.0 license a copy of which
# is included in the source distribution of pandaSDMX.
# This is notwithstanding any licenses of third-party software included in
# this distribution.
# (c) 2014-2016 Dr. Leo <fhaxbox66qgmail.com>
'''
pandaSDMX - a Python package for SDMX - Statistical Data and Metadata eXchange
'''
from pandasdmx.api import Request
import logging
__all__ = ['Request']
__version__ = '0.7.0'
def _init_logger():
logger = logging.getLogger('pandasdmx')
handler = logging.StreamHandler()
fmt = logging.Formatter(
'%(asctime)s %(name)s - %(levelname)s: %(message)s')
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
return logger
logger = _init_logger()
|
dr-leo/pandaSDMX | pandasdmx/utils/__init__.py | concat_namedtuples | python | def concat_namedtuples(*tup, **kwargs):
'''
Concatenate 2 or more namedtuples. The new namedtuple type
is provided by :class:`NamedTupleFactory`
return new namedtuple instance
'''
name = kwargs['name'] if 'name' in kwargs else None
# filter out empty elements
filtered = [i for i in filter(None, tup)]
if filtered:
if len(filtered) == 1:
return filtered[0]
else:
fields = chain(*(t._fields for t in filtered))
values = chain(*(t for t in filtered))
if not name:
name = 'SDMXNamedTuple'
ConcatType = namedtuple_factory(name, fields)
return ConcatType(*values)
else:
return () | Concatenate 2 or more namedtuples. The new namedtuple type
is provided by :class:`NamedTupleFactory`
return new namedtuple instance | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/utils/__init__.py#L99-L120 | null | # pandaSDMX is licensed under the Apache 2.0 license a copy of which
# is included in the source distribution of pandaSDMX.
# This is notwithstanding any licenses of third-party software included in
# this distribution.
# (c) 2014, 2015 Dr. Leo <fhaxbox66qgmail.com>
'''
module pandasdmx.utils - helper classes and functions
'''
from .aadict import aadict
from pandasdmx.utils.anynamedtuple import namedtuple
from itertools import chain
import sys
class DictLike(aadict):
    '''Thin wrapper around dict type
    It allows attribute-like item access, has a :meth:`find` method and inherits other
    useful features from aadict.
    '''
    def aslist(self):
        '''
        return values() as unordered list
        '''
        return list(self.values())
    def any(self):
        '''
        return an arbitrary or the only value. If dict is empty,
        raise KeyError.
        '''
        try:
            return next(iter(self.values()))
        except StopIteration:
            # next() on the exhausted iterator of an empty dict raises
            # StopIteration; translate it into a more telling KeyError.
            raise KeyError('DictLike is empty.')
    def find(self, search_str, by='name', language='en'):
        '''Select values by attribute

        Args:
            searchstr(str): the string to search for
            by(str): the name of the attribute to search by, defaults to 'name'
                The specified attribute must be either a string
                or a dict mapping language codes to strings.
                Such attributes occur, e.g. in :class:`pandasdmx.model.NameableArtefact` which is
                a base class for :class:`pandasdmx.model.DataFlowDefinition` and many others.
            language(str): language code specifying the language of the text to be searched, defaults to 'en'

        Returns:
            DictLike: items where value.<by> contains the search_str. International strings
                stored as dict with language codes as keys are
                searched. Capitalization is ignored.
        '''
        # Case-insensitive containment match.
        s = search_str.lower()
        # We distinguish between international strings stored as dict such as
        # name.en, name.fr, and normal strings.
        if by in ['name', 'description']:
            get_field = lambda obj: getattr(obj, by)[language]
        else:  # normal string
            get_field = lambda obj: getattr(obj, by)
        # Each result is a (key, value) item tuple; match on the value's field.
        return DictLike(result for result in self.items()
                        if s in get_field(result[1]).lower())
class NamedTupleFactory:
    """
    Memoizing wrapper around the namedtuple function: repeated calls with the
    same field names return the same cached namedtuple class (the name from
    the first call wins).
    """
    cache = {}

    def __call__(self, name, fields):
        """
        return namedtuple class as singleton
        """
        field_key = tuple(fields)
        try:
            return self.cache[field_key]
        except KeyError:
            new_type = namedtuple(name, field_key)
            self.cache[field_key] = new_type
            return new_type

namedtuple_factory = NamedTupleFactory()
def concat_namedtuples(*tup, **kwargs):
    '''
    Concatenate 2 or more namedtuples. The new namedtuple type
    is provided by :class:`NamedTupleFactory`

    Empty arguments are skipped; with no non-empty argument an empty tuple is
    returned, and a single non-empty argument is returned unchanged.

    return new namedtuple instance
    '''
    non_empty = [t for t in tup if t]
    if not non_empty:
        return ()
    if len(non_empty) == 1:
        return non_empty[0]
    combined_fields = chain(*(t._fields for t in non_empty))
    combined_values = chain(*non_empty)
    type_name = kwargs.get('name') or 'SDMXNamedTuple'
    ConcatType = namedtuple_factory(type_name, combined_fields)
    return ConcatType(*combined_values)
# 2to3 compatibility
if sys.version[0] == '3':
str_type = str
else:
str_type = unicode
|
dr-leo/pandaSDMX | pandasdmx/utils/__init__.py | DictLike.find | python | def find(self, search_str, by='name', language='en'):
'''Select values by attribute
Args:
searchstr(str): the string to search for
by(str): the name of the attribute to search by, defaults to 'name'
The specified attribute must be either a string
or a dict mapping language codes to strings.
Such attributes occur, e.g. in :class:`pandasdmx.model.NameableArtefact` which is
a base class for :class:`pandasdmx.model.DataFlowDefinition` and many others.
language(str): language code specifying the language of the text to be searched, defaults to 'en'
Returns:
DictLike: items where value.<by> contains the search_str. International strings
stored as dict with language codes as keys are
searched. Capitalization is ignored.
'''
s = search_str.lower()
# We distinguish between international strings stored as dict such as
# name.en, name.fr, and normal strings.
if by in ['name', 'description']:
get_field = lambda obj: getattr(obj, by)[language]
else: # normal string
get_field = lambda obj: getattr(obj, by)
return DictLike(result for result in self.items()
if s in get_field(result[1]).lower()) | Select values by attribute
Args:
searchstr(str): the string to search for
by(str): the name of the attribute to search by, defaults to 'name'
The specified attribute must be either a string
or a dict mapping language codes to strings.
Such attributes occur, e.g. in :class:`pandasdmx.model.NameableArtefact` which is
a base class for :class:`pandasdmx.model.DataFlowDefinition` and many others.
language(str): language code specifying the language of the text to be searched, defaults to 'en'
Returns:
DictLike: items where value.<by> contains the search_str. International strings
stored as dict with language codes as keys are
searched. Capitalization is ignored. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/utils/__init__.py#L44-L72 | null | class DictLike(aadict):
'''Thin wrapper around dict type
It allows attribute-like item access, has a :meth:`find` method and inherits other
useful features from aadict.
'''
def aslist(self):
'''
return values() as unordered list
'''
return list(self.values())
def any(self):
'''
return an arbitrary or the only value. If dict is empty,
raise KeyError.
'''
try:
return next(iter(self.values()))
except StopIteration:
raise KeyError('DictLike is empty.')
def find(self, search_str, by='name', language='en'):
    '''Select items whose attribute contains a search string.

    Args:
        search_str(str): the string to search for (capitalization is
            ignored)
        by(str): name of the attribute to search by, defaults to 'name'.
            The attribute must be either a plain string or, for 'name'
            and 'description', a dict mapping language codes to strings
            as found on :class:`pandasdmx.model.NameableArtefact`
            subclasses such as :class:`pandasdmx.model.DataFlowDefinition`.
        language(str): language code selecting the translation to search
            when the attribute is an international string, defaults to
            'en'

    Returns:
        DictLike: the items whose attribute contains ``search_str``.
    '''
    needle = search_str.lower()
    # We distinguish between international strings stored as dict such as
    # name.en, name.fr, and normal strings. Plain functions instead of
    # lambda assignments (PEP 8 / E731).
    if by in ('name', 'description'):
        def get_field(obj):
            return getattr(obj, by)[language]
    else:  # normal string
        def get_field(obj):
            return getattr(obj, by)
    return DictLike(item for item in self.items()
                    if needle in get_field(item[1]).lower())
|
dr-leo/pandaSDMX | pandasdmx/reader/sdmxml.py | Reader.write_source | python | def write_source(self, filename):
'''
Save XML source to file by calling `write` on the root element.
'''
return self.message._elem.getroottree().write(filename, encoding='utf8') | Save XML source to file by calling `write` on the root element. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/reader/sdmxml.py#L52-L56 | null | class Reader(BaseReader):
"""
Read SDMX-ML 2.1 and expose it as instances from pandasdmx.model
"""
# XML namespace prefixes used by every XPath expression in ``_paths``.
# Maps the short prefixes ('com', 'str', 'mes', 'gen', 'footer') to the
# SDMX-ML 2.1 namespace URIs for common, structure, message,
# generic-data and footer elements respectively.
_nsmap = {
    'com': 'http://www.sdmx.org/resources/sdmxml/schemas/v2_1/common',
    'str': 'http://www.sdmx.org/resources/sdmxml/schemas/v2_1/structure',
    'mes': 'http://www.sdmx.org/resources/sdmxml/schemas/v2_1/message',
    'gen': 'http://www.sdmx.org/resources/sdmxml/schemas/v2_1/data/generic',
    'footer': 'http://www.sdmx.org/resources/sdmxml/schemas/v2_1/message/footer'
}
def initialize(self, source):
    """Parse ``source`` as SDMX-ML and return the wrapped message.

    Args:
        source: file name or file-like object containing SDMX-ML 2.1.

    Returns:
        model.StructureMessage or model.DataMessage, chosen by the
        root tag of the parsed document.

    Raises:
        ValueError: if the root tag is neither a Structure nor a Data
            message.
    """
    tree = etree.parse(source)
    root = tree.getroot()
    # Pick the message model class matching the document's root tag.
    if root.tag.endswith('Structure'):
        cls = model.StructureMessage
    elif root.tag.endswith('Data'):
        cls = model.DataMessage
    else:
        raise ValueError('Unsupported root tag: %s' % root.tag)
    # Remember the wrapped message for later XPath-based access by the
    # other reader methods.
    self.message = cls(self, root)
    return self.message
# flag to prevent multiple compiling. See BaseReader.__init__
_compiled = False
def write_source(self, filename):
    """Serialize the parsed XML document to ``filename`` in UTF-8 by
    calling ``write`` on its root tree."""
    tree = self.message._elem.getroottree()
    return tree.write(filename, encoding='utf8')
_paths = {
'footer_text': 'com:Text/text()',
'footer_code': '@code',
'footer_severity': '@severity',
'dataflow_from_msg': 'mes:Structures/str:Dataflows',
'constraint_attachment': 'str:ConstraintAttachment',
'include': '@include',
'id': '@id',
'urn': '@urn',
'url': '@url',
'uri': '@uri',
'agencyID': '@agencyID',
'maintainable_parent_id': '@maintainableParentID',
'value': 'com:Value/text()',
'headerID': 'mes:ID/text()',
'header_prepared': 'mes:Prepared/text()',
'header_sender': 'mes:Sender/@*',
'header_receiver': 'mes:Receiver/@*',
'assignment_status': '@assignmentStatus',
'error': 'mes:error/@*',
'ref_version': '@version',
'concept_identity': 'str:ConceptIdentity',
'position': '@position',
'isfinal': '@isfinal',
'ref_package': '@package',
'ref_class': '@class',
'ref_target': 'str:Target',
'ref_source': 'str:Source',
'ref_structure': 'str:Structure',
'annotationtype': 'com:AnnotationType/text()',
'structured_by': 'mes:Structure/@structureID',
'dim_at_obs': '//mes:Header/mes:Structure/@dimensionAtObservation',
'generic_obs_path': 'gen:Obs',
'obs_key_id_path': 'gen:ObsKey/gen:Value/@id',
'obs_key_values_path': 'gen:ObsKey/gen:Value/@value',
'series_key_values_path': 'gen:SeriesKey/gen:Value/@value',
'series_key_id_path': 'gen:SeriesKey/gen:Value/@id',
'generic_series_dim_path': 'gen:ObsDimension/@value',
'group_key_values_path': 'gen:GroupKey/gen:Value/@value',
'group_key_id_path': 'gen:GroupKey/gen:Value/@id',
'obs_value_path': 'gen:ObsValue/@value',
'attr_id_path': 'gen:Attributes/gen:Value/@id',
'attr_values_path': 'gen:Attributes/gen:Value/@value',
model.Code: 'str:Code',
model.Categorisation: 'str:Categorisation',
model.CategoryScheme: 'mes:Structures/str:CategorySchemes/str:CategoryScheme',
model.DataStructureDefinition: 'mes:Structures/str:DataStructures/str:DataStructure',
model.DataflowDefinition: 'str:Dataflow',
model.ConceptScheme: 'mes:Structures/str:Concepts/str:ConceptScheme',
model.ContentConstraint: 'mes:Structures/str:Constraints/str:ContentConstraint',
model.Concept: 'str:Concept',
model.Codelist: 'mes:Structures/str:Codelists/str:Codelist',
model.Categorisations: 'mes:Structures/str:Categorisations',
model.Footer: 'footer:Footer/footer:Message',
model.Category: 'str:Category',
model.DimensionDescriptor: 'str:DataStructureComponents/str:DimensionList',
model.Dimension: 'str:Dimension',
model.TimeDimension: 'str:TimeDimension',
model.MeasureDimension: 'str:MeasureDimension',
model.MeasureDescriptor: 'str:DataStructureComponents/str:MeasureList',
model.PrimaryMeasure: 'str:PrimaryMeasure',
model.AttributeDescriptor: 'str:DataStructureComponents/str:AttributeList',
model.DataAttribute: 'str:Attribute',
model.CubeRegion: 'str:CubeRegion',
model.KeyValue: 'com:KeyValue',
model.Ref: 'Ref',
model.Header: 'mes:Header',
model.Annotation: 'com:Annotations/com:Annotation',
model.Group: 'gen:Group',
model.Series: 'gen:Series',
model.DataSet: 'mes:DataSet',
'int_str_names': './*[local-name() = $name]/@xml:lang',
model.Representation: 'str:LocalRepresentation',
'int_str_values': './*[local-name() = $name]/text()',
'enumeration': 'str:Enumeration',
'texttype': 'str:TextFormat/@textType',
'maxlength': 'str:TextFormat/@maxLength',
# need this? It is just a non-offset Ref
'attr_relationship': '*/Ref/@id',
'cat_scheme_id': '../@id'
}
@classmethod
def _compile_paths(cls):
    # Compile every raw XPath string in ``_paths`` exactly once per class
    # (guarded by the ``_compiled`` flag in BaseReader.__init__). The
    # compiled XPath objects replace the strings in place;
    # smart_strings=False avoids lxml's costlier "smart string" results.
    for key, path in cls._paths.items():
        cls._paths[key] = XPath(
            path, namespaces=cls._nsmap, smart_strings=False)
def international_str(self, name, sdmxobj):
    '''
    Return a DictLike mapping language codes to translated strings.

    Reads the child elements of ``sdmxobj._elem`` whose local name is
    ``name``; keys are their xml:lang attributes, values their text.
    If the node carries no xml:lang attributes, the single string is
    assumed to be English and keyed as 'en'.
    '''
    # Get language tokens like 'en', 'fr'... via the precompiled XPaths.
    elem_attrib = self._paths['int_str_names'](sdmxobj._elem, name=name)
    values = self._paths['int_str_values'](sdmxobj._elem, name=name)
    # Unilingual strings have no attributes. Assume 'en' instead.
    if not elem_attrib:
        elem_attrib = ['en']
    return DictLike(zip(elem_attrib, values))
def header_error(self, sdmxobj):
    """Return the header's Error element attributes as a DictLike, or
    None when the message carries no Error element."""
    try:
        attributes = sdmxobj._elem.Error.attrib
    except AttributeError:
        return None
    return DictLike(attributes)
def dim_at_obs(self, sdmxobj):
    # Dimension at observation level (e.g. 'TIME_PERIOD' or
    # 'AllDimensions') as declared in the message header.
    return self.read_as_str('dim_at_obs', sdmxobj)

def structured_by(self, sdmxobj):
    # ID of the data structure referenced by the message header.
    return self.read_as_str('structured_by', sdmxobj)
# Types for generic observations
_ObsTuple = namedtuple_factory(
'GenericObservation', ('key', 'value', 'attrib'))
_SeriesObsTuple = namedtuple_factory(
'SeriesObservation', ('dim', 'value', 'attrib'))
def iter_generic_obs(self, sdmxobj, with_value, with_attributes):
    """Yield the observations of a flat (non-series) generic data set.

    Each observation is an ``_ObsTuple`` with fields (key, value,
    attrib); value and/or attrib are None when the corresponding flag
    is False, which skips the respective XPath work.
    """
    for obs in self._paths['generic_obs_path'](sdmxobj._elem):
        # Construct the namedtuple for the ObsKey.
        # The namedtuple class is created on first iteration.
        obs_key_values = self._paths['obs_key_values_path'](obs)
        try:
            obs_key = ObsKeyTuple._make(obs_key_values)
        except NameError:
            # First iteration: ObsKeyTuple does not exist yet
            # (UnboundLocalError is a NameError subclass). Build it from
            # this observation's dimension IDs and reuse it afterwards.
            obs_key_id = self._paths['obs_key_id_path'](obs)
            ObsKeyTuple = namedtuple_factory('ObsKey', obs_key_id)
            obs_key = ObsKeyTuple._make(obs_key_values)
        if with_value:
            obs_value = self._paths['obs_value_path'](obs)[0]
        else:
            obs_value = None
        if with_attributes:
            # Attribute fields may differ per observation, so a fresh
            # namedtuple type is created each time.
            obs_attr_values = self._paths['attr_values_path'](obs)
            obs_attr_id = self._paths['attr_id_path'](obs)
            obs_attr_type = namedtuple_factory(
                'ObsAttributes', obs_attr_id)
            obs_attr = obs_attr_type(*obs_attr_values)
        else:
            obs_attr = None
        yield self._ObsTuple(obs_key, obs_value, obs_attr)
def generic_series(self, sdmxobj):
    """Yield a model.Series for each series element of the data set."""
    for elem in self._paths[model.Series](sdmxobj._elem):
        yield model.Series(self, elem, dataset=sdmxobj)
def generic_groups(self, sdmxobj):
    """Yield a model.Group for each group element of the data set."""
    for elem in self._paths[model.Group](sdmxobj._elem):
        yield model.Group(self, elem)
def series_key(self, sdmxobj):
    """Return the series key as a namedtuple with one field per
    dimension ID, holding the corresponding dimension values."""
    ids = self._paths['series_key_id_path'](sdmxobj._elem)
    values = self._paths['series_key_values_path'](sdmxobj._elem)
    return namedtuple_factory('SeriesKey', ids)._make(values)
def group_key(self, sdmxobj):
    """Return the group key as a namedtuple with one field per
    dimension ID, holding the corresponding dimension values."""
    ids = self._paths['group_key_id_path'](sdmxobj._elem)
    values = self._paths['group_key_values_path'](sdmxobj._elem)
    return namedtuple_factory('GroupKey', ids)._make(values)
def series_attrib(self, sdmxobj):
    # Collect the attribute IDs and values attached at series level and
    # expose them as a namedtuple.
    attr_id = self._paths['attr_id_path'](sdmxobj._elem)
    attr_values = self._paths['attr_values_path'](sdmxobj._elem)
    return namedtuple_factory('Attrib', attr_id)(*attr_values)
# Dataset-level attributes are read with the same XPaths, so reuse the
# series-level method under a second name.
dataset_attrib = series_attrib
def iter_generic_series_obs(self, sdmxobj, with_value, with_attributes,
                            reverse_obs=False):
    """Yield the observations of a generic series as ``_SeriesObsTuple``
    namedtuples (dim, value, attrib).

    value/attrib are None when the corresponding flag is False;
    reverse_obs=True iterates in reverse document order.
    """
    for obs in sdmxobj._elem.iterchildren(
            '{http://www.sdmx.org/resources/sdmxml/schemas/v2_1/data/generic}Obs',
            reversed=reverse_obs):
        # Value of the dimension at observation level (e.g. the period).
        obs_dim = self._paths['generic_series_dim_path'](obs)[0]
        if with_value:
            obs_value = self._paths['obs_value_path'](obs)[0]
        else:
            obs_value = None
        if with_attributes:
            # Attribute fields may differ per observation; build a fresh
            # namedtuple type each time.
            obs_attr_values = self._paths['attr_values_path'](obs)
            obs_attr_id = self._paths['attr_id_path'](obs)
            obs_attr_type = namedtuple_factory(
                'ObsAttributes', obs_attr_id)
            obs_attr = obs_attr_type(*obs_attr_values)
        else:
            obs_attr = None
        yield self._SeriesObsTuple(obs_dim, obs_value, obs_attr)
|
dr-leo/pandaSDMX | pandasdmx/model.py | DataSet.obs | python | def obs(self, with_values=True, with_attributes=True):
'''
return an iterator over observations in a flat dataset.
An observation is represented as a namedtuple with 3 fields ('key', 'value', 'attrib').
obs.key is a namedtuple of dimensions. Its field names represent dimension names,
its values the dimension values.
obs.value is a string that can in in most cases be interpreted as float64
obs.attrib is a namedtuple of attribute names and values.
with_values and with_attributes: If one or both of these flags
is False, the respective value will be None. Use these flags to
increase performance. The flags default to True.
'''
# distinguish between generic and structure-specific observations
# only generic ones are currently implemented.
return self._reader.iter_generic_obs(
self, with_values, with_attributes) | return an iterator over observations in a flat dataset.
An observation is represented as a namedtuple with 3 fields ('key', 'value', 'attrib').
obs.key is a namedtuple of dimensions. Its field names represent dimension names,
its values the dimension values.
obs.value is a string that can in in most cases be interpreted as float64
obs.attrib is a namedtuple of attribute names and values.
with_values and with_attributes: If one or both of these flags
is False, the respective value will be None. Use these flags to
increase performance. The flags default to True. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/model.py#L585-L603 | null | class DataSet(SDMXObject):
# reporting_begin = Any
# reporting_end = Any
# valid_from = Any
# valid_to = Any
# data_extraction_date = Any
# publication_year = Any
# publication_period = Any
# set_id = Unicode
# action = Enum(('update', 'append', 'delete'))
# described_by = Instance(DataflowDefinition)
# structured_by = Instance(DataStructureDefinition)
# published_by = Any
# attached_attribute = Any
def __init__(self, *args, **kwargs):
    super(DataSet, self).__init__(*args, **kwargs)
    # Dataset-level attributes and the (possibly empty) tuple of groups
    # are read eagerly; observations and series remain lazy iterators.
    self.attrib = self._reader.dataset_attrib(self)
    self.groups = tuple(self.iter_groups)
@property
def dim_at_obs(self):
    # Dimension at observation level as stated in the message header,
    # e.g. 'TIME_PERIOD', or 'AllDimensions' for flat data sets.
    return self._reader.dim_at_obs(self)
def obs(self, with_values=True, with_attributes=True):
    """Iterate over the observations of a flat data set.

    Each observation is a namedtuple with 3 fields ('key', 'value',
    'attrib'): key is a namedtuple of dimension values (field names are
    the dimension names), value is a string that can in most cases be
    interpreted as float64, and attrib is a namedtuple of attribute
    names and values. Pass with_values=False and/or
    with_attributes=False to skip the respective field (it is then
    None) and increase performance. Both flags default to True.
    """
    # Only generic observations are currently implemented;
    # structure-specific ones are not.
    return self._reader.iter_generic_obs(self, with_values,
                                         with_attributes)
@property
def series(self):
    '''
    Return an iterator over the Series instances in this DataSet.

    Note that DataSets in flat format, i.e.
    header.dim_at_obs = "AllDimensions", have no series; use
    DataSet.obs() instead.
    '''
    return self._reader.generic_series(self)
@property
def iter_groups(self):
    # Iterator over model.Group instances; materialized into
    # ``self.groups`` by __init__.
    return self._reader.generic_groups(self)
|
dr-leo/pandaSDMX | pandasdmx/model.py | Series.group_attrib | python | def group_attrib(self):
'''
return a namedtuple containing all attributes attached
to groups of which the given series is a member
for each group of which the series is a member
'''
group_attributes = [g.attrib for g in self.dataset.groups if self in g]
if group_attributes:
return concat_namedtuples(*group_attributes) | return a namedtuple containing all attributes attached
to groups of which the given series is a member
for each group of which the series is a member | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/model.py#L632-L640 | [
"def concat_namedtuples(*tup, **kwargs):\n '''\n Concatenate 2 or more namedtuples. The new namedtuple type\n is provided by :class:`NamedTupleFactory`\n return new namedtuple instance\n '''\n name = kwargs['name'] if 'name' in kwargs else None\n\n # filter out empty elements\n filtered = [i... | class Series(SDMXObject):
def __init__(self, *args, **kwargs):
    super(Series, self).__init__(*args)
    # Series key and series-level attributes are read eagerly.
    self.key = self._reader.series_key(self)
    self.attrib = self._reader.series_attrib(self)
    dataset = kwargs.get('dataset')
    # A Series must know its enclosing DataSet to resolve group- and
    # dataset-level attributes later on.
    if not isinstance(dataset, DataSet):
        raise TypeError("'dataset' must be a DataSet instance, got %s"
                        % dataset.__class__.__name__)
    self.dataset = dataset
@property
def group_attrib(self):
    '''
    Return a namedtuple containing all attributes attached to groups of
    which this series is a member, or None if no containing group
    carries attributes.
    '''
    # Collect the attribute namedtuples of every group containing this
    # series ...
    group_attributes = [g.attrib for g in self.dataset.groups if self in g]
    # ... and merge them into one namedtuple; falls through (returning
    # None) when the list is empty.
    if group_attributes:
        return concat_namedtuples(*group_attributes)
def obs(self, with_values=True, with_attributes=True, reverse_obs=False):
    '''
    Return an iterator over the observations in this series.

    An observation is a namedtuple with 3 fields ('key', 'value',
    'attrib'): key is a namedtuple of dimensions, value is a string
    value and attrib is a namedtuple of attributes. If with_values or
    with_attributes is False, the respective field is None; use these
    flags to increase performance. Both default to True. Set
    reverse_obs=True to iterate in reverse document order.
    '''
    return self._reader.iter_generic_series_obs(self,
                                                with_values, with_attributes, reverse_obs)
|
dr-leo/pandaSDMX | pandasdmx/model.py | Series.obs | python | def obs(self, with_values=True, with_attributes=True, reverse_obs=False):
'''
return an iterator over observations in a series.
An observation is represented as a namedtuple with 3 fields ('key', 'value', 'attrib').
obs.key is a namedtuple of dimensions, obs.value is a string value and
obs.attrib is a namedtuple of attributes. If with_values or with_attributes
is False, the respective value is None. Use these flags to
increase performance. The flags default to True.
'''
return self._reader.iter_generic_series_obs(self,
with_values, with_attributes, reverse_obs) | return an iterator over observations in a series.
An observation is represented as a namedtuple with 3 fields ('key', 'value', 'attrib').
obs.key is a namedtuple of dimensions, obs.value is a string value and
obs.attrib is a namedtuple of attributes. If with_values or with_attributes
is False, the respective value is None. Use these flags to
increase performance. The flags default to True. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/model.py#L642-L652 | null | class Series(SDMXObject):
def __init__(self, *args, **kwargs):
    super(Series, self).__init__(*args)
    # Series key and series-level attributes are read eagerly.
    self.key = self._reader.series_key(self)
    self.attrib = self._reader.series_attrib(self)
    dataset = kwargs.get('dataset')
    # A Series must know its enclosing DataSet to resolve group- and
    # dataset-level attributes later on.
    if not isinstance(dataset, DataSet):
        raise TypeError("'dataset' must be a DataSet instance, got %s"
                        % dataset.__class__.__name__)
    self.dataset = dataset
@property
def group_attrib(self):
    """Merged namedtuple of all attributes attached to the groups that
    contain this series; None if no containing group has attributes."""
    collected = []
    for group in self.dataset.groups:
        if self in group:
            collected.append(group.attrib)
    if collected:
        return concat_namedtuples(*collected)
    return None
def obs(self, with_values=True, with_attributes=True, reverse_obs=False):
    """Iterate over this series' observations.

    Each observation is a namedtuple with fields ('key', 'value',
    'attrib'): key is a namedtuple of dimensions, value a string value
    and attrib a namedtuple of attributes. A field is None when the
    corresponding ``with_*`` flag is False (use this to speed things
    up); set reverse_obs=True to iterate in reverse document order.
    """
    return self._reader.iter_generic_series_obs(
        self, with_values, with_attributes, reverse_obs)
|
dr-leo/pandaSDMX | pandasdmx/writer/data2pandas.py | Writer.write | python | def write(self, source=None, asframe=True, dtype=NP.float64,
attributes='', reverse_obs=False, fromfreq=False, parse_time=True):
'''Transfform a :class:`pandasdmx.model.DataMessage` instance to a pandas DataFrame
or iterator over pandas Series.
Args:
source(pandasdmx.model.DataMessage): a pandasdmx.model.DataSet or iterator
of pandasdmx.model.Series
asframe(bool): if True, merge the series of values and/or attributes
into one or two multi-indexed
pandas.DataFrame(s), otherwise return an iterator of pandas.Series.
(default: True)
dtype(str, NP.dtype, None): datatype for values. Defaults to NP.float64
if None, do not return the values of a series. In this case,
attributes must not be an empty string so that some attribute is returned.
attributes(str, None): string determining which attributes, if any,
should be returned in separate series or a separate DataFrame.
Allowed values: '', 'o', 's', 'g', 'd'
or any combination thereof such as 'os', 'go'. Defaults to 'osgd'.
Where 'o', 's', 'g', and 'd' mean that attributes at observation,
series, group and dataset level will be returned as members of
per-observation namedtuples.
reverse_obs(bool): if True, return observations in
reverse order. Default: False
fromfreq(bool): if True, extrapolate time periods
from the first item and FREQ dimension. Default: False
parse_time(bool): if True (default), try to generate datetime index, provided that
dim_at_obs is 'TIME' or 'TIME_PERIOD'. Otherwise, ``parse_time`` is ignored. If False,
always generate index of strings.
Set it to False to increase performance and avoid
parsing errors for exotic date-time formats unsupported by pandas.
'''
# Preparations
dim_at_obs = self.msg.header.dim_at_obs
# validate 'attributes'
if attributes is None or attributes == False:
attributes = ''
else:
try:
attributes = attributes.lower()
except AttributeError:
raise TypeError("'attributes' argument must be of type str.")
if set(attributes) - {'o', 's', 'g', 'd'}:
raise ValueError(
"'attributes' must only contain 'o', 's', 'd' or 'g'.")
# Allow source to be either an iterable or a model.DataSet instance
if hasattr(source, '__iter__'):
iter_series = source
elif hasattr(source, 'series'):
iter_series = source.series
elif hasattr(source, 'data') and dim_at_obs != 'AllDimensions':
iter_series = source.data.series
# Is 'data' a flat dataset with just a list of obs?
if dim_at_obs == 'AllDimensions':
obs_zip = iter(zip(*source.data.obs()))
dimensions = next(obs_zip)
idx = PD.MultiIndex.from_tuples(
dimensions, names=dimensions[0]._fields)
if dtype:
values_series = PD.Series(
next(obs_zip), dtype=dtype, index=idx)
if attributes:
obs_attrib = NP.asarray(next(obs_zip), dtype='object')
attrib_series = PD.Series(
obs_attrib, dtype='object', index=idx)
# Decide what to return
if dtype and attributes:
return values_series, attrib_series
elif dtype:
return values_series
elif attributes:
return attrib_series
# So dataset has series:
else:
if asframe:
series_list = list(s for s in self.iter_pd_series(
iter_series, dim_at_obs, dtype, attributes,
reverse_obs, fromfreq, parse_time))
if dtype and attributes:
# series_list is actually a list of pairs of series
# containing data and metadata respectively
key_fields = series_list[0][0].name._fields
pd_series, pd_attributes = zip(*series_list)
elif dtype:
key_fields = series_list[0].name._fields
pd_series = series_list
elif attributes:
key_fields = series_list[0].name._fields
pd_attributes = series_list
if dtype:
# Merge series into multi-indexed DataFrame and return it.
d_frame = PD.concat(list(pd_series), axis=1, copy=False)
d_frame.columns.set_names(key_fields, inplace=True)
if attributes:
a_frame = PD.concat(pd_attributes, axis=1, copy=False)
a_frame.columns.set_names(key_fields, inplace=True)
# decide what to return
if dtype and attributes:
return d_frame, a_frame
elif dtype:
return d_frame
else:
return a_frame
# return an iterator
else:
return self.iter_pd_series(iter_series, dim_at_obs, dtype,
attributes, reverse_obs, fromfreq, parse_time) | Transform a :class:`pandasdmx.model.DataMessage` instance to a pandas DataFrame
or iterator over pandas Series.
Args:
source(pandasdmx.model.DataMessage): a pandasdmx.model.DataSet or iterator
of pandasdmx.model.Series
asframe(bool): if True, merge the series of values and/or attributes
into one or two multi-indexed
pandas.DataFrame(s), otherwise return an iterator of pandas.Series.
(default: True)
dtype(str, NP.dtype, None): datatype for values. Defaults to NP.float64
if None, do not return the values of a series. In this case,
attributes must not be an empty string so that some attribute is returned.
attributes(str, None): string determining which attributes, if any,
should be returned in separate series or a separate DataFrame.
Allowed values: '', 'o', 's', 'g', 'd'
or any combination thereof such as 'os', 'go'. Defaults to '' (no attributes).
Where 'o', 's', 'g', and 'd' mean that attributes at observation,
series, group and dataset level will be returned as members of
per-observation namedtuples.
reverse_obs(bool): if True, return observations in
reverse order. Default: False
fromfreq(bool): if True, extrapolate time periods
from the first item and FREQ dimension. Default: False
parse_time(bool): if True (default), try to generate datetime index, provided that
dim_at_obs is 'TIME' or 'TIME_PERIOD'. Otherwise, ``parse_time`` is ignored. If False,
always generate index of strings.
Set it to False to increase performance and avoid
parsing errors for exotic date-time formats unsupported by pandas. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/writer/data2pandas.py#L24-L141 | [
"def iter_pd_series(self, iter_series, dim_at_obs, dtype,\n attributes, reverse_obs, fromfreq, parse_time):\n\n for series in iter_series:\n # Generate the 3 main columns: index, values and attributes\n obs_zip = iter(zip(*series.obs(dtype, attributes, reverse_obs)))\n obs_... | class Writer(BaseWriter):
def write(self, source=None, asframe=True, dtype=NP.float64,
          attributes='', reverse_obs=False, fromfreq=False, parse_time=True):
    '''Transform a :class:`pandasdmx.model.DataMessage` instance to a pandas
    DataFrame or an iterator over pandas Series.

    Args:
        source(pandasdmx.model.DataMessage): a pandasdmx.model.DataSet or
            iterator of pandasdmx.model.Series
        asframe(bool): if True, merge the series of values and/or
            attributes into one or two multi-indexed pandas.DataFrame(s),
            otherwise return an iterator of pandas.Series.
            (default: True)
        dtype(str, NP.dtype, None): datatype for values. Defaults to
            NP.float64. If None, do not return the values of a series;
            in that case ``attributes`` must not be an empty string so
            that some attribute is returned.
        attributes(str, None): string determining which attributes, if
            any, should be returned in separate series or a separate
            DataFrame. Allowed values: '', 'o', 's', 'g', 'd' or any
            combination thereof such as 'os', 'go'. Defaults to ''.
            'o', 's', 'g' and 'd' mean that attributes at observation,
            series, group and dataset level will be returned as members
            of per-observation namedtuples.
        reverse_obs(bool): if True, return observations in reverse
            order. Default: False
        fromfreq(bool): if True, extrapolate time periods from the first
            item and the FREQ dimension. Default: False
        parse_time(bool): if True (default), try to generate a datetime
            index, provided that dim_at_obs is 'TIME' or 'TIME_PERIOD';
            otherwise ``parse_time`` is ignored. If False, always
            generate an index of strings. Set it to False to increase
            performance and avoid parsing errors for exotic date-time
            formats unsupported by pandas.

    Returns:
        Depending on ``dtype``/``attributes``: a Series or DataFrame, a
        (values, attributes) pair of them, or an iterator of pandas
        Series when asframe is False.

    Raises:
        TypeError: if ``attributes`` is a truthy non-string, or if
            ``source`` is neither a data set nor an iterable of series.
        ValueError: if ``attributes`` contains characters other than
            'o', 's', 'g', 'd'.
    '''
    # Preparations
    dim_at_obs = self.msg.header.dim_at_obs
    # Validate 'attributes'; any falsy value (None, False, '') means
    # "no attributes". (Replaces the non-idiomatic `attributes == False`.)
    if not attributes:
        attributes = ''
    else:
        try:
            attributes = attributes.lower()
        except AttributeError:
            raise TypeError("'attributes' argument must be of type str.")
        if set(attributes) - {'o', 's', 'g', 'd'}:
            raise ValueError(
                "'attributes' must only contain 'o', 's', 'd' or 'g'.")
    # Allow source to be either an iterable or a model.DataSet instance.
    if hasattr(source, '__iter__'):
        iter_series = source
    elif hasattr(source, 'series'):
        iter_series = source.series
    elif hasattr(source, 'data') and dim_at_obs != 'AllDimensions':
        iter_series = source.data.series
    else:
        # Marker for "no usable series source"; checked below so that an
        # unusable `source` raises a clear error instead of an
        # UnboundLocalError further down.
        iter_series = None
    # Is 'data' a flat dataset with just a list of obs?
    if dim_at_obs == 'AllDimensions':
        obs_zip = iter(zip(*source.data.obs()))
        dimensions = next(obs_zip)
        idx = PD.MultiIndex.from_tuples(
            dimensions, names=dimensions[0]._fields)
        if dtype:
            values_series = PD.Series(
                next(obs_zip), dtype=dtype, index=idx)
        if attributes:
            obs_attrib = NP.asarray(next(obs_zip), dtype='object')
            attrib_series = PD.Series(
                obs_attrib, dtype='object', index=idx)
        # Decide what to return
        if dtype and attributes:
            return values_series, attrib_series
        elif dtype:
            return values_series
        elif attributes:
            return attrib_series
    # So the dataset has series:
    else:
        if iter_series is None:
            raise TypeError(
                "'source' must be a DataSet or an iterable of Series.")
        if asframe:
            series_list = list(self.iter_pd_series(
                iter_series, dim_at_obs, dtype, attributes,
                reverse_obs, fromfreq, parse_time))
            if dtype and attributes:
                # series_list is actually a list of pairs of series
                # containing data and metadata respectively
                key_fields = series_list[0][0].name._fields
                pd_series, pd_attributes = zip(*series_list)
            elif dtype:
                key_fields = series_list[0].name._fields
                pd_series = series_list
            elif attributes:
                key_fields = series_list[0].name._fields
                pd_attributes = series_list
            if dtype:
                # Merge series into a multi-indexed DataFrame.
                d_frame = PD.concat(list(pd_series), axis=1, copy=False)
                d_frame.columns.set_names(key_fields, inplace=True)
            if attributes:
                a_frame = PD.concat(pd_attributes, axis=1, copy=False)
                a_frame.columns.set_names(key_fields, inplace=True)
            # Decide what to return
            if dtype and attributes:
                return d_frame, a_frame
            elif dtype:
                return d_frame
            else:
                return a_frame
        # Return an iterator
        else:
            return self.iter_pd_series(iter_series, dim_at_obs, dtype,
                                       attributes, reverse_obs, fromfreq,
                                       parse_time)
def iter_pd_series(self, iter_series, dim_at_obs, dtype,
                   attributes, reverse_obs, fromfreq, parse_time):
    """Yield one pandas Series per SDMX series (or a (values,
    attributes) pair when both dtype and attributes are requested),
    indexed by the dimension at observation level.

    NOTE(review): several code paths below look fragile; see the
    inline NOTE(review) comments. They are flagged here, not changed.
    """
    for series in iter_series:
        # Generate the 3 main columns: index, values and attributes
        # (dtype/attributes double as the with_values/with_attributes
        # flags of Series.obs here).
        obs_zip = iter(zip(*series.obs(dtype, attributes, reverse_obs)))
        obs_dim = next(obs_zip)
        l = len(obs_dim)
        obs_values = NP.array(next(obs_zip), dtype=dtype)
        if attributes:
            obs_attrib = next(obs_zip)
        # Generate the index
        if parse_time and dim_at_obs == 'TIME_PERIOD':
            # Check if we can build the index based on start and freq
            # Constructing the index from the first value and FREQ should only
            # occur if 'fromfreq' is True
            # and there is a FREQ dimension at all.
            # Check for common frequency field names
            # Initialize with dummy value first to avoid UnboundLocalError
            freq_key = ''
            if 'FREQ' in series.key._fields or 'FREQ' in series.attrib._fields:
                freq_key = 'FREQ'
            elif 'FREQUENCY' in series.key._fields or 'FREQUENCY' in series.attrib._fields:
                freq_key = 'FREQUENCY'
            # NOTE(review): if freq_key stays '' (no FREQ/FREQUENCY field
            # anywhere), neither branch below is taken and series_index
            # is never assigned -> NameError when used further down.
            if fromfreq and freq_key in series.key._fields:
                f = getattr(series.key, freq_key)
                od0 = obs_dim[0]
                # NOTE(review): assumes period strings where the year is
                # the first 4 chars and the subperiod the last char
                # (e.g. '2000-Q1') -- confirm against supported sources.
                year, subdiv = map(int, (od0[:4], od0[-1]))
                if f == 'Q':
                    start_date = PD.datetime(year, (subdiv - 1) * 3 + 1, 1)
                    series_index = PD.period_range(
                        start=start_date, periods=l, freq='Q',
                        name=dim_at_obs)
                elif 'S' in od0:
                    # pandas cannot represent semesters as periods. So we
                    # use date_range.
                    start_date = PD.datetime(year, (subdiv - 1) * 6 + 1, 1)
                    series_index = PD.date_range(
                        start=start_date, periods=l, freq='6M', name=dim_at_obs)
                else:
                    series_index = PD.period_range(start=od0, periods=l,
                                                   freq=f, name=dim_at_obs)
            elif freq_key in series.key._fields or freq_key in series.attrib._fields:
                # fromfreq is False. So generate the index from all the
                # strings
                if freq_key in series.key._fields:
                    f = getattr(series.key, freq_key)
                elif freq_key in series.attrib._fields:
                    f = getattr(series.attrib, freq_key)
                else:
                    # Data set has neither a frequency dimension nor a frequency attribute.
                    # At this point, no DateTimeIndex or PeriodIndex can be generated.
                    # This should be improved in future versions. For now, a
                    # a gentle error is raised to inform the user of a
                    # work-around.
                    # NOTE(review): this branch is unreachable -- the elif
                    # condition above guarantees one of the two tests holds.
                    raise ValueError("Cannot generate DateTimeIndex from this data set.\
                            Try again with `parse_time=False`")
                # Generate arrays for years and subdivisions (quarters or
                # semesters
                if f == 'Q':
                    series_index = PD.Index((PD.Period(year=int(d[:4]), quarter=int(d[-1]), freq='Q')
                                             for d in obs_dim), name=dim_at_obs)
                elif f == 'H':
                    series_index = PD.Index(
                        (PD.datetime(
                            int(d[:4]), (int(d[-1]) - 1) * 6 + 1, 1) for d in obs_dim),
                        name=dim_at_obs)
                else:  # other freq such as 'A' or 'M'
                    series_index = PD.PeriodIndex(obs_dim,
                                                  freq=f, name=dim_at_obs)
        elif parse_time and dim_at_obs == 'TIME':
            # NOTE(review): freq_key is only assigned in the
            # 'TIME_PERIOD' branch above, so when fromfreq is True this
            # reference raises NameError. Confirm and fix.
            if fromfreq and freq_key in series.key._fields:
                f = getattr(series.key, freq_key)
                series_index = PD.date_range(
                    start=obs_dim[0], periods=l, freq=f, name=dim_at_obs)
            else:
                series_index = PD.DatetimeIndex(obs_dim, name=dim_at_obs)
        # Not a datetime or period index or don't parse it
        else:
            series_index = PD.Index(obs_dim, name=dim_at_obs)
        if dtype:
            value_series = PD.Series(
                obs_values, index=series_index, name=series.key)
        if attributes:
            # Assemble attributes of dataset, group and series if needed
            gen_attrib = [attr
                          for flag, attr in (('s', series.attrib),
                                             ('g', series.group_attrib), ('d', series.dataset.attrib))
                          if (flag in attributes) and attr]
            if gen_attrib:
                gen_attrib = concat_namedtuples(*gen_attrib)
            else:
                gen_attrib = None
            if 'o' in attributes:
                # concat with general attributes if any
                if gen_attrib:
                    attrib_iter = (concat_namedtuples(a, gen_attrib,
                                                      name='Attrib') for a in obs_attrib)
                else:
                    # Simply take the obs attributes
                    attrib_iter = obs_attrib
            else:
                # Make iterator yielding the constant general attribute set
                # It may be None.
                # for each obs
                attrib_iter = (gen_attrib for d in obs_attrib)
            attrib_series = PD.Series(attrib_iter,
                                      index=series_index, dtype='object', name=series.key)
        # decide what to yield
        if dtype and attributes:
            yield value_series, attrib_series
        elif dtype:
            yield value_series
        elif attributes:
            yield attrib_series
        else:
            raise ValueError(
                "At least one of 'dtype' or 'attributes' args must be True.")
|
dr-leo/pandaSDMX | pandasdmx/reader/__init__.py | BaseReader.read_identifiables | python | def read_identifiables(self, cls, sdmxobj, offset=None):
'''
If sdmxobj inherits from dict: update it with modelized elements.
These must be instances of model.IdentifiableArtefact,
i.e. have an 'id' attribute. This will be used as dict keys.
If sdmxobj does not inherit from dict: return a new DictLike.
'''
path = self._paths[cls]
if offset:
try:
base = self._paths[offset](sdmxobj._elem)[0]
except IndexError:
return None
else:
base = sdmxobj._elem
result = {e.get('id'): cls(self, e) for e in path(base)}
if isinstance(sdmxobj, dict):
sdmxobj.update(result)
else:
return DictLike(result) | If sdmxobj inherits from dict: update it with modelized elements.
These must be instances of model.IdentifiableArtefact,
i.e. have an 'id' attribute. This will be used as dict keys.
If sdmxobj does not inherit from dict: return a new DictLike. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/reader/__init__.py#L33-L52 | null | class BaseReader:
def __init__(self, request, **kwargs):
self.request = request
# subclasses must declare '_compiled' flag and '_paths' dict
# and '_compile_paths' function
# Check if we need to compile path expressions
if not self._compiled:
self._compile_paths()
self.__class__._compiled = True
def initialize(self, source):
raise NotImplemented
def read_identifiables(self, cls, sdmxobj, offset=None):
'''
If sdmxobj inherits from dict: update it with modelized elements.
These must be instances of model.IdentifiableArtefact,
i.e. have an 'id' attribute. This will be used as dict keys.
If sdmxobj does not inherit from dict: return a new DictLike.
'''
path = self._paths[cls]
if offset:
try:
base = self._paths[offset](sdmxobj._elem)[0]
except IndexError:
return None
else:
base = sdmxobj._elem
result = {e.get('id'): cls(self, e) for e in path(base)}
if isinstance(sdmxobj, dict):
sdmxobj.update(result)
else:
return DictLike(result)
def read_instance(self, cls, sdmxobj, offset=None, first_only=True):
'''
If cls in _paths and matches,
return an instance of cls with the first XML element,
or, if first_only is False, a list of cls instances
for all elements found,
If no matches were found, return None.
'''
if offset:
try:
base = self._paths[offset](sdmxobj._elem)[0]
except IndexError:
return None
else:
base = sdmxobj._elem
result = self._paths[cls](base)
if result:
if first_only:
return cls(self, result[0])
else:
return [cls(self, i) for i in result]
def read_as_str(self, name, sdmxobj, first_only=True):
result = self._paths[name](sdmxobj._elem)
if result:
if first_only:
return result[0]
else:
return result
|
dr-leo/pandaSDMX | pandasdmx/reader/__init__.py | BaseReader.read_instance | python | def read_instance(self, cls, sdmxobj, offset=None, first_only=True):
'''
If cls in _paths and matches,
return an instance of cls with the first XML element,
or, if first_only is False, a list of cls instances
for all elements found,
If no matches were found, return None.
'''
if offset:
try:
base = self._paths[offset](sdmxobj._elem)[0]
except IndexError:
return None
else:
base = sdmxobj._elem
result = self._paths[cls](base)
if result:
if first_only:
return cls(self, result[0])
else:
return [cls(self, i) for i in result] | If cls in _paths and matches,
return an instance of cls with the first XML element,
or, if first_only is False, a list of cls instances
for all elements found,
If no matches were found, return None. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/reader/__init__.py#L54-L74 | null | class BaseReader:
def __init__(self, request, **kwargs):
self.request = request
# subclasses must declare '_compiled' flag and '_paths' dict
# and '_compile_paths' function
# Check if we need to compile path expressions
if not self._compiled:
self._compile_paths()
self.__class__._compiled = True
def initialize(self, source):
raise NotImplemented
def read_identifiables(self, cls, sdmxobj, offset=None):
'''
If sdmxobj inherits from dict: update it with modelized elements.
These must be instances of model.IdentifiableArtefact,
i.e. have an 'id' attribute. This will be used as dict keys.
If sdmxobj does not inherit from dict: return a new DictLike.
'''
path = self._paths[cls]
if offset:
try:
base = self._paths[offset](sdmxobj._elem)[0]
except IndexError:
return None
else:
base = sdmxobj._elem
result = {e.get('id'): cls(self, e) for e in path(base)}
if isinstance(sdmxobj, dict):
sdmxobj.update(result)
else:
return DictLike(result)
def read_instance(self, cls, sdmxobj, offset=None, first_only=True):
'''
If cls in _paths and matches,
return an instance of cls with the first XML element,
or, if first_only is False, a list of cls instances
for all elements found,
If no matches were found, return None.
'''
if offset:
try:
base = self._paths[offset](sdmxobj._elem)[0]
except IndexError:
return None
else:
base = sdmxobj._elem
result = self._paths[cls](base)
if result:
if first_only:
return cls(self, result[0])
else:
return [cls(self, i) for i in result]
def read_as_str(self, name, sdmxobj, first_only=True):
result = self._paths[name](sdmxobj._elem)
if result:
if first_only:
return result[0]
else:
return result
|
dr-leo/pandaSDMX | pandasdmx/writer/structure2pd.py | Writer.write | python | def write(self, source=None, rows=None, **kwargs):
'''
Transfform structural metadata, i.e. codelists, concept-schemes,
lists of dataflow definitions or category-schemes
from a :class:`pandasdmx.model.StructureMessage` instance into a pandas DataFrame.
This method is called by :meth:`pandasdmx.api.Response.write` . It is not
part of the public-facing API. Yet, certain kwargs are
propagated from there.
Args:
source(pandasdmx.model.StructureMessage): a :class:`pandasdmx.model.StructureMessage` instance.
rows(str): sets the desired content
to be extracted from the StructureMessage.
Must be a name of an attribute of the StructureMessage. The attribute must
be an instance of `dict` whose keys are strings. These will be
interpreted as ID's and used for the MultiIndex of the DataFrame
to be returned. Values can be either instances of `dict` such as for codelists and categoryscheme,
or simple nameable objects
such as for dataflows. In the latter case, the DataFrame will have a flat index.
(default: depends on content found in Message.
Common is 'codelist')
columns(str, list): if str, it denotes the attribute of attributes of the
values (nameable SDMX objects such as Code or ConceptScheme) that will be stored in the
DataFrame. If a list, it must contain strings
that are valid attibute values. Defaults to: ['name', 'description']
lang(str): locale identifier. Specifies the preferred
language for international strings such as names.
Default is 'en'.
'''
# Set convenient default values for args
# is rows a string?
if rows is not None and not isinstance(rows, (list, tuple)):
rows = [rows]
return_df = True
elif isinstance(rows, (list, tuple)) and len(rows) == 1:
return_df = True
else:
return_df = False
if rows is None:
rows = [i for i in self._row_content if hasattr(source, i)]
# Generate the DataFrame or -Frames and store them in a DictLike with
# content-type names as keys
frames = DictLike(
{r: self._make_dataframe(source, r, **kwargs) for r in rows})
if return_df:
# There is only one item. So return the only value.
return frames.any()
else:
return frames | Transfform structural metadata, i.e. codelists, concept-schemes,
lists of dataflow definitions or category-schemes
from a :class:`pandasdmx.model.StructureMessage` instance into a pandas DataFrame.
This method is called by :meth:`pandasdmx.api.Response.write` . It is not
part of the public-facing API. Yet, certain kwargs are
propagated from there.
Args:
source(pandasdmx.model.StructureMessage): a :class:`pandasdmx.model.StructureMessage` instance.
rows(str): sets the desired content
to be extracted from the StructureMessage.
Must be a name of an attribute of the StructureMessage. The attribute must
be an instance of `dict` whose keys are strings. These will be
interpreted as ID's and used for the MultiIndex of the DataFrame
to be returned. Values can be either instances of `dict` such as for codelists and categoryscheme,
or simple nameable objects
such as for dataflows. In the latter case, the DataFrame will have a flat index.
(default: depends on content found in Message.
Common is 'codelist')
columns(str, list): if str, it denotes the attribute of attributes of the
values (nameable SDMX objects such as Code or ConceptScheme) that will be stored in the
DataFrame. If a list, it must contain strings
that are valid attibute values. Defaults to: ['name', 'description']
lang(str): locale identifier. Specifies the preferred
language for international strings such as names.
Default is 'en'. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/writer/structure2pd.py#L28-L78 | [
"def any(self):\n '''\n return an arbitrary or the only value. If dict is empty,\n raise KeyError.\n '''\n try:\n return next(iter(self.values()))\n except StopIteration:\n raise KeyError('DictLike is empty.')\n"
] | class Writer(BaseWriter):
_row_content = {'codelist', 'conceptscheme', 'dataflow',
'categoryscheme'}
def write(self, source=None, rows=None, **kwargs):
'''
Transfform structural metadata, i.e. codelists, concept-schemes,
lists of dataflow definitions or category-schemes
from a :class:`pandasdmx.model.StructureMessage` instance into a pandas DataFrame.
This method is called by :meth:`pandasdmx.api.Response.write` . It is not
part of the public-facing API. Yet, certain kwargs are
propagated from there.
Args:
source(pandasdmx.model.StructureMessage): a :class:`pandasdmx.model.StructureMessage` instance.
rows(str): sets the desired content
to be extracted from the StructureMessage.
Must be a name of an attribute of the StructureMessage. The attribute must
be an instance of `dict` whose keys are strings. These will be
interpreted as ID's and used for the MultiIndex of the DataFrame
to be returned. Values can be either instances of `dict` such as for codelists and categoryscheme,
or simple nameable objects
such as for dataflows. In the latter case, the DataFrame will have a flat index.
(default: depends on content found in Message.
Common is 'codelist')
columns(str, list): if str, it denotes the attribute of attributes of the
values (nameable SDMX objects such as Code or ConceptScheme) that will be stored in the
DataFrame. If a list, it must contain strings
that are valid attibute values. Defaults to: ['name', 'description']
lang(str): locale identifier. Specifies the preferred
language for international strings such as names.
Default is 'en'.
'''
# Set convenient default values for args
# is rows a string?
if rows is not None and not isinstance(rows, (list, tuple)):
rows = [rows]
return_df = True
elif isinstance(rows, (list, tuple)) and len(rows) == 1:
return_df = True
else:
return_df = False
if rows is None:
rows = [i for i in self._row_content if hasattr(source, i)]
# Generate the DataFrame or -Frames and store them in a DictLike with
# content-type names as keys
frames = DictLike(
{r: self._make_dataframe(source, r, **kwargs) for r in rows})
if return_df:
# There is only one item. So return the only value.
return frames.any()
else:
return frames
def _make_dataframe(self, source, rows, constraint=None,
columns=['name'], lang='en'):
def make_column(scheme, item):
if codelist_and_dsd:
# scheme is a (dimension or attribute, codelist) pair
dim_attr, scheme = scheme
# first row of a scheme, DSD-less codelist, conceptscheme etc.
if item is None:
# take the column attributes from the scheme itself
item = scheme
raw = [getattr(item, s) for s in columns]
# Select language for international strings represented as dict
translated = [s[lang] if lang in s
else (s.get('en') or ((s or None) and s.any())) for s in raw]
# for codelists, prepend dim_or_attr flag
if codelist_and_dsd:
if dim_attr in dim2cl:
translated.insert(0, 'D')
else:
translated.insert(0, 'A')
if len(translated) > 1:
return tuple(translated)
else:
return translated[0]
def iter_keys(container):
if codelist_and_dsd:
if (constraint
and container[1] in dim2cl.values()):
result = (v for v in container[1].values()
if (v.id, container[0].id) in constraint)
else:
result = container[1].values()
else:
result = container.values()
return sorted(result, key=attrgetter('id'))
def iter_schemes():
if codelist_and_dsd:
return chain(dim2cl.items(), attr2cl.items())
else:
return content.values()
def container2id(container, item):
if codelist_and_dsd:
# For first index level, get dimension or attribute ID instead of
# codelist ID
container_id = container[0].id
# 2nd index col: first row
# contains the concept, all subsequent rows are codes.
item_id = item.id
else:
# any other structure or codelist without DSD
container_id = container.id
item_id = item.id if item else None # None in first row
return container_id, item_id
def row1_col2(container):
if codelist_and_dsd:
# return the concept of the dimension or attribute
# instead of the (dim, codelist) pair
return container[0].concept
# all other cases: return None as there is nothing
# interesting about, e.g. dataflow.
return None
if rows == 'codelist':
# Assuming a msg contains only one DSD
try:
dsd = source.datastructure.any()
# Relate dimensions and attributes to corresponding codelists to
# show this relation in the resulting dataframe
dim2cl = {d: d.local_repr.enum for d in dsd.dimensions.values()
if d.local_repr.enum}
attr2cl = {a: a.local_repr.enum for a in dsd.attributes.values()
if a.local_repr.enum}
except:
dsd = None
if constraint:
try:
# use any user-provided constraint
constraint = constraint.constraints.any()
except AttributeError:
# So the Message must containe a constraint
# the following is buggy: Should be linked to a dataflow,
# DSD or provision-agreement
constraint = source.constraints.any()
# pre-compute bool value to test for DSD-related codelists
codelist_and_dsd = (rows == 'codelist' and dsd)
# allow `columns` arg to be a str
if not isinstance(columns, (list, tuple)):
columns = [columns]
# Get the structures to be written, e.g. codelist, dataflow,
# conceptscheme
content = getattr(source, rows) # 'source' is the SDMX message
# Distinguish hierarchical content consisting of a dict of dicts, and
# flat content consisting of a dict of atomic model instances. In the former case,
# the resulting DataFrame will have 2 index levels.
if isinstance(content.any(), dict):
# generate pairs of model instances, e.g. codelist
# and code. Their structure resembles the multi-index
# tuples. The model instances will be replaced
# by their id-attributes later. For now
# we need the model instances as we want to gleen
# from them other attributes for the dataframe columns.
raw_tuples = chain.from_iterable(zip(
# 1st index level eg ID of dimension
# represented by codelist, or or ConceptScheme etc.
repeat(container),
# 2nd index level: first row in each codelist is the corresponding
# container id. The following rows are item ID's. .
chain((row1_col2(container),), iter_keys(container)))
for container in iter_schemes())
# Now actually generate the index and related data for the columns
raw_idx, data = zip(*[(container2id(i, j),
make_column(i, j))
for i, j in raw_tuples])
idx = PD.MultiIndex.from_tuples(raw_idx) # set names?
else:
# flatt structure, e.g., dataflow definitions
raw_tuples = sorted(content.values(), key=attrgetter('id'))
raw_idx, data = zip(*((t.id, make_column(t, None))
for t in raw_tuples))
idx = PD.Index(raw_idx, name=rows)
# For codelists, if there is a dsd, prepend 'dim_or_attr' as synthetic column
# See corresponding insert in the make_columns function above
if codelist_and_dsd:
# make local copy to avoid side effect
columns = columns[:]
columns.insert(0, 'dim_or_attr')
return PD.DataFrame(NP.array(data), index=idx, columns=columns)
|
dr-leo/pandaSDMX | pandasdmx/api.py | Request.load_agency_profile | python | def load_agency_profile(cls, source):
'''
Classmethod loading metadata on a data provider. ``source`` must
be a json-formated string or file-like object describing one or more data providers
(URL of the SDMX web API, resource types etc.
The dict ``Request._agencies`` is updated with the metadata from the
source.
Returns None
'''
if not isinstance(source, str_type):
# so it must be a text file
source = source.read()
new_agencies = json.loads(source)
cls._agencies.update(new_agencies) | Classmethod loading metadata on a data provider. ``source`` must
be a json-formated string or file-like object describing one or more data providers
(URL of the SDMX web API, resource types etc.
The dict ``Request._agencies`` is updated with the metadata from the
source.
Returns None | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/api.py#L63-L77 | null | class Request(object):
"""Get SDMX data and metadata from remote servers or local files.
"""
# Load built-in agency metadata
s = resource_string('pandasdmx', 'agencies.json').decode('utf8')
_agencies = json.loads(s)
del s
@classmethod
@classmethod
def list_agencies(cls):
'''
Return a sorted list of valid agency IDs. These can be used to create ``Request`` instances.
'''
return sorted(list(cls._agencies))
_resources = ['dataflow', 'datastructure', 'data', 'categoryscheme',
'codelist', 'conceptscheme']
@classmethod
def _make_get_wrappers(cls):
for r in cls._resources:
setattr(cls, r, ResourceGetter(r))
def __init__(self, agency='', cache=None, log_level=None,
**http_cfg):
'''
Set the SDMX agency, and configure http requests for this instance.
Args:
agency(str): identifier of a data provider.
Must be one of the dict keys in Request._agencies such as
'ESTAT', 'ECB', ''GSR' or ''.
An empty string has the effect that the instance can only
load data or metadata from files or a pre-fabricated URL. .
defaults to '', i.e. no agency.
cache(dict): args to be passed on to
``requests_cache.install_cache()``. Default is None (no caching).
log_level(int): set log level for lib-wide logger as set up in pandasdmx.__init__.py.
For details see the docs on the
logging package from the standard lib. Default: None (= do nothing).
**http_cfg: used to configure http requests. E.g., you can
specify proxies, authentication information and more.
See also the docs of the ``requests`` package at
http://www.python-requests.org/en/latest/.
'''
# If needed, generate wrapper properties for get method
if not hasattr(self, 'data'):
self._make_get_wrappers()
self.client = remote.REST(cache, http_cfg)
self.agency = agency.upper()
if log_level:
logging.getLogger('pandasdmx').setLevel(log_level)
@property
def agency(self):
return self._agency
@agency.setter
def agency(self, value):
if value in self._agencies:
self._agency = value
else:
raise ValueError('If given, agency must be one of {0}'.format(
list(self._agencies)))
self.cache = {} # for SDMX messages and other stuff.
def clear_cache(self):
self.cache.clear()
@property
def timeout(self):
return self.client.config['timeout']
@timeout.setter
def timeout(self, value):
self.client.config['timeout'] = value
def series_keys(self, flow_id, cache=True):
'''
Get an empty dataset with all possible series keys.
Return a pandas DataFrame. Each
column represents a dimension, each row
a series key of datasets of
the given dataflow.
'''
# Check if requested series keys are already cached
cache_id = 'series_keys_' + flow_id
if cache_id in self.cache:
return self.cache[cache_id]
else:
# download an empty dataset with all available series keys
resp = self.data(flow_id, params={'detail': 'serieskeysonly'})
l = list(s.key for s in resp.data.series)
df = PD.DataFrame(l, columns=l[0]._fields, dtype='category')
if cache:
self.cache[cache_id] = df
return df
def get(self, resource_type='', resource_id='', agency='',
version=None, key='',
params={}, headers={},
fromfile=None, tofile=None, url=None, get_footer_url=(30, 3),
memcache=None, writer=None):
'''get SDMX data or metadata and return it as a :class:`pandasdmx.api.Response` instance.
While 'get' can load any SDMX file (also as zip-file) specified by 'fromfile',
it can only construct URLs for the SDMX service set for this instance.
Hence, you have to instantiate a :class:`pandasdmx.api.Request` instance for each data provider you want to access, or
pass a pre-fabricated URL through the ``url`` parameter.
Args:
resource_type(str): the type of resource to be requested. Values must be
one of the items in Request._resources such as 'data', 'dataflow', 'categoryscheme' etc.
It is used for URL construction, not to read the received SDMX file.
Hence, if `fromfile` is given, `resource_type` may be ''.
Defaults to ''.
resource_id(str): the id of the resource to be requested.
It is used for URL construction. Defaults to ''.
agency(str): ID of the agency providing the data or metadata.
Used for URL construction only. It tells the SDMX web service
which agency the requested information originates from. Note that
an SDMX service may provide information from multiple data providers.
may be '' if `fromfile` is given. Not to be confused
with the agency ID passed to :meth:`__init__` which specifies
the SDMX web service to be accessed.
key(str, dict): select columns from a dataset by specifying dimension values.
If type is str, it must conform to the SDMX REST API, i.e. dot-separated dimension values.
If 'key' is of type 'dict', it must map dimension names to allowed dimension values. Two or more
values can be separated by '+' as in the str form. The DSD will be downloaded
and the items are validated against it before downloading the dataset.
params(dict): defines the query part of the URL.
The SDMX web service guidelines (www.sdmx.org) explain the meaning of
permissible parameters. It can be used to restrict the
time range of the data to be delivered (startperiod, endperiod), whether parents, siblings or descendants of the specified
resource should be returned as well (e.g. references='parentsandsiblings'). Sensible defaults
are set automatically
depending on the values of other args such as `resource_type`.
Defaults to {}.
headers(dict): http headers. Given headers will overwrite instance-wide headers passed to the
constructor. Defaults to None, i.e. use defaults
from agency configuration
fromfile(str): path to the file to be loaded instead of
accessing an SDMX web service. Defaults to None. If `fromfile` is
given, args relating to URL construction will be ignored.
tofile(str): file path to write the received SDMX file on the fly. This
is useful if you want to load data offline using
`fromfile` or if you want to open an SDMX file in
an XML editor.
url(str): URL of the resource to download.
If given, any other arguments such as
``resource_type`` or ``resource_id`` are ignored. Default is None.
get_footer_url((int, int)):
tuple of the form (seconds, number_of_attempts). Determines the
behavior in case the received SDMX message has a footer where
one of its lines is a valid URL. ``get_footer_url`` defines how many attempts should be made to
request the resource at that URL after waiting so many seconds before each attempt.
This behavior is useful when requesting large datasets from Eurostat. Other agencies do not seem to
send such footers. Once an attempt to get the resource has been
successful, the original message containing the footer is dismissed and the dataset
is returned. The ``tofile`` argument is propagated. Note that the written file may be
a zip archive. pandaSDMX handles zip archives since version 0.2.1. Defaults to (30, 3).
memcache(str): If given, return Response instance if already in self.cache(dict),
otherwise download resource and cache Response instance.
writer(str): optional custom writer class.
Should inherit from pandasdmx.writer.BaseWriter. Defaults to None,
i.e. one of the included writers is selected as appropriate.
Returns:
pandasdmx.api.Response: instance containing the requested
SDMX Message.
'''
# Try to get resource from memory cache if specified
if memcache in self.cache:
return self.cache[memcache]
if url:
base_url = url
else:
# Construct URL from args unless ``tofile`` is given
# Validate args
agency = agency or self._agencies[self.agency]['id']
# Validate resource if no filename is specified
if not fromfile and resource_type not in self._resources:
raise ValueError(
'resource must be one of {0}'.format(self._resources))
# resource_id: if it is not a str or unicode type,
# but, e.g., an invalid Dataflow Definition,
# extract its ID
if resource_id and not isinstance(resource_id, (str_type, str)):
resource_id = resource_id.id
# If key is a dict, validate items against the DSD
# and construct the key string which becomes part of the URL
# Otherwise, do nothing as key must be a str confirming to the REST
# API spec.
if resource_type == 'data' and isinstance(key, dict):
# select validation method based on agency capabilities
if self._agencies[self.agency].get('supports_series_keys_only'):
key = self._make_key_from_series(resource_id, key)
else:
key = self._make_key_from_dsd(resource_id, key)
# Get http headers from agency config if not given by the caller
if not (fromfile or headers):
# Check for default headers
resource_cfg = self._agencies[self.agency][
'resources'].get(resource_type)
if resource_cfg:
headers = resource_cfg.get('headers') or {}
# Construct URL from the given non-empty substrings.
# if data is requested, omit the agency part. See the query
# examples
if resource_type in ['data', 'categoryscheme']:
agency_id = None
else:
agency_id = agency
if (version is None) and (resource_type != 'data'):
version = 'latest'
# Remove None's and '' first. Then join them to form the base URL.
# Any parameters are appended by remote module.
if self.agency:
parts = [self._agencies[self.agency]['url'],
resource_type,
agency_id,
resource_id, version, key]
base_url = '/'.join(filter(None, parts))
# Set references to sensible defaults
if 'references' not in params:
if resource_type in [
'dataflow', 'datastructure'] and resource_id:
params['references'] = 'all'
elif resource_type == 'categoryscheme':
params['references'] = 'parentsandsiblings'
elif fromfile:
base_url = ''
else:
raise ValueError(
'If `` url`` is not specified, either agency or fromfile must be given.')
# Now get the SDMX message either via http or as local file
logger.info(
'Requesting resource from URL/file %s', (base_url or fromfile))
source, url, resp_headers, status_code = self.client.get(
base_url, params=params, headers=headers, fromfile=fromfile)
if source is None:
raise SDMXException('Server error:', status_code, url)
logger.info(
'Loaded file into memory from URL/file: %s', (url or fromfile))
# write msg to file and unzip it as required, then parse it
with source:
if tofile:
logger.info('Writing to file %s', tofile)
with open(tofile, 'wb') as dest:
source.seek(0)
dest.write(source.read())
source.seek(0)
# handle zip files
if is_zipfile(source):
temp = source
with ZipFile(temp, mode='r') as zf:
info = zf.infolist()[0]
source = zf.open(info)
else:
# undo side effect of is_zipfile
source.seek(0)
# select reader class
if ((fromfile and fromfile.endswith('.json'))
or (self.agency and self._agencies[self.agency]['resources'].get(resource_type)
and self._agencies[self.agency]['resources'][resource_type].get('json'))):
reader_module = import_module('pandasdmx.reader.sdmxjson')
else:
reader_module = import_module('pandasdmx.reader.sdmxml')
reader_cls = reader_module.Reader
msg = reader_cls(self).initialize(source)
# Check for URL in a footer and get the real data if so configured
if get_footer_url and hasattr(msg, 'footer'):
logger.info('Footer found in SDMX message.')
# Retrieve the first URL in the footer, if any
url_l = [
i for i in msg.footer.text if remote.is_url(i)]
if url_l:
# found an URL. Wait and try to request it
footer_url = url_l[0]
seconds, attempts = get_footer_url
logger.info(
'Found URL in footer. Making %i requests, waiting %i seconds in between.', attempts, seconds)
for a in range(attempts):
sleep(seconds)
try:
return self.get(tofile=tofile, url=footer_url, headers=headers)
except Exception as e:
logger.info(
'Attempt #%i raised the following exeption: %s', a, str(e))
# Select default writer
if not writer:
if hasattr(msg, 'data'):
writer = 'pandasdmx.writer.data2pandas'
else:
writer = 'pandasdmx.writer.structure2pd'
r = Response(msg, url, resp_headers, status_code, writer=writer)
# store in memory cache if needed
if memcache and r.status_code == 200:
self.cache[memcache] = r
return r
def _make_key_from_dsd(self, flow_id, key):
'''
Download the dataflow def. and DSD and validate
key(dict) against it.
Return: key(str)
'''
# get the dataflow and the DSD ID
dataflow = self.get('dataflow', flow_id,
memcache='dataflow' + flow_id)
dsd_id = dataflow.msg.dataflow[flow_id].structure.id
dsd_resp = self.get('datastructure', dsd_id,
memcache='datastructure' + dsd_id)
dsd = dsd_resp.msg.datastructure[dsd_id]
# Extract dimensions excluding the dimension at observation (time, time-period)
# as we are only interested in dimensions for columns, not rows.
dimensions = [d for d in dsd.dimensions.aslist() if d.id not in
['TIME', 'TIME_PERIOD']]
dim_names = [d.id for d in dimensions]
# Retrieve any ContentConstraint
try:
constraint_l = [c for c in dataflow.constraint.aslist()
if c.constraint_attachment.id == flow_id]
if constraint_l:
constraint = constraint_l[0]
except:
constraint = None
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key.keys()
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
parts = []
# Iterate over the dimensions. If the key dict
# contains a value for the dimension, append it to the 'parts' list. Otherwise
# append ''. Then join the parts to form the dotted str.
for d in dimensions:
try:
values = key[d.id]
values_l = values.split('+')
codelist = d.local_repr.enum
codes = codelist.keys()
invalid = [v for v in values_l if v not in codes]
if invalid:
# ToDo: attach codelist to exception.
raise ValueError("'{0}' is not in codelist for dimension '{1}: {2}'".
format(invalid, d.id, codes))
# Check if values are in Contentconstraint if present
if constraint:
try:
invalid = [
v for v in values_l if (d.id, v) not in constraint]
if invalid:
raise ValueError("'{0}' out of content_constraint for '{1}'.".
format(invalid, d.id))
except NotImplementedError:
pass
part = values
except KeyError:
part = ''
parts.append(part)
return '.'.join(parts)
def _make_key_from_series(self, flow_id, key):
'''
Get all series keys by calling
self.series_keys, and validate
the key(dict) against it. Raises ValueError if
a value does not occur in the respective
set of dimension values. Multiple values per
dimension can be provided as a list or in 'V1+V2' notation.
Return: key(str)
'''
# get all series keys
all_keys = self.series_keys(flow_id)
dim_names = list(all_keys)
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Pre-process key by expanding multiple values as list
key = {k: v.split('+') if '+' in v else v for k, v in key.items()}
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
# First, wrap each single dim value in a list to allow
# uniform treatment of single and multiple dim values.
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# Iterate over the dimensions. If the key dict
# contains an allowed value for the dimension,
# it will become part of the string.
invalid = list(chain.from_iterable((((k, v) for v in vl if v not in all_keys[k].values)
for k, vl in key_l.items())))
if invalid:
raise ValueError("The following dimension values are invalid: {0}".
format(invalid))
# Generate the 'Val1+Val2' notation for multiple dim values and remove the
# lists
for k, v in key_l.items():
key_l[k] = '+'.join(v)
# assemble the key string which goes into the URL
parts = [key_l.get(name, '') for name in dim_names]
return '.'.join(parts)
def preview_data(self, flow_id, key=None, count=True, total=True):
'''
Get keys or number of series for a prospective dataset query allowing for
keys with multiple values per dimension.
It downloads the complete list of series keys for a dataflow rather than using constraints and DSD. This feature is,
however, not supported by all data providers.
ECB and UNSD are known to work.
Args:
flow_id(str): dataflow id
key(dict): optional key mapping dimension names to values or lists of values.
Must have been validated before. It is not checked if key values
are actually valid dimension names and values. Default: {}
count(bool): if True (default), return the number of series
of the dataset designated by flow_id and key. If False,
the actual keys are returned as a pandas DataFrame or dict of dataframes, depending on
the value of 'total'.
total(bool): if True (default), return the aggregate number
of series or a single dataframe (depending on the value of 'count'). If False,
return a dict mapping keys to dataframes of series keys.
E.g., if key={'COUNTRY':'IT+CA+AU'}, the dict will
have 3 items describing the series keys for each country
respectively. If 'count' is True, dict values will be int rather than
PD.DataFrame.
'''
all_keys = self.series_keys(flow_id)
# Handle the special case that no key is provided
if not key:
if count:
return all_keys.shape[0]
else:
return all_keys
# So there is a key specifying at least one dimension value.
# Wrap single values in 1-elem list for uniform treatment
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# order dim_names that are present in the key
dim_names = [k for k in all_keys if k in key]
# Drop columns that are not in the key
key_df = all_keys.loc[:, dim_names]
if total:
# DataFrame with matching series keys
bool_series = reduce(
and_, (key_df.isin(key_l)[col] for col in dim_names))
if count:
return bool_series.value_counts()[True]
else:
return all_keys[bool_series]
else:
# Dict of value combinations as dict keys
key_product = product(*(key_l[k] for k in dim_names))
# Replace key tuples by namedtuples
PartialKey = namedtuple_factory('PartialKey', dim_names)
matches = {PartialKey(k): reduce(and_, (key_df.isin({k1: [v1]
for k1, v1 in zip(dim_names, k)})[col]
for col in dim_names))
for k in key_product}
if not count:
# dict mapping each key to DataFrame with selected key-set
return {k: all_keys[v] for k, v in matches.items()}
else:
# Number of series per key
return {k: v.value_counts()[True] for k, v in matches.items()}
|
dr-leo/pandaSDMX | pandasdmx/api.py | Request.series_keys | python | def series_keys(self, flow_id, cache=True):
'''
Get an empty dataset with all possible series keys.
Return a pandas DataFrame. Each
column represents a dimension, each row
a series key of datasets of
the given dataflow.
'''
# Check if requested series keys are already cached
cache_id = 'series_keys_' + flow_id
if cache_id in self.cache:
return self.cache[cache_id]
else:
# download an empty dataset with all available series keys
resp = self.data(flow_id, params={'detail': 'serieskeysonly'})
l = list(s.key for s in resp.data.series)
df = PD.DataFrame(l, columns=l[0]._fields, dtype='category')
if cache:
self.cache[cache_id] = df
return df | Get an empty dataset with all possible series keys.
Return a pandas DataFrame. Each
column represents a dimension, each row
a series key of datasets of
the given dataflow. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/api.py#L150-L170 | null | class Request(object):
"""Get SDMX data and metadata from remote servers or local files.
"""
# Load built-in agency metadata
s = resource_string('pandasdmx', 'agencies.json').decode('utf8')
_agencies = json.loads(s)
del s
@classmethod
def load_agency_profile(cls, source):
'''
Classmethod loading metadata on a data provider. ``source`` must
be a json-formated string or file-like object describing one or more data providers
(URL of the SDMX web API, resource types etc.
The dict ``Request._agencies`` is updated with the metadata from the
source.
Returns None
'''
if not isinstance(source, str_type):
# so it must be a text file
source = source.read()
new_agencies = json.loads(source)
cls._agencies.update(new_agencies)
@classmethod
def list_agencies(cls):
'''
Return a sorted list of valid agency IDs. These can be used to create ``Request`` instances.
'''
return sorted(list(cls._agencies))
_resources = ['dataflow', 'datastructure', 'data', 'categoryscheme',
'codelist', 'conceptscheme']
@classmethod
def _make_get_wrappers(cls):
for r in cls._resources:
setattr(cls, r, ResourceGetter(r))
def __init__(self, agency='', cache=None, log_level=None,
**http_cfg):
'''
Set the SDMX agency, and configure http requests for this instance.
Args:
agency(str): identifier of a data provider.
Must be one of the dict keys in Request._agencies such as
'ESTAT', 'ECB', ''GSR' or ''.
An empty string has the effect that the instance can only
load data or metadata from files or a pre-fabricated URL. .
defaults to '', i.e. no agency.
cache(dict): args to be passed on to
``requests_cache.install_cache()``. Default is None (no caching).
log_level(int): set log level for lib-wide logger as set up in pandasdmx.__init__.py.
For details see the docs on the
logging package from the standard lib. Default: None (= do nothing).
**http_cfg: used to configure http requests. E.g., you can
specify proxies, authentication information and more.
See also the docs of the ``requests`` package at
http://www.python-requests.org/en/latest/.
'''
# If needed, generate wrapper properties for get method
if not hasattr(self, 'data'):
self._make_get_wrappers()
self.client = remote.REST(cache, http_cfg)
self.agency = agency.upper()
if log_level:
logging.getLogger('pandasdmx').setLevel(log_level)
@property
def agency(self):
return self._agency
@agency.setter
def agency(self, value):
if value in self._agencies:
self._agency = value
else:
raise ValueError('If given, agency must be one of {0}'.format(
list(self._agencies)))
self.cache = {} # for SDMX messages and other stuff.
def clear_cache(self):
self.cache.clear()
@property
def timeout(self):
return self.client.config['timeout']
@timeout.setter
def timeout(self, value):
self.client.config['timeout'] = value
def get(self, resource_type='', resource_id='', agency='',
version=None, key='',
params={}, headers={},
fromfile=None, tofile=None, url=None, get_footer_url=(30, 3),
memcache=None, writer=None):
'''get SDMX data or metadata and return it as a :class:`pandasdmx.api.Response` instance.
While 'get' can load any SDMX file (also as zip-file) specified by 'fromfile',
it can only construct URLs for the SDMX service set for this instance.
Hence, you have to instantiate a :class:`pandasdmx.api.Request` instance for each data provider you want to access, or
pass a pre-fabricated URL through the ``url`` parameter.
Args:
resource_type(str): the type of resource to be requested. Values must be
one of the items in Request._resources such as 'data', 'dataflow', 'categoryscheme' etc.
It is used for URL construction, not to read the received SDMX file.
Hence, if `fromfile` is given, `resource_type` may be ''.
Defaults to ''.
resource_id(str): the id of the resource to be requested.
It is used for URL construction. Defaults to ''.
agency(str): ID of the agency providing the data or metadata.
Used for URL construction only. It tells the SDMX web service
which agency the requested information originates from. Note that
an SDMX service may provide information from multiple data providers.
may be '' if `fromfile` is given. Not to be confused
with the agency ID passed to :meth:`__init__` which specifies
the SDMX web service to be accessed.
key(str, dict): select columns from a dataset by specifying dimension values.
If type is str, it must conform to the SDMX REST API, i.e. dot-separated dimension values.
If 'key' is of type 'dict', it must map dimension names to allowed dimension values. Two or more
values can be separated by '+' as in the str form. The DSD will be downloaded
and the items are validated against it before downloading the dataset.
params(dict): defines the query part of the URL.
The SDMX web service guidelines (www.sdmx.org) explain the meaning of
permissible parameters. It can be used to restrict the
time range of the data to be delivered (startperiod, endperiod), whether parents, siblings or descendants of the specified
resource should be returned as well (e.g. references='parentsandsiblings'). Sensible defaults
are set automatically
depending on the values of other args such as `resource_type`.
Defaults to {}.
headers(dict): http headers. Given headers will overwrite instance-wide headers passed to the
constructor. Defaults to None, i.e. use defaults
from agency configuration
fromfile(str): path to the file to be loaded instead of
accessing an SDMX web service. Defaults to None. If `fromfile` is
given, args relating to URL construction will be ignored.
tofile(str): file path to write the received SDMX file on the fly. This
is useful if you want to load data offline using
`fromfile` or if you want to open an SDMX file in
an XML editor.
url(str): URL of the resource to download.
If given, any other arguments such as
``resource_type`` or ``resource_id`` are ignored. Default is None.
get_footer_url((int, int)):
tuple of the form (seconds, number_of_attempts). Determines the
behavior in case the received SDMX message has a footer where
one of its lines is a valid URL. ``get_footer_url`` defines how many attempts should be made to
request the resource at that URL after waiting so many seconds before each attempt.
This behavior is useful when requesting large datasets from Eurostat. Other agencies do not seem to
send such footers. Once an attempt to get the resource has been
successful, the original message containing the footer is dismissed and the dataset
is returned. The ``tofile`` argument is propagated. Note that the written file may be
a zip archive. pandaSDMX handles zip archives since version 0.2.1. Defaults to (30, 3).
memcache(str): If given, return Response instance if already in self.cache(dict),
otherwise download resource and cache Response instance.
writer(str): optional custom writer class.
Should inherit from pandasdmx.writer.BaseWriter. Defaults to None,
i.e. one of the included writers is selected as appropriate.
Returns:
pandasdmx.api.Response: instance containing the requested
SDMX Message.
'''
# Try to get resource from memory cache if specified
if memcache in self.cache:
return self.cache[memcache]
if url:
base_url = url
else:
# Construct URL from args unless ``tofile`` is given
# Validate args
agency = agency or self._agencies[self.agency]['id']
# Validate resource if no filename is specified
if not fromfile and resource_type not in self._resources:
raise ValueError(
'resource must be one of {0}'.format(self._resources))
# resource_id: if it is not a str or unicode type,
# but, e.g., an invalid Dataflow Definition,
# extract its ID
if resource_id and not isinstance(resource_id, (str_type, str)):
resource_id = resource_id.id
# If key is a dict, validate items against the DSD
# and construct the key string which becomes part of the URL
# Otherwise, do nothing as key must be a str confirming to the REST
# API spec.
if resource_type == 'data' and isinstance(key, dict):
# select validation method based on agency capabilities
if self._agencies[self.agency].get('supports_series_keys_only'):
key = self._make_key_from_series(resource_id, key)
else:
key = self._make_key_from_dsd(resource_id, key)
# Get http headers from agency config if not given by the caller
if not (fromfile or headers):
# Check for default headers
resource_cfg = self._agencies[self.agency][
'resources'].get(resource_type)
if resource_cfg:
headers = resource_cfg.get('headers') or {}
# Construct URL from the given non-empty substrings.
# if data is requested, omit the agency part. See the query
# examples
if resource_type in ['data', 'categoryscheme']:
agency_id = None
else:
agency_id = agency
if (version is None) and (resource_type != 'data'):
version = 'latest'
# Remove None's and '' first. Then join them to form the base URL.
# Any parameters are appended by remote module.
if self.agency:
parts = [self._agencies[self.agency]['url'],
resource_type,
agency_id,
resource_id, version, key]
base_url = '/'.join(filter(None, parts))
# Set references to sensible defaults
if 'references' not in params:
if resource_type in [
'dataflow', 'datastructure'] and resource_id:
params['references'] = 'all'
elif resource_type == 'categoryscheme':
params['references'] = 'parentsandsiblings'
elif fromfile:
base_url = ''
else:
raise ValueError(
'If `` url`` is not specified, either agency or fromfile must be given.')
# Now get the SDMX message either via http or as local file
logger.info(
'Requesting resource from URL/file %s', (base_url or fromfile))
source, url, resp_headers, status_code = self.client.get(
base_url, params=params, headers=headers, fromfile=fromfile)
if source is None:
raise SDMXException('Server error:', status_code, url)
logger.info(
'Loaded file into memory from URL/file: %s', (url or fromfile))
# write msg to file and unzip it as required, then parse it
with source:
if tofile:
logger.info('Writing to file %s', tofile)
with open(tofile, 'wb') as dest:
source.seek(0)
dest.write(source.read())
source.seek(0)
# handle zip files
if is_zipfile(source):
temp = source
with ZipFile(temp, mode='r') as zf:
info = zf.infolist()[0]
source = zf.open(info)
else:
# undo side effect of is_zipfile
source.seek(0)
# select reader class
if ((fromfile and fromfile.endswith('.json'))
or (self.agency and self._agencies[self.agency]['resources'].get(resource_type)
and self._agencies[self.agency]['resources'][resource_type].get('json'))):
reader_module = import_module('pandasdmx.reader.sdmxjson')
else:
reader_module = import_module('pandasdmx.reader.sdmxml')
reader_cls = reader_module.Reader
msg = reader_cls(self).initialize(source)
# Check for URL in a footer and get the real data if so configured
if get_footer_url and hasattr(msg, 'footer'):
logger.info('Footer found in SDMX message.')
# Retrieve the first URL in the footer, if any
url_l = [
i for i in msg.footer.text if remote.is_url(i)]
if url_l:
# found an URL. Wait and try to request it
footer_url = url_l[0]
seconds, attempts = get_footer_url
logger.info(
'Found URL in footer. Making %i requests, waiting %i seconds in between.', attempts, seconds)
for a in range(attempts):
sleep(seconds)
try:
return self.get(tofile=tofile, url=footer_url, headers=headers)
except Exception as e:
logger.info(
'Attempt #%i raised the following exeption: %s', a, str(e))
# Select default writer
if not writer:
if hasattr(msg, 'data'):
writer = 'pandasdmx.writer.data2pandas'
else:
writer = 'pandasdmx.writer.structure2pd'
r = Response(msg, url, resp_headers, status_code, writer=writer)
# store in memory cache if needed
if memcache and r.status_code == 200:
self.cache[memcache] = r
return r
def _make_key_from_dsd(self, flow_id, key):
'''
Download the dataflow def. and DSD and validate
key(dict) against it.
Return: key(str)
'''
# get the dataflow and the DSD ID
dataflow = self.get('dataflow', flow_id,
memcache='dataflow' + flow_id)
dsd_id = dataflow.msg.dataflow[flow_id].structure.id
dsd_resp = self.get('datastructure', dsd_id,
memcache='datastructure' + dsd_id)
dsd = dsd_resp.msg.datastructure[dsd_id]
# Extract dimensions excluding the dimension at observation (time, time-period)
# as we are only interested in dimensions for columns, not rows.
dimensions = [d for d in dsd.dimensions.aslist() if d.id not in
['TIME', 'TIME_PERIOD']]
dim_names = [d.id for d in dimensions]
# Retrieve any ContentConstraint
try:
constraint_l = [c for c in dataflow.constraint.aslist()
if c.constraint_attachment.id == flow_id]
if constraint_l:
constraint = constraint_l[0]
except:
constraint = None
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key.keys()
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
parts = []
# Iterate over the dimensions. If the key dict
# contains a value for the dimension, append it to the 'parts' list. Otherwise
# append ''. Then join the parts to form the dotted str.
for d in dimensions:
try:
values = key[d.id]
values_l = values.split('+')
codelist = d.local_repr.enum
codes = codelist.keys()
invalid = [v for v in values_l if v not in codes]
if invalid:
# ToDo: attach codelist to exception.
raise ValueError("'{0}' is not in codelist for dimension '{1}: {2}'".
format(invalid, d.id, codes))
# Check if values are in Contentconstraint if present
if constraint:
try:
invalid = [
v for v in values_l if (d.id, v) not in constraint]
if invalid:
raise ValueError("'{0}' out of content_constraint for '{1}'.".
format(invalid, d.id))
except NotImplementedError:
pass
part = values
except KeyError:
part = ''
parts.append(part)
return '.'.join(parts)
def _make_key_from_series(self, flow_id, key):
'''
Get all series keys by calling
self.series_keys, and validate
the key(dict) against it. Raises ValueError if
a value does not occur in the respective
set of dimension values. Multiple values per
dimension can be provided as a list or in 'V1+V2' notation.
Return: key(str)
'''
# get all series keys
all_keys = self.series_keys(flow_id)
dim_names = list(all_keys)
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Pre-process key by expanding multiple values as list
key = {k: v.split('+') if '+' in v else v for k, v in key.items()}
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
# First, wrap each single dim value in a list to allow
# uniform treatment of single and multiple dim values.
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# Iterate over the dimensions. If the key dict
# contains an allowed value for the dimension,
# it will become part of the string.
invalid = list(chain.from_iterable((((k, v) for v in vl if v not in all_keys[k].values)
for k, vl in key_l.items())))
if invalid:
raise ValueError("The following dimension values are invalid: {0}".
format(invalid))
# Generate the 'Val1+Val2' notation for multiple dim values and remove the
# lists
for k, v in key_l.items():
key_l[k] = '+'.join(v)
# assemble the key string which goes into the URL
parts = [key_l.get(name, '') for name in dim_names]
return '.'.join(parts)
def preview_data(self, flow_id, key=None, count=True, total=True):
'''
Get keys or number of series for a prospective dataset query allowing for
keys with multiple values per dimension.
It downloads the complete list of series keys for a dataflow rather than using constraints and DSD. This feature is,
however, not supported by all data providers.
ECB and UNSD are known to work.
Args:
flow_id(str): dataflow id
key(dict): optional key mapping dimension names to values or lists of values.
Must have been validated before. It is not checked if key values
are actually valid dimension names and values. Default: {}
count(bool): if True (default), return the number of series
of the dataset designated by flow_id and key. If False,
the actual keys are returned as a pandas DataFrame or dict of dataframes, depending on
the value of 'total'.
total(bool): if True (default), return the aggregate number
of series or a single dataframe (depending on the value of 'count'). If False,
return a dict mapping keys to dataframes of series keys.
E.g., if key={'COUNTRY':'IT+CA+AU'}, the dict will
have 3 items describing the series keys for each country
respectively. If 'count' is True, dict values will be int rather than
PD.DataFrame.
'''
all_keys = self.series_keys(flow_id)
# Handle the special case that no key is provided
if not key:
if count:
return all_keys.shape[0]
else:
return all_keys
# So there is a key specifying at least one dimension value.
# Wrap single values in 1-elem list for uniform treatment
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# order dim_names that are present in the key
dim_names = [k for k in all_keys if k in key]
# Drop columns that are not in the key
key_df = all_keys.loc[:, dim_names]
if total:
# DataFrame with matching series keys
bool_series = reduce(
and_, (key_df.isin(key_l)[col] for col in dim_names))
if count:
return bool_series.value_counts()[True]
else:
return all_keys[bool_series]
else:
# Dict of value combinations as dict keys
key_product = product(*(key_l[k] for k in dim_names))
# Replace key tuples by namedtuples
PartialKey = namedtuple_factory('PartialKey', dim_names)
matches = {PartialKey(k): reduce(and_, (key_df.isin({k1: [v1]
for k1, v1 in zip(dim_names, k)})[col]
for col in dim_names))
for k in key_product}
if not count:
# dict mapping each key to DataFrame with selected key-set
return {k: all_keys[v] for k, v in matches.items()}
else:
# Number of series per key
return {k: v.value_counts()[True] for k, v in matches.items()}
|
dr-leo/pandaSDMX | pandasdmx/api.py | Request.get | python | def get(self, resource_type='', resource_id='', agency='',
version=None, key='',
params={}, headers={},
fromfile=None, tofile=None, url=None, get_footer_url=(30, 3),
memcache=None, writer=None):
'''get SDMX data or metadata and return it as a :class:`pandasdmx.api.Response` instance.
While 'get' can load any SDMX file (also as zip-file) specified by 'fromfile',
it can only construct URLs for the SDMX service set for this instance.
Hence, you have to instantiate a :class:`pandasdmx.api.Request` instance for each data provider you want to access, or
pass a pre-fabricated URL through the ``url`` parameter.
Args:
resource_type(str): the type of resource to be requested. Values must be
one of the items in Request._resources such as 'data', 'dataflow', 'categoryscheme' etc.
It is used for URL construction, not to read the received SDMX file.
Hence, if `fromfile` is given, `resource_type` may be ''.
Defaults to ''.
resource_id(str): the id of the resource to be requested.
It is used for URL construction. Defaults to ''.
agency(str): ID of the agency providing the data or metadata.
Used for URL construction only. It tells the SDMX web service
which agency the requested information originates from. Note that
an SDMX service may provide information from multiple data providers.
may be '' if `fromfile` is given. Not to be confused
with the agency ID passed to :meth:`__init__` which specifies
the SDMX web service to be accessed.
key(str, dict): select columns from a dataset by specifying dimension values.
If type is str, it must conform to the SDMX REST API, i.e. dot-separated dimension values.
If 'key' is of type 'dict', it must map dimension names to allowed dimension values. Two or more
values can be separated by '+' as in the str form. The DSD will be downloaded
and the items are validated against it before downloading the dataset.
params(dict): defines the query part of the URL.
The SDMX web service guidelines (www.sdmx.org) explain the meaning of
permissible parameters. It can be used to restrict the
time range of the data to be delivered (startperiod, endperiod), whether parents, siblings or descendants of the specified
resource should be returned as well (e.g. references='parentsandsiblings'). Sensible defaults
are set automatically
depending on the values of other args such as `resource_type`.
Defaults to {}.
headers(dict): http headers. Given headers will overwrite instance-wide headers passed to the
constructor. Defaults to None, i.e. use defaults
from agency configuration
fromfile(str): path to the file to be loaded instead of
accessing an SDMX web service. Defaults to None. If `fromfile` is
given, args relating to URL construction will be ignored.
tofile(str): file path to write the received SDMX file on the fly. This
is useful if you want to load data offline using
`fromfile` or if you want to open an SDMX file in
an XML editor.
url(str): URL of the resource to download.
If given, any other arguments such as
``resource_type`` or ``resource_id`` are ignored. Default is None.
get_footer_url((int, int)):
tuple of the form (seconds, number_of_attempts). Determines the
behavior in case the received SDMX message has a footer where
one of its lines is a valid URL. ``get_footer_url`` defines how many attempts should be made to
request the resource at that URL after waiting so many seconds before each attempt.
This behavior is useful when requesting large datasets from Eurostat. Other agencies do not seem to
send such footers. Once an attempt to get the resource has been
successful, the original message containing the footer is dismissed and the dataset
is returned. The ``tofile`` argument is propagated. Note that the written file may be
a zip archive. pandaSDMX handles zip archives since version 0.2.1. Defaults to (30, 3).
memcache(str): If given, return Response instance if already in self.cache(dict),
otherwise download resource and cache Response instance.
writer(str): optional custom writer class.
Should inherit from pandasdmx.writer.BaseWriter. Defaults to None,
i.e. one of the included writers is selected as appropriate.
Returns:
pandasdmx.api.Response: instance containing the requested
SDMX Message.
'''
# Try to get resource from memory cache if specified
if memcache in self.cache:
return self.cache[memcache]
if url:
base_url = url
else:
# Construct URL from args unless ``tofile`` is given
# Validate args
agency = agency or self._agencies[self.agency]['id']
# Validate resource if no filename is specified
if not fromfile and resource_type not in self._resources:
raise ValueError(
'resource must be one of {0}'.format(self._resources))
# resource_id: if it is not a str or unicode type,
# but, e.g., an invalid Dataflow Definition,
# extract its ID
if resource_id and not isinstance(resource_id, (str_type, str)):
resource_id = resource_id.id
# If key is a dict, validate items against the DSD
# and construct the key string which becomes part of the URL
# Otherwise, do nothing as key must be a str confirming to the REST
# API spec.
if resource_type == 'data' and isinstance(key, dict):
# select validation method based on agency capabilities
if self._agencies[self.agency].get('supports_series_keys_only'):
key = self._make_key_from_series(resource_id, key)
else:
key = self._make_key_from_dsd(resource_id, key)
# Get http headers from agency config if not given by the caller
if not (fromfile or headers):
# Check for default headers
resource_cfg = self._agencies[self.agency][
'resources'].get(resource_type)
if resource_cfg:
headers = resource_cfg.get('headers') or {}
# Construct URL from the given non-empty substrings.
# if data is requested, omit the agency part. See the query
# examples
if resource_type in ['data', 'categoryscheme']:
agency_id = None
else:
agency_id = agency
if (version is None) and (resource_type != 'data'):
version = 'latest'
# Remove None's and '' first. Then join them to form the base URL.
# Any parameters are appended by remote module.
if self.agency:
parts = [self._agencies[self.agency]['url'],
resource_type,
agency_id,
resource_id, version, key]
base_url = '/'.join(filter(None, parts))
# Set references to sensible defaults
if 'references' not in params:
if resource_type in [
'dataflow', 'datastructure'] and resource_id:
params['references'] = 'all'
elif resource_type == 'categoryscheme':
params['references'] = 'parentsandsiblings'
elif fromfile:
base_url = ''
else:
raise ValueError(
'If `` url`` is not specified, either agency or fromfile must be given.')
# Now get the SDMX message either via http or as local file
logger.info(
'Requesting resource from URL/file %s', (base_url or fromfile))
source, url, resp_headers, status_code = self.client.get(
base_url, params=params, headers=headers, fromfile=fromfile)
if source is None:
raise SDMXException('Server error:', status_code, url)
logger.info(
'Loaded file into memory from URL/file: %s', (url or fromfile))
# write msg to file and unzip it as required, then parse it
with source:
if tofile:
logger.info('Writing to file %s', tofile)
with open(tofile, 'wb') as dest:
source.seek(0)
dest.write(source.read())
source.seek(0)
# handle zip files
if is_zipfile(source):
temp = source
with ZipFile(temp, mode='r') as zf:
info = zf.infolist()[0]
source = zf.open(info)
else:
# undo side effect of is_zipfile
source.seek(0)
# select reader class
if ((fromfile and fromfile.endswith('.json'))
or (self.agency and self._agencies[self.agency]['resources'].get(resource_type)
and self._agencies[self.agency]['resources'][resource_type].get('json'))):
reader_module = import_module('pandasdmx.reader.sdmxjson')
else:
reader_module = import_module('pandasdmx.reader.sdmxml')
reader_cls = reader_module.Reader
msg = reader_cls(self).initialize(source)
# Check for URL in a footer and get the real data if so configured
if get_footer_url and hasattr(msg, 'footer'):
logger.info('Footer found in SDMX message.')
# Retrieve the first URL in the footer, if any
url_l = [
i for i in msg.footer.text if remote.is_url(i)]
if url_l:
# found an URL. Wait and try to request it
footer_url = url_l[0]
seconds, attempts = get_footer_url
logger.info(
'Found URL in footer. Making %i requests, waiting %i seconds in between.', attempts, seconds)
for a in range(attempts):
sleep(seconds)
try:
return self.get(tofile=tofile, url=footer_url, headers=headers)
except Exception as e:
logger.info(
'Attempt #%i raised the following exeption: %s', a, str(e))
# Select default writer
if not writer:
if hasattr(msg, 'data'):
writer = 'pandasdmx.writer.data2pandas'
else:
writer = 'pandasdmx.writer.structure2pd'
r = Response(msg, url, resp_headers, status_code, writer=writer)
# store in memory cache if needed
if memcache and r.status_code == 200:
self.cache[memcache] = r
return r | get SDMX data or metadata and return it as a :class:`pandasdmx.api.Response` instance.
While 'get' can load any SDMX file (also as zip-file) specified by 'fromfile',
it can only construct URLs for the SDMX service set for this instance.
Hence, you have to instantiate a :class:`pandasdmx.api.Request` instance for each data provider you want to access, or
pass a pre-fabricated URL through the ``url`` parameter.
Args:
resource_type(str): the type of resource to be requested. Values must be
one of the items in Request._resources such as 'data', 'dataflow', 'categoryscheme' etc.
It is used for URL construction, not to read the received SDMX file.
Hence, if `fromfile` is given, `resource_type` may be ''.
Defaults to ''.
resource_id(str): the id of the resource to be requested.
It is used for URL construction. Defaults to ''.
agency(str): ID of the agency providing the data or metadata.
Used for URL construction only. It tells the SDMX web service
which agency the requested information originates from. Note that
an SDMX service may provide information from multiple data providers.
may be '' if `fromfile` is given. Not to be confused
with the agency ID passed to :meth:`__init__` which specifies
the SDMX web service to be accessed.
key(str, dict): select columns from a dataset by specifying dimension values.
If type is str, it must conform to the SDMX REST API, i.e. dot-separated dimension values.
If 'key' is of type 'dict', it must map dimension names to allowed dimension values. Two or more
values can be separated by '+' as in the str form. The DSD will be downloaded
and the items are validated against it before downloading the dataset.
params(dict): defines the query part of the URL.
The SDMX web service guidelines (www.sdmx.org) explain the meaning of
permissible parameters. It can be used to restrict the
time range of the data to be delivered (startperiod, endperiod), whether parents, siblings or descendants of the specified
resource should be returned as well (e.g. references='parentsandsiblings'). Sensible defaults
are set automatically
depending on the values of other args such as `resource_type`.
Defaults to {}.
headers(dict): http headers. Given headers will overwrite instance-wide headers passed to the
constructor. Defaults to None, i.e. use defaults
from agency configuration
fromfile(str): path to the file to be loaded instead of
accessing an SDMX web service. Defaults to None. If `fromfile` is
given, args relating to URL construction will be ignored.
tofile(str): file path to write the received SDMX file on the fly. This
is useful if you want to load data offline using
`fromfile` or if you want to open an SDMX file in
an XML editor.
url(str): URL of the resource to download.
If given, any other arguments such as
``resource_type`` or ``resource_id`` are ignored. Default is None.
get_footer_url((int, int)):
tuple of the form (seconds, number_of_attempts). Determines the
behavior in case the received SDMX message has a footer where
one of its lines is a valid URL. ``get_footer_url`` defines how many attempts should be made to
request the resource at that URL after waiting so many seconds before each attempt.
This behavior is useful when requesting large datasets from Eurostat. Other agencies do not seem to
send such footers. Once an attempt to get the resource has been
successful, the original message containing the footer is dismissed and the dataset
is returned. The ``tofile`` argument is propagated. Note that the written file may be
a zip archive. pandaSDMX handles zip archives since version 0.2.1. Defaults to (30, 3).
memcache(str): If given, return Response instance if already in self.cache(dict),
otherwise download resource and cache Response instance.
writer(str): optional custom writer class.
Should inherit from pandasdmx.writer.BaseWriter. Defaults to None,
i.e. one of the included writers is selected as appropriate.
Returns:
pandasdmx.api.Response: instance containing the requested
SDMX Message. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/api.py#L172-L381 | [
"def get(self, resource_type='', resource_id='', agency='',\n version=None, key='',\n params={}, headers={},\n fromfile=None, tofile=None, url=None, get_footer_url=(30, 3),\n memcache=None, writer=None):\n '''get SDMX data or metadata and return it as a :class:`pandasdmx.api.Response`... | class Request(object):
"""Get SDMX data and metadata from remote servers or local files.
"""
# Load built-in agency metadata
s = resource_string('pandasdmx', 'agencies.json').decode('utf8')
_agencies = json.loads(s)
del s
@classmethod
def load_agency_profile(cls, source):
'''
Classmethod loading metadata on a data provider. ``source`` must
be a json-formated string or file-like object describing one or more data providers
(URL of the SDMX web API, resource types etc.
The dict ``Request._agencies`` is updated with the metadata from the
source.
Returns None
'''
if not isinstance(source, str_type):
# so it must be a text file
source = source.read()
new_agencies = json.loads(source)
cls._agencies.update(new_agencies)
@classmethod
def list_agencies(cls):
'''
Return a sorted list of valid agency IDs. These can be used to create ``Request`` instances.
'''
return sorted(list(cls._agencies))
_resources = ['dataflow', 'datastructure', 'data', 'categoryscheme',
'codelist', 'conceptscheme']
@classmethod
def _make_get_wrappers(cls):
for r in cls._resources:
setattr(cls, r, ResourceGetter(r))
def __init__(self, agency='', cache=None, log_level=None,
**http_cfg):
'''
Set the SDMX agency, and configure http requests for this instance.
Args:
agency(str): identifier of a data provider.
Must be one of the dict keys in Request._agencies such as
'ESTAT', 'ECB', ''GSR' or ''.
An empty string has the effect that the instance can only
load data or metadata from files or a pre-fabricated URL. .
defaults to '', i.e. no agency.
cache(dict): args to be passed on to
``requests_cache.install_cache()``. Default is None (no caching).
log_level(int): set log level for lib-wide logger as set up in pandasdmx.__init__.py.
For details see the docs on the
logging package from the standard lib. Default: None (= do nothing).
**http_cfg: used to configure http requests. E.g., you can
specify proxies, authentication information and more.
See also the docs of the ``requests`` package at
http://www.python-requests.org/en/latest/.
'''
# If needed, generate wrapper properties for get method
if not hasattr(self, 'data'):
self._make_get_wrappers()
self.client = remote.REST(cache, http_cfg)
self.agency = agency.upper()
if log_level:
logging.getLogger('pandasdmx').setLevel(log_level)
@property
def agency(self):
return self._agency
@agency.setter
def agency(self, value):
if value in self._agencies:
self._agency = value
else:
raise ValueError('If given, agency must be one of {0}'.format(
list(self._agencies)))
self.cache = {} # for SDMX messages and other stuff.
def clear_cache(self):
self.cache.clear()
@property
def timeout(self):
return self.client.config['timeout']
@timeout.setter
def timeout(self, value):
self.client.config['timeout'] = value
def series_keys(self, flow_id, cache=True):
'''
Get an empty dataset with all possible series keys.
Return a pandas DataFrame. Each
column represents a dimension, each row
a series key of datasets of
the given dataflow.
'''
# Check if requested series keys are already cached
cache_id = 'series_keys_' + flow_id
if cache_id in self.cache:
return self.cache[cache_id]
else:
# download an empty dataset with all available series keys
resp = self.data(flow_id, params={'detail': 'serieskeysonly'})
l = list(s.key for s in resp.data.series)
df = PD.DataFrame(l, columns=l[0]._fields, dtype='category')
if cache:
self.cache[cache_id] = df
return df
def _make_key_from_dsd(self, flow_id, key):
'''
Download the dataflow def. and DSD and validate
key(dict) against it.
Return: key(str)
'''
# get the dataflow and the DSD ID
dataflow = self.get('dataflow', flow_id,
memcache='dataflow' + flow_id)
dsd_id = dataflow.msg.dataflow[flow_id].structure.id
dsd_resp = self.get('datastructure', dsd_id,
memcache='datastructure' + dsd_id)
dsd = dsd_resp.msg.datastructure[dsd_id]
# Extract dimensions excluding the dimension at observation (time, time-period)
# as we are only interested in dimensions for columns, not rows.
dimensions = [d for d in dsd.dimensions.aslist() if d.id not in
['TIME', 'TIME_PERIOD']]
dim_names = [d.id for d in dimensions]
# Retrieve any ContentConstraint
try:
constraint_l = [c for c in dataflow.constraint.aslist()
if c.constraint_attachment.id == flow_id]
if constraint_l:
constraint = constraint_l[0]
except:
constraint = None
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key.keys()
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
parts = []
# Iterate over the dimensions. If the key dict
# contains a value for the dimension, append it to the 'parts' list. Otherwise
# append ''. Then join the parts to form the dotted str.
for d in dimensions:
try:
values = key[d.id]
values_l = values.split('+')
codelist = d.local_repr.enum
codes = codelist.keys()
invalid = [v for v in values_l if v not in codes]
if invalid:
# ToDo: attach codelist to exception.
raise ValueError("'{0}' is not in codelist for dimension '{1}: {2}'".
format(invalid, d.id, codes))
# Check if values are in Contentconstraint if present
if constraint:
try:
invalid = [
v for v in values_l if (d.id, v) not in constraint]
if invalid:
raise ValueError("'{0}' out of content_constraint for '{1}'.".
format(invalid, d.id))
except NotImplementedError:
pass
part = values
except KeyError:
part = ''
parts.append(part)
return '.'.join(parts)
def _make_key_from_series(self, flow_id, key):
'''
Get all series keys by calling
self.series_keys, and validate
the key(dict) against it. Raises ValueError if
a value does not occur in the respective
set of dimension values. Multiple values per
dimension can be provided as a list or in 'V1+V2' notation.
Return: key(str)
'''
# get all series keys
all_keys = self.series_keys(flow_id)
dim_names = list(all_keys)
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Pre-process key by expanding multiple values as list
key = {k: v.split('+') if '+' in v else v for k, v in key.items()}
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
# First, wrap each single dim value in a list to allow
# uniform treatment of single and multiple dim values.
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# Iterate over the dimensions. If the key dict
# contains an allowed value for the dimension,
# it will become part of the string.
invalid = list(chain.from_iterable((((k, v) for v in vl if v not in all_keys[k].values)
for k, vl in key_l.items())))
if invalid:
raise ValueError("The following dimension values are invalid: {0}".
format(invalid))
# Generate the 'Val1+Val2' notation for multiple dim values and remove the
# lists
for k, v in key_l.items():
key_l[k] = '+'.join(v)
# assemble the key string which goes into the URL
parts = [key_l.get(name, '') for name in dim_names]
return '.'.join(parts)
def preview_data(self, flow_id, key=None, count=True, total=True):
'''
Get keys or number of series for a prospective dataset query allowing for
keys with multiple values per dimension.
It downloads the complete list of series keys for a dataflow rather than using constraints and DSD. This feature is,
however, not supported by all data providers.
ECB and UNSD are known to work.
Args:
flow_id(str): dataflow id
key(dict): optional key mapping dimension names to values or lists of values.
Must have been validated before. It is not checked if key values
are actually valid dimension names and values. Default: {}
count(bool): if True (default), return the number of series
of the dataset designated by flow_id and key. If False,
the actual keys are returned as a pandas DataFrame or dict of dataframes, depending on
the value of 'total'.
total(bool): if True (default), return the aggregate number
of series or a single dataframe (depending on the value of 'count'). If False,
return a dict mapping keys to dataframes of series keys.
E.g., if key={'COUNTRY':'IT+CA+AU'}, the dict will
have 3 items describing the series keys for each country
respectively. If 'count' is True, dict values will be int rather than
PD.DataFrame.
'''
all_keys = self.series_keys(flow_id)
# Handle the special case that no key is provided
if not key:
if count:
return all_keys.shape[0]
else:
return all_keys
# So there is a key specifying at least one dimension value.
# Wrap single values in 1-elem list for uniform treatment
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# order dim_names that are present in the key
dim_names = [k for k in all_keys if k in key]
# Drop columns that are not in the key
key_df = all_keys.loc[:, dim_names]
if total:
# DataFrame with matching series keys
bool_series = reduce(
and_, (key_df.isin(key_l)[col] for col in dim_names))
if count:
return bool_series.value_counts()[True]
else:
return all_keys[bool_series]
else:
# Dict of value combinations as dict keys
key_product = product(*(key_l[k] for k in dim_names))
# Replace key tuples by namedtuples
PartialKey = namedtuple_factory('PartialKey', dim_names)
matches = {PartialKey(k): reduce(and_, (key_df.isin({k1: [v1]
for k1, v1 in zip(dim_names, k)})[col]
for col in dim_names))
for k in key_product}
if not count:
# dict mapping each key to DataFrame with selected key-set
return {k: all_keys[v] for k, v in matches.items()}
else:
# Number of series per key
return {k: v.value_counts()[True] for k, v in matches.items()}
|
dr-leo/pandaSDMX | pandasdmx/api.py | Request._make_key_from_dsd | python | def _make_key_from_dsd(self, flow_id, key):
'''
Download the dataflow def. and DSD and validate
key(dict) against it.
Return: key(str)
'''
# get the dataflow and the DSD ID
dataflow = self.get('dataflow', flow_id,
memcache='dataflow' + flow_id)
dsd_id = dataflow.msg.dataflow[flow_id].structure.id
dsd_resp = self.get('datastructure', dsd_id,
memcache='datastructure' + dsd_id)
dsd = dsd_resp.msg.datastructure[dsd_id]
# Extract dimensions excluding the dimension at observation (time, time-period)
# as we are only interested in dimensions for columns, not rows.
dimensions = [d for d in dsd.dimensions.aslist() if d.id not in
['TIME', 'TIME_PERIOD']]
dim_names = [d.id for d in dimensions]
# Retrieve any ContentConstraint
try:
constraint_l = [c for c in dataflow.constraint.aslist()
if c.constraint_attachment.id == flow_id]
if constraint_l:
constraint = constraint_l[0]
except:
constraint = None
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key.keys()
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
parts = []
# Iterate over the dimensions. If the key dict
# contains a value for the dimension, append it to the 'parts' list. Otherwise
# append ''. Then join the parts to form the dotted str.
for d in dimensions:
try:
values = key[d.id]
values_l = values.split('+')
codelist = d.local_repr.enum
codes = codelist.keys()
invalid = [v for v in values_l if v not in codes]
if invalid:
# ToDo: attach codelist to exception.
raise ValueError("'{0}' is not in codelist for dimension '{1}: {2}'".
format(invalid, d.id, codes))
# Check if values are in Contentconstraint if present
if constraint:
try:
invalid = [
v for v in values_l if (d.id, v) not in constraint]
if invalid:
raise ValueError("'{0}' out of content_constraint for '{1}'.".
format(invalid, d.id))
except NotImplementedError:
pass
part = values
except KeyError:
part = ''
parts.append(part)
return '.'.join(parts) | Download the dataflow def. and DSD and validate
key(dict) against it.
Return: key(str) | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/api.py#L383-L448 | null | class Request(object):
"""Get SDMX data and metadata from remote servers or local files.
"""
# Load built-in agency metadata
s = resource_string('pandasdmx', 'agencies.json').decode('utf8')
_agencies = json.loads(s)
del s
@classmethod
def load_agency_profile(cls, source):
'''
Classmethod loading metadata on a data provider. ``source`` must
be a json-formated string or file-like object describing one or more data providers
(URL of the SDMX web API, resource types etc.
The dict ``Request._agencies`` is updated with the metadata from the
source.
Returns None
'''
if not isinstance(source, str_type):
# so it must be a text file
source = source.read()
new_agencies = json.loads(source)
cls._agencies.update(new_agencies)
@classmethod
def list_agencies(cls):
'''
Return a sorted list of valid agency IDs. These can be used to create ``Request`` instances.
'''
return sorted(list(cls._agencies))
_resources = ['dataflow', 'datastructure', 'data', 'categoryscheme',
'codelist', 'conceptscheme']
@classmethod
def _make_get_wrappers(cls):
for r in cls._resources:
setattr(cls, r, ResourceGetter(r))
def __init__(self, agency='', cache=None, log_level=None,
**http_cfg):
'''
Set the SDMX agency, and configure http requests for this instance.
Args:
agency(str): identifier of a data provider.
Must be one of the dict keys in Request._agencies such as
'ESTAT', 'ECB', ''GSR' or ''.
An empty string has the effect that the instance can only
load data or metadata from files or a pre-fabricated URL. .
defaults to '', i.e. no agency.
cache(dict): args to be passed on to
``requests_cache.install_cache()``. Default is None (no caching).
log_level(int): set log level for lib-wide logger as set up in pandasdmx.__init__.py.
For details see the docs on the
logging package from the standard lib. Default: None (= do nothing).
**http_cfg: used to configure http requests. E.g., you can
specify proxies, authentication information and more.
See also the docs of the ``requests`` package at
http://www.python-requests.org/en/latest/.
'''
# If needed, generate wrapper properties for get method
if not hasattr(self, 'data'):
self._make_get_wrappers()
self.client = remote.REST(cache, http_cfg)
self.agency = agency.upper()
if log_level:
logging.getLogger('pandasdmx').setLevel(log_level)
@property
def agency(self):
return self._agency
@agency.setter
def agency(self, value):
if value in self._agencies:
self._agency = value
else:
raise ValueError('If given, agency must be one of {0}'.format(
list(self._agencies)))
self.cache = {} # for SDMX messages and other stuff.
def clear_cache(self):
self.cache.clear()
@property
def timeout(self):
return self.client.config['timeout']
@timeout.setter
def timeout(self, value):
self.client.config['timeout'] = value
def series_keys(self, flow_id, cache=True):
'''
Get an empty dataset with all possible series keys.
Return a pandas DataFrame. Each
column represents a dimension, each row
a series key of datasets of
the given dataflow.
'''
# Check if requested series keys are already cached
cache_id = 'series_keys_' + flow_id
if cache_id in self.cache:
return self.cache[cache_id]
else:
# download an empty dataset with all available series keys
resp = self.data(flow_id, params={'detail': 'serieskeysonly'})
l = list(s.key for s in resp.data.series)
df = PD.DataFrame(l, columns=l[0]._fields, dtype='category')
if cache:
self.cache[cache_id] = df
return df
def get(self, resource_type='', resource_id='', agency='',
version=None, key='',
params={}, headers={},
fromfile=None, tofile=None, url=None, get_footer_url=(30, 3),
memcache=None, writer=None):
'''get SDMX data or metadata and return it as a :class:`pandasdmx.api.Response` instance.
While 'get' can load any SDMX file (also as zip-file) specified by 'fromfile',
it can only construct URLs for the SDMX service set for this instance.
Hence, you have to instantiate a :class:`pandasdmx.api.Request` instance for each data provider you want to access, or
pass a pre-fabricated URL through the ``url`` parameter.
Args:
resource_type(str): the type of resource to be requested. Values must be
one of the items in Request._resources such as 'data', 'dataflow', 'categoryscheme' etc.
It is used for URL construction, not to read the received SDMX file.
Hence, if `fromfile` is given, `resource_type` may be ''.
Defaults to ''.
resource_id(str): the id of the resource to be requested.
It is used for URL construction. Defaults to ''.
agency(str): ID of the agency providing the data or metadata.
Used for URL construction only. It tells the SDMX web service
which agency the requested information originates from. Note that
an SDMX service may provide information from multiple data providers.
may be '' if `fromfile` is given. Not to be confused
with the agency ID passed to :meth:`__init__` which specifies
the SDMX web service to be accessed.
key(str, dict): select columns from a dataset by specifying dimension values.
If type is str, it must conform to the SDMX REST API, i.e. dot-separated dimension values.
If 'key' is of type 'dict', it must map dimension names to allowed dimension values. Two or more
values can be separated by '+' as in the str form. The DSD will be downloaded
and the items are validated against it before downloading the dataset.
params(dict): defines the query part of the URL.
The SDMX web service guidelines (www.sdmx.org) explain the meaning of
permissible parameters. It can be used to restrict the
time range of the data to be delivered (startperiod, endperiod), whether parents, siblings or descendants of the specified
resource should be returned as well (e.g. references='parentsandsiblings'). Sensible defaults
are set automatically
depending on the values of other args such as `resource_type`.
Defaults to {}.
headers(dict): http headers. Given headers will overwrite instance-wide headers passed to the
constructor. Defaults to None, i.e. use defaults
from agency configuration
fromfile(str): path to the file to be loaded instead of
accessing an SDMX web service. Defaults to None. If `fromfile` is
given, args relating to URL construction will be ignored.
tofile(str): file path to write the received SDMX file on the fly. This
is useful if you want to load data offline using
`fromfile` or if you want to open an SDMX file in
an XML editor.
url(str): URL of the resource to download.
If given, any other arguments such as
``resource_type`` or ``resource_id`` are ignored. Default is None.
get_footer_url((int, int)):
tuple of the form (seconds, number_of_attempts). Determines the
behavior in case the received SDMX message has a footer where
one of its lines is a valid URL. ``get_footer_url`` defines how many attempts should be made to
request the resource at that URL after waiting so many seconds before each attempt.
This behavior is useful when requesting large datasets from Eurostat. Other agencies do not seem to
send such footers. Once an attempt to get the resource has been
successful, the original message containing the footer is dismissed and the dataset
is returned. The ``tofile`` argument is propagated. Note that the written file may be
a zip archive. pandaSDMX handles zip archives since version 0.2.1. Defaults to (30, 3).
memcache(str): If given, return Response instance if already in self.cache(dict),
otherwise download resource and cache Response instance.
writer(str): optional custom writer class.
Should inherit from pandasdmx.writer.BaseWriter. Defaults to None,
i.e. one of the included writers is selected as appropriate.
Returns:
pandasdmx.api.Response: instance containing the requested
SDMX Message.
'''
# Try to get resource from memory cache if specified
if memcache in self.cache:
return self.cache[memcache]
if url:
base_url = url
else:
# Construct URL from args unless ``tofile`` is given
# Validate args
agency = agency or self._agencies[self.agency]['id']
# Validate resource if no filename is specified
if not fromfile and resource_type not in self._resources:
raise ValueError(
'resource must be one of {0}'.format(self._resources))
# resource_id: if it is not a str or unicode type,
# but, e.g., an invalid Dataflow Definition,
# extract its ID
if resource_id and not isinstance(resource_id, (str_type, str)):
resource_id = resource_id.id
# If key is a dict, validate items against the DSD
# and construct the key string which becomes part of the URL
# Otherwise, do nothing as key must be a str confirming to the REST
# API spec.
if resource_type == 'data' and isinstance(key, dict):
# select validation method based on agency capabilities
if self._agencies[self.agency].get('supports_series_keys_only'):
key = self._make_key_from_series(resource_id, key)
else:
key = self._make_key_from_dsd(resource_id, key)
# Get http headers from agency config if not given by the caller
if not (fromfile or headers):
# Check for default headers
resource_cfg = self._agencies[self.agency][
'resources'].get(resource_type)
if resource_cfg:
headers = resource_cfg.get('headers') or {}
# Construct URL from the given non-empty substrings.
# if data is requested, omit the agency part. See the query
# examples
if resource_type in ['data', 'categoryscheme']:
agency_id = None
else:
agency_id = agency
if (version is None) and (resource_type != 'data'):
version = 'latest'
# Remove None's and '' first. Then join them to form the base URL.
# Any parameters are appended by remote module.
if self.agency:
parts = [self._agencies[self.agency]['url'],
resource_type,
agency_id,
resource_id, version, key]
base_url = '/'.join(filter(None, parts))
# Set references to sensible defaults
if 'references' not in params:
if resource_type in [
'dataflow', 'datastructure'] and resource_id:
params['references'] = 'all'
elif resource_type == 'categoryscheme':
params['references'] = 'parentsandsiblings'
elif fromfile:
base_url = ''
else:
raise ValueError(
'If `` url`` is not specified, either agency or fromfile must be given.')
# Now get the SDMX message either via http or as local file
logger.info(
'Requesting resource from URL/file %s', (base_url or fromfile))
source, url, resp_headers, status_code = self.client.get(
base_url, params=params, headers=headers, fromfile=fromfile)
if source is None:
raise SDMXException('Server error:', status_code, url)
logger.info(
'Loaded file into memory from URL/file: %s', (url or fromfile))
# write msg to file and unzip it as required, then parse it
with source:
if tofile:
logger.info('Writing to file %s', tofile)
with open(tofile, 'wb') as dest:
source.seek(0)
dest.write(source.read())
source.seek(0)
# handle zip files
if is_zipfile(source):
temp = source
with ZipFile(temp, mode='r') as zf:
info = zf.infolist()[0]
source = zf.open(info)
else:
# undo side effect of is_zipfile
source.seek(0)
# select reader class
if ((fromfile and fromfile.endswith('.json'))
or (self.agency and self._agencies[self.agency]['resources'].get(resource_type)
and self._agencies[self.agency]['resources'][resource_type].get('json'))):
reader_module = import_module('pandasdmx.reader.sdmxjson')
else:
reader_module = import_module('pandasdmx.reader.sdmxml')
reader_cls = reader_module.Reader
msg = reader_cls(self).initialize(source)
# Check for URL in a footer and get the real data if so configured
if get_footer_url and hasattr(msg, 'footer'):
logger.info('Footer found in SDMX message.')
# Retrieve the first URL in the footer, if any
url_l = [
i for i in msg.footer.text if remote.is_url(i)]
if url_l:
# found an URL. Wait and try to request it
footer_url = url_l[0]
seconds, attempts = get_footer_url
logger.info(
'Found URL in footer. Making %i requests, waiting %i seconds in between.', attempts, seconds)
for a in range(attempts):
sleep(seconds)
try:
return self.get(tofile=tofile, url=footer_url, headers=headers)
except Exception as e:
logger.info(
'Attempt #%i raised the following exeption: %s', a, str(e))
# Select default writer
if not writer:
if hasattr(msg, 'data'):
writer = 'pandasdmx.writer.data2pandas'
else:
writer = 'pandasdmx.writer.structure2pd'
r = Response(msg, url, resp_headers, status_code, writer=writer)
# store in memory cache if needed
if memcache and r.status_code == 200:
self.cache[memcache] = r
return r
def _make_key_from_series(self, flow_id, key):
'''
Get all series keys by calling
self.series_keys, and validate
the key(dict) against it. Raises ValueError if
a value does not occur in the respective
set of dimension values. Multiple values per
dimension can be provided as a list or in 'V1+V2' notation.
Return: key(str)
'''
# get all series keys
all_keys = self.series_keys(flow_id)
dim_names = list(all_keys)
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Pre-process key by expanding multiple values as list
key = {k: v.split('+') if '+' in v else v for k, v in key.items()}
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
# First, wrap each single dim value in a list to allow
# uniform treatment of single and multiple dim values.
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# Iterate over the dimensions. If the key dict
# contains an allowed value for the dimension,
# it will become part of the string.
invalid = list(chain.from_iterable((((k, v) for v in vl if v not in all_keys[k].values)
for k, vl in key_l.items())))
if invalid:
raise ValueError("The following dimension values are invalid: {0}".
format(invalid))
# Generate the 'Val1+Val2' notation for multiple dim values and remove the
# lists
for k, v in key_l.items():
key_l[k] = '+'.join(v)
# assemble the key string which goes into the URL
parts = [key_l.get(name, '') for name in dim_names]
return '.'.join(parts)
def preview_data(self, flow_id, key=None, count=True, total=True):
'''
Get keys or number of series for a prospective dataset query allowing for
keys with multiple values per dimension.
It downloads the complete list of series keys for a dataflow rather than using constraints and DSD. This feature is,
however, not supported by all data providers.
ECB and UNSD are known to work.
Args:
flow_id(str): dataflow id
key(dict): optional key mapping dimension names to values or lists of values.
Must have been validated before. It is not checked if key values
are actually valid dimension names and values. Default: {}
count(bool): if True (default), return the number of series
of the dataset designated by flow_id and key. If False,
the actual keys are returned as a pandas DataFrame or dict of dataframes, depending on
the value of 'total'.
total(bool): if True (default), return the aggregate number
of series or a single dataframe (depending on the value of 'count'). If False,
return a dict mapping keys to dataframes of series keys.
E.g., if key={'COUNTRY':'IT+CA+AU'}, the dict will
have 3 items describing the series keys for each country
respectively. If 'count' is True, dict values will be int rather than
PD.DataFrame.
'''
all_keys = self.series_keys(flow_id)
# Handle the special case that no key is provided
if not key:
if count:
return all_keys.shape[0]
else:
return all_keys
# So there is a key specifying at least one dimension value.
# Wrap single values in 1-elem list for uniform treatment
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# order dim_names that are present in the key
dim_names = [k for k in all_keys if k in key]
# Drop columns that are not in the key
key_df = all_keys.loc[:, dim_names]
if total:
# DataFrame with matching series keys
bool_series = reduce(
and_, (key_df.isin(key_l)[col] for col in dim_names))
if count:
return bool_series.value_counts()[True]
else:
return all_keys[bool_series]
else:
# Dict of value combinations as dict keys
key_product = product(*(key_l[k] for k in dim_names))
# Replace key tuples by namedtuples
PartialKey = namedtuple_factory('PartialKey', dim_names)
matches = {PartialKey(k): reduce(and_, (key_df.isin({k1: [v1]
for k1, v1 in zip(dim_names, k)})[col]
for col in dim_names))
for k in key_product}
if not count:
# dict mapping each key to DataFrame with selected key-set
return {k: all_keys[v] for k, v in matches.items()}
else:
# Number of series per key
return {k: v.value_counts()[True] for k, v in matches.items()}
|
dr-leo/pandaSDMX | pandasdmx/api.py | Request._make_key_from_series | python | def _make_key_from_series(self, flow_id, key):
'''
Get all series keys by calling
self.series_keys, and validate
the key(dict) against it. Raises ValueError if
a value does not occur in the respective
set of dimension values. Multiple values per
dimension can be provided as a list or in 'V1+V2' notation.
Return: key(str)
'''
# get all series keys
all_keys = self.series_keys(flow_id)
dim_names = list(all_keys)
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Pre-process key by expanding multiple values as list
key = {k: v.split('+') if '+' in v else v for k, v in key.items()}
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
# First, wrap each single dim value in a list to allow
# uniform treatment of single and multiple dim values.
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# Iterate over the dimensions. If the key dict
# contains an allowed value for the dimension,
# it will become part of the string.
invalid = list(chain.from_iterable((((k, v) for v in vl if v not in all_keys[k].values)
for k, vl in key_l.items())))
if invalid:
raise ValueError("The following dimension values are invalid: {0}".
format(invalid))
# Generate the 'Val1+Val2' notation for multiple dim values and remove the
# lists
for k, v in key_l.items():
key_l[k] = '+'.join(v)
# assemble the key string which goes into the URL
parts = [key_l.get(name, '') for name in dim_names]
return '.'.join(parts) | Get all series keys by calling
self.series_keys, and validate
the key(dict) against it. Raises ValueError if
a value does not occur in the respective
set of dimension values. Multiple values per
dimension can be provided as a list or in 'V1+V2' notation.
Return: key(str) | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/api.py#L450-L493 | null | class Request(object):
"""Get SDMX data and metadata from remote servers or local files.
"""
# Load built-in agency metadata
s = resource_string('pandasdmx', 'agencies.json').decode('utf8')
_agencies = json.loads(s)
del s
@classmethod
def load_agency_profile(cls, source):
'''
Classmethod loading metadata on a data provider. ``source`` must
be a json-formated string or file-like object describing one or more data providers
(URL of the SDMX web API, resource types etc.
The dict ``Request._agencies`` is updated with the metadata from the
source.
Returns None
'''
if not isinstance(source, str_type):
# so it must be a text file
source = source.read()
new_agencies = json.loads(source)
cls._agencies.update(new_agencies)
@classmethod
def list_agencies(cls):
'''
Return a sorted list of valid agency IDs. These can be used to create ``Request`` instances.
'''
return sorted(list(cls._agencies))
_resources = ['dataflow', 'datastructure', 'data', 'categoryscheme',
'codelist', 'conceptscheme']
    @classmethod
    def _make_get_wrappers(cls):
        # Dynamically attach one attribute per supported resource type so
        # that e.g. ``req.data(...)`` and ``req.dataflow(...)`` exist.
        # NOTE(review): assumes ResourceGetter is a descriptor that delegates
        # to :meth:`get` with the resource type pre-filled — confirm in its
        # definition (not visible here).
        for r in cls._resources:
            setattr(cls, r, ResourceGetter(r))
def __init__(self, agency='', cache=None, log_level=None,
**http_cfg):
'''
Set the SDMX agency, and configure http requests for this instance.
Args:
agency(str): identifier of a data provider.
Must be one of the dict keys in Request._agencies such as
'ESTAT', 'ECB', ''GSR' or ''.
An empty string has the effect that the instance can only
load data or metadata from files or a pre-fabricated URL. .
defaults to '', i.e. no agency.
cache(dict): args to be passed on to
``requests_cache.install_cache()``. Default is None (no caching).
log_level(int): set log level for lib-wide logger as set up in pandasdmx.__init__.py.
For details see the docs on the
logging package from the standard lib. Default: None (= do nothing).
**http_cfg: used to configure http requests. E.g., you can
specify proxies, authentication information and more.
See also the docs of the ``requests`` package at
http://www.python-requests.org/en/latest/.
'''
# If needed, generate wrapper properties for get method
if not hasattr(self, 'data'):
self._make_get_wrappers()
self.client = remote.REST(cache, http_cfg)
self.agency = agency.upper()
if log_level:
logging.getLogger('pandasdmx').setLevel(log_level)
@property
def agency(self):
return self._agency
@agency.setter
def agency(self, value):
if value in self._agencies:
self._agency = value
else:
raise ValueError('If given, agency must be one of {0}'.format(
list(self._agencies)))
self.cache = {} # for SDMX messages and other stuff.
    def clear_cache(self):
        # Empty the in-memory cache (SDMX messages, cached series keys) in
        # place, preserving any external references to the dict.
        self.cache.clear()
    @property
    def timeout(self):
        # Convenience accessor for the http client's request timeout setting.
        return self.client.config['timeout']
    @timeout.setter
    def timeout(self, value):
        self.client.config['timeout'] = value
def series_keys(self, flow_id, cache=True):
'''
Get an empty dataset with all possible series keys.
Return a pandas DataFrame. Each
column represents a dimension, each row
a series key of datasets of
the given dataflow.
'''
# Check if requested series keys are already cached
cache_id = 'series_keys_' + flow_id
if cache_id in self.cache:
return self.cache[cache_id]
else:
# download an empty dataset with all available series keys
resp = self.data(flow_id, params={'detail': 'serieskeysonly'})
l = list(s.key for s in resp.data.series)
df = PD.DataFrame(l, columns=l[0]._fields, dtype='category')
if cache:
self.cache[cache_id] = df
return df
def get(self, resource_type='', resource_id='', agency='',
version=None, key='',
params={}, headers={},
fromfile=None, tofile=None, url=None, get_footer_url=(30, 3),
memcache=None, writer=None):
'''get SDMX data or metadata and return it as a :class:`pandasdmx.api.Response` instance.
While 'get' can load any SDMX file (also as zip-file) specified by 'fromfile',
it can only construct URLs for the SDMX service set for this instance.
Hence, you have to instantiate a :class:`pandasdmx.api.Request` instance for each data provider you want to access, or
pass a pre-fabricated URL through the ``url`` parameter.
Args:
resource_type(str): the type of resource to be requested. Values must be
one of the items in Request._resources such as 'data', 'dataflow', 'categoryscheme' etc.
It is used for URL construction, not to read the received SDMX file.
Hence, if `fromfile` is given, `resource_type` may be ''.
Defaults to ''.
resource_id(str): the id of the resource to be requested.
It is used for URL construction. Defaults to ''.
agency(str): ID of the agency providing the data or metadata.
Used for URL construction only. It tells the SDMX web service
which agency the requested information originates from. Note that
an SDMX service may provide information from multiple data providers.
may be '' if `fromfile` is given. Not to be confused
with the agency ID passed to :meth:`__init__` which specifies
the SDMX web service to be accessed.
key(str, dict): select columns from a dataset by specifying dimension values.
If type is str, it must conform to the SDMX REST API, i.e. dot-separated dimension values.
If 'key' is of type 'dict', it must map dimension names to allowed dimension values. Two or more
values can be separated by '+' as in the str form. The DSD will be downloaded
and the items are validated against it before downloading the dataset.
params(dict): defines the query part of the URL.
The SDMX web service guidelines (www.sdmx.org) explain the meaning of
permissible parameters. It can be used to restrict the
time range of the data to be delivered (startperiod, endperiod), whether parents, siblings or descendants of the specified
resource should be returned as well (e.g. references='parentsandsiblings'). Sensible defaults
are set automatically
depending on the values of other args such as `resource_type`.
Defaults to {}.
headers(dict): http headers. Given headers will overwrite instance-wide headers passed to the
constructor. Defaults to None, i.e. use defaults
from agency configuration
fromfile(str): path to the file to be loaded instead of
accessing an SDMX web service. Defaults to None. If `fromfile` is
given, args relating to URL construction will be ignored.
tofile(str): file path to write the received SDMX file on the fly. This
is useful if you want to load data offline using
`fromfile` or if you want to open an SDMX file in
an XML editor.
url(str): URL of the resource to download.
If given, any other arguments such as
``resource_type`` or ``resource_id`` are ignored. Default is None.
get_footer_url((int, int)):
tuple of the form (seconds, number_of_attempts). Determines the
behavior in case the received SDMX message has a footer where
one of its lines is a valid URL. ``get_footer_url`` defines how many attempts should be made to
request the resource at that URL after waiting so many seconds before each attempt.
This behavior is useful when requesting large datasets from Eurostat. Other agencies do not seem to
send such footers. Once an attempt to get the resource has been
successful, the original message containing the footer is dismissed and the dataset
is returned. The ``tofile`` argument is propagated. Note that the written file may be
a zip archive. pandaSDMX handles zip archives since version 0.2.1. Defaults to (30, 3).
memcache(str): If given, return Response instance if already in self.cache(dict),
otherwise download resource and cache Response instance.
writer(str): optional custom writer class.
Should inherit from pandasdmx.writer.BaseWriter. Defaults to None,
i.e. one of the included writers is selected as appropriate.
Returns:
pandasdmx.api.Response: instance containing the requested
SDMX Message.
'''
# Try to get resource from memory cache if specified
if memcache in self.cache:
return self.cache[memcache]
if url:
base_url = url
else:
# Construct URL from args unless ``tofile`` is given
# Validate args
agency = agency or self._agencies[self.agency]['id']
# Validate resource if no filename is specified
if not fromfile and resource_type not in self._resources:
raise ValueError(
'resource must be one of {0}'.format(self._resources))
# resource_id: if it is not a str or unicode type,
# but, e.g., an invalid Dataflow Definition,
# extract its ID
if resource_id and not isinstance(resource_id, (str_type, str)):
resource_id = resource_id.id
# If key is a dict, validate items against the DSD
# and construct the key string which becomes part of the URL
# Otherwise, do nothing as key must be a str confirming to the REST
# API spec.
if resource_type == 'data' and isinstance(key, dict):
# select validation method based on agency capabilities
if self._agencies[self.agency].get('supports_series_keys_only'):
key = self._make_key_from_series(resource_id, key)
else:
key = self._make_key_from_dsd(resource_id, key)
# Get http headers from agency config if not given by the caller
if not (fromfile or headers):
# Check for default headers
resource_cfg = self._agencies[self.agency][
'resources'].get(resource_type)
if resource_cfg:
headers = resource_cfg.get('headers') or {}
# Construct URL from the given non-empty substrings.
# if data is requested, omit the agency part. See the query
# examples
if resource_type in ['data', 'categoryscheme']:
agency_id = None
else:
agency_id = agency
if (version is None) and (resource_type != 'data'):
version = 'latest'
# Remove None's and '' first. Then join them to form the base URL.
# Any parameters are appended by remote module.
if self.agency:
parts = [self._agencies[self.agency]['url'],
resource_type,
agency_id,
resource_id, version, key]
base_url = '/'.join(filter(None, parts))
# Set references to sensible defaults
if 'references' not in params:
if resource_type in [
'dataflow', 'datastructure'] and resource_id:
params['references'] = 'all'
elif resource_type == 'categoryscheme':
params['references'] = 'parentsandsiblings'
elif fromfile:
base_url = ''
else:
raise ValueError(
'If `` url`` is not specified, either agency or fromfile must be given.')
# Now get the SDMX message either via http or as local file
logger.info(
'Requesting resource from URL/file %s', (base_url or fromfile))
source, url, resp_headers, status_code = self.client.get(
base_url, params=params, headers=headers, fromfile=fromfile)
if source is None:
raise SDMXException('Server error:', status_code, url)
logger.info(
'Loaded file into memory from URL/file: %s', (url or fromfile))
# write msg to file and unzip it as required, then parse it
with source:
if tofile:
logger.info('Writing to file %s', tofile)
with open(tofile, 'wb') as dest:
source.seek(0)
dest.write(source.read())
source.seek(0)
# handle zip files
if is_zipfile(source):
temp = source
with ZipFile(temp, mode='r') as zf:
info = zf.infolist()[0]
source = zf.open(info)
else:
# undo side effect of is_zipfile
source.seek(0)
# select reader class
if ((fromfile and fromfile.endswith('.json'))
or (self.agency and self._agencies[self.agency]['resources'].get(resource_type)
and self._agencies[self.agency]['resources'][resource_type].get('json'))):
reader_module = import_module('pandasdmx.reader.sdmxjson')
else:
reader_module = import_module('pandasdmx.reader.sdmxml')
reader_cls = reader_module.Reader
msg = reader_cls(self).initialize(source)
# Check for URL in a footer and get the real data if so configured
if get_footer_url and hasattr(msg, 'footer'):
logger.info('Footer found in SDMX message.')
# Retrieve the first URL in the footer, if any
url_l = [
i for i in msg.footer.text if remote.is_url(i)]
if url_l:
# found an URL. Wait and try to request it
footer_url = url_l[0]
seconds, attempts = get_footer_url
logger.info(
'Found URL in footer. Making %i requests, waiting %i seconds in between.', attempts, seconds)
for a in range(attempts):
sleep(seconds)
try:
return self.get(tofile=tofile, url=footer_url, headers=headers)
except Exception as e:
logger.info(
'Attempt #%i raised the following exeption: %s', a, str(e))
# Select default writer
if not writer:
if hasattr(msg, 'data'):
writer = 'pandasdmx.writer.data2pandas'
else:
writer = 'pandasdmx.writer.structure2pd'
r = Response(msg, url, resp_headers, status_code, writer=writer)
# store in memory cache if needed
if memcache and r.status_code == 200:
self.cache[memcache] = r
return r
def _make_key_from_dsd(self, flow_id, key):
'''
Download the dataflow def. and DSD and validate
key(dict) against it.
Return: key(str)
'''
# get the dataflow and the DSD ID
dataflow = self.get('dataflow', flow_id,
memcache='dataflow' + flow_id)
dsd_id = dataflow.msg.dataflow[flow_id].structure.id
dsd_resp = self.get('datastructure', dsd_id,
memcache='datastructure' + dsd_id)
dsd = dsd_resp.msg.datastructure[dsd_id]
# Extract dimensions excluding the dimension at observation (time, time-period)
# as we are only interested in dimensions for columns, not rows.
dimensions = [d for d in dsd.dimensions.aslist() if d.id not in
['TIME', 'TIME_PERIOD']]
dim_names = [d.id for d in dimensions]
# Retrieve any ContentConstraint
try:
constraint_l = [c for c in dataflow.constraint.aslist()
if c.constraint_attachment.id == flow_id]
if constraint_l:
constraint = constraint_l[0]
except:
constraint = None
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key.keys()
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
parts = []
# Iterate over the dimensions. If the key dict
# contains a value for the dimension, append it to the 'parts' list. Otherwise
# append ''. Then join the parts to form the dotted str.
for d in dimensions:
try:
values = key[d.id]
values_l = values.split('+')
codelist = d.local_repr.enum
codes = codelist.keys()
invalid = [v for v in values_l if v not in codes]
if invalid:
# ToDo: attach codelist to exception.
raise ValueError("'{0}' is not in codelist for dimension '{1}: {2}'".
format(invalid, d.id, codes))
# Check if values are in Contentconstraint if present
if constraint:
try:
invalid = [
v for v in values_l if (d.id, v) not in constraint]
if invalid:
raise ValueError("'{0}' out of content_constraint for '{1}'.".
format(invalid, d.id))
except NotImplementedError:
pass
part = values
except KeyError:
part = ''
parts.append(part)
return '.'.join(parts)
    def preview_data(self, flow_id, key=None, count=True, total=True):
        '''
        Get keys or number of series for a prospective dataset query
        allowing for keys with multiple values per dimension.

        It downloads the complete list of series keys for a dataflow rather
        than using constraints and DSD. This feature is, however, not
        supported by all data providers. ECB and UNSD are known to work.

        Args:
            flow_id(str): dataflow id
            key(dict): optional key mapping dimension names to values or
                lists of values. Must have been validated before; it is not
                checked that key values are actually valid dimension names
                and values. Default: None (no filtering).
            count(bool): if True (default), return the number of series of
                the dataset designated by flow_id and key. If False, the
                actual keys are returned as a pandas DataFrame or dict of
                DataFrames, depending on the value of 'total'.
            total(bool): if True (default), return the aggregate number of
                series or a single DataFrame (depending on 'count'). If
                False, return a dict mapping keys to DataFrames of series
                keys. E.g., if key={'COUNTRY': 'IT+CA+AU'}, the dict will
                have 3 items describing the series keys for each country
                respectively. If 'count' is True, dict values will be int
                rather than PD.DataFrame.
        '''
        all_keys = self.series_keys(flow_id)
        # Handle the special case that no key is provided
        if not key:
            if count:
                return all_keys.shape[0]
            else:
                return all_keys
        # So there is a key specifying at least one dimension value.
        # Wrap single values in 1-elem list for uniform treatment
        key_l = {k: [v] if isinstance(v, str_type) else v
                 for k, v in key.items()}
        # order dim_names that are present in the key
        dim_names = [k for k in all_keys if k in key]
        # Drop columns that are not in the key
        key_df = all_keys.loc[:, dim_names]
        if total:
            # Boolean mask: AND together per-column membership tests so a
            # row matches only if every keyed dimension value is allowed.
            bool_series = reduce(
                and_, (key_df.isin(key_l)[col] for col in dim_names))
            if count:
                # NOTE(review): value_counts()[True] raises KeyError when no
                # series matches — confirm whether 0 should be returned.
                return bool_series.value_counts()[True]
            else:
                return all_keys[bool_series]
        else:
            # Cartesian product of the per-dimension value lists: one entry
            # per concrete single-valued key combination.
            key_product = product(*(key_l[k] for k in dim_names))
            # Replace key tuples by namedtuples
            # NOTE(review): PartialKey is called with the whole tuple as one
            # positional arg — assumes namedtuple_factory accepts that;
            # confirm in pandasdmx.utils.
            PartialKey = namedtuple_factory('PartialKey', dim_names)
            matches = {PartialKey(k): reduce(and_, (key_df.isin({k1: [v1]
                                                                 for k1, v1 in zip(dim_names, k)})[col]
                                                    for col in dim_names))
                       for k in key_product}
            if not count:
                # dict mapping each key to DataFrame with selected key-set
                return {k: all_keys[v] for k, v in matches.items()}
            else:
                # Number of series per key
                return {k: v.value_counts()[True] for k, v in matches.items()}
|
dr-leo/pandaSDMX | pandasdmx/api.py | Request.preview_data | python | def preview_data(self, flow_id, key=None, count=True, total=True):
'''
Get keys or number of series for a prospective dataset query allowing for
keys with multiple values per dimension.
It downloads the complete list of series keys for a dataflow rather than using constraints and DSD. This feature is,
however, not supported by all data providers.
ECB and UNSD are known to work.
Args:
flow_id(str): dataflow id
key(dict): optional key mapping dimension names to values or lists of values.
Must have been validated before. It is not checked if key values
are actually valid dimension names and values. Default: {}
count(bool): if True (default), return the number of series
of the dataset designated by flow_id and key. If False,
the actual keys are returned as a pandas DataFrame or dict of dataframes, depending on
the value of 'total'.
total(bool): if True (default), return the aggregate number
of series or a single dataframe (depending on the value of 'count'). If False,
return a dict mapping keys to dataframes of series keys.
E.g., if key={'COUNTRY':'IT+CA+AU'}, the dict will
have 3 items describing the series keys for each country
respectively. If 'count' is True, dict values will be int rather than
PD.DataFrame.
'''
all_keys = self.series_keys(flow_id)
# Handle the special case that no key is provided
if not key:
if count:
return all_keys.shape[0]
else:
return all_keys
# So there is a key specifying at least one dimension value.
# Wrap single values in 1-elem list for uniform treatment
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# order dim_names that are present in the key
dim_names = [k for k in all_keys if k in key]
# Drop columns that are not in the key
key_df = all_keys.loc[:, dim_names]
if total:
# DataFrame with matching series keys
bool_series = reduce(
and_, (key_df.isin(key_l)[col] for col in dim_names))
if count:
return bool_series.value_counts()[True]
else:
return all_keys[bool_series]
else:
# Dict of value combinations as dict keys
key_product = product(*(key_l[k] for k in dim_names))
# Replace key tuples by namedtuples
PartialKey = namedtuple_factory('PartialKey', dim_names)
matches = {PartialKey(k): reduce(and_, (key_df.isin({k1: [v1]
for k1, v1 in zip(dim_names, k)})[col]
for col in dim_names))
for k in key_product}
if not count:
# dict mapping each key to DataFrame with selected key-set
return {k: all_keys[v] for k, v in matches.items()}
else:
# Number of series per key
return {k: v.value_counts()[True] for k, v in matches.items()} | Get keys or number of series for a prospective dataset query allowing for
keys with multiple values per dimension.
It downloads the complete list of series keys for a dataflow rather than using constraints and DSD. This feature is,
however, not supported by all data providers.
ECB and UNSD are known to work.
Args:
flow_id(str): dataflow id
key(dict): optional key mapping dimension names to values or lists of values.
Must have been validated before. It is not checked if key values
are actually valid dimension names and values. Default: {}
count(bool): if True (default), return the number of series
of the dataset designated by flow_id and key. If False,
the actual keys are returned as a pandas DataFrame or dict of dataframes, depending on
the value of 'total'.
total(bool): if True (default), return the aggregate number
of series or a single dataframe (depending on the value of 'count'). If False,
return a dict mapping keys to dataframes of series keys.
E.g., if key={'COUNTRY':'IT+CA+AU'}, the dict will
have 3 items describing the series keys for each country
respectively. If 'count' is True, dict values will be int rather than
PD.DataFrame. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/api.py#L495-L564 | [
"def series_keys(self, flow_id, cache=True):\n '''\n Get an empty dataset with all possible series keys.\n\n Return a pandas DataFrame. Each\n column represents a dimension, each row\n a series key of datasets of \n the given dataflow.\n '''\n # Check if requested series keys are already cac... | class Request(object):
"""Get SDMX data and metadata from remote servers or local files.
"""
# Load built-in agency metadata
s = resource_string('pandasdmx', 'agencies.json').decode('utf8')
_agencies = json.loads(s)
del s
@classmethod
def load_agency_profile(cls, source):
'''
Classmethod loading metadata on a data provider. ``source`` must
be a json-formated string or file-like object describing one or more data providers
(URL of the SDMX web API, resource types etc.
The dict ``Request._agencies`` is updated with the metadata from the
source.
Returns None
'''
if not isinstance(source, str_type):
# so it must be a text file
source = source.read()
new_agencies = json.loads(source)
cls._agencies.update(new_agencies)
@classmethod
def list_agencies(cls):
'''
Return a sorted list of valid agency IDs. These can be used to create ``Request`` instances.
'''
return sorted(list(cls._agencies))
_resources = ['dataflow', 'datastructure', 'data', 'categoryscheme',
'codelist', 'conceptscheme']
@classmethod
def _make_get_wrappers(cls):
for r in cls._resources:
setattr(cls, r, ResourceGetter(r))
def __init__(self, agency='', cache=None, log_level=None,
**http_cfg):
'''
Set the SDMX agency, and configure http requests for this instance.
Args:
agency(str): identifier of a data provider.
Must be one of the dict keys in Request._agencies such as
'ESTAT', 'ECB', ''GSR' or ''.
An empty string has the effect that the instance can only
load data or metadata from files or a pre-fabricated URL. .
defaults to '', i.e. no agency.
cache(dict): args to be passed on to
``requests_cache.install_cache()``. Default is None (no caching).
log_level(int): set log level for lib-wide logger as set up in pandasdmx.__init__.py.
For details see the docs on the
logging package from the standard lib. Default: None (= do nothing).
**http_cfg: used to configure http requests. E.g., you can
specify proxies, authentication information and more.
See also the docs of the ``requests`` package at
http://www.python-requests.org/en/latest/.
'''
# If needed, generate wrapper properties for get method
if not hasattr(self, 'data'):
self._make_get_wrappers()
self.client = remote.REST(cache, http_cfg)
self.agency = agency.upper()
if log_level:
logging.getLogger('pandasdmx').setLevel(log_level)
@property
def agency(self):
return self._agency
@agency.setter
def agency(self, value):
if value in self._agencies:
self._agency = value
else:
raise ValueError('If given, agency must be one of {0}'.format(
list(self._agencies)))
self.cache = {} # for SDMX messages and other stuff.
def clear_cache(self):
self.cache.clear()
@property
def timeout(self):
return self.client.config['timeout']
@timeout.setter
def timeout(self, value):
self.client.config['timeout'] = value
def series_keys(self, flow_id, cache=True):
'''
Get an empty dataset with all possible series keys.
Return a pandas DataFrame. Each
column represents a dimension, each row
a series key of datasets of
the given dataflow.
'''
# Check if requested series keys are already cached
cache_id = 'series_keys_' + flow_id
if cache_id in self.cache:
return self.cache[cache_id]
else:
# download an empty dataset with all available series keys
resp = self.data(flow_id, params={'detail': 'serieskeysonly'})
l = list(s.key for s in resp.data.series)
df = PD.DataFrame(l, columns=l[0]._fields, dtype='category')
if cache:
self.cache[cache_id] = df
return df
def get(self, resource_type='', resource_id='', agency='',
version=None, key='',
params={}, headers={},
fromfile=None, tofile=None, url=None, get_footer_url=(30, 3),
memcache=None, writer=None):
'''get SDMX data or metadata and return it as a :class:`pandasdmx.api.Response` instance.
While 'get' can load any SDMX file (also as zip-file) specified by 'fromfile',
it can only construct URLs for the SDMX service set for this instance.
Hence, you have to instantiate a :class:`pandasdmx.api.Request` instance for each data provider you want to access, or
pass a pre-fabricated URL through the ``url`` parameter.
Args:
resource_type(str): the type of resource to be requested. Values must be
one of the items in Request._resources such as 'data', 'dataflow', 'categoryscheme' etc.
It is used for URL construction, not to read the received SDMX file.
Hence, if `fromfile` is given, `resource_type` may be ''.
Defaults to ''.
resource_id(str): the id of the resource to be requested.
It is used for URL construction. Defaults to ''.
agency(str): ID of the agency providing the data or metadata.
Used for URL construction only. It tells the SDMX web service
which agency the requested information originates from. Note that
an SDMX service may provide information from multiple data providers.
may be '' if `fromfile` is given. Not to be confused
with the agency ID passed to :meth:`__init__` which specifies
the SDMX web service to be accessed.
key(str, dict): select columns from a dataset by specifying dimension values.
If type is str, it must conform to the SDMX REST API, i.e. dot-separated dimension values.
If 'key' is of type 'dict', it must map dimension names to allowed dimension values. Two or more
values can be separated by '+' as in the str form. The DSD will be downloaded
and the items are validated against it before downloading the dataset.
params(dict): defines the query part of the URL.
The SDMX web service guidelines (www.sdmx.org) explain the meaning of
permissible parameters. It can be used to restrict the
time range of the data to be delivered (startperiod, endperiod), whether parents, siblings or descendants of the specified
resource should be returned as well (e.g. references='parentsandsiblings'). Sensible defaults
are set automatically
depending on the values of other args such as `resource_type`.
Defaults to {}.
headers(dict): http headers. Given headers will overwrite instance-wide headers passed to the
constructor. Defaults to None, i.e. use defaults
from agency configuration
fromfile(str): path to the file to be loaded instead of
accessing an SDMX web service. Defaults to None. If `fromfile` is
given, args relating to URL construction will be ignored.
tofile(str): file path to write the received SDMX file on the fly. This
is useful if you want to load data offline using
`fromfile` or if you want to open an SDMX file in
an XML editor.
url(str): URL of the resource to download.
If given, any other arguments such as
``resource_type`` or ``resource_id`` are ignored. Default is None.
get_footer_url((int, int)):
tuple of the form (seconds, number_of_attempts). Determines the
behavior in case the received SDMX message has a footer where
one of its lines is a valid URL. ``get_footer_url`` defines how many attempts should be made to
request the resource at that URL after waiting so many seconds before each attempt.
This behavior is useful when requesting large datasets from Eurostat. Other agencies do not seem to
send such footers. Once an attempt to get the resource has been
successful, the original message containing the footer is dismissed and the dataset
is returned. The ``tofile`` argument is propagated. Note that the written file may be
a zip archive. pandaSDMX handles zip archives since version 0.2.1. Defaults to (30, 3).
memcache(str): If given, return Response instance if already in self.cache(dict),
otherwise download resource and cache Response instance.
writer(str): optional custom writer class.
Should inherit from pandasdmx.writer.BaseWriter. Defaults to None,
i.e. one of the included writers is selected as appropriate.
Returns:
pandasdmx.api.Response: instance containing the requested
SDMX Message.
'''
# Try to get resource from memory cache if specified
if memcache in self.cache:
return self.cache[memcache]
if url:
base_url = url
else:
# Construct URL from args unless ``tofile`` is given
# Validate args
agency = agency or self._agencies[self.agency]['id']
# Validate resource if no filename is specified
if not fromfile and resource_type not in self._resources:
raise ValueError(
'resource must be one of {0}'.format(self._resources))
# resource_id: if it is not a str or unicode type,
# but, e.g., an invalid Dataflow Definition,
# extract its ID
if resource_id and not isinstance(resource_id, (str_type, str)):
resource_id = resource_id.id
# If key is a dict, validate items against the DSD
# and construct the key string which becomes part of the URL
# Otherwise, do nothing as key must be a str confirming to the REST
# API spec.
if resource_type == 'data' and isinstance(key, dict):
# select validation method based on agency capabilities
if self._agencies[self.agency].get('supports_series_keys_only'):
key = self._make_key_from_series(resource_id, key)
else:
key = self._make_key_from_dsd(resource_id, key)
# Get http headers from agency config if not given by the caller
if not (fromfile or headers):
# Check for default headers
resource_cfg = self._agencies[self.agency][
'resources'].get(resource_type)
if resource_cfg:
headers = resource_cfg.get('headers') or {}
# Construct URL from the given non-empty substrings.
# if data is requested, omit the agency part. See the query
# examples
if resource_type in ['data', 'categoryscheme']:
agency_id = None
else:
agency_id = agency
if (version is None) and (resource_type != 'data'):
version = 'latest'
# Remove None's and '' first. Then join them to form the base URL.
# Any parameters are appended by remote module.
if self.agency:
parts = [self._agencies[self.agency]['url'],
resource_type,
agency_id,
resource_id, version, key]
base_url = '/'.join(filter(None, parts))
# Set references to sensible defaults
if 'references' not in params:
if resource_type in [
'dataflow', 'datastructure'] and resource_id:
params['references'] = 'all'
elif resource_type == 'categoryscheme':
params['references'] = 'parentsandsiblings'
elif fromfile:
base_url = ''
else:
raise ValueError(
'If `` url`` is not specified, either agency or fromfile must be given.')
# Now get the SDMX message either via http or as local file
logger.info(
'Requesting resource from URL/file %s', (base_url or fromfile))
source, url, resp_headers, status_code = self.client.get(
base_url, params=params, headers=headers, fromfile=fromfile)
if source is None:
raise SDMXException('Server error:', status_code, url)
logger.info(
'Loaded file into memory from URL/file: %s', (url or fromfile))
# write msg to file and unzip it as required, then parse it
with source:
if tofile:
logger.info('Writing to file %s', tofile)
with open(tofile, 'wb') as dest:
source.seek(0)
dest.write(source.read())
source.seek(0)
# handle zip files
if is_zipfile(source):
temp = source
with ZipFile(temp, mode='r') as zf:
info = zf.infolist()[0]
source = zf.open(info)
else:
# undo side effect of is_zipfile
source.seek(0)
# select reader class
if ((fromfile and fromfile.endswith('.json'))
or (self.agency and self._agencies[self.agency]['resources'].get(resource_type)
and self._agencies[self.agency]['resources'][resource_type].get('json'))):
reader_module = import_module('pandasdmx.reader.sdmxjson')
else:
reader_module = import_module('pandasdmx.reader.sdmxml')
reader_cls = reader_module.Reader
msg = reader_cls(self).initialize(source)
# Check for URL in a footer and get the real data if so configured
if get_footer_url and hasattr(msg, 'footer'):
logger.info('Footer found in SDMX message.')
# Retrieve the first URL in the footer, if any
url_l = [
i for i in msg.footer.text if remote.is_url(i)]
if url_l:
# found an URL. Wait and try to request it
footer_url = url_l[0]
seconds, attempts = get_footer_url
logger.info(
'Found URL in footer. Making %i requests, waiting %i seconds in between.', attempts, seconds)
for a in range(attempts):
sleep(seconds)
try:
return self.get(tofile=tofile, url=footer_url, headers=headers)
except Exception as e:
logger.info(
'Attempt #%i raised the following exeption: %s', a, str(e))
# Select default writer
if not writer:
if hasattr(msg, 'data'):
writer = 'pandasdmx.writer.data2pandas'
else:
writer = 'pandasdmx.writer.structure2pd'
r = Response(msg, url, resp_headers, status_code, writer=writer)
# store in memory cache if needed
if memcache and r.status_code == 200:
self.cache[memcache] = r
return r
def _make_key_from_dsd(self, flow_id, key):
'''
Download the dataflow def. and DSD and validate
key(dict) against it.
Return: key(str)
'''
# get the dataflow and the DSD ID
dataflow = self.get('dataflow', flow_id,
memcache='dataflow' + flow_id)
dsd_id = dataflow.msg.dataflow[flow_id].structure.id
dsd_resp = self.get('datastructure', dsd_id,
memcache='datastructure' + dsd_id)
dsd = dsd_resp.msg.datastructure[dsd_id]
# Extract dimensions excluding the dimension at observation (time, time-period)
# as we are only interested in dimensions for columns, not rows.
dimensions = [d for d in dsd.dimensions.aslist() if d.id not in
['TIME', 'TIME_PERIOD']]
dim_names = [d.id for d in dimensions]
# Retrieve any ContentConstraint
try:
constraint_l = [c for c in dataflow.constraint.aslist()
if c.constraint_attachment.id == flow_id]
if constraint_l:
constraint = constraint_l[0]
except:
constraint = None
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key.keys()
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
parts = []
# Iterate over the dimensions. If the key dict
# contains a value for the dimension, append it to the 'parts' list. Otherwise
# append ''. Then join the parts to form the dotted str.
for d in dimensions:
try:
values = key[d.id]
values_l = values.split('+')
codelist = d.local_repr.enum
codes = codelist.keys()
invalid = [v for v in values_l if v not in codes]
if invalid:
# ToDo: attach codelist to exception.
raise ValueError("'{0}' is not in codelist for dimension '{1}: {2}'".
format(invalid, d.id, codes))
# Check if values are in Contentconstraint if present
if constraint:
try:
invalid = [
v for v in values_l if (d.id, v) not in constraint]
if invalid:
raise ValueError("'{0}' out of content_constraint for '{1}'.".
format(invalid, d.id))
except NotImplementedError:
pass
part = values
except KeyError:
part = ''
parts.append(part)
return '.'.join(parts)
def _make_key_from_series(self, flow_id, key):
'''
Get all series keys by calling
self.series_keys, and validate
the key(dict) against it. Raises ValueError if
a value does not occur in the respective
set of dimension values. Multiple values per
dimension can be provided as a list or in 'V1+V2' notation.
Return: key(str)
'''
# get all series keys
all_keys = self.series_keys(flow_id)
dim_names = list(all_keys)
# Validate the key dict
# First, check correctness of dimension names
invalid = [d for d in key
if d not in dim_names]
if invalid:
raise ValueError(
'Invalid dimension name {0}, allowed are: {1}'.format(invalid, dim_names))
# Pre-process key by expanding multiple values as list
key = {k: v.split('+') if '+' in v else v for k, v in key.items()}
# Check for each dimension name if values are correct and construct
# string of the form 'value1.value2.value3+value4' etc.
# First, wrap each single dim value in a list to allow
# uniform treatment of single and multiple dim values.
key_l = {k: [v] if isinstance(v, str_type) else v
for k, v in key.items()}
# Iterate over the dimensions. If the key dict
# contains an allowed value for the dimension,
# it will become part of the string.
invalid = list(chain.from_iterable((((k, v) for v in vl if v not in all_keys[k].values)
for k, vl in key_l.items())))
if invalid:
raise ValueError("The following dimension values are invalid: {0}".
format(invalid))
# Generate the 'Val1+Val2' notation for multiple dim values and remove the
# lists
for k, v in key_l.items():
key_l[k] = '+'.join(v)
# assemble the key string which goes into the URL
parts = [key_l.get(name, '') for name in dim_names]
return '.'.join(parts)
|
dr-leo/pandaSDMX | pandasdmx/api.py | Response.write | python | def write(self, source=None, **kwargs):
'''Wrappe r to call the writer's write method if present.
Args:
source(pandasdmx.model.Message, iterable): stuff to be written.
If a :class:`pandasdmx.model.Message` is given, the writer
itself must determine what to write unless specified in the
keyword arguments. If an iterable is given,
the writer should write each item. Keyword arguments may
specify what to do with the output depending on the writer's API. Defaults to self.msg.
Returns:
type: anything the writer returns.
'''
if not source:
source = self.msg
return self._writer.write(source=source, **kwargs) | Wrappe r to call the writer's write method if present.
Args:
source(pandasdmx.model.Message, iterable): stuff to be written.
If a :class:`pandasdmx.model.Message` is given, the writer
itself must determine what to write unless specified in the
keyword arguments. If an iterable is given,
the writer should write each item. Keyword arguments may
specify what to do with the output depending on the writer's API. Defaults to self.msg.
Returns:
type: anything the writer returns. | train | https://github.com/dr-leo/pandaSDMX/blob/71dd81ebb0d5169e5adcb8b52d516573d193f2d6/pandasdmx/api.py#L618-L635 | null | class Response(object):
'''Container class for SDMX messages.
It is instantiated by .
Attributes:
msg(pandasdmx.model.Message): a pythonic representation
of the SDMX message
status_code(int): the status code from the http response, if any
url(str): the URL, if any, that was sent to the SDMX server
headers(dict): http response headers returned by ''requests''
Methods:
write: wrapper around the writer's write method.
Arguments are propagated to the writer.
'''
def __init__(self, msg, url, headers, status_code, writer=None):
'''
Set the main attributes and instantiate the writer if given.
Args:
msg(pandasdmx.model.Message): the SDMX message
url(str): the URL, if any, that had been sent to the SDMX server
headers(dict): http headers
status_code(int): the status code returned by the server
writer(str): the module path for the writer class
'''
self.msg = msg
self.url = url
self.http_headers = headers
self.status_code = status_code
self._init_writer(writer)
def __getattr__(self, name):
'''
Make Message attributes directly readable from Response instance
'''
return getattr(self.msg, name)
def _init_writer(self, writer):
# Initialize the writer if given
if writer:
writer_module = import_module(writer)
writer_cls = writer_module.Writer
self._writer = writer_cls(self.msg)
else:
self._writer = None
def write_source(self, filename):
'''
write xml file by calling the 'write' method of lxml root element.
Useful to save the xml source file for offline use.
Similar to passing `tofile` arg to :meth:`Request.get`
Args:
filename(str): name/path of target file
Returns:
whatever the LXML deserializer returns.
'''
return self.msg._reader.write_source(filename)
|
aaren/notedown | notedown/contentsmanager.py | NotedownContentsManager._read_notebook | python | def _read_notebook(self, os_path, as_version=4):
with self.open(os_path, 'r', encoding='utf-8') as f:
try:
if ftdetect(os_path) == 'notebook':
return nbformat.read(f, as_version=as_version)
elif ftdetect(os_path) == 'markdown':
nbjson = convert(os_path,
informat='markdown',
outformat='notebook')
return nbformat.reads(nbjson, as_version=as_version)
except Exception as e:
raise web.HTTPError(
400,
u"Unreadable Notebook: %s %r" % (os_path, e),
) | Read a notebook from an os path. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/contentsmanager.py#L35-L50 | null | class NotedownContentsManager(FileContentsManager):
"""Subclass the IPython file manager to use markdown
as the storage format for notebooks.
Intercepts the notebook before read and write to determine the
storage format from the file extension (_read_notebook and
_save_notebook).
We have to override the get method to treat .md as a notebook
file extension. This is the only change to that method.
To use, add the following line to ipython_notebook_config.py:
c.NotebookApp.contents_manager_class = 'notedown.NotedownContentsManager'
Now markdown notebooks can be opened and edited in the browser!
"""
strip_outputs = False
def _save_notebook(self, os_path, nb):
"""Save a notebook to an os_path."""
with self.atomic_writing(os_path, encoding='utf-8') as f:
if ftdetect(os_path) == 'notebook':
nbformat.write(nb, f, version=nbformat.NO_CONVERT)
elif ftdetect(os_path) == 'markdown':
nbjson = nbformat.writes(nb, version=nbformat.NO_CONVERT)
markdown = convert(nbjson,
informat='notebook',
outformat='markdown',
strip_outputs=self.strip_outputs)
f.write(markdown)
def get(self, path, content=True, type=None, format=None):
""" Takes a path for an entity and returns its model
Parameters
----------
path : str
the API path that describes the relative path for the target
content : bool
Whether to include the contents in the reply
type : str, optional
The requested type - 'file', 'notebook', or 'directory'.
Will raise HTTPError 400 if the content doesn't match.
format : str, optional
The requested format for file contents. 'text' or 'base64'.
Ignored if this returns a notebook or directory model.
Returns
-------
model : dict
the contents model. If content=True, returns the contents
of the file or directory as well.
"""
path = path.strip('/')
if not self.exists(path):
raise web.HTTPError(404, u'No such file or directory: %s' % path)
os_path = self._get_os_path(path)
extension = ('.ipynb', '.md')
if os.path.isdir(os_path):
if type not in (None, 'directory'):
raise web.HTTPError(400,
u'%s is a directory, not a %s' % (path,
type),
reason='bad type')
model = self._dir_model(path, content=content)
elif type == 'notebook' or (type is None and path.endswith(extension)):
model = self._notebook_model(path, content=content)
else:
if type == 'directory':
raise web.HTTPError(400,
u'%s is not a directory' % path,
reason='bad type')
model = self._file_model(path, content=content, format=format)
return model
|
aaren/notedown | notedown/contentsmanager.py | NotedownContentsManager._save_notebook | python | def _save_notebook(self, os_path, nb):
with self.atomic_writing(os_path, encoding='utf-8') as f:
if ftdetect(os_path) == 'notebook':
nbformat.write(nb, f, version=nbformat.NO_CONVERT)
elif ftdetect(os_path) == 'markdown':
nbjson = nbformat.writes(nb, version=nbformat.NO_CONVERT)
markdown = convert(nbjson,
informat='notebook',
outformat='markdown',
strip_outputs=self.strip_outputs)
f.write(markdown) | Save a notebook to an os_path. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/contentsmanager.py#L52-L63 | null | class NotedownContentsManager(FileContentsManager):
"""Subclass the IPython file manager to use markdown
as the storage format for notebooks.
Intercepts the notebook before read and write to determine the
storage format from the file extension (_read_notebook and
_save_notebook).
We have to override the get method to treat .md as a notebook
file extension. This is the only change to that method.
To use, add the following line to ipython_notebook_config.py:
c.NotebookApp.contents_manager_class = 'notedown.NotedownContentsManager'
Now markdown notebooks can be opened and edited in the browser!
"""
strip_outputs = False
def _read_notebook(self, os_path, as_version=4):
"""Read a notebook from an os path."""
with self.open(os_path, 'r', encoding='utf-8') as f:
try:
if ftdetect(os_path) == 'notebook':
return nbformat.read(f, as_version=as_version)
elif ftdetect(os_path) == 'markdown':
nbjson = convert(os_path,
informat='markdown',
outformat='notebook')
return nbformat.reads(nbjson, as_version=as_version)
except Exception as e:
raise web.HTTPError(
400,
u"Unreadable Notebook: %s %r" % (os_path, e),
)
def get(self, path, content=True, type=None, format=None):
""" Takes a path for an entity and returns its model
Parameters
----------
path : str
the API path that describes the relative path for the target
content : bool
Whether to include the contents in the reply
type : str, optional
The requested type - 'file', 'notebook', or 'directory'.
Will raise HTTPError 400 if the content doesn't match.
format : str, optional
The requested format for file contents. 'text' or 'base64'.
Ignored if this returns a notebook or directory model.
Returns
-------
model : dict
the contents model. If content=True, returns the contents
of the file or directory as well.
"""
path = path.strip('/')
if not self.exists(path):
raise web.HTTPError(404, u'No such file or directory: %s' % path)
os_path = self._get_os_path(path)
extension = ('.ipynb', '.md')
if os.path.isdir(os_path):
if type not in (None, 'directory'):
raise web.HTTPError(400,
u'%s is a directory, not a %s' % (path,
type),
reason='bad type')
model = self._dir_model(path, content=content)
elif type == 'notebook' or (type is None and path.endswith(extension)):
model = self._notebook_model(path, content=content)
else:
if type == 'directory':
raise web.HTTPError(400,
u'%s is not a directory' % path,
reason='bad type')
model = self._file_model(path, content=content, format=format)
return model
|
aaren/notedown | notedown/main.py | ftdetect | python | def ftdetect(filename):
_, extension = os.path.splitext(filename)
md_exts = ['.md', '.markdown', '.mkd', '.mdown', '.mkdn', '.Rmd']
nb_exts = ['.ipynb']
if extension in md_exts:
return 'markdown'
elif extension in nb_exts:
return 'notebook'
else:
return None | Determine if filename is markdown or notebook,
based on the file extension. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/main.py#L107-L119 | null | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import argparse
import pkg_resources
import io
import logging
import nbformat as nbformat
from nbconvert.utils.io import unicode_std_stream
from .notedown import (MarkdownReader,
MarkdownWriter,
Knitr,
run,
strip)
try:
__version__ = pkg_resources.require('notedown')[0].version
except pkg_resources.DistributionNotFound:
__version__ = 'testing'
markdown_template \
= pkg_resources.resource_filename('notedown',
'templates/markdown.tpl')
markdown_figure_template \
= pkg_resources.resource_filename('notedown',
'templates/markdown_outputs.tpl')
examples = """
Example usage of notedown
-------------------------
Convert markdown into notebook:
notedown input.md > output.ipynb
notedown input.md --output output.ipynb
Convert a notebook into markdown, with outputs intact:
notedown input.ipynb --from notebook --to markdown > output_with_outputs.md
Convert a notebook into markdown, stripping all outputs:
notedown input.ipynb --from notebook --to markdown --strip > output.md
Strip the output cells from markdown:
notedown with_output_cells.md --to markdown --strip > no_output_cells.md
Convert from markdown and execute:
notedown input.md --run > executed_notebook.ipynb
Convert r-markdown into markdown:
notedown input.Rmd --to markdown --knit > output.md
Convert r-markdown into an IPython notebook:
notedown input.Rmd --knit > output.ipynb
Convert r-markdown into a notebook with the outputs computed, using
the rmagic extension to execute the code blocks:
notedown input.Rmd --knit --rmagic --run > executed_output.ipynb
"""
def convert(content, informat, outformat, strip_outputs=False):
if os.path.exists(content):
with io.open(content, 'r', encoding='utf-8') as f:
contents = f.read()
else:
contents = content
readers = {'notebook': nbformat,
'markdown': MarkdownReader(precode='',
magic=False,
match='fenced')
}
writers = {'notebook': nbformat,
'markdown': MarkdownWriter(markdown_template,
strip_outputs=strip_outputs)
}
reader = readers[informat]
writer = writers[outformat]
notebook = reader.reads(contents, as_version=4)
return writer.writes(notebook)
def command_line_parser():
"""Create parser for command line usage."""
description = "Create an IPython notebook from markdown."
example_use = "Example: notedown some_markdown.md > new_notebook.ipynb"
parser = argparse.ArgumentParser(description=description,
epilog=example_use)
parser.add_argument('input_file',
help="markdown input file (default STDIN)",
nargs="?",
default='-')
parser.add_argument('-o', '--output',
help=("output file, (default STDOUT). "
"If flag used but no file given, use "
"the name of the input file to "
"determine the output filename. "
"This will OVERWRITE if input and output "
"formats are the same."),
nargs="?",
default='-',
const='')
parser.add_argument('--from',
dest='informat',
choices=('notebook', 'markdown'),
help=("format to convert from, defaults to markdown "
"or file extension"))
parser.add_argument('--to',
dest='outformat',
choices=('notebook', 'markdown'),
help=("format to convert to, defaults to notebook "
"or file extension. Setting --render forces "
"this to 'markdown'"))
parser.add_argument('--run', '--execute',
action='store_true',
help=("run the notebook, executing the "
"contents of each cell"))
parser.add_argument('--timeout',
default=30,
type=int,
help=("set the cell execution timeout (in seconds)"))
parser.add_argument('--strip',
action='store_true',
dest='strip_outputs',
help=("strip output cells"))
parser.add_argument('--precode',
nargs='+',
default=[],
help=("additional code to place at the start of the "
"notebook, e.g. --pre '%%matplotlib inline' "
"'import numpy as np'"))
parser.add_argument('--knit',
nargs='?',
help=("pre-process the markdown with knitr. "
"Default chunk options are 'eval=FALSE' "
"but you can change this by passing a string. "
"Requires R in your path and knitr installed."),
const='eval=FALSE')
parser.add_argument('--rmagic',
action='store_true',
help=("autoload the rmagic extension. Synonym for "
"--precode '%%load_ext rpy2.ipython'"))
parser.add_argument('--nomagic',
action='store_false',
dest='magic',
help=("disable code magic."))
parser.add_argument('--render',
help=('render outputs, forcing markdown output'),
action='store_true')
parser.add_argument('--template',
help=('template file'))
parser.add_argument('--match',
default='all',
help=("determine kind of code blocks that get "
"converted into code cells. "
"choose from 'all' (default), 'fenced', "
"'strict' or a specific language to match on"))
parser.add_argument('--examples',
help=('show example usage'),
action='store_true')
parser.add_argument('--version',
help=('print version number'),
action='store_true')
parser.add_argument('--debug',
help=('show logging output'),
action='store_true')
return parser
def main(args, help=''):
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.version:
print(__version__)
sys.exit()
if args.examples:
print(examples)
sys.exit()
# if no stdin and no input file
if args.input_file == '-' and sys.stdin.isatty():
sys.stdout.write(help)
sys.exit()
elif args.input_file == '-':
input_file = sys.stdin
elif args.input_file != '-':
input_file = io.open(args.input_file, 'r', encoding='utf-8')
else:
sys.exit('malformed input')
# pre-process markdown by using knitr on it
if args.knit:
knitr = Knitr()
input_file = knitr.knit(input_file, opts_chunk=args.knit)
if args.rmagic:
args.precode.append(r"%load_ext rpy2.ipython")
if args.render:
template_file = markdown_figure_template
else:
template_file = markdown_template
template_file = args.template or template_file
# reader and writer classes with args and kwargs to
# instantiate with
readers = {'notebook': nbformat,
'markdown': MarkdownReader(precode='\n'.join(args.precode),
magic=args.magic,
match=args.match,
caption_comments=args.render)
}
writers = {'notebook': nbformat,
'markdown': MarkdownWriter(template_file,
strip_outputs=args.strip_outputs)
}
informat = args.informat or ftdetect(input_file.name) or 'markdown'
outformat = args.outformat or ftdetect(args.output) or 'notebook'
if args.render:
outformat = 'markdown'
reader = readers[informat]
writer = writers[outformat]
with input_file as ip:
notebook = reader.read(ip, as_version=4)
if args.run:
run(notebook, timeout=args.timeout)
if args.strip_outputs:
strip(notebook)
output_ext = {'markdown': '.md',
'notebook': '.ipynb'}
if not args.output and args.input_file != '-':
# overwrite
fout = os.path.splitext(args.input_file)[0] + output_ext[outformat]
# grab the output here so we don't obliterate the file if
# there is an error
output = writer.writes(notebook)
with io.open(fout, 'w', encoding='utf-8') as op:
op.write(output)
elif not args.output and args.input_file == '-':
# overwrite error (input is stdin)
sys.exit('Cannot overwrite with no input file given.')
elif args.output == '-':
# write stdout
writer.write(notebook, unicode_std_stream('stdout'))
elif args.output != '-':
# write to filename
with io.open(args.output, 'w', encoding='utf-8') as op:
writer.write(notebook, op)
def app():
parser = command_line_parser()
args = parser.parse_args()
main(args, help=parser.format_help())
if __name__ == '__main__':
app()
|
aaren/notedown | notedown/main.py | command_line_parser | python | def command_line_parser():
description = "Create an IPython notebook from markdown."
example_use = "Example: notedown some_markdown.md > new_notebook.ipynb"
parser = argparse.ArgumentParser(description=description,
epilog=example_use)
parser.add_argument('input_file',
help="markdown input file (default STDIN)",
nargs="?",
default='-')
parser.add_argument('-o', '--output',
help=("output file, (default STDOUT). "
"If flag used but no file given, use "
"the name of the input file to "
"determine the output filename. "
"This will OVERWRITE if input and output "
"formats are the same."),
nargs="?",
default='-',
const='')
parser.add_argument('--from',
dest='informat',
choices=('notebook', 'markdown'),
help=("format to convert from, defaults to markdown "
"or file extension"))
parser.add_argument('--to',
dest='outformat',
choices=('notebook', 'markdown'),
help=("format to convert to, defaults to notebook "
"or file extension. Setting --render forces "
"this to 'markdown'"))
parser.add_argument('--run', '--execute',
action='store_true',
help=("run the notebook, executing the "
"contents of each cell"))
parser.add_argument('--timeout',
default=30,
type=int,
help=("set the cell execution timeout (in seconds)"))
parser.add_argument('--strip',
action='store_true',
dest='strip_outputs',
help=("strip output cells"))
parser.add_argument('--precode',
nargs='+',
default=[],
help=("additional code to place at the start of the "
"notebook, e.g. --pre '%%matplotlib inline' "
"'import numpy as np'"))
parser.add_argument('--knit',
nargs='?',
help=("pre-process the markdown with knitr. "
"Default chunk options are 'eval=FALSE' "
"but you can change this by passing a string. "
"Requires R in your path and knitr installed."),
const='eval=FALSE')
parser.add_argument('--rmagic',
action='store_true',
help=("autoload the rmagic extension. Synonym for "
"--precode '%%load_ext rpy2.ipython'"))
parser.add_argument('--nomagic',
action='store_false',
dest='magic',
help=("disable code magic."))
parser.add_argument('--render',
help=('render outputs, forcing markdown output'),
action='store_true')
parser.add_argument('--template',
help=('template file'))
parser.add_argument('--match',
default='all',
help=("determine kind of code blocks that get "
"converted into code cells. "
"choose from 'all' (default), 'fenced', "
"'strict' or a specific language to match on"))
parser.add_argument('--examples',
help=('show example usage'),
action='store_true')
parser.add_argument('--version',
help=('print version number'),
action='store_true')
parser.add_argument('--debug',
help=('show logging output'),
action='store_true')
return parser | Create parser for command line usage. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/main.py#L122-L207 | null | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import argparse
import pkg_resources
import io
import logging
import nbformat as nbformat
from nbconvert.utils.io import unicode_std_stream
from .notedown import (MarkdownReader,
MarkdownWriter,
Knitr,
run,
strip)
try:
__version__ = pkg_resources.require('notedown')[0].version
except pkg_resources.DistributionNotFound:
__version__ = 'testing'
markdown_template \
= pkg_resources.resource_filename('notedown',
'templates/markdown.tpl')
markdown_figure_template \
= pkg_resources.resource_filename('notedown',
'templates/markdown_outputs.tpl')
examples = """
Example usage of notedown
-------------------------
Convert markdown into notebook:
notedown input.md > output.ipynb
notedown input.md --output output.ipynb
Convert a notebook into markdown, with outputs intact:
notedown input.ipynb --from notebook --to markdown > output_with_outputs.md
Convert a notebook into markdown, stripping all outputs:
notedown input.ipynb --from notebook --to markdown --strip > output.md
Strip the output cells from markdown:
notedown with_output_cells.md --to markdown --strip > no_output_cells.md
Convert from markdown and execute:
notedown input.md --run > executed_notebook.ipynb
Convert r-markdown into markdown:
notedown input.Rmd --to markdown --knit > output.md
Convert r-markdown into an IPython notebook:
notedown input.Rmd --knit > output.ipynb
Convert r-markdown into a notebook with the outputs computed, using
the rmagic extension to execute the code blocks:
notedown input.Rmd --knit --rmagic --run > executed_output.ipynb
"""
def convert(content, informat, outformat, strip_outputs=False):
if os.path.exists(content):
with io.open(content, 'r', encoding='utf-8') as f:
contents = f.read()
else:
contents = content
readers = {'notebook': nbformat,
'markdown': MarkdownReader(precode='',
magic=False,
match='fenced')
}
writers = {'notebook': nbformat,
'markdown': MarkdownWriter(markdown_template,
strip_outputs=strip_outputs)
}
reader = readers[informat]
writer = writers[outformat]
notebook = reader.reads(contents, as_version=4)
return writer.writes(notebook)
def ftdetect(filename):
"""Determine if filename is markdown or notebook,
based on the file extension.
"""
_, extension = os.path.splitext(filename)
md_exts = ['.md', '.markdown', '.mkd', '.mdown', '.mkdn', '.Rmd']
nb_exts = ['.ipynb']
if extension in md_exts:
return 'markdown'
elif extension in nb_exts:
return 'notebook'
else:
return None
def main(args, help=''):
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.version:
print(__version__)
sys.exit()
if args.examples:
print(examples)
sys.exit()
# if no stdin and no input file
if args.input_file == '-' and sys.stdin.isatty():
sys.stdout.write(help)
sys.exit()
elif args.input_file == '-':
input_file = sys.stdin
elif args.input_file != '-':
input_file = io.open(args.input_file, 'r', encoding='utf-8')
else:
sys.exit('malformed input')
# pre-process markdown by using knitr on it
if args.knit:
knitr = Knitr()
input_file = knitr.knit(input_file, opts_chunk=args.knit)
if args.rmagic:
args.precode.append(r"%load_ext rpy2.ipython")
if args.render:
template_file = markdown_figure_template
else:
template_file = markdown_template
template_file = args.template or template_file
# reader and writer classes with args and kwargs to
# instantiate with
readers = {'notebook': nbformat,
'markdown': MarkdownReader(precode='\n'.join(args.precode),
magic=args.magic,
match=args.match,
caption_comments=args.render)
}
writers = {'notebook': nbformat,
'markdown': MarkdownWriter(template_file,
strip_outputs=args.strip_outputs)
}
informat = args.informat or ftdetect(input_file.name) or 'markdown'
outformat = args.outformat or ftdetect(args.output) or 'notebook'
if args.render:
outformat = 'markdown'
reader = readers[informat]
writer = writers[outformat]
with input_file as ip:
notebook = reader.read(ip, as_version=4)
if args.run:
run(notebook, timeout=args.timeout)
if args.strip_outputs:
strip(notebook)
output_ext = {'markdown': '.md',
'notebook': '.ipynb'}
if not args.output and args.input_file != '-':
# overwrite
fout = os.path.splitext(args.input_file)[0] + output_ext[outformat]
# grab the output here so we don't obliterate the file if
# there is an error
output = writer.writes(notebook)
with io.open(fout, 'w', encoding='utf-8') as op:
op.write(output)
elif not args.output and args.input_file == '-':
# overwrite error (input is stdin)
sys.exit('Cannot overwrite with no input file given.')
elif args.output == '-':
# write stdout
writer.write(notebook, unicode_std_stream('stdout'))
elif args.output != '-':
# write to filename
with io.open(args.output, 'w', encoding='utf-8') as op:
writer.write(notebook, op)
def app():
parser = command_line_parser()
args = parser.parse_args()
main(args, help=parser.format_help())
if __name__ == '__main__':
app()
|
aaren/notedown | notedown/notedown.py | strip | python | def strip(notebook):
for cell in notebook.cells:
if cell.cell_type == 'code':
cell.outputs = []
cell.execution_count = None | Remove outputs from a notebook. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L38-L43 | null | from __future__ import absolute_import
import json
import logging
import os
import re
import subprocess
import tempfile
from six import PY3
from six.moves import map
from six.moves import range
from six.moves import zip
import nbformat.v4.nbbase as nbbase
import nbformat.v4 as v4
from nbformat.v4.rwbase import NotebookReader
from nbformat.v4.rwbase import NotebookWriter
from nbformat.v4.nbjson import BytesEncoder
from nbconvert.preprocessors.execute import ExecutePreprocessor
from nbconvert import TemplateExporter
from pandocattributes import PandocAttributes
languages = ['python', 'r', 'ruby', 'bash']
def cast_unicode(s, encoding='utf-8'):
"""Python 2/3 compatibility function derived from IPython py3compat."""
if isinstance(s, bytes) and not PY3:
return s.decode(encoding, "replace")
return s
def run(notebook, timeout=30):
executor = ExecutePreprocessor(timeout=timeout)
notebook, resources = executor.preprocess(notebook, resources={})
# you can think of notedown as a document converter that uses the
# ipython notebook as its internal format
class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
class MarkdownWriter(NotebookWriter):
"""Write a notebook into markdown."""
def __init__(self, template_file, strip_outputs=True,
write_outputs=False, output_dir='./figures'):
"""template_file - location of jinja template to use for export
strip_outputs - whether to remove output cells from the output
"""
filters = [
('string2json', self.string2json),
('create_input_codeblock', self.create_input_codeblock),
('create_output_codeblock', self.create_output_codeblock),
('create_output_block', self.create_output_block),
('create_attributes', self.create_attributes),
('dequote', self.dequote),
('data2uri', self.data2uri)
]
import jinja2
# need to create a jinja loader that looks in whatever
# arbitrary path we have passed in for the template_file
direct_loader = jinja2.FileSystemLoader(os.path.dirname(template_file))
self.exporter = TemplateExporter(extra_loaders=[direct_loader])
self.exporter.output_mimetype = 'text/markdown'
self.exporter.file_extension = '.md'
# have to register filters before setting template file for
# ipython 3 compatibility
for name, filter in filters:
self.exporter.register_filter(name, filter)
self.exporter.template_file = os.path.basename(template_file)
logging.debug("Creating MarkdownWriter")
logging.debug(("MarkdownWriter: template_file = %s"
% template_file))
logging.debug(("MarkdownWriter.exporter.template_file = %s"
% self.exporter.template_file))
logging.debug(("MarkdownWriter.exporter.filters = %s"
% self.exporter.environment.filters.keys()))
self.strip_outputs = strip_outputs
self.write_outputs = write_outputs
self.output_dir = output_dir
def write_from_json(self, notebook_json):
notebook = v4.reads_json(notebook_json)
return self.write(notebook)
def writes(self, notebook):
body, resources = self.exporter.from_notebook_node(notebook)
self.resources = resources
if self.write_outputs:
self.write_resources(resources)
# remove any blank lines added at start and end by template
text = re.sub(r'\A\s*\n|^\s*\Z', '', body)
return cast_unicode(text, 'utf-8')
def write_resources(self, resources):
"""Write the output data in resources returned by exporter
to files.
"""
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data)
# --- filter functions to be used in the output template --- #
def string2json(self, string):
"""Convert json into its string representation.
Used for writing outputs to markdown."""
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
def create_input_codeblock(self, cell):
codeblock = ('{fence}{attributes}\n'
'{cell.source}\n'
'{fence}')
attrs = self.create_attributes(cell, cell_type='input')
return codeblock.format(attributes=attrs, fence='```', cell=cell)
def create_output_block(self, cell):
if self.strip_outputs:
return ''
else:
return self.create_output_codeblock(cell)
def create_output_codeblock(self, cell):
codeblock = ('{fence}{{.json .output n={execution_count}}}\n'
'{contents}\n'
'{fence}')
return codeblock.format(fence='```',
execution_count=cell.execution_count,
contents=self.string2json(cell.outputs))
def create_attributes(self, cell, cell_type=None):
"""Turn the attribute dict into an attribute string
for the code block.
"""
if self.strip_outputs or not hasattr(cell, 'execution_count'):
return 'python'
attrs = cell.metadata.get('attributes')
attr = PandocAttributes(attrs, 'dict')
if 'python' in attr.classes:
attr.classes.remove('python')
if 'input' in attr.classes:
attr.classes.remove('input')
if cell_type == 'figure':
attr.kvs.pop('caption', '')
attr.classes.append('figure')
attr.classes.append('output')
return attr.to_html()
elif cell_type == 'input':
# ensure python goes first so that github highlights it
attr.classes.insert(0, 'python')
attr.classes.insert(1, 'input')
if cell.execution_count:
attr.kvs['n'] = cell.execution_count
return attr.to_markdown(format='{classes} {id} {kvs}')
else:
return attr.to_markdown()
@staticmethod
def dequote(s):
"""Remove excess quotes from a string."""
if len(s) < 2:
return s
elif (s[0] == s[-1]) and s.startswith(('"', "'")):
return s[1: -1]
else:
return s
@staticmethod
def data2uri(data, data_type):
"""Convert base64 data into a data uri with the given data_type."""
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'image/svg+xml': 'svg',
}
inverse_map = {v: k for k, v in list(MIME_MAP.items())}
mime_type = inverse_map[data_type]
uri = r"data:{mime};base64,{data}"
return uri.format(mime=mime_type,
data=data[mime_type].replace('\n', ''))
class CodeMagician(object):
# aliases to different languages
many_aliases = {('r', 'R'): '%%R\n'}
# convert to many to one lookup (found as self.aliases)
aliases = {}
for k, v in list(many_aliases.items()):
for key in k:
aliases[key] = v
@classmethod
def magic(self, alias):
"""Returns the appropriate IPython code magic when
called with an alias for a language.
"""
if alias in self.aliases:
return self.aliases[alias]
else:
return "%%{}\n".format(alias)
class Knitr(object):
class KnitrError(Exception):
pass
def __init__(self):
# raise exception if R or knitr not installed
cmd = ['Rscript', '-e', 'require(knitr)']
try:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
message = "Rscript was not found on your path."
raise self.KnitrError(message)
stdout, stderr = p.communicate()
# cast to unicode for Python 3 compatibility
stderr = stderr.decode('utf8')
if 'Warning' in stderr:
message = ("Could not load knitr (needs manual installation).\n\n"
"$ {cmd}\n"
"{error}").format(cmd=' '.join(cmd), error=stderr)
raise self.KnitrError(message)
def knit(self, input_file, opts_chunk='eval=FALSE'):
"""Use Knitr to convert the r-markdown input_file
into markdown, returning a file object.
"""
# use temporary files at both ends to allow stdin / stdout
tmp_in = tempfile.NamedTemporaryFile(mode='w+')
tmp_out = tempfile.NamedTemporaryFile(mode='w+')
tmp_in.file.write(input_file.read())
tmp_in.file.flush()
tmp_in.file.seek(0)
self._knit(tmp_in.name, tmp_out.name, opts_chunk)
tmp_out.file.flush()
return tmp_out
@staticmethod
def _knit(fin, fout,
opts_knit='progress=FALSE, verbose=FALSE',
opts_chunk='eval=FALSE'):
"""Use knitr to convert r markdown (or anything knitr supports)
to markdown.
fin / fout - strings, input / output filenames.
opts_knit - string, options to pass to knit
opts_shunk - string, chunk options
options are passed verbatim to knitr:knit running in Rscript.
"""
script = ('sink("/dev/null");'
'library(knitr);'
'opts_knit$set({opts_knit});'
'opts_chunk$set({opts_chunk});'
'knit("{input}", output="{output}")')
rcmd = ('Rscript', '-e',
script.format(input=fin, output=fout,
opts_knit=opts_knit, opts_chunk=opts_chunk)
)
p = subprocess.Popen(rcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
def get_caption_comments(content):
"""Retrieve an id and a caption from a code cell.
If the code cell content begins with a commented
block that looks like
## fig:id
# multi-line or single-line
# caption
then the 'fig:id' and the caption will be returned.
The '#' are stripped.
"""
if not content.startswith('## fig:'):
return None, None
content = content.splitlines()
id = content[0].strip('## ')
caption = []
for line in content[1:]:
if not line.startswith('# ') or line.startswith('##'):
break
else:
caption.append(line.lstrip('# ').rstrip())
# add " around the caption. TODO: consider doing this upstream
# in pandoc-attributes
caption = '"' + ' '.join(caption) + '"'
return id, caption
|
aaren/notedown | notedown/notedown.py | get_caption_comments | python | def get_caption_comments(content):
if not content.startswith('## fig:'):
return None, None
content = content.splitlines()
id = content[0].strip('## ')
caption = []
for line in content[1:]:
if not line.startswith('# ') or line.startswith('##'):
break
else:
caption.append(line.lstrip('# ').rstrip())
# add " around the caption. TODO: consider doing this upstream
# in pandoc-attributes
caption = '"' + ' '.join(caption) + '"'
return id, caption | Retrieve an id and a caption from a code cell.
If the code cell content begins with a commented
block that looks like
## fig:id
# multi-line or single-line
# caption
then the 'fig:id' and the caption will be returned.
The '#' are stripped. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L648-L679 | null | from __future__ import absolute_import
import json
import logging
import os
import re
import subprocess
import tempfile
from six import PY3
from six.moves import map
from six.moves import range
from six.moves import zip
import nbformat.v4.nbbase as nbbase
import nbformat.v4 as v4
from nbformat.v4.rwbase import NotebookReader
from nbformat.v4.rwbase import NotebookWriter
from nbformat.v4.nbjson import BytesEncoder
from nbconvert.preprocessors.execute import ExecutePreprocessor
from nbconvert import TemplateExporter
from pandocattributes import PandocAttributes
languages = ['python', 'r', 'ruby', 'bash']
def cast_unicode(s, encoding='utf-8'):
"""Python 2/3 compatibility function derived from IPython py3compat."""
if isinstance(s, bytes) and not PY3:
return s.decode(encoding, "replace")
return s
def strip(notebook):
"""Remove outputs from a notebook."""
for cell in notebook.cells:
if cell.cell_type == 'code':
cell.outputs = []
cell.execution_count = None
def run(notebook, timeout=30):
executor = ExecutePreprocessor(timeout=timeout)
notebook, resources = executor.preprocess(notebook, resources={})
# you can think of notedown as a document converter that uses the
# ipython notebook as its internal format
class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
class MarkdownWriter(NotebookWriter):
"""Write a notebook into markdown."""
def __init__(self, template_file, strip_outputs=True,
write_outputs=False, output_dir='./figures'):
"""template_file - location of jinja template to use for export
strip_outputs - whether to remove output cells from the output
"""
filters = [
('string2json', self.string2json),
('create_input_codeblock', self.create_input_codeblock),
('create_output_codeblock', self.create_output_codeblock),
('create_output_block', self.create_output_block),
('create_attributes', self.create_attributes),
('dequote', self.dequote),
('data2uri', self.data2uri)
]
import jinja2
# need to create a jinja loader that looks in whatever
# arbitrary path we have passed in for the template_file
direct_loader = jinja2.FileSystemLoader(os.path.dirname(template_file))
self.exporter = TemplateExporter(extra_loaders=[direct_loader])
self.exporter.output_mimetype = 'text/markdown'
self.exporter.file_extension = '.md'
# have to register filters before setting template file for
# ipython 3 compatibility
for name, filter in filters:
self.exporter.register_filter(name, filter)
self.exporter.template_file = os.path.basename(template_file)
logging.debug("Creating MarkdownWriter")
logging.debug(("MarkdownWriter: template_file = %s"
% template_file))
logging.debug(("MarkdownWriter.exporter.template_file = %s"
% self.exporter.template_file))
logging.debug(("MarkdownWriter.exporter.filters = %s"
% self.exporter.environment.filters.keys()))
self.strip_outputs = strip_outputs
self.write_outputs = write_outputs
self.output_dir = output_dir
def write_from_json(self, notebook_json):
notebook = v4.reads_json(notebook_json)
return self.write(notebook)
def writes(self, notebook):
body, resources = self.exporter.from_notebook_node(notebook)
self.resources = resources
if self.write_outputs:
self.write_resources(resources)
# remove any blank lines added at start and end by template
text = re.sub(r'\A\s*\n|^\s*\Z', '', body)
return cast_unicode(text, 'utf-8')
def write_resources(self, resources):
"""Write the output data in resources returned by exporter
to files.
"""
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data)
# --- filter functions to be used in the output template --- #
def string2json(self, string):
"""Convert json into its string representation.
Used for writing outputs to markdown."""
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
def create_input_codeblock(self, cell):
codeblock = ('{fence}{attributes}\n'
'{cell.source}\n'
'{fence}')
attrs = self.create_attributes(cell, cell_type='input')
return codeblock.format(attributes=attrs, fence='```', cell=cell)
def create_output_block(self, cell):
if self.strip_outputs:
return ''
else:
return self.create_output_codeblock(cell)
def create_output_codeblock(self, cell):
codeblock = ('{fence}{{.json .output n={execution_count}}}\n'
'{contents}\n'
'{fence}')
return codeblock.format(fence='```',
execution_count=cell.execution_count,
contents=self.string2json(cell.outputs))
def create_attributes(self, cell, cell_type=None):
"""Turn the attribute dict into an attribute string
for the code block.
"""
if self.strip_outputs or not hasattr(cell, 'execution_count'):
return 'python'
attrs = cell.metadata.get('attributes')
attr = PandocAttributes(attrs, 'dict')
if 'python' in attr.classes:
attr.classes.remove('python')
if 'input' in attr.classes:
attr.classes.remove('input')
if cell_type == 'figure':
attr.kvs.pop('caption', '')
attr.classes.append('figure')
attr.classes.append('output')
return attr.to_html()
elif cell_type == 'input':
# ensure python goes first so that github highlights it
attr.classes.insert(0, 'python')
attr.classes.insert(1, 'input')
if cell.execution_count:
attr.kvs['n'] = cell.execution_count
return attr.to_markdown(format='{classes} {id} {kvs}')
else:
return attr.to_markdown()
@staticmethod
def dequote(s):
"""Remove excess quotes from a string."""
if len(s) < 2:
return s
elif (s[0] == s[-1]) and s.startswith(('"', "'")):
return s[1: -1]
else:
return s
@staticmethod
def data2uri(data, data_type):
"""Convert base64 data into a data uri with the given data_type."""
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'image/svg+xml': 'svg',
}
inverse_map = {v: k for k, v in list(MIME_MAP.items())}
mime_type = inverse_map[data_type]
uri = r"data:{mime};base64,{data}"
return uri.format(mime=mime_type,
data=data[mime_type].replace('\n', ''))
class CodeMagician(object):
# aliases to different languages
many_aliases = {('r', 'R'): '%%R\n'}
# convert to many to one lookup (found as self.aliases)
aliases = {}
for k, v in list(many_aliases.items()):
for key in k:
aliases[key] = v
@classmethod
def magic(self, alias):
"""Returns the appropriate IPython code magic when
called with an alias for a language.
"""
if alias in self.aliases:
return self.aliases[alias]
else:
return "%%{}\n".format(alias)
class Knitr(object):
class KnitrError(Exception):
pass
def __init__(self):
# raise exception if R or knitr not installed
cmd = ['Rscript', '-e', 'require(knitr)']
try:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
message = "Rscript was not found on your path."
raise self.KnitrError(message)
stdout, stderr = p.communicate()
# cast to unicode for Python 3 compatibility
stderr = stderr.decode('utf8')
if 'Warning' in stderr:
message = ("Could not load knitr (needs manual installation).\n\n"
"$ {cmd}\n"
"{error}").format(cmd=' '.join(cmd), error=stderr)
raise self.KnitrError(message)
def knit(self, input_file, opts_chunk='eval=FALSE'):
"""Use Knitr to convert the r-markdown input_file
into markdown, returning a file object.
"""
# use temporary files at both ends to allow stdin / stdout
tmp_in = tempfile.NamedTemporaryFile(mode='w+')
tmp_out = tempfile.NamedTemporaryFile(mode='w+')
tmp_in.file.write(input_file.read())
tmp_in.file.flush()
tmp_in.file.seek(0)
self._knit(tmp_in.name, tmp_out.name, opts_chunk)
tmp_out.file.flush()
return tmp_out
@staticmethod
def _knit(fin, fout,
opts_knit='progress=FALSE, verbose=FALSE',
opts_chunk='eval=FALSE'):
"""Use knitr to convert r markdown (or anything knitr supports)
to markdown.
fin / fout - strings, input / output filenames.
opts_knit - string, options to pass to knit
opts_shunk - string, chunk options
options are passed verbatim to knitr:knit running in Rscript.
"""
script = ('sink("/dev/null");'
'library(knitr);'
'opts_knit$set({opts_knit});'
'opts_chunk$set({opts_chunk});'
'knit("{input}", output="{output}")')
rcmd = ('Rscript', '-e',
script.format(input=fin, output=fout,
opts_knit=opts_knit, opts_chunk=opts_chunk)
)
p = subprocess.Popen(rcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
|
aaren/notedown | notedown/notedown.py | MarkdownReader.new_code_block | python | def new_code_block(self, **kwargs):
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto | Create a new code block. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L147-L154 | null | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownReader.new_text_block | python | def new_text_block(self, **kwargs):
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto | Create a new text block. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L156-L160 | null | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownReader.pre_process_code_block | python | def pre_process_code_block(block):
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE) | Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L169-L178 | null | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownReader.process_code_block | python | def process_code_block(self, block):
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block) | Parse block attributes | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L189-L248 | [
"def get_caption_comments(content):\n \"\"\"Retrieve an id and a caption from a code cell.\n\n If the code cell content begins with a commented\n block that looks like\n\n ## fig:id\n # multi-line or single-line\n # caption\n\n then the 'fig:id' and the caption will be returned.\n The '#' ar... | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
             match='all', caption_comments=False):
    """
    code_regex - Either 'fenced' or 'indented' or
                 a regular expression that matches code blocks in
                 markdown text. Will be passed to re.compile with
                 re.VERBOSE and re.MULTILINE flags.

                 Default is to look for both indented and fenced
                 code blocks.

    precode - string, lines of code to put at the start of the
              document, e.g.
              '%matplotlib inline\nimport numpy as np'

    magic - whether to use code cell language magic, e.g.
            put '%bash' at start of cells that have language
            'bash'

    match - one of 'all', 'fenced' or 'strict' or a specific
            language name

    caption_comments - whether to derive a caption and id from the
                       cell contents
    """
    # Resolve the pattern used to locate code blocks in the markdown.
    if not code_regex:
        # default: match both fenced and indented code blocks
        self.code_regex = r"({}|{})".format(self.fenced_regex,
                                            self.indented_regex)
    elif code_regex == 'fenced':
        self.code_regex = self.fenced_regex
    elif code_regex == 'indented':
        self.code_regex = self.indented_regex
    elif code_regex == 'old fenced':
        # NOTE(review): old_fenced_regex is not defined on this class as
        # shown here -- confirm it exists elsewhere, otherwise this
        # branch raises AttributeError.
        self.code_regex = self.old_fenced_regex
    else:
        # treat the argument as a raw regular expression string
        self.code_regex = code_regex

    self.code_pattern = re.compile(self.code_regex, self.re_flags)
    self.precode = precode
    self.magic = magic
    self.match = match
    self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
    """Build a dict describing a code block.

    Starts from empty defaults ('content', 'type', 'IO',
    'attributes') and overlays any keyword overrides.
    """
    defaults = {'content': '',
                'type': self.code,
                'IO': '',
                'attributes': ''}
    defaults.update(kwargs)
    return defaults
def new_text_block(self, **kwargs):
    """Build a dict describing a markdown (non-code) block."""
    defaults = {'content': '', 'type': self.markdown}
    defaults.update(kwargs)
    return defaults
@property
def pre_code_block(self):
    """The implicit first code block built from ``precode``."""
    stripped = self.precode.strip('\n')
    return self.new_code_block(content=stripped, IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def parse_blocks(self, text):
    """Split markdown ``text`` into an ordered list of block dicts.

    Each dict carries at least 'type' ('markdown' or 'code') and
    'content'; code blocks also keep whatever named groups the code
    pattern captured.
    """
    matches = list(self.code_pattern.finditer(text))

    # code blocks come straight from the regex named groups
    code_blocks = [self.new_code_block(**m.groupdict()) for m in matches]

    # the text between (and around) code matches becomes markdown
    # blocks: one before each code block plus a trailing one after
    # the last match
    starts = [0] + [m.end() for m in matches]
    stops = [m.start() for m in matches] + [len(text)]
    text_blocks = [self.new_text_block(content=text[i:j])
                   for i, j in zip(starts, stops)]

    for block in code_blocks:
        self.pre_process_code_block(block)   # dedent indented code
    for block in text_blocks:
        self.pre_process_text_block(block)   # strip surrounding blanks

    # Interleave text/code blocks -- there is always exactly one more
    # text block than code blocks. Empty markdown blocks are dropped,
    # which is how two consecutive code blocks end up adjacent.
    ordered = []
    for text_block, code_block in zip(text_blocks, code_blocks):
        ordered.append(text_block)
        ordered.append(code_block)
    ordered.append(text_blocks[-1])

    return [block for block in ordered if block['content']]
@staticmethod
def create_code_cell(block):
    """Build a notebook code cell from a parsed code block.

    Non-empty pandoc attributes are stashed in the cell metadata; an
    'n' key-value pair, when present, becomes the execution count.
    """
    cell = nbbase.new_code_cell(source=block['content'])
    attributes = block['attributes']
    if not attributes.is_empty:
        cell.metadata = nbbase.NotebookNode(
            {'attributes': attributes.to_dict()})
    count = attributes.kvs.get('n')
    cell.execution_count = int(count) if count else None
    return cell
@staticmethod
def create_markdown_cell(block):
    """Build a notebook markdown cell from a text block."""
    return nbbase.new_markdown_cell(cell_type=block['type'],
                                    source=block['content'])
@staticmethod
def create_outputs(block):
    """Deserialize a JSON code block into notebook output nodes."""
    outputs = json.loads(block['content'])
    return [nbbase.NotebookNode(item) for item in outputs]
def create_cells(self, blocks):
    """Convert parsed blocks into a list of notebook cells.

    Code blocks marked as input become code cells; JSON output blocks
    attach their outputs to the immediately preceding code cell;
    everything else is expected to be markdown.

    Raises:
        NotImplementedError: for a block type that is neither code
            nor markdown, or an output block with no usable
            preceding code cell.
    """
    cells = []
    for block in blocks:
        if (block['type'] == self.code) and (block['IO'] == 'input'):
            code_cell = self.create_code_cell(block)
            cells.append(code_cell)
        elif (block['type'] == self.code and
              block['IO'] == 'output' and
              # guard: an output block as the very first block used
              # to raise IndexError on cells[-1]
              cells and
              cells[-1].cell_type == 'code'):
            cells[-1].outputs = self.create_outputs(block)
        elif block['type'] == self.markdown:
            markdown_cell = self.create_markdown_cell(block)
            cells.append(markdown_cell)
        else:
            # bug fix: the two adjacent string literals concatenated
            # to "celltype" -- keep the space in the message
            raise NotImplementedError("{} is not supported as a cell "
                                      "type".format(block['type']))
    return cells
def to_notebook(self, s, **kwargs):
    """Convert the markdown string ``s`` into a notebook object."""
    blocks = self.parse_blocks(s)
    precode_block = self.pre_code_block
    if precode_block['content']:
        # TODO: if first block is markdown, place after?
        blocks.insert(0, precode_block)
    processed = [self.process_code_block(block) for block in blocks]
    cells = self.create_cells(processed)
    return nbbase.new_notebook(cells=cells)
def reads(self, s, **kwargs):
    """Parse markdown string ``s`` and return the resulting notebook."""
    return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownReader.parse_blocks | python | def parse_blocks(self, text):
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks | Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated! | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L250-L302 | null | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
    """Parse a code block's pandoc attributes and normalise the block.

    Depending on ``self.match`` the block may be demoted back to a
    markdown (text) block; otherwise its IO direction, language and
    attributes are filled in and a fresh code-block dict is returned.
    """
    # markdown blocks pass through untouched
    if block['type'] != self.code:
        return block

    # NOTE(review): PandocAttributes, get_caption_comments, languages
    # and CodeMagician are defined elsewhere in this module.
    attr = PandocAttributes(block['attributes'], 'markdown')

    # decide whether this block should really be treated as code
    if self.match == 'all':
        pass
    elif self.match == 'fenced' and block.get('indent'):
        # indented block under 'fenced' matching: keep as text,
        # re-padded with blank lines so it stays an indented block
        return self.new_text_block(content=('\n' +
                                            block['icontent'] +
                                            '\n'))
    elif self.match == 'strict' and 'input' not in attr.classes:
        return self.new_text_block(content=block['raw'])
    elif self.match not in list(attr.classes) + ['fenced', 'strict']:
        # self.match names a specific language: only keep blocks
        # carrying that class
        return self.new_text_block(content=block['raw'])

    # set input / output status of cell
    if 'output' in attr.classes and 'json' in attr.classes:
        block['IO'] = 'output'
    elif 'input' in attr.classes:
        block['IO'] = 'input'
        attr.classes.remove('input')
    else:
        block['IO'] = 'input'

    if self.caption_comments:
        # override attributes id and caption with those set in
        # comments, if they exist
        id, caption = get_caption_comments(block['content'])
        if id:
            attr.id = id
        if caption:
            attr['caption'] = caption

    try:
        # determine the language as the first class that
        # is in the block attributes and also in the list
        # of languages
        language = set(attr.classes).intersection(languages).pop()
        attr.classes.remove(language)
    except KeyError:
        # pop() on an empty intersection raises KeyError: no
        # recognised language class was present
        language = None

    block['language'] = language
    block['attributes'] = attr

    # ensure one identifier for python code
    if language in ('python', 'py', '', None):
        block['language'] = self.python
    # add alternate language execution magic
    elif language != self.python and self.magic:
        block['content'] = CodeMagician.magic(language) + block['content']
        block['language'] = language

    return self.new_code_block(**block)
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownReader.create_code_cell | python | def create_code_cell(block):
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell | Create a notebook code cell from a block. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L305-L319 | null | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownReader.create_markdown_cell | python | def create_markdown_cell(block):
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell | Create a markdown cell from a block. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L322-L327 | null | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownReader.create_cells | python | def create_cells(self, blocks):
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells | Turn the list of blocks into a list of notebook cells. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L337-L358 | null | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def to_notebook(self, s, **kwargs):
"""Convert the markdown string s to an IPython notebook.
Returns a notebook.
"""
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownReader.to_notebook | python | def to_notebook(self, s, **kwargs):
all_blocks = self.parse_blocks(s)
if self.pre_code_block['content']:
# TODO: if first block is markdown, place after?
all_blocks.insert(0, self.pre_code_block)
blocks = [self.process_code_block(block) for block in all_blocks]
cells = self.create_cells(blocks)
nb = nbbase.new_notebook(cells=cells)
return nb | Convert the markdown string s to an IPython notebook.
Returns a notebook. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L360-L376 | null | class MarkdownReader(NotebookReader):
"""Import markdown to IPython Notebook.
The markdown is split into blocks: code and not-code. These
blocks are used as the source for cells in the notebook. Code
blocks become code cells; not-code blocks become markdown cells.
Only supports two kinds of notebook cell: code and markdown.
"""
# type identifiers
code = u'code'
markdown = u'markdown'
python = u'python'
# regular expressions to match a code block, splitting into groups
# N.B you can't share group names between these patterns.
# this is necessary for format agnostic code block detection.
# These two pattern strings are ORed to create a master pattern
# and the python re module doesn't allow sharing group names
# in a single regular expression.
re_flags = re.MULTILINE | re.VERBOSE
# fenced code
fenced_regex = r"""
^(?P<raw>
(?P<fence>`{3,}|~{3,}) # a line starting with a fence of 3 or more ` or ~
[ \t]* # followed by any amount of whitespace,
(?P<attributes>.*) # the group 'attributes',
\n # a newline,
(?P<content> # the 'content' group,
[\s\S]*?) # that includes anything
\n(?P=fence)$\n) # up until the same fence that we started with
"""
# indented code
indented_regex = r"""
^\s*$\n # a blank line followed by
(?P<icontent> # start group 'icontent'
(?P<indent>^([ ]{4,}|\t)) # an indent of at least four spaces or one tab
[\s\S]*?) # any code
\n(\Z| # followed by the end of the string or
^[ \t]*\n) # a blank line that is
(?!((?P=indent)[ \t]*\S+) # not followed by a line beginning with the
# indent
|\n[ \t]*) # or another blank line
"""
def __init__(self, code_regex=None, precode='', magic=True,
match='all', caption_comments=False):
"""
code_regex - Either 'fenced' or 'indented' or
a regular expression that matches code blocks in
markdown text. Will be passed to re.compile with
re.VERBOSE and re.MULTILINE flags.
Default is to look for both indented and fenced
code blocks.
precode - string, lines of code to put at the start of the
document, e.g.
'%matplotlib inline\nimport numpy as np'
magic - whether to use code cell language magic, e.g.
put '%bash' at start of cells that have language
'bash'
match - one of 'all', 'fenced' or 'strict' or a specific
language name
caption_comments - whether to derive a caption and id from the
cell contents
"""
if not code_regex:
self.code_regex = r"({}|{})".format(self.fenced_regex,
self.indented_regex)
elif code_regex == 'fenced':
self.code_regex = self.fenced_regex
elif code_regex == 'indented':
self.code_regex = self.indented_regex
elif code_regex == 'old fenced':
self.code_regex = self.old_fenced_regex
else:
self.code_regex = code_regex
self.code_pattern = re.compile(self.code_regex, self.re_flags)
self.precode = precode
self.magic = magic
self.match = match
self.caption_comments = caption_comments
def new_code_block(self, **kwargs):
"""Create a new code block."""
proto = {'content': '',
'type': self.code,
'IO': '',
'attributes': ''}
proto.update(**kwargs)
return proto
def new_text_block(self, **kwargs):
"""Create a new text block."""
proto = {'content': '', 'type': self.markdown}
proto.update(**kwargs)
return proto
@property
def pre_code_block(self):
"""Code block to place at the start of the document."""
return self.new_code_block(content=self.precode.strip('\n'),
IO='input')
@staticmethod
def pre_process_code_block(block):
"""Preprocess the content of a code block, modifying the code
block in place.
Just dedents indented code.
"""
if 'indent' in block and block['indent']:
indent = r'^' + block['indent']
block['content'] = re.sub(indent, '', block['icontent'],
flags=re.MULTILINE)
@staticmethod
def pre_process_text_block(block):
"""Apply pre processing to text blocks.
Currently just strips whitespace from the beginning
and end of the block.
"""
block['content'] = block['content'].strip()
def process_code_block(self, block):
"""Parse block attributes"""
if block['type'] != self.code:
return block
attr = PandocAttributes(block['attributes'], 'markdown')
if self.match == 'all':
pass
elif self.match == 'fenced' and block.get('indent'):
return self.new_text_block(content=('\n' +
block['icontent'] +
'\n'))
elif self.match == 'strict' and 'input' not in attr.classes:
return self.new_text_block(content=block['raw'])
elif self.match not in list(attr.classes) + ['fenced', 'strict']:
return self.new_text_block(content=block['raw'])
# set input / output status of cell
if 'output' in attr.classes and 'json' in attr.classes:
block['IO'] = 'output'
elif 'input' in attr.classes:
block['IO'] = 'input'
attr.classes.remove('input')
else:
block['IO'] = 'input'
if self.caption_comments:
# override attributes id and caption with those set in
# comments, if they exist
id, caption = get_caption_comments(block['content'])
if id:
attr.id = id
if caption:
attr['caption'] = caption
try:
# determine the language as the first class that
# is in the block attributes and also in the list
# of languages
language = set(attr.classes).intersection(languages).pop()
attr.classes.remove(language)
except KeyError:
language = None
block['language'] = language
block['attributes'] = attr
# ensure one identifier for python code
if language in ('python', 'py', '', None):
block['language'] = self.python
# add alternate language execution magic
elif language != self.python and self.magic:
block['content'] = CodeMagician.magic(language) + block['content']
block['language'] = language
return self.new_code_block(**block)
def parse_blocks(self, text):
"""Extract the code and non-code blocks from given markdown text.
Returns a list of block dictionaries.
Each dictionary has at least the keys 'type' and 'content',
containing the type of the block ('markdown', 'code') and
the contents of the block.
Additional keys may be parsed as well.
We should switch to an external markdown library if this
gets much more complicated!
"""
code_matches = [m for m in self.code_pattern.finditer(text)]
# determine where the limits of the non code bits are
# based on the code block edges
text_starts = [0] + [m.end() for m in code_matches]
text_stops = [m.start() for m in code_matches] + [len(text)]
text_limits = list(zip(text_starts, text_stops))
# list of the groups from the code blocks
code_blocks = [self.new_code_block(**m.groupdict())
for m in code_matches]
text_blocks = [self.new_text_block(content=text[i:j])
for i, j in text_limits]
# remove indents
list(map(self.pre_process_code_block, code_blocks))
# remove blank line at start and end of markdown
list(map(self.pre_process_text_block, text_blocks))
# create a list of the right length
all_blocks = list(range(len(text_blocks) + len(code_blocks)))
# NOTE: the behaviour here is a bit fragile in that we
# assume that cells must alternate between code and
# markdown. This isn't the case, as we could have
# consecutive code cells, and we get around this by
# stripping out empty cells. i.e. two consecutive code cells
# have an empty markdown cell between them which is stripped
# out because it is empty.
# cells must alternate in order
all_blocks[::2] = text_blocks
all_blocks[1::2] = code_blocks
# remove possible empty text cells
all_blocks = [cell for cell in all_blocks if cell['content']]
return all_blocks
@staticmethod
def create_code_cell(block):
"""Create a notebook code cell from a block."""
code_cell = nbbase.new_code_cell(source=block['content'])
attr = block['attributes']
if not attr.is_empty:
code_cell.metadata \
= nbbase.NotebookNode({'attributes': attr.to_dict()})
execution_count = attr.kvs.get('n')
if not execution_count:
code_cell.execution_count = None
else:
code_cell.execution_count = int(execution_count)
return code_cell
@staticmethod
def create_markdown_cell(block):
"""Create a markdown cell from a block."""
kwargs = {'cell_type': block['type'],
'source': block['content']}
markdown_cell = nbbase.new_markdown_cell(**kwargs)
return markdown_cell
@staticmethod
def create_outputs(block):
"""Create a set of outputs from the contents of a json code
block.
"""
return [nbbase.NotebookNode(output)
for output in json.loads(block['content'])]
def create_cells(self, blocks):
"""Turn the list of blocks into a list of notebook cells."""
cells = []
for block in blocks:
if (block['type'] == self.code) and (block['IO'] == 'input'):
code_cell = self.create_code_cell(block)
cells.append(code_cell)
elif (block['type'] == self.code and
block['IO'] == 'output' and
cells[-1].cell_type == 'code'):
cells[-1].outputs = self.create_outputs(block)
elif block['type'] == self.markdown:
markdown_cell = self.create_markdown_cell(block)
cells.append(markdown_cell)
else:
raise NotImplementedError("{} is not supported as a cell"
"type".format(block['type']))
return cells
def reads(self, s, **kwargs):
"""Read string s to notebook. Returns a notebook."""
return self.to_notebook(s, **kwargs)
|
aaren/notedown | notedown/notedown.py | MarkdownWriter.write_resources | python | def write_resources(self, resources):
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data) | Write the output data in resources returned by exporter
to files. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L445-L458 | null | class MarkdownWriter(NotebookWriter):
"""Write a notebook into markdown."""
def __init__(self, template_file, strip_outputs=True,
write_outputs=False, output_dir='./figures'):
"""template_file - location of jinja template to use for export
strip_outputs - whether to remove output cells from the output
"""
filters = [
('string2json', self.string2json),
('create_input_codeblock', self.create_input_codeblock),
('create_output_codeblock', self.create_output_codeblock),
('create_output_block', self.create_output_block),
('create_attributes', self.create_attributes),
('dequote', self.dequote),
('data2uri', self.data2uri)
]
import jinja2
# need to create a jinja loader that looks in whatever
# arbitrary path we have passed in for the template_file
direct_loader = jinja2.FileSystemLoader(os.path.dirname(template_file))
self.exporter = TemplateExporter(extra_loaders=[direct_loader])
self.exporter.output_mimetype = 'text/markdown'
self.exporter.file_extension = '.md'
# have to register filters before setting template file for
# ipython 3 compatibility
for name, filter in filters:
self.exporter.register_filter(name, filter)
self.exporter.template_file = os.path.basename(template_file)
logging.debug("Creating MarkdownWriter")
logging.debug(("MarkdownWriter: template_file = %s"
% template_file))
logging.debug(("MarkdownWriter.exporter.template_file = %s"
% self.exporter.template_file))
logging.debug(("MarkdownWriter.exporter.filters = %s"
% self.exporter.environment.filters.keys()))
self.strip_outputs = strip_outputs
self.write_outputs = write_outputs
self.output_dir = output_dir
def write_from_json(self, notebook_json):
notebook = v4.reads_json(notebook_json)
return self.write(notebook)
def writes(self, notebook):
body, resources = self.exporter.from_notebook_node(notebook)
self.resources = resources
if self.write_outputs:
self.write_resources(resources)
# remove any blank lines added at start and end by template
text = re.sub(r'\A\s*\n|^\s*\Z', '', body)
return cast_unicode(text, 'utf-8')
# --- filter functions to be used in the output template --- #
def string2json(self, string):
"""Convert json into its string representation.
Used for writing outputs to markdown."""
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
def create_input_codeblock(self, cell):
codeblock = ('{fence}{attributes}\n'
'{cell.source}\n'
'{fence}')
attrs = self.create_attributes(cell, cell_type='input')
return codeblock.format(attributes=attrs, fence='```', cell=cell)
def create_output_block(self, cell):
if self.strip_outputs:
return ''
else:
return self.create_output_codeblock(cell)
def create_output_codeblock(self, cell):
codeblock = ('{fence}{{.json .output n={execution_count}}}\n'
'{contents}\n'
'{fence}')
return codeblock.format(fence='```',
execution_count=cell.execution_count,
contents=self.string2json(cell.outputs))
def create_attributes(self, cell, cell_type=None):
"""Turn the attribute dict into an attribute string
for the code block.
"""
if self.strip_outputs or not hasattr(cell, 'execution_count'):
return 'python'
attrs = cell.metadata.get('attributes')
attr = PandocAttributes(attrs, 'dict')
if 'python' in attr.classes:
attr.classes.remove('python')
if 'input' in attr.classes:
attr.classes.remove('input')
if cell_type == 'figure':
attr.kvs.pop('caption', '')
attr.classes.append('figure')
attr.classes.append('output')
return attr.to_html()
elif cell_type == 'input':
# ensure python goes first so that github highlights it
attr.classes.insert(0, 'python')
attr.classes.insert(1, 'input')
if cell.execution_count:
attr.kvs['n'] = cell.execution_count
return attr.to_markdown(format='{classes} {id} {kvs}')
else:
return attr.to_markdown()
@staticmethod
def dequote(s):
"""Remove excess quotes from a string."""
if len(s) < 2:
return s
elif (s[0] == s[-1]) and s.startswith(('"', "'")):
return s[1: -1]
else:
return s
@staticmethod
def data2uri(data, data_type):
"""Convert base64 data into a data uri with the given data_type."""
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'image/svg+xml': 'svg',
}
inverse_map = {v: k for k, v in list(MIME_MAP.items())}
mime_type = inverse_map[data_type]
uri = r"data:{mime};base64,{data}"
return uri.format(mime=mime_type,
data=data[mime_type].replace('\n', ''))
|
aaren/notedown | notedown/notedown.py | MarkdownWriter.string2json | python | def string2json(self, string):
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8') | Convert json into its string representation.
Used for writing outputs to markdown. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L461-L470 | [
"def cast_unicode(s, encoding='utf-8'):\n \"\"\"Python 2/3 compatibility function derived from IPython py3compat.\"\"\"\n if isinstance(s, bytes) and not PY3:\n return s.decode(encoding, \"replace\")\n return s\n"
] | class MarkdownWriter(NotebookWriter):
"""Write a notebook into markdown."""
def __init__(self, template_file, strip_outputs=True,
write_outputs=False, output_dir='./figures'):
"""template_file - location of jinja template to use for export
strip_outputs - whether to remove output cells from the output
"""
filters = [
('string2json', self.string2json),
('create_input_codeblock', self.create_input_codeblock),
('create_output_codeblock', self.create_output_codeblock),
('create_output_block', self.create_output_block),
('create_attributes', self.create_attributes),
('dequote', self.dequote),
('data2uri', self.data2uri)
]
import jinja2
# need to create a jinja loader that looks in whatever
# arbitrary path we have passed in for the template_file
direct_loader = jinja2.FileSystemLoader(os.path.dirname(template_file))
self.exporter = TemplateExporter(extra_loaders=[direct_loader])
self.exporter.output_mimetype = 'text/markdown'
self.exporter.file_extension = '.md'
# have to register filters before setting template file for
# ipython 3 compatibility
for name, filter in filters:
self.exporter.register_filter(name, filter)
self.exporter.template_file = os.path.basename(template_file)
logging.debug("Creating MarkdownWriter")
logging.debug(("MarkdownWriter: template_file = %s"
% template_file))
logging.debug(("MarkdownWriter.exporter.template_file = %s"
% self.exporter.template_file))
logging.debug(("MarkdownWriter.exporter.filters = %s"
% self.exporter.environment.filters.keys()))
self.strip_outputs = strip_outputs
self.write_outputs = write_outputs
self.output_dir = output_dir
def write_from_json(self, notebook_json):
notebook = v4.reads_json(notebook_json)
return self.write(notebook)
def writes(self, notebook):
body, resources = self.exporter.from_notebook_node(notebook)
self.resources = resources
if self.write_outputs:
self.write_resources(resources)
# remove any blank lines added at start and end by template
text = re.sub(r'\A\s*\n|^\s*\Z', '', body)
return cast_unicode(text, 'utf-8')
def write_resources(self, resources):
"""Write the output data in resources returned by exporter
to files.
"""
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data)
# --- filter functions to be used in the output template --- #
def create_input_codeblock(self, cell):
codeblock = ('{fence}{attributes}\n'
'{cell.source}\n'
'{fence}')
attrs = self.create_attributes(cell, cell_type='input')
return codeblock.format(attributes=attrs, fence='```', cell=cell)
def create_output_block(self, cell):
if self.strip_outputs:
return ''
else:
return self.create_output_codeblock(cell)
def create_output_codeblock(self, cell):
codeblock = ('{fence}{{.json .output n={execution_count}}}\n'
'{contents}\n'
'{fence}')
return codeblock.format(fence='```',
execution_count=cell.execution_count,
contents=self.string2json(cell.outputs))
def create_attributes(self, cell, cell_type=None):
"""Turn the attribute dict into an attribute string
for the code block.
"""
if self.strip_outputs or not hasattr(cell, 'execution_count'):
return 'python'
attrs = cell.metadata.get('attributes')
attr = PandocAttributes(attrs, 'dict')
if 'python' in attr.classes:
attr.classes.remove('python')
if 'input' in attr.classes:
attr.classes.remove('input')
if cell_type == 'figure':
attr.kvs.pop('caption', '')
attr.classes.append('figure')
attr.classes.append('output')
return attr.to_html()
elif cell_type == 'input':
# ensure python goes first so that github highlights it
attr.classes.insert(0, 'python')
attr.classes.insert(1, 'input')
if cell.execution_count:
attr.kvs['n'] = cell.execution_count
return attr.to_markdown(format='{classes} {id} {kvs}')
else:
return attr.to_markdown()
@staticmethod
def dequote(s):
"""Remove excess quotes from a string."""
if len(s) < 2:
return s
elif (s[0] == s[-1]) and s.startswith(('"', "'")):
return s[1: -1]
else:
return s
@staticmethod
def data2uri(data, data_type):
"""Convert base64 data into a data uri with the given data_type."""
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'image/svg+xml': 'svg',
}
inverse_map = {v: k for k, v in list(MIME_MAP.items())}
mime_type = inverse_map[data_type]
uri = r"data:{mime};base64,{data}"
return uri.format(mime=mime_type,
data=data[mime_type].replace('\n', ''))
|
aaren/notedown | notedown/notedown.py | MarkdownWriter.create_attributes | python | def create_attributes(self, cell, cell_type=None):
if self.strip_outputs or not hasattr(cell, 'execution_count'):
return 'python'
attrs = cell.metadata.get('attributes')
attr = PandocAttributes(attrs, 'dict')
if 'python' in attr.classes:
attr.classes.remove('python')
if 'input' in attr.classes:
attr.classes.remove('input')
if cell_type == 'figure':
attr.kvs.pop('caption', '')
attr.classes.append('figure')
attr.classes.append('output')
return attr.to_html()
elif cell_type == 'input':
# ensure python goes first so that github highlights it
attr.classes.insert(0, 'python')
attr.classes.insert(1, 'input')
if cell.execution_count:
attr.kvs['n'] = cell.execution_count
return attr.to_markdown(format='{classes} {id} {kvs}')
else:
return attr.to_markdown() | Turn the attribute dict into an attribute string
for the code block. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L493-L523 | null | class MarkdownWriter(NotebookWriter):
"""Write a notebook into markdown."""
def __init__(self, template_file, strip_outputs=True,
write_outputs=False, output_dir='./figures'):
"""template_file - location of jinja template to use for export
strip_outputs - whether to remove output cells from the output
"""
filters = [
('string2json', self.string2json),
('create_input_codeblock', self.create_input_codeblock),
('create_output_codeblock', self.create_output_codeblock),
('create_output_block', self.create_output_block),
('create_attributes', self.create_attributes),
('dequote', self.dequote),
('data2uri', self.data2uri)
]
import jinja2
# need to create a jinja loader that looks in whatever
# arbitrary path we have passed in for the template_file
direct_loader = jinja2.FileSystemLoader(os.path.dirname(template_file))
self.exporter = TemplateExporter(extra_loaders=[direct_loader])
self.exporter.output_mimetype = 'text/markdown'
self.exporter.file_extension = '.md'
# have to register filters before setting template file for
# ipython 3 compatibility
for name, filter in filters:
self.exporter.register_filter(name, filter)
self.exporter.template_file = os.path.basename(template_file)
logging.debug("Creating MarkdownWriter")
logging.debug(("MarkdownWriter: template_file = %s"
% template_file))
logging.debug(("MarkdownWriter.exporter.template_file = %s"
% self.exporter.template_file))
logging.debug(("MarkdownWriter.exporter.filters = %s"
% self.exporter.environment.filters.keys()))
self.strip_outputs = strip_outputs
self.write_outputs = write_outputs
self.output_dir = output_dir
def write_from_json(self, notebook_json):
notebook = v4.reads_json(notebook_json)
return self.write(notebook)
def writes(self, notebook):
body, resources = self.exporter.from_notebook_node(notebook)
self.resources = resources
if self.write_outputs:
self.write_resources(resources)
# remove any blank lines added at start and end by template
text = re.sub(r'\A\s*\n|^\s*\Z', '', body)
return cast_unicode(text, 'utf-8')
def write_resources(self, resources):
"""Write the output data in resources returned by exporter
to files.
"""
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data)
# --- filter functions to be used in the output template --- #
def string2json(self, string):
"""Convert json into its string representation.
Used for writing outputs to markdown."""
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
def create_input_codeblock(self, cell):
codeblock = ('{fence}{attributes}\n'
'{cell.source}\n'
'{fence}')
attrs = self.create_attributes(cell, cell_type='input')
return codeblock.format(attributes=attrs, fence='```', cell=cell)
def create_output_block(self, cell):
if self.strip_outputs:
return ''
else:
return self.create_output_codeblock(cell)
def create_output_codeblock(self, cell):
codeblock = ('{fence}{{.json .output n={execution_count}}}\n'
'{contents}\n'
'{fence}')
return codeblock.format(fence='```',
execution_count=cell.execution_count,
contents=self.string2json(cell.outputs))
@staticmethod
def dequote(s):
"""Remove excess quotes from a string."""
if len(s) < 2:
return s
elif (s[0] == s[-1]) and s.startswith(('"', "'")):
return s[1: -1]
else:
return s
@staticmethod
def data2uri(data, data_type):
"""Convert base64 data into a data uri with the given data_type."""
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'image/svg+xml': 'svg',
}
inverse_map = {v: k for k, v in list(MIME_MAP.items())}
mime_type = inverse_map[data_type]
uri = r"data:{mime};base64,{data}"
return uri.format(mime=mime_type,
data=data[mime_type].replace('\n', ''))
|
aaren/notedown | notedown/notedown.py | MarkdownWriter.dequote | python | def dequote(s):
if len(s) < 2:
return s
elif (s[0] == s[-1]) and s.startswith(('"', "'")):
return s[1: -1]
else:
return s | Remove excess quotes from a string. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L526-L533 | null | class MarkdownWriter(NotebookWriter):
"""Write a notebook into markdown."""
def __init__(self, template_file, strip_outputs=True,
write_outputs=False, output_dir='./figures'):
"""template_file - location of jinja template to use for export
strip_outputs - whether to remove output cells from the output
"""
filters = [
('string2json', self.string2json),
('create_input_codeblock', self.create_input_codeblock),
('create_output_codeblock', self.create_output_codeblock),
('create_output_block', self.create_output_block),
('create_attributes', self.create_attributes),
('dequote', self.dequote),
('data2uri', self.data2uri)
]
import jinja2
# need to create a jinja loader that looks in whatever
# arbitrary path we have passed in for the template_file
direct_loader = jinja2.FileSystemLoader(os.path.dirname(template_file))
self.exporter = TemplateExporter(extra_loaders=[direct_loader])
self.exporter.output_mimetype = 'text/markdown'
self.exporter.file_extension = '.md'
# have to register filters before setting template file for
# ipython 3 compatibility
for name, filter in filters:
self.exporter.register_filter(name, filter)
self.exporter.template_file = os.path.basename(template_file)
logging.debug("Creating MarkdownWriter")
logging.debug(("MarkdownWriter: template_file = %s"
% template_file))
logging.debug(("MarkdownWriter.exporter.template_file = %s"
% self.exporter.template_file))
logging.debug(("MarkdownWriter.exporter.filters = %s"
% self.exporter.environment.filters.keys()))
self.strip_outputs = strip_outputs
self.write_outputs = write_outputs
self.output_dir = output_dir
def write_from_json(self, notebook_json):
notebook = v4.reads_json(notebook_json)
return self.write(notebook)
def writes(self, notebook):
body, resources = self.exporter.from_notebook_node(notebook)
self.resources = resources
if self.write_outputs:
self.write_resources(resources)
# remove any blank lines added at start and end by template
text = re.sub(r'\A\s*\n|^\s*\Z', '', body)
return cast_unicode(text, 'utf-8')
def write_resources(self, resources):
"""Write the output data in resources returned by exporter
to files.
"""
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data)
# --- filter functions to be used in the output template --- #
def string2json(self, string):
"""Convert json into its string representation.
Used for writing outputs to markdown."""
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
def create_input_codeblock(self, cell):
codeblock = ('{fence}{attributes}\n'
'{cell.source}\n'
'{fence}')
attrs = self.create_attributes(cell, cell_type='input')
return codeblock.format(attributes=attrs, fence='```', cell=cell)
def create_output_block(self, cell):
if self.strip_outputs:
return ''
else:
return self.create_output_codeblock(cell)
def create_output_codeblock(self, cell):
codeblock = ('{fence}{{.json .output n={execution_count}}}\n'
'{contents}\n'
'{fence}')
return codeblock.format(fence='```',
execution_count=cell.execution_count,
contents=self.string2json(cell.outputs))
def create_attributes(self, cell, cell_type=None):
"""Turn the attribute dict into an attribute string
for the code block.
"""
if self.strip_outputs or not hasattr(cell, 'execution_count'):
return 'python'
attrs = cell.metadata.get('attributes')
attr = PandocAttributes(attrs, 'dict')
if 'python' in attr.classes:
attr.classes.remove('python')
if 'input' in attr.classes:
attr.classes.remove('input')
if cell_type == 'figure':
attr.kvs.pop('caption', '')
attr.classes.append('figure')
attr.classes.append('output')
return attr.to_html()
elif cell_type == 'input':
# ensure python goes first so that github highlights it
attr.classes.insert(0, 'python')
attr.classes.insert(1, 'input')
if cell.execution_count:
attr.kvs['n'] = cell.execution_count
return attr.to_markdown(format='{classes} {id} {kvs}')
else:
return attr.to_markdown()
@staticmethod
@staticmethod
def data2uri(data, data_type):
"""Convert base64 data into a data uri with the given data_type."""
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'image/svg+xml': 'svg',
}
inverse_map = {v: k for k, v in list(MIME_MAP.items())}
mime_type = inverse_map[data_type]
uri = r"data:{mime};base64,{data}"
return uri.format(mime=mime_type,
data=data[mime_type].replace('\n', ''))
|
aaren/notedown | notedown/notedown.py | MarkdownWriter.data2uri | python | def data2uri(data, data_type):
MIME_MAP = {
'image/jpeg': 'jpeg',
'image/png': 'png',
'text/plain': 'text',
'text/html': 'html',
'text/latex': 'latex',
'application/javascript': 'html',
'image/svg+xml': 'svg',
}
inverse_map = {v: k for k, v in list(MIME_MAP.items())}
mime_type = inverse_map[data_type]
uri = r"data:{mime};base64,{data}"
return uri.format(mime=mime_type,
data=data[mime_type].replace('\n', '')) | Convert base64 data into a data uri with the given data_type. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L536-L551 | null | class MarkdownWriter(NotebookWriter):
"""Write a notebook into markdown."""
def __init__(self, template_file, strip_outputs=True,
write_outputs=False, output_dir='./figures'):
"""template_file - location of jinja template to use for export
strip_outputs - whether to remove output cells from the output
"""
filters = [
('string2json', self.string2json),
('create_input_codeblock', self.create_input_codeblock),
('create_output_codeblock', self.create_output_codeblock),
('create_output_block', self.create_output_block),
('create_attributes', self.create_attributes),
('dequote', self.dequote),
('data2uri', self.data2uri)
]
import jinja2
# need to create a jinja loader that looks in whatever
# arbitrary path we have passed in for the template_file
direct_loader = jinja2.FileSystemLoader(os.path.dirname(template_file))
self.exporter = TemplateExporter(extra_loaders=[direct_loader])
self.exporter.output_mimetype = 'text/markdown'
self.exporter.file_extension = '.md'
# have to register filters before setting template file for
# ipython 3 compatibility
for name, filter in filters:
self.exporter.register_filter(name, filter)
self.exporter.template_file = os.path.basename(template_file)
logging.debug("Creating MarkdownWriter")
logging.debug(("MarkdownWriter: template_file = %s"
% template_file))
logging.debug(("MarkdownWriter.exporter.template_file = %s"
% self.exporter.template_file))
logging.debug(("MarkdownWriter.exporter.filters = %s"
% self.exporter.environment.filters.keys()))
self.strip_outputs = strip_outputs
self.write_outputs = write_outputs
self.output_dir = output_dir
def write_from_json(self, notebook_json):
notebook = v4.reads_json(notebook_json)
return self.write(notebook)
def writes(self, notebook):
body, resources = self.exporter.from_notebook_node(notebook)
self.resources = resources
if self.write_outputs:
self.write_resources(resources)
# remove any blank lines added at start and end by template
text = re.sub(r'\A\s*\n|^\s*\Z', '', body)
return cast_unicode(text, 'utf-8')
def write_resources(self, resources):
"""Write the output data in resources returned by exporter
to files.
"""
for filename, data in list(resources.get('outputs', {}).items()):
# Determine where to write the file to
dest = os.path.join(self.output_dir, filename)
path = os.path.dirname(dest)
if path and not os.path.isdir(path):
os.makedirs(path)
# Write file
with open(dest, 'wb') as f:
f.write(data)
# --- filter functions to be used in the output template --- #
def string2json(self, string):
"""Convert json into its string representation.
Used for writing outputs to markdown."""
kwargs = {
'cls': BytesEncoder, # use the IPython bytes encoder
'indent': 1,
'sort_keys': True,
'separators': (',', ': '),
}
return cast_unicode(json.dumps(string, **kwargs), 'utf-8')
def create_input_codeblock(self, cell):
codeblock = ('{fence}{attributes}\n'
'{cell.source}\n'
'{fence}')
attrs = self.create_attributes(cell, cell_type='input')
return codeblock.format(attributes=attrs, fence='```', cell=cell)
def create_output_block(self, cell):
if self.strip_outputs:
return ''
else:
return self.create_output_codeblock(cell)
def create_output_codeblock(self, cell):
codeblock = ('{fence}{{.json .output n={execution_count}}}\n'
'{contents}\n'
'{fence}')
return codeblock.format(fence='```',
execution_count=cell.execution_count,
contents=self.string2json(cell.outputs))
def create_attributes(self, cell, cell_type=None):
"""Turn the attribute dict into an attribute string
for the code block.
"""
if self.strip_outputs or not hasattr(cell, 'execution_count'):
return 'python'
attrs = cell.metadata.get('attributes')
attr = PandocAttributes(attrs, 'dict')
if 'python' in attr.classes:
attr.classes.remove('python')
if 'input' in attr.classes:
attr.classes.remove('input')
if cell_type == 'figure':
attr.kvs.pop('caption', '')
attr.classes.append('figure')
attr.classes.append('output')
return attr.to_html()
elif cell_type == 'input':
# ensure python goes first so that github highlights it
attr.classes.insert(0, 'python')
attr.classes.insert(1, 'input')
if cell.execution_count:
attr.kvs['n'] = cell.execution_count
return attr.to_markdown(format='{classes} {id} {kvs}')
else:
return attr.to_markdown()
@staticmethod
def dequote(s):
"""Remove excess quotes from a string."""
if len(s) < 2:
return s
elif (s[0] == s[-1]) and s.startswith(('"', "'")):
return s[1: -1]
else:
return s
@staticmethod
|
aaren/notedown | notedown/notedown.py | CodeMagician.magic | python | def magic(self, alias):
if alias in self.aliases:
return self.aliases[alias]
else:
return "%%{}\n".format(alias) | Returns the appropriate IPython code magic when
called with an alias for a language. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L565-L572 | null | class CodeMagician(object):
# aliases to different languages
many_aliases = {('r', 'R'): '%%R\n'}
# convert to many to one lookup (found as self.aliases)
aliases = {}
for k, v in list(many_aliases.items()):
for key in k:
aliases[key] = v
@classmethod
|
aaren/notedown | notedown/notedown.py | Knitr.knit | python | def knit(self, input_file, opts_chunk='eval=FALSE'):
# use temporary files at both ends to allow stdin / stdout
tmp_in = tempfile.NamedTemporaryFile(mode='w+')
tmp_out = tempfile.NamedTemporaryFile(mode='w+')
tmp_in.file.write(input_file.read())
tmp_in.file.flush()
tmp_in.file.seek(0)
self._knit(tmp_in.name, tmp_out.name, opts_chunk)
tmp_out.file.flush()
return tmp_out | Use Knitr to convert the r-markdown input_file
into markdown, returning a file object. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L602-L616 | [
"def _knit(fin, fout,\n opts_knit='progress=FALSE, verbose=FALSE',\n opts_chunk='eval=FALSE'):\n \"\"\"Use knitr to convert r markdown (or anything knitr supports)\n to markdown.\n\n fin / fout - strings, input / output filenames.\n opts_knit - string, options to pass to knit\n opts... | class Knitr(object):
class KnitrError(Exception):
pass
def __init__(self):
# raise exception if R or knitr not installed
cmd = ['Rscript', '-e', 'require(knitr)']
try:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
message = "Rscript was not found on your path."
raise self.KnitrError(message)
stdout, stderr = p.communicate()
# cast to unicode for Python 3 compatibility
stderr = stderr.decode('utf8')
if 'Warning' in stderr:
message = ("Could not load knitr (needs manual installation).\n\n"
"$ {cmd}\n"
"{error}").format(cmd=' '.join(cmd), error=stderr)
raise self.KnitrError(message)
@staticmethod
def _knit(fin, fout,
opts_knit='progress=FALSE, verbose=FALSE',
opts_chunk='eval=FALSE'):
"""Use knitr to convert r markdown (or anything knitr supports)
to markdown.
fin / fout - strings, input / output filenames.
opts_knit - string, options to pass to knit
opts_shunk - string, chunk options
options are passed verbatim to knitr:knit running in Rscript.
"""
script = ('sink("/dev/null");'
'library(knitr);'
'opts_knit$set({opts_knit});'
'opts_chunk$set({opts_chunk});'
'knit("{input}", output="{output}")')
rcmd = ('Rscript', '-e',
script.format(input=fin, output=fout,
opts_knit=opts_knit, opts_chunk=opts_chunk)
)
p = subprocess.Popen(rcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
|
aaren/notedown | notedown/notedown.py | Knitr._knit | python | def _knit(fin, fout,
opts_knit='progress=FALSE, verbose=FALSE',
opts_chunk='eval=FALSE'):
script = ('sink("/dev/null");'
'library(knitr);'
'opts_knit$set({opts_knit});'
'opts_chunk$set({opts_chunk});'
'knit("{input}", output="{output}")')
rcmd = ('Rscript', '-e',
script.format(input=fin, output=fout,
opts_knit=opts_knit, opts_chunk=opts_chunk)
)
p = subprocess.Popen(rcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate() | Use knitr to convert r markdown (or anything knitr supports)
to markdown.
fin / fout - strings, input / output filenames.
opts_knit - string, options to pass to knit
opts_shunk - string, chunk options
options are passed verbatim to knitr:knit running in Rscript. | train | https://github.com/aaren/notedown/blob/1e920c7e4ecbe47420c12eed3d5bcae735121222/notedown/notedown.py#L619-L645 | null | class Knitr(object):
class KnitrError(Exception):
pass
def __init__(self):
# raise exception if R or knitr not installed
cmd = ['Rscript', '-e', 'require(knitr)']
try:
p = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
message = "Rscript was not found on your path."
raise self.KnitrError(message)
stdout, stderr = p.communicate()
# cast to unicode for Python 3 compatibility
stderr = stderr.decode('utf8')
if 'Warning' in stderr:
message = ("Could not load knitr (needs manual installation).\n\n"
"$ {cmd}\n"
"{error}").format(cmd=' '.join(cmd), error=stderr)
raise self.KnitrError(message)
def knit(self, input_file, opts_chunk='eval=FALSE'):
"""Use Knitr to convert the r-markdown input_file
into markdown, returning a file object.
"""
# use temporary files at both ends to allow stdin / stdout
tmp_in = tempfile.NamedTemporaryFile(mode='w+')
tmp_out = tempfile.NamedTemporaryFile(mode='w+')
tmp_in.file.write(input_file.read())
tmp_in.file.flush()
tmp_in.file.seek(0)
self._knit(tmp_in.name, tmp_out.name, opts_chunk)
tmp_out.file.flush()
return tmp_out
@staticmethod
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetCore._assign_zones | python | def _assign_zones(self):
for zone_id in range(1, 5):
zone = \
RainCloudyFaucetZone(
parent=self._parent,
controller=self._controller,
faucet=self,
zone_id=zone_id)
if zone not in self.zones:
self.zones.append(zone) | Assign all RainCloudyFaucetZone managed by faucet. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L38-L49 | null | class RainCloudyFaucetCore(object):
"""RainCloudyFaucetCore object."""
def __init__(self, parent, controller, faucet_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet_id: faucet ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet_id: string
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._id = faucet_id
# zones associated with faucet
self.zones = []
# load assigned zones
self._assign_zones()
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
@property
def _attributes(self):
"""Callback to self._controller attributes."""
return self._controller.attributes
@property
def serial(self):
"""Return faucet id."""
return self.id
# pylint: disable=invalid-name
@property
def id(self):
"""Return controller id."""
return self._id
@property
def current_time(self):
"""Return controller current time."""
return self._controller.current_time
@property
def name(self):
"""Return faucet name."""
return \
find_controller_or_faucet_name(
self._parent.html['home'],
'faucet')
@name.setter
def name(self, value):
"""Set a new name to faucet."""
data = {
'_set_faucet_name': 'Set Name',
'select_faucet': 0,
'faucet_name': value,
}
self._controller.post(data)
@property
def status(self):
"""Return status."""
return self._attributes['faucet_status']
@property
def battery(self):
"""Return faucet battery."""
battery = self._attributes['battery_percent']
if battery == '' or battery is None:
return None
return battery.strip('%')
def update(self):
"""Callback self._controller.update()."""
self._controller.update()
def _find_zone_by_id(self, zone_id):
"""Return zone by id."""
if not self.zones:
return None
zone = list(filter(
lambda zone: zone.id == zone_id, self.zones))
return zone[0] if zone else None
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetCore._find_zone_by_id | python | def _find_zone_by_id(self, zone_id):
if not self.zones:
return None
zone = list(filter(
lambda zone: zone.id == zone_id, self.zones))
return zone[0] if zone else None | Return zone by id. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L114-L122 | null | class RainCloudyFaucetCore(object):
"""RainCloudyFaucetCore object."""
def __init__(self, parent, controller, faucet_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet_id: faucet ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet_id: string
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._id = faucet_id
# zones associated with faucet
self.zones = []
# load assigned zones
self._assign_zones()
def _assign_zones(self):
"""Assign all RainCloudyFaucetZone managed by faucet."""
for zone_id in range(1, 5):
zone = \
RainCloudyFaucetZone(
parent=self._parent,
controller=self._controller,
faucet=self,
zone_id=zone_id)
if zone not in self.zones:
self.zones.append(zone)
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
@property
def _attributes(self):
"""Callback to self._controller attributes."""
return self._controller.attributes
@property
def serial(self):
"""Return faucet id."""
return self.id
# pylint: disable=invalid-name
@property
def id(self):
"""Return controller id."""
return self._id
@property
def current_time(self):
"""Return controller current time."""
return self._controller.current_time
@property
def name(self):
"""Return faucet name."""
return \
find_controller_or_faucet_name(
self._parent.html['home'],
'faucet')
@name.setter
def name(self, value):
"""Set a new name to faucet."""
data = {
'_set_faucet_name': 'Set Name',
'select_faucet': 0,
'faucet_name': value,
}
self._controller.post(data)
@property
def status(self):
"""Return status."""
return self._attributes['faucet_status']
@property
def battery(self):
"""Return faucet battery."""
battery = self._attributes['battery_percent']
if battery == '' or battery is None:
return None
return battery.strip('%')
def update(self):
"""Callback self._controller.update()."""
self._controller.update()
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetZone._set_zone_name | python | def _set_zone_name(self, zoneid, name):
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data) | Private method to override zone name. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L181-L190 | null | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
def _set_watering_time(self, zoneid, value):
"""Private method to set watering_time per zone."""
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
# TODO in a future release we should break this out. artifact of old API
@property
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
def _set_rain_delay(self, zoneid, value):
"""Generic method to set auto_watering program."""
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True
@property
def auto_watering(self):
"""Return if zone is configured to automatic watering."""
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value)
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def _to_dict(self):
"""Method to build zone dict."""
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
}
def report(self):
"""Return status from zone."""
return self._to_dict()
def preupdate(self, force_refresh=True):
"""Return a dict with all current options prior submitting request."""
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata
def submit_action(self, ddata):
"""Post data."""
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT)
|
tchellomello/raincloudy | raincloudy/faucet.py | RainCloudyFaucetZone._set_watering_time | python | def _set_watering_time(self, zoneid, value):
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata) | Private method to set watering_time per zone. | train | https://github.com/tchellomello/raincloudy/blob/1847fa913e5ba79645d51bf23637860d68c67dbf/raincloudy/faucet.py#L202-L220 | [
"def preupdate(self, force_refresh=True):\n \"\"\"Return a dict with all current options prior submitting request.\"\"\"\n ddata = MANUAL_OP_DATA.copy()\n\n # force update to make sure status is accurate\n if force_refresh:\n self.update()\n\n # select current controller and faucet\n ddata[... | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _set_zone_name(self, zoneid, name):
"""Private method to override zone name."""
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
# TODO in a future release we should break this out. artifact of old API
@property
def watering_time(self):
"""Return watering_time from zone."""
# zone starts with index 0
index = self.id - 1
auto_watering_time =\
self._attributes['rain_delay_mode'][index]['auto_watering_time']
manual_watering_time =\
self._attributes['rain_delay_mode'][index]['manual_watering_time']
if auto_watering_time > manual_watering_time:
watering_time = auto_watering_time
else:
watering_time = manual_watering_time
return watering_time
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
def _set_rain_delay(self, zoneid, value):
"""Generic method to set auto_watering program."""
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True
@property
def auto_watering(self):
"""Return if zone is configured to automatic watering."""
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value)
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def _to_dict(self):
"""Method to build zone dict."""
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
}
def report(self):
"""Return status from zone."""
return self._to_dict()
def preupdate(self, force_refresh=True):
"""Return a dict with all current options prior submitting request."""
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata
def submit_action(self, ddata):
"""Post data."""
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT)
|
def watering_time(self):
    """Return the remaining watering time (minutes) for this zone.

    Exposed as the ``watering_time`` property on the zone class.
    """
    # zone starts with index 0
    index = self.id - 1
    mode = self._attributes['rain_delay_mode'][index]
    # The larger of the automatic/manual counters belongs to the
    # program that is currently in effect.
    return max(mode['auto_watering_time'], mode['manual_watering_time'])
"def _set_watering_time(self, zoneid, value):\n \"\"\"Private method to set watering_time per zone.\"\"\"\n if value not in MANUAL_WATERING_ALLOWED:\n raise ValueError(\n 'Valid options are: {}'.format(\n ', '.join(map(str, MANUAL_WATERING_ALLOWED)))\n )\n\n if isins... | class RainCloudyFaucetZone(RainCloudyFaucetCore):
"""RainCloudyFaucetZone object."""
# pylint: disable=super-init-not-called
# needs review later
def __init__(self, parent, controller, faucet, zone_id):
"""
Initialize RainCloudy Controller object.
:param parent: RainCloudy object
:param controller: RainCloudy Controller parent object
:param faucet: faucet assigned controller
:param zone_id: zone ID assigned controller
:type parent: RainCloudy object
:type controller: RainCloudyControler object
:type faucet: RainCloudyFaucet object
:type zone_id: integer
:return: RainCloudyFaucet object
:rtype: RainCloudyFaucet object
"""
self._parent = parent
self._controller = controller
self._faucet = faucet
self._id = zone_id
def __repr__(self):
"""Object representation."""
try:
return "<{0}: {1}>".format(self.__class__.__name__, self.name)
except AttributeError:
return "<{0}: {1}>".format(self.__class__.__name__, self.id)
def _set_zone_name(self, zoneid, name):
"""Private method to override zone name."""
# zone starts with index 0
zoneid -= 1
data = {
'_set_zone_name': 'Set Name',
'select_zone': str(zoneid),
'zone_name': name,
}
self._controller.post(data)
@property
def name(self):
"""Return zone name."""
return find_zone_name(self._parent.html['home'], self.id)
@name.setter
def name(self, value):
"""Set a new zone name to faucet."""
self._set_zone_name(self.id, value)
def _set_watering_time(self, zoneid, value):
"""Private method to set watering_time per zone."""
if value not in MANUAL_WATERING_ALLOWED:
raise ValueError(
'Valid options are: {}'.format(
', '.join(map(str, MANUAL_WATERING_ALLOWED)))
)
if isinstance(value, int) and value == 0:
value = 'OFF'
elif isinstance(value, str):
value = value.upper()
if value == 'ON':
value = MAX_WATERING_MINUTES
ddata = self.preupdate()
attr = 'zone{}_select_manual_mode'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
# TODO in a future release we should break this out. artifact of old API
@property
@watering_time.setter
def watering_time(self, value):
"""Manually turn on water for X minutes."""
return self._set_watering_time(self.id, value)
@property
def droplet(self):
return None
def _set_rain_delay(self, zoneid, value):
"""Generic method to set auto_watering program."""
# current index for rain_delay starts in 0
zoneid -= 1
if isinstance(value, int):
if value > MAX_RAIN_DELAY_DAYS or value < 0:
return None
elif value == 0:
value = 'off'
elif value == 1:
value = '1day'
elif value >= 2:
value = str(value) + 'days'
elif isinstance(value, str):
if value.lower() != 'off':
return None
ddata = self.preupdate()
attr = 'zone{}_rain_delay_select'.format(zoneid)
ddata[attr] = value
self.submit_action(ddata)
return True
@property
def rain_delay(self):
"""Return the rain delay day from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['rain_delay_mode']
@rain_delay.setter
def rain_delay(self, value):
"""Set number of rain delay days for zone."""
return self._set_rain_delay(self.id, value)
@property
def next_cycle(self):
"""Return the time scheduled for next watering from zone."""
index = self.id - 1
return self._attributes['rain_delay_mode'][index]['next_water_cycle']
def _set_auto_watering(self, zoneid, value):
"""Private method to set auto_watering program."""
if not isinstance(value, bool):
return None
ddata = self.preupdate()
attr = 'zone{}_program_toggle'.format(zoneid)
try:
if not value:
ddata.pop(attr)
else:
ddata[attr] = 'on'
except KeyError:
pass
self.submit_action(ddata)
return True
@property
def auto_watering(self):
"""Return if zone is configured to automatic watering."""
value = "zone{}".format(self.id)
return find_program_status(self._parent.html['home'], value)
@auto_watering.setter
def auto_watering(self, value):
"""Enable/disable zone auto_watering program."""
return self._set_auto_watering(self.id, bool(value))
@property
def is_watering(self):
"""Return boolean if zone is watering."""
return bool(self.watering_time > 0)
def _to_dict(self):
"""Method to build zone dict."""
return {
'auto_watering':
getattr(self, "auto_watering"),
'droplet':
getattr(self, "droplet"),
'is_watering':
getattr(self, "is_watering"),
'name':
getattr(self, "name"),
'next_cycle':
getattr(self, "next_cycle"),
'rain_delay':
getattr(self, "rain_delay"),
'watering_time':
getattr(self, "watering_time"),
}
def report(self):
"""Return status from zone."""
return self._to_dict()
def preupdate(self, force_refresh=True):
"""Return a dict with all current options prior submitting request."""
ddata = MANUAL_OP_DATA.copy()
# force update to make sure status is accurate
if force_refresh:
self.update()
# select current controller and faucet
ddata['select_controller'] = \
self._parent.controllers.index(self._controller)
ddata['select_faucet'] = \
self._controller.faucets.index(self._faucet)
# check if zone is scheduled automatically (zone1_program_toggle)
# only add zoneX_program_toogle to ddata when needed,
# otherwise the field will be always on
for zone in self._faucet.zones:
attr = 'zone{}_program_toggle'.format(zone.id)
if zone.auto_watering:
ddata[attr] = 'on'
# check if zone current watering manually (zone1_select_manual_mode)
for zone in self._faucet.zones:
attr = 'zone{}_select_manual_mode'.format(zone.id)
if zone.watering_time and attr in ddata.keys():
ddata[attr] = zone.watering_time
# check if rain delay is selected (zone0_rain_delay_select)
for zone in self._faucet.zones:
attr = 'zone{}_rain_delay_select'.format(zone.id - 1)
value = zone.rain_delay
if value and attr in ddata.keys():
if int(value) >= 2 and int(value) <= 7:
value = str(value) + 'days'
else:
value = str(value) + 'day'
ddata[attr] = value
return ddata
def submit_action(self, ddata):
"""Post data."""
self._controller.post(ddata,
url=HOME_ENDPOINT,
referer=HOME_ENDPOINT)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.