body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
e22a4b574c60be79bdcc2e83a8bd4da8926c1515d82e36285abba3f0dbe96690
def create_summary_file_writer(writer, logdir, max_queue, flush_millis, filename_suffix, name=None): "Creates a summary file writer accessible by the given resource handle.\n\n Args:\n writer: A `Tensor` of type `resource`.\n A handle to the summary writer resource\n logdir: A `Tensor` of type `string`.\n Directory where the event file will be written.\n max_queue: A `Tensor` of type `int32`.\n Size of the queue of pending events and summaries.\n flush_millis: A `Tensor` of type `int32`.\n How often, in milliseconds, to flush the pending events and\n summaries to disk.\n filename_suffix: A `Tensor` of type `string`.\n Every event file's name is suffixed with this suffix.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('CreateSummaryFileWriter', writer=writer, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) logdir = _ops.convert_to_tensor(logdir, _dtypes.string) max_queue = _ops.convert_to_tensor(max_queue, _dtypes.int32) flush_millis = _ops.convert_to_tensor(flush_millis, _dtypes.int32) filename_suffix = _ops.convert_to_tensor(filename_suffix, _dtypes.string) _inputs_flat = [writer, logdir, max_queue, flush_millis, filename_suffix] _attrs = None _result = _execute.execute(b'CreateSummaryFileWriter', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
Creates a summary file writer accessible by the given resource handle. Args: writer: A `Tensor` of type `resource`. A handle to the summary writer resource logdir: A `Tensor` of type `string`. Directory where the event file will be written. max_queue: A `Tensor` of type `int32`. Size of the queue of pending events and summaries. flush_millis: A `Tensor` of type `int32`. How often, in milliseconds, to flush the pending events and summaries to disk. filename_suffix: A `Tensor` of type `string`. Every event file's name is suffixed with this suffix. name: A name for the operation (optional). Returns: The created Operation.
lesson7.4/tensorflow/contrib/summary/gen_summary_ops.py
create_summary_file_writer
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
21
python
def create_summary_file_writer(writer, logdir, max_queue, flush_millis, filename_suffix, name=None): "Creates a summary file writer accessible by the given resource handle.\n\n Args:\n writer: A `Tensor` of type `resource`.\n A handle to the summary writer resource\n logdir: A `Tensor` of type `string`.\n Directory where the event file will be written.\n max_queue: A `Tensor` of type `int32`.\n Size of the queue of pending events and summaries.\n flush_millis: A `Tensor` of type `int32`.\n How often, in milliseconds, to flush the pending events and\n summaries to disk.\n filename_suffix: A `Tensor` of type `string`.\n Every event file's name is suffixed with this suffix.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('CreateSummaryFileWriter', writer=writer, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) logdir = _ops.convert_to_tensor(logdir, _dtypes.string) max_queue = _ops.convert_to_tensor(max_queue, _dtypes.int32) flush_millis = _ops.convert_to_tensor(flush_millis, _dtypes.int32) filename_suffix = _ops.convert_to_tensor(filename_suffix, _dtypes.string) _inputs_flat = [writer, logdir, max_queue, flush_millis, filename_suffix] _attrs = None _result = _execute.execute(b'CreateSummaryFileWriter', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
def create_summary_file_writer(writer, logdir, max_queue, flush_millis, filename_suffix, name=None): "Creates a summary file writer accessible by the given resource handle.\n\n Args:\n writer: A `Tensor` of type `resource`.\n A handle to the summary writer resource\n logdir: A `Tensor` of type `string`.\n Directory where the event file will be written.\n max_queue: A `Tensor` of type `int32`.\n Size of the queue of pending events and summaries.\n flush_millis: A `Tensor` of type `int32`.\n How often, in milliseconds, to flush the pending events and\n summaries to disk.\n filename_suffix: A `Tensor` of type `string`.\n Every event file's name is suffixed with this suffix.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('CreateSummaryFileWriter', writer=writer, logdir=logdir, max_queue=max_queue, flush_millis=flush_millis, filename_suffix=filename_suffix, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) logdir = _ops.convert_to_tensor(logdir, _dtypes.string) max_queue = _ops.convert_to_tensor(max_queue, _dtypes.int32) flush_millis = _ops.convert_to_tensor(flush_millis, _dtypes.int32) filename_suffix = _ops.convert_to_tensor(filename_suffix, _dtypes.string) _inputs_flat = [writer, logdir, max_queue, flush_millis, filename_suffix] _attrs = None _result = _execute.execute(b'CreateSummaryFileWriter', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result<|docstring|>Creates a summary file writer accessible by the given resource handle. Args: writer: A `Tensor` of type `resource`. A handle to the summary writer resource logdir: A `Tensor` of type `string`. Directory where the event file will be written. max_queue: A `Tensor` of type `int32`. Size of the queue of pending events and summaries. flush_millis: A `Tensor` of type `int32`. 
How often, in milliseconds, to flush the pending events and summaries to disk. filename_suffix: A `Tensor` of type `string`. Every event file's name is suffixed with this suffix. name: A name for the operation (optional). Returns: The created Operation.<|endoftext|>
7d9b13ead44542d0d5995bf5fe9763b4376d49290291f795f7071e26c946085b
def flush_summary_writer(writer, name=None): "Flushes the writer's unwritten events.\n\n Args:\n writer: A `Tensor` of type `resource`.\n A handle to the summary writer resource.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('FlushSummaryWriter', writer=writer, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) _inputs_flat = [writer] _attrs = None _result = _execute.execute(b'FlushSummaryWriter', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
Flushes the writer's unwritten events. Args: writer: A `Tensor` of type `resource`. A handle to the summary writer resource. name: A name for the operation (optional). Returns: The created Operation.
lesson7.4/tensorflow/contrib/summary/gen_summary_ops.py
flush_summary_writer
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
21
python
def flush_summary_writer(writer, name=None): "Flushes the writer's unwritten events.\n\n Args:\n writer: A `Tensor` of type `resource`.\n A handle to the summary writer resource.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('FlushSummaryWriter', writer=writer, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) _inputs_flat = [writer] _attrs = None _result = _execute.execute(b'FlushSummaryWriter', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
def flush_summary_writer(writer, name=None): "Flushes the writer's unwritten events.\n\n Args:\n writer: A `Tensor` of type `resource`.\n A handle to the summary writer resource.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('FlushSummaryWriter', writer=writer, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) _inputs_flat = [writer] _attrs = None _result = _execute.execute(b'FlushSummaryWriter', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result<|docstring|>Flushes the writer's unwritten events. Args: writer: A `Tensor` of type `resource`. A handle to the summary writer resource. name: A name for the operation (optional). Returns: The created Operation.<|endoftext|>
cf348f9a42f3e8c9860573f4f279a4666b3dc3805eb697c03ef5a9e78b990602
def summary_writer(shared_name='', container='', name=None): 'Returns a handle to be used to access a summary writer.\n\n The summary writer is an in-graph resource which can be used by ops to write\n summaries to event files.\n\n Args:\n shared_name: An optional `string`. Defaults to `""`.\n container: An optional `string`. Defaults to `""`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `resource`. the summary writer resource. Scalar handle.\n ' if (shared_name is None): shared_name = '' shared_name = _execute.make_str(shared_name, 'shared_name') if (container is None): container = '' container = _execute.make_str(container, 'container') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('SummaryWriter', shared_name=shared_name, container=container, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ('shared_name', _op.get_attr('shared_name'), 'container', _op.get_attr('container')) else: _inputs_flat = [] _attrs = ('shared_name', shared_name, 'container', container) _result = _execute.execute(b'SummaryWriter', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient('SummaryWriter', _inputs_flat, _attrs, _result, name) (_result,) = _result return _result
Returns a handle to be used to access a summary writer. The summary writer is an in-graph resource which can be used by ops to write summaries to event files. Args: shared_name: An optional `string`. Defaults to `""`. container: An optional `string`. Defaults to `""`. name: A name for the operation (optional). Returns: A `Tensor` of type `resource`. the summary writer resource. Scalar handle.
lesson7.4/tensorflow/contrib/summary/gen_summary_ops.py
summary_writer
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
21
python
def summary_writer(shared_name=, container=, name=None): 'Returns a handle to be used to access a summary writer.\n\n The summary writer is an in-graph resource which can be used by ops to write\n summaries to event files.\n\n Args:\n shared_name: An optional `string`. Defaults to ``.\n container: An optional `string`. Defaults to ``.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `resource`. the summary writer resource. Scalar handle.\n ' if (shared_name is None): shared_name = shared_name = _execute.make_str(shared_name, 'shared_name') if (container is None): container = container = _execute.make_str(container, 'container') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('SummaryWriter', shared_name=shared_name, container=container, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ('shared_name', _op.get_attr('shared_name'), 'container', _op.get_attr('container')) else: _inputs_flat = [] _attrs = ('shared_name', shared_name, 'container', container) _result = _execute.execute(b'SummaryWriter', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient('SummaryWriter', _inputs_flat, _attrs, _result, name) (_result,) = _result return _result
def summary_writer(shared_name=, container=, name=None): 'Returns a handle to be used to access a summary writer.\n\n The summary writer is an in-graph resource which can be used by ops to write\n summaries to event files.\n\n Args:\n shared_name: An optional `string`. Defaults to ``.\n container: An optional `string`. Defaults to ``.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `resource`. the summary writer resource. Scalar handle.\n ' if (shared_name is None): shared_name = shared_name = _execute.make_str(shared_name, 'shared_name') if (container is None): container = container = _execute.make_str(container, 'container') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('SummaryWriter', shared_name=shared_name, container=container, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ('shared_name', _op.get_attr('shared_name'), 'container', _op.get_attr('container')) else: _inputs_flat = [] _attrs = ('shared_name', shared_name, 'container', container) _result = _execute.execute(b'SummaryWriter', 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient('SummaryWriter', _inputs_flat, _attrs, _result, name) (_result,) = _result return _result<|docstring|>Returns a handle to be used to access a summary writer. The summary writer is an in-graph resource which can be used by ops to write summaries to event files. Args: shared_name: An optional `string`. Defaults to `""`. container: An optional `string`. Defaults to `""`. name: A name for the operation (optional). Returns: A `Tensor` of type `resource`. the summary writer resource. Scalar handle.<|endoftext|>
aa4b29b869e403d320c4768d1b6a2fbf5581de03dbb4741118068fd7064dfd5f
def write_audio_summary(writer, global_step, tag, tensor, sample_rate, max_outputs=3, name=None): "Writes a `Summary` protocol buffer with audio.\n\n The summary has up to `max_outputs` summary values containing audio. The\n audio is built from `tensor` which must be 3-D with shape `[batch_size,\n frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are\n assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\n The `tag` argument is a scalar `Tensor` of type `string`. It is used to\n build the `tag` of the summary values:\n\n * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.\n * If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Used to build the `tag` attribute of the summary values.\n tensor: A `Tensor` of type `float32`. 2-D of shape `[batch_size, frames]`.\n sample_rate: A `Tensor` of type `float32`.\n The sample rate of the signal in hertz.\n max_outputs: An optional `int` that is `>= 1`. 
Defaults to `3`.\n Max number of batch elements to generate audio for.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " if (max_outputs is None): max_outputs = 3 max_outputs = _execute.make_int(max_outputs, 'max_outputs') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteAudioSummary', writer=writer, global_step=global_step, tag=tag, tensor=tensor, sample_rate=sample_rate, max_outputs=max_outputs, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) tensor = _ops.convert_to_tensor(tensor, _dtypes.float32) sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.float32) _inputs_flat = [writer, global_step, tag, tensor, sample_rate] _attrs = ('max_outputs', max_outputs) _result = _execute.execute(b'WriteAudioSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
Writes a `Summary` protocol buffer with audio. The summary has up to `max_outputs` summary values containing audio. The audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values: * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. * If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tag: A `Tensor` of type `string`. Scalar. Used to build the `tag` attribute of the summary values. tensor: A `Tensor` of type `float32`. 2-D of shape `[batch_size, frames]`. sample_rate: A `Tensor` of type `float32`. The sample rate of the signal in hertz. max_outputs: An optional `int` that is `>= 1`. Defaults to `3`. Max number of batch elements to generate audio for. name: A name for the operation (optional). Returns: The created Operation.
lesson7.4/tensorflow/contrib/summary/gen_summary_ops.py
write_audio_summary
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
21
python
def write_audio_summary(writer, global_step, tag, tensor, sample_rate, max_outputs=3, name=None): "Writes a `Summary` protocol buffer with audio.\n\n The summary has up to `max_outputs` summary values containing audio. The\n audio is built from `tensor` which must be 3-D with shape `[batch_size,\n frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are\n assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\n The `tag` argument is a scalar `Tensor` of type `string`. It is used to\n build the `tag` of the summary values:\n\n * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.\n * If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Used to build the `tag` attribute of the summary values.\n tensor: A `Tensor` of type `float32`. 2-D of shape `[batch_size, frames]`.\n sample_rate: A `Tensor` of type `float32`.\n The sample rate of the signal in hertz.\n max_outputs: An optional `int` that is `>= 1`. 
Defaults to `3`.\n Max number of batch elements to generate audio for.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " if (max_outputs is None): max_outputs = 3 max_outputs = _execute.make_int(max_outputs, 'max_outputs') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteAudioSummary', writer=writer, global_step=global_step, tag=tag, tensor=tensor, sample_rate=sample_rate, max_outputs=max_outputs, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) tensor = _ops.convert_to_tensor(tensor, _dtypes.float32) sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.float32) _inputs_flat = [writer, global_step, tag, tensor, sample_rate] _attrs = ('max_outputs', max_outputs) _result = _execute.execute(b'WriteAudioSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
def write_audio_summary(writer, global_step, tag, tensor, sample_rate, max_outputs=3, name=None): "Writes a `Summary` protocol buffer with audio.\n\n The summary has up to `max_outputs` summary values containing audio. The\n audio is built from `tensor` which must be 3-D with shape `[batch_size,\n frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are\n assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.\n\n The `tag` argument is a scalar `Tensor` of type `string`. It is used to\n build the `tag` of the summary values:\n\n * If `max_outputs` is 1, the summary value tag is '*tag*/audio'.\n * If `max_outputs` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Used to build the `tag` attribute of the summary values.\n tensor: A `Tensor` of type `float32`. 2-D of shape `[batch_size, frames]`.\n sample_rate: A `Tensor` of type `float32`.\n The sample rate of the signal in hertz.\n max_outputs: An optional `int` that is `>= 1`. 
Defaults to `3`.\n Max number of batch elements to generate audio for.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " if (max_outputs is None): max_outputs = 3 max_outputs = _execute.make_int(max_outputs, 'max_outputs') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteAudioSummary', writer=writer, global_step=global_step, tag=tag, tensor=tensor, sample_rate=sample_rate, max_outputs=max_outputs, name=name) return _op else: writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) tensor = _ops.convert_to_tensor(tensor, _dtypes.float32) sample_rate = _ops.convert_to_tensor(sample_rate, _dtypes.float32) _inputs_flat = [writer, global_step, tag, tensor, sample_rate] _attrs = ('max_outputs', max_outputs) _result = _execute.execute(b'WriteAudioSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result<|docstring|>Writes a `Summary` protocol buffer with audio. The summary has up to `max_outputs` summary values containing audio. The audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values: * If `max_outputs` is 1, the summary value tag is '*tag*/audio'. * If `max_outputs` is greater than 1, the summary value tags are generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tag: A `Tensor` of type `string`. Scalar. Used to build the `tag` attribute of the summary values. tensor: A `Tensor` of type `float32`. 
2-D of shape `[batch_size, frames]`. sample_rate: A `Tensor` of type `float32`. The sample rate of the signal in hertz. max_outputs: An optional `int` that is `>= 1`. Defaults to `3`. Max number of batch elements to generate audio for. name: A name for the operation (optional). Returns: The created Operation.<|endoftext|>
76ed42f633c16b3062ab82940f734af885f002720aea5912e1569afffc60da3c
def write_histogram_summary(writer, global_step, tag, values, name=None): 'Writes a `Summary` protocol buffer with a histogram.\n\n The generated\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n has one summary value containing a histogram for `values`.\n\n This op reports an `InvalidArgument` error if any value is not finite.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Tag to use for the `Summary.Value`.\n values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.\n Any shape. Values to use to build the histogram.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n ' _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteHistogramSummary', writer=writer, global_step=global_step, tag=tag, values=values, name=name) return _op else: (_attr_T, (values,)) = _execute.args_to_matching_eager([values], _ctx, _dtypes.float32) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) _inputs_flat = [writer, global_step, tag, values] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteHistogramSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
Writes a `Summary` protocol buffer with a histogram. The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`. This op reports an `InvalidArgument` error if any value is not finite. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tag: A `Tensor` of type `string`. Scalar. Tag to use for the `Summary.Value`. values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`. Any shape. Values to use to build the histogram. name: A name for the operation (optional). Returns: The created Operation.
lesson7.4/tensorflow/contrib/summary/gen_summary_ops.py
write_histogram_summary
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
21
python
def write_histogram_summary(writer, global_step, tag, values, name=None): 'Writes a `Summary` protocol buffer with a histogram.\n\n The generated\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n has one summary value containing a histogram for `values`.\n\n This op reports an `InvalidArgument` error if any value is not finite.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Tag to use for the `Summary.Value`.\n values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.\n Any shape. Values to use to build the histogram.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n ' _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteHistogramSummary', writer=writer, global_step=global_step, tag=tag, values=values, name=name) return _op else: (_attr_T, (values,)) = _execute.args_to_matching_eager([values], _ctx, _dtypes.float32) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) _inputs_flat = [writer, global_step, tag, values] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteHistogramSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
def write_histogram_summary(writer, global_step, tag, values, name=None): 'Writes a `Summary` protocol buffer with a histogram.\n\n The generated\n [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)\n has one summary value containing a histogram for `values`.\n\n This op reports an `InvalidArgument` error if any value is not finite.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Tag to use for the `Summary.Value`.\n values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.\n Any shape. Values to use to build the histogram.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n ' _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteHistogramSummary', writer=writer, global_step=global_step, tag=tag, values=values, name=name) return _op else: (_attr_T, (values,)) = _execute.args_to_matching_eager([values], _ctx, _dtypes.float32) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) _inputs_flat = [writer, global_step, tag, values] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteHistogramSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result<|docstring|>Writes a `Summary` protocol buffer with a histogram. The generated [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) has one summary value containing a histogram for `values`. This op reports an `InvalidArgument` error if any value is not finite. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. 
global_step: A `Tensor` of type `int64`. The step to write the summary for. tag: A `Tensor` of type `string`. Scalar. Tag to use for the `Summary.Value`. values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`. Any shape. Values to use to build the histogram. name: A name for the operation (optional). Returns: The created Operation.<|endoftext|>
0d244abb896095c3655ef9015f967a48c9fbb52e29736786cc685a0e5ba59121
def write_image_summary(writer, global_step, tag, tensor, bad_color, max_images=3, name=None): "Writes a `Summary` protocol buffer with images.\n\n The summary has up to `max_images` summary values containing images. The\n images are built from `tensor` which must be 4-D with shape `[batch_size,\n height, width, channels]` and where `channels` can be:\n\n * 1: `tensor` is interpreted as Grayscale.\n * 3: `tensor` is interpreted as RGB.\n * 4: `tensor` is interpreted as RGBA.\n\n The images have the same number of channels as the input tensor. For float\n input, the values are normalized one image at a time to fit in the range\n `[0, 255]`. `uint8` values are unchanged. The op uses two different\n normalization algorithms:\n\n * If the input values are all positive, they are rescaled so the largest one\n is 255.\n\n * If any input value is negative, the values are shifted so input value 0.0\n is at 127. They are then rescaled so that either the smallest value is 0,\n or the largest one is 255.\n\n The `tag` argument is a scalar `Tensor` of type `string`. It is used to\n build the `tag` of the summary values:\n\n * If `max_images` is 1, the summary value tag is '*tag*/image'.\n * If `max_images` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.\n\n The `bad_color` argument is the color to use in the generated images for\n non-finite input values. It is a `unit8` 1-D tensor of length `channels`.\n Each element must be in the range `[0, 255]` (It represents the value of a\n pixel in the output image). Non-finite values in the input tensor are\n replaced by this tensor in the output image. The default value is the color\n red.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Used to build the `tag` attribute of the summary values.\n tensor: A `Tensor`. 
Must be one of the following types: `uint8`, `float32`, `half`.\n 4-D of shape `[batch_size, height, width, channels]` where\n `channels` is 1, 3, or 4.\n bad_color: A `Tensor` of type `uint8`.\n Color to use for pixels with non-finite values.\n max_images: An optional `int` that is `>= 1`. Defaults to `3`.\n Max number of batch elements to generate images for.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " if (max_images is None): max_images = 3 max_images = _execute.make_int(max_images, 'max_images') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteImageSummary', writer=writer, global_step=global_step, tag=tag, tensor=tensor, bad_color=bad_color, max_images=max_images, name=name) return _op else: (_attr_T, (tensor,)) = _execute.args_to_matching_eager([tensor], _ctx, _dtypes.float32) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) bad_color = _ops.convert_to_tensor(bad_color, _dtypes.uint8) _inputs_flat = [writer, global_step, tag, tensor, bad_color] _attrs = ('max_images', max_images, 'T', _attr_T) _result = _execute.execute(b'WriteImageSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
Writes a `Summary` protocol buffer with images. The summary has up to `max_images` summary values containing images. The images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, channels]` and where `channels` can be: * 1: `tensor` is interpreted as Grayscale. * 3: `tensor` is interpreted as RGB. * 4: `tensor` is interpreted as RGBA. The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. `uint8` values are unchanged. The op uses two different normalization algorithms: * If the input values are all positive, they are rescaled so the largest one is 255. * If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255. The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values: * If `max_images` is 1, the summary value tag is '*tag*/image'. * If `max_images` is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. The `bad_color` argument is the color to use in the generated images for non-finite input values. It is a `unit8` 1-D tensor of length `channels`. Each element must be in the range `[0, 255]` (It represents the value of a pixel in the output image). Non-finite values in the input tensor are replaced by this tensor in the output image. The default value is the color red. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tag: A `Tensor` of type `string`. Scalar. Used to build the `tag` attribute of the summary values. tensor: A `Tensor`. Must be one of the following types: `uint8`, `float32`, `half`. 4-D of shape `[batch_size, height, width, channels]` where `channels` is 1, 3, or 4. 
bad_color: A `Tensor` of type `uint8`. Color to use for pixels with non-finite values. max_images: An optional `int` that is `>= 1`. Defaults to `3`. Max number of batch elements to generate images for. name: A name for the operation (optional). Returns: The created Operation.
lesson7.4/tensorflow/contrib/summary/gen_summary_ops.py
write_image_summary
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
21
python
def write_image_summary(writer, global_step, tag, tensor, bad_color, max_images=3, name=None): "Writes a `Summary` protocol buffer with images.\n\n The summary has up to `max_images` summary values containing images. The\n images are built from `tensor` which must be 4-D with shape `[batch_size,\n height, width, channels]` and where `channels` can be:\n\n * 1: `tensor` is interpreted as Grayscale.\n * 3: `tensor` is interpreted as RGB.\n * 4: `tensor` is interpreted as RGBA.\n\n The images have the same number of channels as the input tensor. For float\n input, the values are normalized one image at a time to fit in the range\n `[0, 255]`. `uint8` values are unchanged. The op uses two different\n normalization algorithms:\n\n * If the input values are all positive, they are rescaled so the largest one\n is 255.\n\n * If any input value is negative, the values are shifted so input value 0.0\n is at 127. They are then rescaled so that either the smallest value is 0,\n or the largest one is 255.\n\n The `tag` argument is a scalar `Tensor` of type `string`. It is used to\n build the `tag` of the summary values:\n\n * If `max_images` is 1, the summary value tag is '*tag*/image'.\n * If `max_images` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.\n\n The `bad_color` argument is the color to use in the generated images for\n non-finite input values. It is a `unit8` 1-D tensor of length `channels`.\n Each element must be in the range `[0, 255]` (It represents the value of a\n pixel in the output image). Non-finite values in the input tensor are\n replaced by this tensor in the output image. The default value is the color\n red.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Used to build the `tag` attribute of the summary values.\n tensor: A `Tensor`. 
Must be one of the following types: `uint8`, `float32`, `half`.\n 4-D of shape `[batch_size, height, width, channels]` where\n `channels` is 1, 3, or 4.\n bad_color: A `Tensor` of type `uint8`.\n Color to use for pixels with non-finite values.\n max_images: An optional `int` that is `>= 1`. Defaults to `3`.\n Max number of batch elements to generate images for.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " if (max_images is None): max_images = 3 max_images = _execute.make_int(max_images, 'max_images') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteImageSummary', writer=writer, global_step=global_step, tag=tag, tensor=tensor, bad_color=bad_color, max_images=max_images, name=name) return _op else: (_attr_T, (tensor,)) = _execute.args_to_matching_eager([tensor], _ctx, _dtypes.float32) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) bad_color = _ops.convert_to_tensor(bad_color, _dtypes.uint8) _inputs_flat = [writer, global_step, tag, tensor, bad_color] _attrs = ('max_images', max_images, 'T', _attr_T) _result = _execute.execute(b'WriteImageSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
def write_image_summary(writer, global_step, tag, tensor, bad_color, max_images=3, name=None): "Writes a `Summary` protocol buffer with images.\n\n The summary has up to `max_images` summary values containing images. The\n images are built from `tensor` which must be 4-D with shape `[batch_size,\n height, width, channels]` and where `channels` can be:\n\n * 1: `tensor` is interpreted as Grayscale.\n * 3: `tensor` is interpreted as RGB.\n * 4: `tensor` is interpreted as RGBA.\n\n The images have the same number of channels as the input tensor. For float\n input, the values are normalized one image at a time to fit in the range\n `[0, 255]`. `uint8` values are unchanged. The op uses two different\n normalization algorithms:\n\n * If the input values are all positive, they are rescaled so the largest one\n is 255.\n\n * If any input value is negative, the values are shifted so input value 0.0\n is at 127. They are then rescaled so that either the smallest value is 0,\n or the largest one is 255.\n\n The `tag` argument is a scalar `Tensor` of type `string`. It is used to\n build the `tag` of the summary values:\n\n * If `max_images` is 1, the summary value tag is '*tag*/image'.\n * If `max_images` is greater than 1, the summary value tags are\n generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.\n\n The `bad_color` argument is the color to use in the generated images for\n non-finite input values. It is a `unit8` 1-D tensor of length `channels`.\n Each element must be in the range `[0, 255]` (It represents the value of a\n pixel in the output image). Non-finite values in the input tensor are\n replaced by this tensor in the output image. The default value is the color\n red.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`.\n Scalar. Used to build the `tag` attribute of the summary values.\n tensor: A `Tensor`. 
Must be one of the following types: `uint8`, `float32`, `half`.\n 4-D of shape `[batch_size, height, width, channels]` where\n `channels` is 1, 3, or 4.\n bad_color: A `Tensor` of type `uint8`.\n Color to use for pixels with non-finite values.\n max_images: An optional `int` that is `>= 1`. Defaults to `3`.\n Max number of batch elements to generate images for.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " if (max_images is None): max_images = 3 max_images = _execute.make_int(max_images, 'max_images') _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteImageSummary', writer=writer, global_step=global_step, tag=tag, tensor=tensor, bad_color=bad_color, max_images=max_images, name=name) return _op else: (_attr_T, (tensor,)) = _execute.args_to_matching_eager([tensor], _ctx, _dtypes.float32) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) bad_color = _ops.convert_to_tensor(bad_color, _dtypes.uint8) _inputs_flat = [writer, global_step, tag, tensor, bad_color] _attrs = ('max_images', max_images, 'T', _attr_T) _result = _execute.execute(b'WriteImageSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result<|docstring|>Writes a `Summary` protocol buffer with images. The summary has up to `max_images` summary values containing images. The images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, channels]` and where `channels` can be: * 1: `tensor` is interpreted as Grayscale. * 3: `tensor` is interpreted as RGB. * 4: `tensor` is interpreted as RGBA. The images have the same number of channels as the input tensor. For float input, the values are normalized one image at a time to fit in the range `[0, 255]`. `uint8` values are unchanged. 
The op uses two different normalization algorithms: * If the input values are all positive, they are rescaled so the largest one is 255. * If any input value is negative, the values are shifted so input value 0.0 is at 127. They are then rescaled so that either the smallest value is 0, or the largest one is 255. The `tag` argument is a scalar `Tensor` of type `string`. It is used to build the `tag` of the summary values: * If `max_images` is 1, the summary value tag is '*tag*/image'. * If `max_images` is greater than 1, the summary value tags are generated sequentially as '*tag*/image/0', '*tag*/image/1', etc. The `bad_color` argument is the color to use in the generated images for non-finite input values. It is a `unit8` 1-D tensor of length `channels`. Each element must be in the range `[0, 255]` (It represents the value of a pixel in the output image). Non-finite values in the input tensor are replaced by this tensor in the output image. The default value is the color red. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tag: A `Tensor` of type `string`. Scalar. Used to build the `tag` attribute of the summary values. tensor: A `Tensor`. Must be one of the following types: `uint8`, `float32`, `half`. 4-D of shape `[batch_size, height, width, channels]` where `channels` is 1, 3, or 4. bad_color: A `Tensor` of type `uint8`. Color to use for pixels with non-finite values. max_images: An optional `int` that is `>= 1`. Defaults to `3`. Max number of batch elements to generate images for. name: A name for the operation (optional). Returns: The created Operation.<|endoftext|>
71b41ecf167cdfc23f7318891d01e25cd0d8383581a001ca8f74f73beda121c0
def write_scalar_summary(writer, global_step, tag, value, name=None): 'Writes a `Summary` protocol buffer with scalar values.\n\n The input `tag` and `value` must have the scalars.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`. Tag for the summary.\n value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.\n Value for the summary.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n ' _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteScalarSummary', writer=writer, global_step=global_step, tag=tag, value=value, name=name) return _op else: (_attr_T, (value,)) = _execute.args_to_matching_eager([value], _ctx) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) _inputs_flat = [writer, global_step, tag, value] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteScalarSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
Writes a `Summary` protocol buffer with scalar values. The input `tag` and `value` must have the scalars. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tag: A `Tensor` of type `string`. Tag for the summary. value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`. Value for the summary. name: A name for the operation (optional). Returns: The created Operation.
lesson7.4/tensorflow/contrib/summary/gen_summary_ops.py
write_scalar_summary
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
21
python
def write_scalar_summary(writer, global_step, tag, value, name=None): 'Writes a `Summary` protocol buffer with scalar values.\n\n The input `tag` and `value` must have the scalars.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`. Tag for the summary.\n value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.\n Value for the summary.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n ' _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteScalarSummary', writer=writer, global_step=global_step, tag=tag, value=value, name=name) return _op else: (_attr_T, (value,)) = _execute.args_to_matching_eager([value], _ctx) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) _inputs_flat = [writer, global_step, tag, value] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteScalarSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
def write_scalar_summary(writer, global_step, tag, value, name=None): 'Writes a `Summary` protocol buffer with scalar values.\n\n The input `tag` and `value` must have the scalars.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tag: A `Tensor` of type `string`. Tag for the summary.\n value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.\n Value for the summary.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n ' _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteScalarSummary', writer=writer, global_step=global_step, tag=tag, value=value, name=name) return _op else: (_attr_T, (value,)) = _execute.args_to_matching_eager([value], _ctx) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) _inputs_flat = [writer, global_step, tag, value] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteScalarSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result<|docstring|>Writes a `Summary` protocol buffer with scalar values. The input `tag` and `value` must have the scalars. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tag: A `Tensor` of type `string`. Tag for the summary. value: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`. Value for the summary. name: A name for the operation (optional). Returns: The created Operation.<|endoftext|>
7343a1f8b53a8ec76f4789e79d950f2109a975e2676cab733a8dd6ba826d9777
def write_summary(writer, global_step, tensor, tag, summary_metadata, name=None): "Outputs a `Summary` protocol buffer with a tensor.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tensor: A `Tensor`. A tensor to serialize.\n tag: A `Tensor` of type `string`. The summary's tag.\n summary_metadata: A `Tensor` of type `string`.\n Serialized SummaryMetadata protocol buffer containing\n plugin-related metadata for this summary.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteSummary', writer=writer, global_step=global_step, tensor=tensor, tag=tag, summary_metadata=summary_metadata, name=name) return _op else: (_attr_T, (tensor,)) = _execute.args_to_matching_eager([tensor], _ctx) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) summary_metadata = _ops.convert_to_tensor(summary_metadata, _dtypes.string) _inputs_flat = [writer, global_step, tensor, tag, summary_metadata] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
Outputs a `Summary` protocol buffer with a tensor. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tensor: A `Tensor`. A tensor to serialize. tag: A `Tensor` of type `string`. The summary's tag. summary_metadata: A `Tensor` of type `string`. Serialized SummaryMetadata protocol buffer containing plugin-related metadata for this summary. name: A name for the operation (optional). Returns: The created Operation.
lesson7.4/tensorflow/contrib/summary/gen_summary_ops.py
write_summary
magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda
21
python
def write_summary(writer, global_step, tensor, tag, summary_metadata, name=None): "Outputs a `Summary` protocol buffer with a tensor.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tensor: A `Tensor`. A tensor to serialize.\n tag: A `Tensor` of type `string`. The summary's tag.\n summary_metadata: A `Tensor` of type `string`.\n Serialized SummaryMetadata protocol buffer containing\n plugin-related metadata for this summary.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteSummary', writer=writer, global_step=global_step, tensor=tensor, tag=tag, summary_metadata=summary_metadata, name=name) return _op else: (_attr_T, (tensor,)) = _execute.args_to_matching_eager([tensor], _ctx) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) summary_metadata = _ops.convert_to_tensor(summary_metadata, _dtypes.string) _inputs_flat = [writer, global_step, tensor, tag, summary_metadata] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result
def write_summary(writer, global_step, tensor, tag, summary_metadata, name=None): "Outputs a `Summary` protocol buffer with a tensor.\n\n Args:\n writer: A `Tensor` of type `resource`. A handle to a summary writer.\n global_step: A `Tensor` of type `int64`.\n The step to write the summary for.\n tensor: A `Tensor`. A tensor to serialize.\n tag: A `Tensor` of type `string`. The summary's tag.\n summary_metadata: A `Tensor` of type `string`.\n Serialized SummaryMetadata protocol buffer containing\n plugin-related metadata for this summary.\n name: A name for the operation (optional).\n\n Returns:\n The created Operation.\n " _ctx = _context.context() if _ctx.in_graph_mode(): (_, _, _op) = _op_def_lib._apply_op_helper('WriteSummary', writer=writer, global_step=global_step, tensor=tensor, tag=tag, summary_metadata=summary_metadata, name=name) return _op else: (_attr_T, (tensor,)) = _execute.args_to_matching_eager([tensor], _ctx) _attr_T = _attr_T.as_datatype_enum writer = _ops.convert_to_tensor(writer, _dtypes.resource) global_step = _ops.convert_to_tensor(global_step, _dtypes.int64) tag = _ops.convert_to_tensor(tag, _dtypes.string) summary_metadata = _ops.convert_to_tensor(summary_metadata, _dtypes.string) _inputs_flat = [writer, global_step, tensor, tag, summary_metadata] _attrs = ('T', _attr_T) _result = _execute.execute(b'WriteSummary', 0, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) return _result<|docstring|>Outputs a `Summary` protocol buffer with a tensor. Args: writer: A `Tensor` of type `resource`. A handle to a summary writer. global_step: A `Tensor` of type `int64`. The step to write the summary for. tensor: A `Tensor`. A tensor to serialize. tag: A `Tensor` of type `string`. The summary's tag. summary_metadata: A `Tensor` of type `string`. Serialized SummaryMetadata protocol buffer containing plugin-related metadata for this summary. name: A name for the operation (optional). Returns: The created Operation.<|endoftext|>
e379ef81451f342609e95448ab63fddacba175765931386cc1971b97b4b4da97
def handle_exceptions(method): 'Transforms the exception for the volume but keeps its traceback intact.\n ' def wrapper(self, ctx, volume_id, *args, **kwargs): try: res = method(self, ctx, volume_id, *args, **kwargs) except (keystone_exc.NotFound, cinder_exception.NotFound, cinder_exception.OverLimit) as e: raise exceptions.BackendException(str(e)) return res return wrapper
Transforms the exception for the volume but keeps its traceback intact.
glance_store/common/cinder_utils.py
handle_exceptions
cloudify-ro/glance_store
49
python
def handle_exceptions(method): '\n ' def wrapper(self, ctx, volume_id, *args, **kwargs): try: res = method(self, ctx, volume_id, *args, **kwargs) except (keystone_exc.NotFound, cinder_exception.NotFound, cinder_exception.OverLimit) as e: raise exceptions.BackendException(str(e)) return res return wrapper
def handle_exceptions(method): '\n ' def wrapper(self, ctx, volume_id, *args, **kwargs): try: res = method(self, ctx, volume_id, *args, **kwargs) except (keystone_exc.NotFound, cinder_exception.NotFound, cinder_exception.OverLimit) as e: raise exceptions.BackendException(str(e)) return res return wrapper<|docstring|>Transforms the exception for the volume but keeps its traceback intact.<|endoftext|>
fb3091b650ebb9148df12ab05da878649a4265feb9f2aca5b83bc64b8aca5fb7
@retrying.retry(stop_max_attempt_number=5, retry_on_exception=_retry_on_bad_request) @handle_exceptions def attachment_create(self, client, volume_id, connector=None, mountpoint=None, mode=None): 'Create a volume attachment. This requires microversion >= 3.54.\n\n The attachment_create call was introduced in microversion 3.27. We\n need 3.54 as minimum here as we need attachment_complete to finish the\n attaching process and it which was introduced in version 3.44 and\n we also pass the attach mode which was introduced in version 3.54.\n\n :param client: cinderclient object\n :param volume_id: UUID of the volume on which to create the attachment.\n :param connector: host connector dict; if None, the attachment will\n be \'reserved\' but not yet attached.\n :param mountpoint: Optional mount device name for the attachment,\n e.g. "/dev/vdb". This is only used if a connector is provided.\n :param mode: The mode in which the attachment is made i.e.\n read only(ro) or read/write(rw)\n :returns: a dict created from the\n cinderclient.v3.attachments.VolumeAttachment object with a backward\n compatible connection_info dict\n ' if (connector and mountpoint and ('mountpoint' not in connector)): connector['mountpoint'] = mountpoint try: attachment_ref = client.attachments.create(volume_id, connector, mode=mode) return attachment_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): if (getattr(ex, 'code', None) != 400): LOG.error(_LE('Create attachment failed for volume %(volume_id)s. Error: %(msg)s Code: %(code)s'), {'volume_id': volume_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})
Create a volume attachment. This requires microversion >= 3.54. The attachment_create call was introduced in microversion 3.27. We need 3.54 as minimum here as we need attachment_complete to finish the attaching process and it which was introduced in version 3.44 and we also pass the attach mode which was introduced in version 3.54. :param client: cinderclient object :param volume_id: UUID of the volume on which to create the attachment. :param connector: host connector dict; if None, the attachment will be 'reserved' but not yet attached. :param mountpoint: Optional mount device name for the attachment, e.g. "/dev/vdb". This is only used if a connector is provided. :param mode: The mode in which the attachment is made i.e. read only(ro) or read/write(rw) :returns: a dict created from the cinderclient.v3.attachments.VolumeAttachment object with a backward compatible connection_info dict
glance_store/common/cinder_utils.py
attachment_create
cloudify-ro/glance_store
49
python
@retrying.retry(stop_max_attempt_number=5, retry_on_exception=_retry_on_bad_request) @handle_exceptions def attachment_create(self, client, volume_id, connector=None, mountpoint=None, mode=None): 'Create a volume attachment. This requires microversion >= 3.54.\n\n The attachment_create call was introduced in microversion 3.27. We\n need 3.54 as minimum here as we need attachment_complete to finish the\n attaching process and it which was introduced in version 3.44 and\n we also pass the attach mode which was introduced in version 3.54.\n\n :param client: cinderclient object\n :param volume_id: UUID of the volume on which to create the attachment.\n :param connector: host connector dict; if None, the attachment will\n be \'reserved\' but not yet attached.\n :param mountpoint: Optional mount device name for the attachment,\n e.g. "/dev/vdb". This is only used if a connector is provided.\n :param mode: The mode in which the attachment is made i.e.\n read only(ro) or read/write(rw)\n :returns: a dict created from the\n cinderclient.v3.attachments.VolumeAttachment object with a backward\n compatible connection_info dict\n ' if (connector and mountpoint and ('mountpoint' not in connector)): connector['mountpoint'] = mountpoint try: attachment_ref = client.attachments.create(volume_id, connector, mode=mode) return attachment_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): if (getattr(ex, 'code', None) != 400): LOG.error(_LE('Create attachment failed for volume %(volume_id)s. Error: %(msg)s Code: %(code)s'), {'volume_id': volume_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})
@retrying.retry(stop_max_attempt_number=5, retry_on_exception=_retry_on_bad_request) @handle_exceptions def attachment_create(self, client, volume_id, connector=None, mountpoint=None, mode=None): 'Create a volume attachment. This requires microversion >= 3.54.\n\n The attachment_create call was introduced in microversion 3.27. We\n need 3.54 as minimum here as we need attachment_complete to finish the\n attaching process and it which was introduced in version 3.44 and\n we also pass the attach mode which was introduced in version 3.54.\n\n :param client: cinderclient object\n :param volume_id: UUID of the volume on which to create the attachment.\n :param connector: host connector dict; if None, the attachment will\n be \'reserved\' but not yet attached.\n :param mountpoint: Optional mount device name for the attachment,\n e.g. "/dev/vdb". This is only used if a connector is provided.\n :param mode: The mode in which the attachment is made i.e.\n read only(ro) or read/write(rw)\n :returns: a dict created from the\n cinderclient.v3.attachments.VolumeAttachment object with a backward\n compatible connection_info dict\n ' if (connector and mountpoint and ('mountpoint' not in connector)): connector['mountpoint'] = mountpoint try: attachment_ref = client.attachments.create(volume_id, connector, mode=mode) return attachment_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): if (getattr(ex, 'code', None) != 400): LOG.error(_LE('Create attachment failed for volume %(volume_id)s. Error: %(msg)s Code: %(code)s'), {'volume_id': volume_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})<|docstring|>Create a volume attachment. This requires microversion >= 3.54. The attachment_create call was introduced in microversion 3.27. We need 3.54 as minimum here as we need attachment_complete to finish the attaching process and it which was introduced in version 3.44 and we also pass the attach mode which was introduced in version 3.54. 
:param client: cinderclient object :param volume_id: UUID of the volume on which to create the attachment. :param connector: host connector dict; if None, the attachment will be 'reserved' but not yet attached. :param mountpoint: Optional mount device name for the attachment, e.g. "/dev/vdb". This is only used if a connector is provided. :param mode: The mode in which the attachment is made i.e. read only(ro) or read/write(rw) :returns: a dict created from the cinderclient.v3.attachments.VolumeAttachment object with a backward compatible connection_info dict<|endoftext|>
fdf5d6dc53d08847f907dc8707a8ac2d10f546042813a548591d36561876c482
@handle_exceptions
def attachment_get(self, client, attachment_id):
    """Fetch a volume attachment by id.

    :param client: cinderclient object
    :param attachment_id: UUID of the volume attachment to look up.
    :returns: a dict built from the
        cinderclient.v3.attachments.VolumeAttachment object with a
        backward compatible connection_info dict
    """
    try:
        return client.attachments.show(attachment_id)
    except cinder_exception.ClientException as ex:
        # Log the failure, then let the original exception propagate.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Show attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'),
                      {'id': attachment_id,
                       'msg': str(ex),
                       'code': getattr(ex, 'code', None)})
Gets a volume attachment. :param client: cinderclient object :param attachment_id: UUID of the volume attachment to get. :returns: a dict created from the cinderclient.v3.attachments.VolumeAttachment object with a backward compatible connection_info dict
glance_store/common/cinder_utils.py
attachment_get
cloudify-ro/glance_store
49
python
@handle_exceptions def attachment_get(self, client, attachment_id): 'Gets a volume attachment.\n\n :param client: cinderclient object\n :param attachment_id: UUID of the volume attachment to get.\n :returns: a dict created from the\n cinderclient.v3.attachments.VolumeAttachment object with a backward\n compatible connection_info dict\n ' try: attachment_ref = client.attachments.show(attachment_id) return attachment_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('Show attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'), {'id': attachment_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})
@handle_exceptions def attachment_get(self, client, attachment_id): 'Gets a volume attachment.\n\n :param client: cinderclient object\n :param attachment_id: UUID of the volume attachment to get.\n :returns: a dict created from the\n cinderclient.v3.attachments.VolumeAttachment object with a backward\n compatible connection_info dict\n ' try: attachment_ref = client.attachments.show(attachment_id) return attachment_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('Show attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'), {'id': attachment_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})<|docstring|>Gets a volume attachment. :param client: cinderclient object :param attachment_id: UUID of the volume attachment to get. :returns: a dict created from the cinderclient.v3.attachments.VolumeAttachment object with a backward compatible connection_info dict<|endoftext|>
02509bdf55ef0d012598d29035593fca7d215f3587e1c0b53c72e35ad544eea9
@handle_exceptions
def attachment_update(self, client, attachment_id, connector, mountpoint=None):
    """Update the connector on an existing volume attachment.

    An attachment without a connector is considered reserved but not
    fully attached; to terminate a connection, the attachment for that
    connection must be deleted instead.

    :param client: cinderclient object
    :param attachment_id: UUID of the volume attachment to update.
    :param connector: host connector dict; required for an update.
    :param mountpoint: optional mount device name, e.g. "/dev/vdb".
        Theoretically backend-optional, in practice usually required.
    :returns: a dict built from the
        cinderclient.v3.attachments.VolumeAttachment object with a
        backward compatible connection_info dict
    """
    # Fold the mountpoint into the connector unless already present.
    if mountpoint and 'mountpoint' not in connector:
        connector['mountpoint'] = mountpoint
    try:
        return client.attachments.update(attachment_id, connector)
    except cinder_exception.ClientException as ex:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Update attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'),
                      {'id': attachment_id,
                       'msg': str(ex),
                       'code': getattr(ex, 'code', None)})
Updates the connector on the volume attachment. An attachment without a connector is considered reserved but not fully attached. :param client: cinderclient object :param attachment_id: UUID of the volume attachment to update. :param connector: host connector dict. This is required when updating a volume attachment. To terminate a connection, the volume attachment for that connection must be deleted. :param mountpoint: Optional mount device name for the attachment, e.g. "/dev/vdb". Theoretically this is optional per volume backend, but in practice it's normally required so it's best to always provide a value. :returns: a dict created from the cinderclient.v3.attachments.VolumeAttachment object with a backward compatible connection_info dict
glance_store/common/cinder_utils.py
attachment_update
cloudify-ro/glance_store
49
python
@handle_exceptions def attachment_update(self, client, attachment_id, connector, mountpoint=None): 'Updates the connector on the volume attachment. An attachment\n without a connector is considered reserved but not fully attached.\n\n :param client: cinderclient object\n :param attachment_id: UUID of the volume attachment to update.\n :param connector: host connector dict. This is required when updating\n a volume attachment. To terminate a connection, the volume\n attachment for that connection must be deleted.\n :param mountpoint: Optional mount device name for the attachment,\n e.g. "/dev/vdb". Theoretically this is optional per volume backend,\n but in practice it\'s normally required so it\'s best to always\n provide a value.\n :returns: a dict created from the\n cinderclient.v3.attachments.VolumeAttachment object with a backward\n compatible connection_info dict\n ' if (mountpoint and ('mountpoint' not in connector)): connector['mountpoint'] = mountpoint try: attachment_ref = client.attachments.update(attachment_id, connector) return attachment_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('Update attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'), {'id': attachment_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})
@handle_exceptions def attachment_update(self, client, attachment_id, connector, mountpoint=None): 'Updates the connector on the volume attachment. An attachment\n without a connector is considered reserved but not fully attached.\n\n :param client: cinderclient object\n :param attachment_id: UUID of the volume attachment to update.\n :param connector: host connector dict. This is required when updating\n a volume attachment. To terminate a connection, the volume\n attachment for that connection must be deleted.\n :param mountpoint: Optional mount device name for the attachment,\n e.g. "/dev/vdb". Theoretically this is optional per volume backend,\n but in practice it\'s normally required so it\'s best to always\n provide a value.\n :returns: a dict created from the\n cinderclient.v3.attachments.VolumeAttachment object with a backward\n compatible connection_info dict\n ' if (mountpoint and ('mountpoint' not in connector)): connector['mountpoint'] = mountpoint try: attachment_ref = client.attachments.update(attachment_id, connector) return attachment_ref except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('Update attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'), {'id': attachment_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})<|docstring|>Updates the connector on the volume attachment. An attachment without a connector is considered reserved but not fully attached. :param client: cinderclient object :param attachment_id: UUID of the volume attachment to update. :param connector: host connector dict. This is required when updating a volume attachment. To terminate a connection, the volume attachment for that connection must be deleted. :param mountpoint: Optional mount device name for the attachment, e.g. "/dev/vdb". Theoretically this is optional per volume backend, but in practice it's normally required so it's best to always provide a value. 
:returns: a dict created from the cinderclient.v3.attachments.VolumeAttachment object with a backward compatible connection_info dict<|endoftext|>
1c4cd7c71a9bd14e2fcfc679a872e274e073fafa5fdf95dd024691615431e0c1
@handle_exceptions
def attachment_complete(self, client, attachment_id):
    """Mark a volume attachment as complete.

    Tells Cinder that the attachment is fully connected on the host so
    it can apply the corresponding state changes to the volume record.

    :param client: cinderclient object
    :param attachment_id: UUID of the volume attachment to complete.
    """
    try:
        client.attachments.complete(attachment_id)
    except cinder_exception.ClientException as ex:
        # Log the failure, then let the original exception propagate.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Complete attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'),
                      {'id': attachment_id,
                       'msg': str(ex),
                       'code': getattr(ex, 'code', None)})
Marks a volume attachment complete. This call should be used to inform Cinder that a volume attachment is fully connected on the host so Cinder can apply the necessary state changes to the volume info in its database. :param client: cinderclient object :param attachment_id: UUID of the volume attachment to update.
glance_store/common/cinder_utils.py
attachment_complete
cloudify-ro/glance_store
49
python
@handle_exceptions def attachment_complete(self, client, attachment_id): 'Marks a volume attachment complete.\n\n This call should be used to inform Cinder that a volume attachment is\n fully connected on the host so Cinder can apply the necessary state\n changes to the volume info in its database.\n\n :param client: cinderclient object\n :param attachment_id: UUID of the volume attachment to update.\n ' try: client.attachments.complete(attachment_id) except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('Complete attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'), {'id': attachment_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})
@handle_exceptions def attachment_complete(self, client, attachment_id): 'Marks a volume attachment complete.\n\n This call should be used to inform Cinder that a volume attachment is\n fully connected on the host so Cinder can apply the necessary state\n changes to the volume info in its database.\n\n :param client: cinderclient object\n :param attachment_id: UUID of the volume attachment to update.\n ' try: client.attachments.complete(attachment_id) except cinder_exception.ClientException as ex: with excutils.save_and_reraise_exception(): LOG.error(_LE('Complete attachment failed for attachment %(id)s. Error: %(msg)s Code: %(code)s'), {'id': attachment_id, 'msg': str(ex), 'code': getattr(ex, 'code', None)})<|docstring|>Marks a volume attachment complete. This call should be used to inform Cinder that a volume attachment is fully connected on the host so Cinder can apply the necessary state changes to the volume info in its database. :param client: cinderclient object :param attachment_id: UUID of the volume attachment to update.<|endoftext|>
3d9daa6490165c76db46c3b2f0fee7f3b6d5d0a549a0fecd9a989fc297cd3b23
@router.get('/info', response_model=List[ServerInfo])
async def server_info() -> List[ServerInfo]:
    """List the known servers.

    The returned properties (name, region, ...) only change with major
    game updates, so API consumers are expected to cache this payload
    aggressively.
    """
    return [server for server in _STATIC_SERVER_DATA.values()]
Return the list of servers. This payload contains unchanging properties like the server name or region. API consumers are expected to aggressively cache the returned data as they will only change with major game updates.
server/routes/servers.py
server_info
auto-pl/apl-api
0
python
@router.get('/info', response_model=List[ServerInfo]) async def server_info() -> List[ServerInfo]: 'Return the list of servers.\n\n This payload contains unchanging properties like the server name or\n region. API consumers are expected to aggressively cache the\n returned data as they will only change with major game updates.\n ' return list(_STATIC_SERVER_DATA.values())
@router.get('/info', response_model=List[ServerInfo]) async def server_info() -> List[ServerInfo]: 'Return the list of servers.\n\n This payload contains unchanging properties like the server name or\n region. API consumers are expected to aggressively cache the\n returned data as they will only change with major game updates.\n ' return list(_STATIC_SERVER_DATA.values())<|docstring|>Return the list of servers. This payload contains unchanging properties like the server name or region. API consumers are expected to aggressively cache the returned data as they will only change with major game updates.<|endoftext|>
f8a71eb210bfdbccf291631c0f822922fbfe21301243bd26050701b9ee919f54
@router.get('/status', response_model=List[ServerStatus])
async def server_status() -> List[ServerStatus]:
    """Return a momentary (mock) status digest for all servers.

    Likely to move to, or be mirrored by, a WebSocket endpoint in a
    future version.
    """
    statuses: List[ServerStatus] = []
    for server in _STATIC_SERVER_DATA.values():
        # ~90% chance a server reports as online.
        state = 'online' if random.random() < 0.9 else 'locked'
        floor = random.randint(10, 300)
        pop = Population(
            vs=floor + random.randint(0, 100),
            nc=floor + random.randint(0, 100),
            tr=floor + random.randint(0, 100),
            nso=int(floor * 0.05))
        # Each continent is open with 50% probability.
        open_continents = [cast(ContinentId, c)
                           for c in (2, 4, 6, 8)
                           if random.random() < 0.5]
        if not open_continents:
            # Always advertise at least one open continent.
            open_continents.append(cast(ContinentId, 2))
        statuses.append(ServerStatus(id=server.id, status=state,
                                     population=pop,
                                     open_continents=open_continents))
    return statuses
Return a momentary status digest for all servers. This endpoint will likely be moved to or replicated in a WebSocket endpoint in future versions.
server/routes/servers.py
server_status
auto-pl/apl-api
0
python
@router.get('/status', response_model=List[ServerStatus]) async def server_status() -> List[ServerStatus]: 'Return a momentary status digest for all servers.\n\n This endpoint will likely be moved to or replicated in a WebSocket\n endpoint in future versions.\n ' data: List[ServerStatus] = [] for server in _STATIC_SERVER_DATA.values(): status = ('online' if (random.random() < 0.9) else 'locked') base_pop = random.randint(10, 300) population = Population(vs=(base_pop + random.randint(0, 100)), nc=(base_pop + random.randint(0, 100)), tr=(base_pop + random.randint(0, 100)), nso=int((base_pop * 0.05))) continents = [cast(ContinentId, i) for i in (2, 4, 6, 8) if (random.random() < 0.5)] if (not continents): continents.append(cast(ContinentId, 2)) data.append(ServerStatus(id=server.id, status=status, population=population, open_continents=continents)) return data
@router.get('/status', response_model=List[ServerStatus]) async def server_status() -> List[ServerStatus]: 'Return a momentary status digest for all servers.\n\n This endpoint will likely be moved to or replicated in a WebSocket\n endpoint in future versions.\n ' data: List[ServerStatus] = [] for server in _STATIC_SERVER_DATA.values(): status = ('online' if (random.random() < 0.9) else 'locked') base_pop = random.randint(10, 300) population = Population(vs=(base_pop + random.randint(0, 100)), nc=(base_pop + random.randint(0, 100)), tr=(base_pop + random.randint(0, 100)), nso=int((base_pop * 0.05))) continents = [cast(ContinentId, i) for i in (2, 4, 6, 8) if (random.random() < 0.5)] if (not continents): continents.append(cast(ContinentId, 2)) data.append(ServerStatus(id=server.id, status=status, population=population, open_continents=continents)) return data<|docstring|>Return a momentary status digest for all servers. This endpoint will likely be moved to or replicated in a WebSocket endpoint in future versions.<|endoftext|>
41a0a778171fcc031b8a3f2978082885b684d7f291a8230732fc35a07d680664
def get_activity_graph(self, activity_uri, granularity='FINE'):
    """Build the XML request string for querying an activity graph.

    :param activity_uri: URI of the activity to query.
    :param granularity: level of detail for the graph (default 'FINE').
    :return: UTF-8 decoded XML for a ns1:getActivityGraphRequest.
    """
    request = self.query_modeler.get_activity_graph_request(activity_uri,
                                                            granularity)
    xml_bytes = request.toxml('utf-8',
                              element_name='ns1:getActivityGraphRequest')
    return xml_bytes.decode('utf-8')
Returns the xml string for querying the activity graph :param activity_uri: :param granularity: :return:
komadu_client/api/query_api.py
get_activity_graph
Data-to-Insight-Center/CKN
0
python
def get_activity_graph(self, activity_uri, granularity='FINE'): '\n Returns the xml string for querying the activity graph\n :param activity_uri:\n :param granularity:\n :return:\n ' result = self.query_modeler.get_activity_graph_request(activity_uri, granularity) return result.toxml('utf-8', element_name='ns1:getActivityGraphRequest').decode('utf-8')
def get_activity_graph(self, activity_uri, granularity='FINE'): '\n Returns the xml string for querying the activity graph\n :param activity_uri:\n :param granularity:\n :return:\n ' result = self.query_modeler.get_activity_graph_request(activity_uri, granularity) return result.toxml('utf-8', element_name='ns1:getActivityGraphRequest').decode('utf-8')<|docstring|>Returns the xml string for querying the activity graph :param activity_uri: :param granularity: :return:<|endoftext|>
8b7245e151ed41c538075bf871f9ad521a0042f64294de6a612f2cdc82fa361c
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None:
    """Set up YoLink switch entities from a config entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id][ATTR_COORDINATOR]
    # One entity per (supported device, matching description) pair.
    entities = [
        YoLinkSwitchEntity(config_entry, coordinator, description, device)
        for device in coordinator.yl_devices
        if device.device_type in DEVICE_TYPE
        for description in DEVICE_TYPES
        if description.exists_fn(device)
    ]
    async_add_entities(entities)
Set up YoLink Sensor from a config entry.
homeassistant/components/yolink/switch.py
async_setup_entry
RoboGnome/core
2
python
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None: coordinator = hass.data[DOMAIN][config_entry.entry_id][ATTR_COORDINATOR] devices = [device for device in coordinator.yl_devices if (device.device_type in DEVICE_TYPE)] entities = [] for device in devices: for description in DEVICE_TYPES: if description.exists_fn(device): entities.append(YoLinkSwitchEntity(config_entry, coordinator, description, device)) async_add_entities(entities)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None: coordinator = hass.data[DOMAIN][config_entry.entry_id][ATTR_COORDINATOR] devices = [device for device in coordinator.yl_devices if (device.device_type in DEVICE_TYPE)] entities = [] for device in devices: for description in DEVICE_TYPES: if description.exists_fn(device): entities.append(YoLinkSwitchEntity(config_entry, coordinator, description, device)) async_add_entities(entities)<|docstring|>Set up YoLink Sensor from a config entry.<|endoftext|>
89f75dbb65eed50a8b37f9edf30e1271f725131db1aed8cca26705abb9eddca2
def __init__(self, config_entry: ConfigEntry, coordinator: YoLinkCoordinator, description: YoLinkSwitchEntityDescription, device: YoLinkDevice) -> None:
    """Initialise a YoLink switch entity."""
    super().__init__(coordinator, device)
    self.config_entry = config_entry
    self.entity_description = description
    # Unique id and display name combine the device identity with the
    # per-entity description key/name.
    self._attr_unique_id = f'{device.device_id} {description.key}'
    self._attr_name = f'{device.device_name} ({description.name})'
Init YoLink Outlet.
homeassistant/components/yolink/switch.py
__init__
RoboGnome/core
2
python
def __init__(self, config_entry: ConfigEntry, coordinator: YoLinkCoordinator, description: YoLinkSwitchEntityDescription, device: YoLinkDevice) -> None: super().__init__(coordinator, device) self.config_entry = config_entry self.entity_description = description self._attr_unique_id = f'{device.device_id} {self.entity_description.key}' self._attr_name = f'{device.device_name} ({self.entity_description.name})'
def __init__(self, config_entry: ConfigEntry, coordinator: YoLinkCoordinator, description: YoLinkSwitchEntityDescription, device: YoLinkDevice) -> None: super().__init__(coordinator, device) self.config_entry = config_entry self.entity_description = description self._attr_unique_id = f'{device.device_id} {self.entity_description.key}' self._attr_name = f'{device.device_name} ({self.entity_description.name})'<|docstring|>Init YoLink Outlet.<|endoftext|>
a93b9dd44bcf6bd452339eed7fa06eb6227baac7b8ef35acd2eab4edfb6aa6ef
@callback
def update_entity_state(self, state: dict) -> None:
    """Push a fresh device state into the HA entity."""
    raw_value = state[self.entity_description.key]
    self._attr_is_on = self.entity_description.value(raw_value)
    self.async_write_ha_state()
Update HA Entity State.
homeassistant/components/yolink/switch.py
update_entity_state
RoboGnome/core
2
python
@callback def update_entity_state(self, state: dict) -> None: self._attr_is_on = self.entity_description.value(state[self.entity_description.key]) self.async_write_ha_state()
@callback def update_entity_state(self, state: dict) -> None: self._attr_is_on = self.entity_description.value(state[self.entity_description.key]) self.async_write_ha_state()<|docstring|>Update HA Entity State.<|endoftext|>
6c69a13f2efa8c51894a2fa8f84b986f6cf344beb96b376adadc16de4d58c6b3
async def call_state_change(self, state: str) -> None:
    """Invoke the setState device API and mirror the result locally.

    :param state: target outlet state, 'open' or 'close'.
    """
    try:
        await self.device.call_device_http_api('setState', {'state': state})
    except YoLinkAuthFailError as auth_err:
        # Credentials are no longer valid: start a reauth flow, then fail.
        self.config_entry.async_start_reauth(self.hass)
        raise HomeAssistantError(auth_err) from auth_err
    except YoLinkClientError as client_err:
        # Mark the coordinator unhealthy so HA shows the entity unavailable.
        self.coordinator.last_update_success = False
        raise HomeAssistantError(client_err) from client_err
    self._attr_is_on = self.entity_description.value(state)
    self.async_write_ha_state()
Call setState api to change outlet state.
homeassistant/components/yolink/switch.py
call_state_change
RoboGnome/core
2
python
async def call_state_change(self, state: str) -> None: try: (await self.device.call_device_http_api('setState', {'state': state})) except YoLinkAuthFailError as yl_auth_err: self.config_entry.async_start_reauth(self.hass) raise HomeAssistantError(yl_auth_err) from yl_auth_err except YoLinkClientError as yl_client_err: self.coordinator.last_update_success = False raise HomeAssistantError(yl_client_err) from yl_client_err self._attr_is_on = self.entity_description.value(state) self.async_write_ha_state()
async def call_state_change(self, state: str) -> None: try: (await self.device.call_device_http_api('setState', {'state': state})) except YoLinkAuthFailError as yl_auth_err: self.config_entry.async_start_reauth(self.hass) raise HomeAssistantError(yl_auth_err) from yl_auth_err except YoLinkClientError as yl_client_err: self.coordinator.last_update_success = False raise HomeAssistantError(yl_client_err) from yl_client_err self._attr_is_on = self.entity_description.value(state) self.async_write_ha_state()<|docstring|>Call setState api to change outlet state.<|endoftext|>
b30c172aaa80c4bafcb53ecc4b4d1f4ceff1939d47c78b53bb84f7f69d82d544
async def async_turn_on(self, **kwargs: Any) -> None:
    """Turn the entity on by opening the outlet."""
    await self.call_state_change('open')
Turn the entity on.
homeassistant/components/yolink/switch.py
async_turn_on
RoboGnome/core
2
python
async def async_turn_on(self, **kwargs: Any) -> None: (await self.call_state_change('open'))
async def async_turn_on(self, **kwargs: Any) -> None: (await self.call_state_change('open'))<|docstring|>Turn the entity on.<|endoftext|>
0229d6ab0ce83e1a2bbb62600bf6c5c76219444bf28e60820438efdd67b29d56
async def async_turn_off(self, **kwargs: Any) -> None:
    """Turn the entity off by closing the outlet."""
    await self.call_state_change('close')
Turn the entity off.
homeassistant/components/yolink/switch.py
async_turn_off
RoboGnome/core
2
python
async def async_turn_off(self, **kwargs: Any) -> None: (await self.call_state_change('close'))
async def async_turn_off(self, **kwargs: Any) -> None: (await self.call_state_change('close'))<|docstring|>Turn the entity off.<|endoftext|>
6511bc91356a24df9bba631f8744386a34bfa44578145350c2aeeb55ccb239fb
@commands.group()
async def stats(self, ctx):
    'Display various Destiny 2 character stats'
    # Note: the docstring above is user-facing help text in discord.py.
    # With no subcommand given, fall back to the help entry for 'stats'.
    if ctx.invoked_subcommand is None:
        help_cmd = self.bot.get_command('help')
        await ctx.invoke(help_cmd, 'stats')
Display various Destiny 2 character stats
cogs/stats.py
stats
LSDicky/Destiny-2
1
python
@commands.group() async def stats(self, ctx): if (ctx.invoked_subcommand is None): cmd = self.bot.get_command('help') (await ctx.invoke(cmd, 'stats'))
@commands.group() async def stats(self, ctx): if (ctx.invoked_subcommand is None): cmd = self.bot.get_command('help') (await ctx.invoke(cmd, 'stats'))<|docstring|>Display various Destiny 2 character stats<|endoftext|>
cde90fd3c1768f80c0f82a191c82187d76701d21edfd020055fadfb3a8c00f0f
@stats.command()
async def pvp(self, ctx):
    'Display Crucible stats for all characters on an account'
    # (Docstring above is discord.py user-facing help text.)
    manager = MessageManager(self.bot, ctx.author, ctx.channel, ctx.prefix, [ctx.message])
    await ctx.channel.trigger_typing()

    # The user must have linked their Destiny 2 account first.
    info = self.bot.db.get_d2_info(ctx.author.id)
    if not info:
        await manager.say('You must first register your Destiny 2 account with the '
                          + '`{}register` command.'.format(ctx.prefix))
        return await manager.clear()
    platform = info.get('platform')
    membership_id = info.get('membership_id')

    error_msg = "Sorry, I can't seem to retrieve your stats right now"

    # The profile call is only needed for the display name.
    # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
    # are no longer swallowed.
    try:
        res = await self.destiny.api.get_profile(platform, membership_id, ['Profiles'])
    except Exception:
        await manager.say(error_msg)
        return await manager.clear()
    if res['ErrorCode'] != 1:
        await manager.say(error_msg)
        return await manager.clear()
    display_name = res['Response']['profile']['data']['userInfo']['displayName']

    # Mode 5 == all PvP activities.
    try:
        res = await self.destiny.api.get_historical_stats(platform, membership_id, modes=[5])
    except Exception:
        await manager.say(error_msg)
        return await manager.clear()
    if res['ErrorCode'] != 1:
        await manager.say(error_msg)
        return await manager.clear()

    # 'allTime' can be missing for accounts with no Crucible history;
    # `.get()` then returns None, so test truthiness rather than
    # `len(...)` (the original `not len(pvp_stats)` raised TypeError).
    pvp_stats = res['Response']['allPvP'].get('allTime')
    if not pvp_stats:
        await manager.say(error_msg)
        return await manager.clear()

    time_played = pvp_stats['secondsPlayed']['basic']['displayValue']
    kdr = pvp_stats['killsDeathsRatio']['basic']['displayValue']
    kda = pvp_stats['killsDeathsAssists']['basic']['displayValue']
    best_weapon = pvp_stats['weaponBestType']['basic']['displayValue']
    games_played = pvp_stats['activitiesEntered']['basic']['displayValue']
    best_kills = pvp_stats['bestSingleGameKills']['basic']['displayValue']
    best_spree = pvp_stats['longestKillSpree']['basic']['displayValue']
    combat_rating = pvp_stats['combatRating']['basic']['displayValue']
    kills = pvp_stats['kills']['basic']['displayValue']
    assists = pvp_stats['assists']['basic']['displayValue']
    deaths = pvp_stats['deaths']['basic']['displayValue']

    # winLossRatio is '-' when no games are on record; otherwise turn
    # the W/L ratio into a win percentage: w/(w+l) == r/(r+1).
    win_ratio = pvp_stats['winLossRatio']['basic']['displayValue']
    if win_ratio != '-':
        win_ratio = float(win_ratio)
        win_rate = str(round((win_ratio / (win_ratio + 1)) * 100, 1)) + ' %'
    else:
        win_rate = win_ratio

    e = discord.Embed(colour=constants.BLUE)
    e.set_author(name='{} | Crucible Stats'.format(display_name),
                 icon_url=constants.PLATFORM_URLS.get(platform))
    e.add_field(name='Kills', value=kills, inline=True)
    e.add_field(name='Assists', value=assists, inline=True)
    e.add_field(name='Deaths', value=deaths, inline=True)
    e.add_field(name='KD Ratio', value=kdr, inline=True)
    e.add_field(name='Efficiency (KAD)', value=kda, inline=True)
    e.add_field(name='Win Rate', value=win_rate, inline=True)
    e.add_field(name='Best Spree', value=best_spree, inline=True)
    e.add_field(name='Most Kills in a Game', value=best_kills, inline=True)
    e.add_field(name='Favorite Weapon', value=best_weapon, inline=True)
    e.add_field(name='Combat Rating', value=combat_rating, inline=True)
    e.add_field(name='Games Played', value=games_played, inline=True)
    e.add_field(name='Time Played', value=time_played, inline=True)
    await manager.say(e, embed=True, delete=False)
    await manager.clear()
Display Crucible stats for all characters on an account
cogs/stats.py
pvp
LSDicky/Destiny-2
1
python
@stats.command() async def pvp(self, ctx): manager = MessageManager(self.bot, ctx.author, ctx.channel, ctx.prefix, [ctx.message]) (await ctx.channel.trigger_typing()) info = self.bot.db.get_d2_info(ctx.author.id) if info: platform = info.get('platform') membership_id = info.get('membership_id') else: (await manager.say(('You must first register your Destiny 2 account with the ' + '`{}register` command.'.format(ctx.prefix)))) return (await manager.clear()) try: res = (await self.destiny.api.get_profile(platform, membership_id, ['Profiles'])) except: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) display_name = res['Response']['profile']['data']['userInfo']['displayName'] try: res = (await self.destiny.api.get_historical_stats(platform, membership_id, modes=[5])) except: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) pvp_stats = res['Response']['allPvP'].get('allTime') if (not len(pvp_stats)): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) time_played = pvp_stats['secondsPlayed']['basic']['displayValue'] kdr = pvp_stats['killsDeathsRatio']['basic']['displayValue'] kda = pvp_stats['killsDeathsAssists']['basic']['displayValue'] best_weapon = pvp_stats['weaponBestType']['basic']['displayValue'] games_played = pvp_stats['activitiesEntered']['basic']['displayValue'] best_kills = pvp_stats['bestSingleGameKills']['basic']['displayValue'] best_spree = pvp_stats['longestKillSpree']['basic']['displayValue'] combat_rating = pvp_stats['combatRating']['basic']['displayValue'] kills = pvp_stats['kills']['basic']['displayValue'] assists = 
pvp_stats['assists']['basic']['displayValue'] deaths = pvp_stats['deaths']['basic']['displayValue'] win_ratio = pvp_stats['winLossRatio']['basic']['displayValue'] if (win_ratio != '-'): win_ratio = float(win_ratio) win_rate = (str(round(((win_ratio / (win_ratio + 1)) * 100), 1)) + ' %') else: win_rate = win_ratio e = discord.Embed(colour=constants.BLUE) e.set_author(name='{} | Crucible Stats'.format(display_name), icon_url=constants.PLATFORM_URLS.get(platform)) e.add_field(name='Kills', value=kills, inline=True) e.add_field(name='Assists', value=assists, inline=True) e.add_field(name='Deaths', value=deaths, inline=True) e.add_field(name='KD Ratio', value=kdr, inline=True) e.add_field(name='Efficiency (KAD)', value=kda, inline=True) e.add_field(name='Win Rate', value=win_rate, inline=True) e.add_field(name='Best Spree', value=best_spree, inline=True) e.add_field(name='Most Kills in a Game', value=best_kills, inline=True) e.add_field(name='Favorite Weapon', value=best_weapon, inline=True) e.add_field(name='Combat Rating', value=combat_rating, inline=True) e.add_field(name='Games Played', value=games_played, inline=True) e.add_field(name='Time Played', value=time_played, inline=True) (await manager.say(e, embed=True, delete=False)) (await manager.clear())
@stats.command() async def pvp(self, ctx): manager = MessageManager(self.bot, ctx.author, ctx.channel, ctx.prefix, [ctx.message]) (await ctx.channel.trigger_typing()) info = self.bot.db.get_d2_info(ctx.author.id) if info: platform = info.get('platform') membership_id = info.get('membership_id') else: (await manager.say(('You must first register your Destiny 2 account with the ' + '`{}register` command.'.format(ctx.prefix)))) return (await manager.clear()) try: res = (await self.destiny.api.get_profile(platform, membership_id, ['Profiles'])) except: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) display_name = res['Response']['profile']['data']['userInfo']['displayName'] try: res = (await self.destiny.api.get_historical_stats(platform, membership_id, modes=[5])) except: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) pvp_stats = res['Response']['allPvP'].get('allTime') if (not len(pvp_stats)): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) time_played = pvp_stats['secondsPlayed']['basic']['displayValue'] kdr = pvp_stats['killsDeathsRatio']['basic']['displayValue'] kda = pvp_stats['killsDeathsAssists']['basic']['displayValue'] best_weapon = pvp_stats['weaponBestType']['basic']['displayValue'] games_played = pvp_stats['activitiesEntered']['basic']['displayValue'] best_kills = pvp_stats['bestSingleGameKills']['basic']['displayValue'] best_spree = pvp_stats['longestKillSpree']['basic']['displayValue'] combat_rating = pvp_stats['combatRating']['basic']['displayValue'] kills = pvp_stats['kills']['basic']['displayValue'] assists = 
pvp_stats['assists']['basic']['displayValue'] deaths = pvp_stats['deaths']['basic']['displayValue'] win_ratio = pvp_stats['winLossRatio']['basic']['displayValue'] if (win_ratio != '-'): win_ratio = float(win_ratio) win_rate = (str(round(((win_ratio / (win_ratio + 1)) * 100), 1)) + ' %') else: win_rate = win_ratio e = discord.Embed(colour=constants.BLUE) e.set_author(name='{} | Crucible Stats'.format(display_name), icon_url=constants.PLATFORM_URLS.get(platform)) e.add_field(name='Kills', value=kills, inline=True) e.add_field(name='Assists', value=assists, inline=True) e.add_field(name='Deaths', value=deaths, inline=True) e.add_field(name='KD Ratio', value=kdr, inline=True) e.add_field(name='Efficiency (KAD)', value=kda, inline=True) e.add_field(name='Win Rate', value=win_rate, inline=True) e.add_field(name='Best Spree', value=best_spree, inline=True) e.add_field(name='Most Kills in a Game', value=best_kills, inline=True) e.add_field(name='Favorite Weapon', value=best_weapon, inline=True) e.add_field(name='Combat Rating', value=combat_rating, inline=True) e.add_field(name='Games Played', value=games_played, inline=True) e.add_field(name='Time Played', value=time_played, inline=True) (await manager.say(e, embed=True, delete=False)) (await manager.clear())<|docstring|>Display Crucible stats for all characters on an account<|endoftext|>
24e3b074e5a1a99eaa5c8c6c95a16ee90a1b20c7451472cbeedacb498dd7875e
@stats.command() async def pve(self, ctx): 'Display PvE stats for all characters on an account' manager = MessageManager(self.bot, ctx.author, ctx.channel, ctx.prefix, [ctx.message]) (await ctx.channel.trigger_typing()) info = self.bot.db.get_d2_info(ctx.author.id) if info: platform = info.get('platform') membership_id = info.get('membership_id') else: (await manager.say(('You must first register your Destiny 2 account with the ' + '`{}register` command.'.format(ctx.prefix)))) return (await manager.clear()) try: res = (await self.destiny.api.get_profile(platform, membership_id, ['Profiles'])) except pydest.PydestException as e: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) display_name = res['Response']['profile']['data']['userInfo']['displayName'] try: res = (await self.destiny.api.get_historical_stats(platform, membership_id, modes=[7, 4, 16, 18])) except pydest.PydestException as e: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) pve_stats = res['Response'] time_played = (pve_stats['allPvE']['allTime']['totalActivityDurationSeconds']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) best_weapon = (pve_stats['allPvE']['allTime']['weaponBestType']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_heroic_events = (pve_stats['allPvE']['allTime']['heroicPublicEventsCompleted']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_events = (pve_stats['allPvE']['allTime']['publicEventsCompleted']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_raids = (pve_stats['raid']['allTime']['activitiesCleared']['basic']['displayValue'] if 
len(pve_stats['raid']) else 0) raid_time = (pve_stats['raid']['allTime']['totalActivityDurationSeconds']['basic']['displayValue'] if len(pve_stats['raid']) else 0) num_nightfall = (pve_stats['nightfall']['allTime']['activitiesCleared']['basic']['displayValue'] if len(pve_stats['nightfall']) else 0) num_strikes = (pve_stats['allStrikes']['allTime']['activitiesCleared']['basic']['displayValue'] if len(pve_stats['allStrikes']) else 0) fastest_nightfall = (pve_stats['nightfall']['allTime']['fastestCompletionMs']['basic']['displayValue'] if len(pve_stats['nightfall']) else 0) kills = (pve_stats['allPvE']['allTime']['kills']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) assists = (pve_stats['allPvE']['allTime']['assists']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) deaths = (pve_stats['allPvE']['allTime']['deaths']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) e = discord.Embed(colour=constants.BLUE) e.set_author(name='{} | PvE Stats'.format(display_name), icon_url=constants.PLATFORM_URLS.get(platform)) e.add_field(name='Kills', value=kills, inline=True) e.add_field(name='Assists', value=assists, inline=True) e.add_field(name='Deaths', value=deaths, inline=True) e.add_field(name='Strikes', value=num_strikes, inline=True) e.add_field(name='Nightfalls', value=num_nightfall, inline=True) e.add_field(name='Fastest Nightfall', value=fastest_nightfall, inline=True) e.add_field(name='Public Events', value=num_events, inline=True) e.add_field(name='Heroic Public Events', value=num_heroic_events, inline=True) e.add_field(name='Favorite Weapon', value=best_weapon, inline=True) e.add_field(name='Total Raid Time', value=raid_time, inline=True) e.add_field(name='Raids', value=num_raids, inline=True) e.add_field(name='Time Played', value=time_played, inline=True) (await manager.say(e, embed=True, delete=False)) (await manager.clear())
Display PvE stats for all characters on an account
cogs/stats.py
pve
LSDicky/Destiny-2
1
python
@stats.command() async def pve(self, ctx): manager = MessageManager(self.bot, ctx.author, ctx.channel, ctx.prefix, [ctx.message]) (await ctx.channel.trigger_typing()) info = self.bot.db.get_d2_info(ctx.author.id) if info: platform = info.get('platform') membership_id = info.get('membership_id') else: (await manager.say(('You must first register your Destiny 2 account with the ' + '`{}register` command.'.format(ctx.prefix)))) return (await manager.clear()) try: res = (await self.destiny.api.get_profile(platform, membership_id, ['Profiles'])) except pydest.PydestException as e: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) display_name = res['Response']['profile']['data']['userInfo']['displayName'] try: res = (await self.destiny.api.get_historical_stats(platform, membership_id, modes=[7, 4, 16, 18])) except pydest.PydestException as e: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) pve_stats = res['Response'] time_played = (pve_stats['allPvE']['allTime']['totalActivityDurationSeconds']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) best_weapon = (pve_stats['allPvE']['allTime']['weaponBestType']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_heroic_events = (pve_stats['allPvE']['allTime']['heroicPublicEventsCompleted']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_events = (pve_stats['allPvE']['allTime']['publicEventsCompleted']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_raids = (pve_stats['raid']['allTime']['activitiesCleared']['basic']['displayValue'] if len(pve_stats['raid']) else 0) raid_time = 
(pve_stats['raid']['allTime']['totalActivityDurationSeconds']['basic']['displayValue'] if len(pve_stats['raid']) else 0) num_nightfall = (pve_stats['nightfall']['allTime']['activitiesCleared']['basic']['displayValue'] if len(pve_stats['nightfall']) else 0) num_strikes = (pve_stats['allStrikes']['allTime']['activitiesCleared']['basic']['displayValue'] if len(pve_stats['allStrikes']) else 0) fastest_nightfall = (pve_stats['nightfall']['allTime']['fastestCompletionMs']['basic']['displayValue'] if len(pve_stats['nightfall']) else 0) kills = (pve_stats['allPvE']['allTime']['kills']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) assists = (pve_stats['allPvE']['allTime']['assists']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) deaths = (pve_stats['allPvE']['allTime']['deaths']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) e = discord.Embed(colour=constants.BLUE) e.set_author(name='{} | PvE Stats'.format(display_name), icon_url=constants.PLATFORM_URLS.get(platform)) e.add_field(name='Kills', value=kills, inline=True) e.add_field(name='Assists', value=assists, inline=True) e.add_field(name='Deaths', value=deaths, inline=True) e.add_field(name='Strikes', value=num_strikes, inline=True) e.add_field(name='Nightfalls', value=num_nightfall, inline=True) e.add_field(name='Fastest Nightfall', value=fastest_nightfall, inline=True) e.add_field(name='Public Events', value=num_events, inline=True) e.add_field(name='Heroic Public Events', value=num_heroic_events, inline=True) e.add_field(name='Favorite Weapon', value=best_weapon, inline=True) e.add_field(name='Total Raid Time', value=raid_time, inline=True) e.add_field(name='Raids', value=num_raids, inline=True) e.add_field(name='Time Played', value=time_played, inline=True) (await manager.say(e, embed=True, delete=False)) (await manager.clear())
@stats.command() async def pve(self, ctx): manager = MessageManager(self.bot, ctx.author, ctx.channel, ctx.prefix, [ctx.message]) (await ctx.channel.trigger_typing()) info = self.bot.db.get_d2_info(ctx.author.id) if info: platform = info.get('platform') membership_id = info.get('membership_id') else: (await manager.say(('You must first register your Destiny 2 account with the ' + '`{}register` command.'.format(ctx.prefix)))) return (await manager.clear()) try: res = (await self.destiny.api.get_profile(platform, membership_id, ['Profiles'])) except pydest.PydestException as e: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) display_name = res['Response']['profile']['data']['userInfo']['displayName'] try: res = (await self.destiny.api.get_historical_stats(platform, membership_id, modes=[7, 4, 16, 18])) except pydest.PydestException as e: (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) if (res['ErrorCode'] != 1): (await manager.say("Sorry, I can't seem to retrieve your stats right now")) return (await manager.clear()) pve_stats = res['Response'] time_played = (pve_stats['allPvE']['allTime']['totalActivityDurationSeconds']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) best_weapon = (pve_stats['allPvE']['allTime']['weaponBestType']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_heroic_events = (pve_stats['allPvE']['allTime']['heroicPublicEventsCompleted']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_events = (pve_stats['allPvE']['allTime']['publicEventsCompleted']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) num_raids = (pve_stats['raid']['allTime']['activitiesCleared']['basic']['displayValue'] if len(pve_stats['raid']) else 0) raid_time = 
(pve_stats['raid']['allTime']['totalActivityDurationSeconds']['basic']['displayValue'] if len(pve_stats['raid']) else 0) num_nightfall = (pve_stats['nightfall']['allTime']['activitiesCleared']['basic']['displayValue'] if len(pve_stats['nightfall']) else 0) num_strikes = (pve_stats['allStrikes']['allTime']['activitiesCleared']['basic']['displayValue'] if len(pve_stats['allStrikes']) else 0) fastest_nightfall = (pve_stats['nightfall']['allTime']['fastestCompletionMs']['basic']['displayValue'] if len(pve_stats['nightfall']) else 0) kills = (pve_stats['allPvE']['allTime']['kills']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) assists = (pve_stats['allPvE']['allTime']['assists']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) deaths = (pve_stats['allPvE']['allTime']['deaths']['basic']['displayValue'] if len(pve_stats['allPvE']) else 0) e = discord.Embed(colour=constants.BLUE) e.set_author(name='{} | PvE Stats'.format(display_name), icon_url=constants.PLATFORM_URLS.get(platform)) e.add_field(name='Kills', value=kills, inline=True) e.add_field(name='Assists', value=assists, inline=True) e.add_field(name='Deaths', value=deaths, inline=True) e.add_field(name='Strikes', value=num_strikes, inline=True) e.add_field(name='Nightfalls', value=num_nightfall, inline=True) e.add_field(name='Fastest Nightfall', value=fastest_nightfall, inline=True) e.add_field(name='Public Events', value=num_events, inline=True) e.add_field(name='Heroic Public Events', value=num_heroic_events, inline=True) e.add_field(name='Favorite Weapon', value=best_weapon, inline=True) e.add_field(name='Total Raid Time', value=raid_time, inline=True) e.add_field(name='Raids', value=num_raids, inline=True) e.add_field(name='Time Played', value=time_played, inline=True) (await manager.say(e, embed=True, delete=False)) (await manager.clear())<|docstring|>Display PvE stats for all characters on an account<|endoftext|>
5027487a71777a8c52b01a7d6a34bf92d589ca790859f198810eb5f0781b9b03
@np.vectorize def _rerange(utils, offset=OFFSET): 'Re-range utilities so they are in the open interval (0,1)' return ((utils * (1.0 - (2.0 * offset))) + offset)
Re-range utilities so they are in the open interval (0,1)
decisiorama/pda_gpu/aggregate.py
_rerange
j-chacon/Hartmann_contaminants
0
python
@np.vectorize def _rerange(utils, offset=OFFSET): return ((utils * (1.0 - (2.0 * offset))) + offset)
@np.vectorize def _rerange(utils, offset=OFFSET): return ((utils * (1.0 - (2.0 * offset))) + offset)<|docstring|>Re-range utilities so they are in the open interval (0,1)<|endoftext|>
f75019e1a04780e62885c083aac8d7a8aafc3379747c4c072d24a81e4ad37a4e
def _dimcheck(utils, w): 'Check the dimension consistency of inputs and weights' if (not (utils.ndim == 2)): msg = 'The dimensions of utils have to be (1, ) or (2, ) got {0}'.format(utils.ndim) raise ValueError(msg) if (w is None): w = (np.ones(utils.shape[1]) / utils.shape[1]) elif callable(w[0]): w = np.array([wi() for wi in w]) w = (w / np.sum(w, axis=0)) if (w.ndim == 1): if (utils.shape[1] != w.shape[0]): msg = 'Weights and solutions do not match. The shape of solutions is {0} and of weights is {1}'.format(utils.shape[1], w.shape) raise ValueError(msg) elif (w.ndim == 2): if (utils.shape != w.shape): msg = 'Weights and solutions do not match. The shape of solutions is {0} and of weights is {1}'.format(utils.shape, w.shape) raise ValueError(msg)
Check the dimension consistency of inputs and weights
decisiorama/pda_gpu/aggregate.py
_dimcheck
j-chacon/Hartmann_contaminants
0
python
def _dimcheck(utils, w): if (not (utils.ndim == 2)): msg = 'The dimensions of utils have to be (1, ) or (2, ) got {0}'.format(utils.ndim) raise ValueError(msg) if (w is None): w = (np.ones(utils.shape[1]) / utils.shape[1]) elif callable(w[0]): w = np.array([wi() for wi in w]) w = (w / np.sum(w, axis=0)) if (w.ndim == 1): if (utils.shape[1] != w.shape[0]): msg = 'Weights and solutions do not match. The shape of solutions is {0} and of weights is {1}'.format(utils.shape[1], w.shape) raise ValueError(msg) elif (w.ndim == 2): if (utils.shape != w.shape): msg = 'Weights and solutions do not match. The shape of solutions is {0} and of weights is {1}'.format(utils.shape, w.shape) raise ValueError(msg)
def _dimcheck(utils, w): if (not (utils.ndim == 2)): msg = 'The dimensions of utils have to be (1, ) or (2, ) got {0}'.format(utils.ndim) raise ValueError(msg) if (w is None): w = (np.ones(utils.shape[1]) / utils.shape[1]) elif callable(w[0]): w = np.array([wi() for wi in w]) w = (w / np.sum(w, axis=0)) if (w.ndim == 1): if (utils.shape[1] != w.shape[0]): msg = 'Weights and solutions do not match. The shape of solutions is {0} and of weights is {1}'.format(utils.shape[1], w.shape) raise ValueError(msg) elif (w.ndim == 2): if (utils.shape != w.shape): msg = 'Weights and solutions do not match. The shape of solutions is {0} and of weights is {1}'.format(utils.shape, w.shape) raise ValueError(msg)<|docstring|>Check the dimension consistency of inputs and weights<|endoftext|>
d62efefa81266687a2e09c34d570e288b5fa351edd1b6844846f6c9870ec20c5
def _w_normalize(w): 'Normalise the weights so the um is equal to 1' if (w.ndim == 1): w[:] = (w / np.sum(w, axis=0)) else: _w_sum = np.sum(w, axis=1) for i in range(w.shape[1]): w[(:, i)] = (w[(:, i)] / _w_sum)
Normalise the weights so the um is equal to 1
decisiorama/pda_gpu/aggregate.py
_w_normalize
j-chacon/Hartmann_contaminants
0
python
def _w_normalize(w): if (w.ndim == 1): w[:] = (w / np.sum(w, axis=0)) else: _w_sum = np.sum(w, axis=1) for i in range(w.shape[1]): w[(:, i)] = (w[(:, i)] / _w_sum)
def _w_normalize(w): if (w.ndim == 1): w[:] = (w / np.sum(w, axis=0)) else: _w_sum = np.sum(w, axis=1) for i in range(w.shape[1]): w[(:, i)] = (w[(:, i)] / _w_sum)<|docstring|>Normalise the weights so the um is equal to 1<|endoftext|>
48b550de4a46ff1e5d0a3500d3a08870bd6f37c4ad5363eccc4a6a430f4cf55c
def additive(utils, w, w_norm=True, *args, **kwargs): 'Additive utility aggregation function\n\n Aggregate preferences using a weighted average\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(additive(s,w))\n\n >>> [0.2]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(additive(s,w))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(additive(s,w))\n\n >>> [0.2 0.8 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) if (w.shape == utils.shape): out = np.sum((utils * w), axis=1) else: out = np.dot(utils, w) return out
Additive utility aggregation function Aggregate preferences using a weighted average Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(additive(s,w)) >>> [0.2] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(additive(s,w)) >>> [0.2 0.8 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(additive(s,w)) >>> [0.2 0.8 0.5]
decisiorama/pda_gpu/aggregate.py
additive
j-chacon/Hartmann_contaminants
0
python
def additive(utils, w, w_norm=True, *args, **kwargs): 'Additive utility aggregation function\n\n Aggregate preferences using a weighted average\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(additive(s,w))\n\n >>> [0.2]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(additive(s,w))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(additive(s,w))\n\n >>> [0.2 0.8 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) if (w.shape == utils.shape): out = np.sum((utils * w), axis=1) else: out = np.dot(utils, w) return out
def additive(utils, w, w_norm=True, *args, **kwargs): 'Additive utility aggregation function\n\n Aggregate preferences using a weighted average\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(additive(s,w))\n\n >>> [0.2]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(additive(s,w))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(additive(s,w))\n\n >>> [0.2 0.8 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) if (w.shape == utils.shape): out = np.sum((utils * w), axis=1) else: out = np.dot(utils, w) return out<|docstring|>Additive utility aggregation function Aggregate preferences using a weighted average Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. 
In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(additive(s,w)) >>> [0.2] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(additive(s,w)) >>> [0.2 0.8 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(additive(s,w)) >>> [0.2 0.8 0.5]<|endoftext|>
648e8e6d34ca6df98a95155fb7e8520456eec5aab967098e42c63c7185261b35
def cobb_douglas(utils, w, w_norm=True, *args, **kwargs): 'Cobb-Douglas utility aggregation function\n\n Aggregate preferences using the cobb-douglas aggregation function. This\n method is also known as the weighted geometric average\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(cobb_douglas(utils, w))\n\n >>> [0.]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(cobb_douglas(utils, w))\n\n >>> [0. 0. 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(cobb_douglas(utils, w))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) return np.prod((utils ** w), axis=1)
Cobb-Douglas utility aggregation function Aggregate preferences using the cobb-douglas aggregation function. This method is also known as the weighted geometric average Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(cobb_douglas(utils, w)) >>> [0.] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(cobb_douglas(utils, w)) >>> [0. 0. 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(cobb_douglas(utils, w)) >>> [0. 0. 0.5]
decisiorama/pda_gpu/aggregate.py
cobb_douglas
j-chacon/Hartmann_contaminants
0
python
def cobb_douglas(utils, w, w_norm=True, *args, **kwargs): 'Cobb-Douglas utility aggregation function\n\n Aggregate preferences using the cobb-douglas aggregation function. This\n method is also known as the weighted geometric average\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(cobb_douglas(utils, w))\n\n >>> [0.]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(cobb_douglas(utils, w))\n\n >>> [0. 0. 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(cobb_douglas(utils, w))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) return np.prod((utils ** w), axis=1)
def cobb_douglas(utils, w, w_norm=True, *args, **kwargs): 'Cobb-Douglas utility aggregation function\n\n Aggregate preferences using the cobb-douglas aggregation function. This\n method is also known as the weighted geometric average\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(cobb_douglas(utils, w))\n\n >>> [0.]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(cobb_douglas(utils, w))\n\n >>> [0. 0. 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(cobb_douglas(utils, w))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) return np.prod((utils ** w), axis=1)<|docstring|>Cobb-Douglas utility aggregation function Aggregate preferences using the cobb-douglas aggregation function. This method is also known as the weighted geometric average Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. 
If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(cobb_douglas(utils, w)) >>> [0.] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(cobb_douglas(utils, w)) >>> [0. 0. 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(cobb_douglas(utils, w)) >>> [0. 0. 0.5]<|endoftext|>
8526e6677fe179053e9cf1cd371505fd6ad209da5809c543e4d09d4cc84c32d3
def mix_linear_cobb(utils, w, pars=(0.5,), w_norm=True, *args, **kwargs):
    """Linear mix of additive and Cobb-Douglas aggregation (to be deprecated).

    Computes ``alpha * additive(...) + (1 - alpha) * cobb_douglas(...)``.

    Parameters
    ----------
    utils : ndarray [n, u]
        Utilities to aggregate; rows are random samples (n), columns are
        individual utilities (u).
    w : ndarray [u] or [n, u]
        Weights for the utilities, as accepted by `additive` and
        `cobb_douglas`.
    pars : sequence, optional
        One-element sequence holding the mixing coefficient ``alpha``, or a
        zero-argument callable that returns it (e.g. a random sampler).
        The default was a mutable list (``[0.5]``); it is now an immutable
        tuple with the same value to avoid the shared-mutable-default
        pitfall.
    w_norm : bool, optional
        Passed through to the underlying aggregation functions.

    Returns
    -------
    ndarray [n]
        Aggregated value for each random sample.
    """
    # alpha may be supplied as a plain value or as a callable sampler.
    alpha = pars[0]() if callable(pars[0]) else pars[0]
    add_model = additive(utils, w, w_norm)
    cd_model = cobb_douglas(utils, w, w_norm)
    return alpha * add_model + (1.0 - alpha) * cd_model
to be deprecated
decisiorama/pda_gpu/aggregate.py
mix_linear_cobb
j-chacon/Hartmann_contaminants
0
python
def mix_linear_cobb(utils, w, pars=[0.5], w_norm=True, *args, **kwargs): if callable(pars[0]): alpha = pars[0]() else: alpha = pars[0] add_model = additive(utils, w, w_norm) cd_model = cobb_douglas(utils, w, w_norm) return ((alpha * add_model) + ((1.0 - alpha) * cd_model))
def mix_linear_cobb(utils, w, pars=[0.5], w_norm=True, *args, **kwargs): if callable(pars[0]): alpha = pars[0]() else: alpha = pars[0] add_model = additive(utils, w, w_norm) cd_model = cobb_douglas(utils, w, w_norm) return ((alpha * add_model) + ((1.0 - alpha) * cd_model))<|docstring|>to be deprecated<|endoftext|>
7f51bd818d50827662109d6c706029ec6087fc7e21b6133394a3bb803d7cd8b1
def reverse_harmonic(utils, w, w_norm=True, *args, **kwargs):
    """Reverse harmonic aggregation of utilities.

    Computes ``1 - 1 / sum(w / (1 - utils))`` along the utility axis — the
    mirror image of the weighted harmonic mean, taken on the complement of
    the utilities.

    Parameters
    ----------
    utils : ndarray [n, u]
        Utilities to aggregate; rows are random samples (n), columns are
        individual utilities (u). A 1D array is treated as a single sample.
    w : ndarray [u] or [n, u]
        Weights for the utilities. A 1D array applies the same weights to
        every sample; a 2D array must match the shape of `utils`.
    w_norm : bool, optional
        When True, the weights are normalized so they sum to 1.

    Returns
    -------
    ndarray [n]
        Aggregated value for each random sample.

    Example
    -------
    .. highlight:: python
    .. code-block:: python

        utils = np.array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.5, 0.5]])
        w = np.array([0.8, 0.2])
        print(reverse_harmonic(utils, w))

        >>> [1.  1.  0.5]
    """
    if utils.ndim == 1:
        utils = np.reshape(utils, [1, -1])
    _dimcheck(utils, w)
    if w_norm:
        _w_normalize(w)
    # NOTE(review): when a utility equals exactly 1.0 the complement is 0
    # and numpy emits a divide-by-zero warning; the inf propagates to a
    # final value of 1.0, matching the documented examples.
    complement = 1.0 - utils
    return 1.0 - 1.0 / np.sum(w / complement, axis=1)
Reverse harmonic utility aggregation function Aggregate preferences using the cobb-douglas aggregation function. This method is also known as the weighted geometric average Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(reverse_harmonic(utils, w)) >>> [1.] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(reverse_harmonic(utils, w)) >>> [1. 1. 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(reverse_harmonic(utils, w)) >>> [1. 1. 0.5]
decisiorama/pda_gpu/aggregate.py
reverse_harmonic
j-chacon/Hartmann_contaminants
0
python
def reverse_harmonic(utils, w, w_norm=True, *args, **kwargs): 'Reverse harmonic utility aggregation function\n\n Aggregate preferences using the cobb-douglas aggregation function. This\n method is also known as the weighted geometric average\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(reverse_harmonic(utils, w))\n\n >>> [1.]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(reverse_harmonic(utils, w))\n\n >>> [1. 1. 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(reverse_harmonic(utils, w))\n\n >>> [1. 1. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) return (1.0 - (1.0 / np.sum((w / (1.0 - utils)), axis=1)))
def reverse_harmonic(utils, w, w_norm=True, *args, **kwargs): 'Reverse harmonic utility aggregation function\n\n Aggregate preferences using the cobb-douglas aggregation function. This\n method is also known as the weighted geometric average\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(reverse_harmonic(utils, w))\n\n >>> [1.]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(reverse_harmonic(utils, w))\n\n >>> [1. 1. 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(reverse_harmonic(utils, w))\n\n >>> [1. 1. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) return (1.0 - (1.0 / np.sum((w / (1.0 - utils)), axis=1)))<|docstring|>Reverse harmonic utility aggregation function Aggregate preferences using the cobb-douglas aggregation function. This method is also known as the weighted geometric average Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. 
The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(reverse_harmonic(utils, w)) >>> [1.] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(reverse_harmonic(utils, w)) >>> [1. 1. 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(reverse_harmonic(utils, w)) >>> [1. 1. 0.5]<|endoftext|>
a06c6ef72e47a002938fc78f75cc21f4e6fc939451578428ef02e477e1b50ee3
def reverse_power(utils, w, alpha, w_norm=True, *args, **kwargs):
    """Reverse power utility aggregation function.

    Aggregates preferences as
    ``1 - (sum((w * (1 - utils)) ** alpha)) ** (1 / alpha)`` per sample.

    Parameters
    ----------
    utils : ndarray [n, u]
        Utilities to aggregate; rows are random samples (n), columns are
        individual utilities (u). A 1D array is treated as a single sample.
    w : ndarray [u] or [n, u]
        Weights for the utilities. A 1D array applies the same weights to
        every sample; a 2D array must match the shape of `utils`.
    alpha : float or ndarray [n]
        Power coefficient. A scalar applies to every sample; a 1D array
        supplies one value per sample.
    w_norm : bool, optional
        When True, the weights are normalized so they sum to 1.

    Returns
    -------
    ndarray [n]
        Aggregated value for each random sample.

    Raises
    ------
    ValueError
        If `alpha` is an ndarray with more than one dimension.

    Example
    -------
    .. highlight:: python
    .. code-block:: python

        utils = np.array([[0.0, 1.0],
                          [1.0, 0.0],
                          [0.5, 0.5]])
        w = np.array([0.8, 0.2])
        alpha = np.array([1.0, 1.0, 1.0])
        print(reverse_power(utils, w, alpha))

        >>> [0.2 0.8 0.5]
    """
    if utils.ndim == 1:
        utils = np.reshape(utils, [1, -1])
    _dimcheck(utils, w)
    if w_norm:
        _w_normalize(w)
    if isinstance(alpha, np.ndarray):
        if alpha.ndim != 1:
            raise ValueError('alpha has to be scalar or 1D array, got {0}'.format(alpha.ndim))
        # Broadcast the per-sample alpha across the utility axis; the
        # original per-sample values are recovered as alpha_2d[:, 0] for
        # the outer 1/alpha root. (Fixes the invalid subscript
        # `alpha[(:, 0)]` in the previous serialization.)
        alpha_2d = np.tile(alpha, (utils.shape[1], 1)).T
        inner = np.sum(np.power(w * (1.0 - utils), alpha_2d), axis=1)
        out = 1.0 - np.power(inner, 1.0 / alpha_2d[:, 0])
    else:
        inner = np.sum(np.power(w * (1.0 - utils), alpha), axis=1)
        out = 1.0 - np.power(inner, 1.0 / alpha)
    return out
Reverse power utility aggregation function Aggregate preferences using the reverse power aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional, default True If True, the sum of the weights will be equal to 1 alpha : float, ndarray [n], default 1.0 power coefficient. If passed as a float, the values will remain the same over the whole computation. Otherwise, it is possible to pass a vector with a value for each random sample Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) alpha = 1.0 print(reverse_power(utils, w, alpha)) >>> [0.2] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) alpha = np.array([1.0, 1.0, 1.0]) print(reverse_power(utils, w, alpha)) >>> [0.2 0.8 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) alpha = np.array([1.0, 1.0,1.0]) print(reverse_power(utils, w, alpha)) >>> [0.2 0.8 0.5]
decisiorama/pda_gpu/aggregate.py
reverse_power
j-chacon/Hartmann_contaminants
0
python
def reverse_power(utils, w, alpha, w_norm=True, *args, **kwargs): 'Reverse power utility aggregation function\n\n Aggregate preferences using the reverse power aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional, default True\n If True, the sum of the weights will be equal to 1\n alpha : float, ndarray [n], default 1.0\n power coefficient. If passed as a float, the values will remain the\n same over the whole computation. Otherwise, it is possible to pass a\n vector with a value for each random sample\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. 
code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n alpha = 1.0\n print(reverse_power(utils, w, alpha))\n\n >>> [0.2]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n alpha = np.array([1.0, 1.0, 1.0])\n print(reverse_power(utils, w, alpha))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n alpha = np.array([1.0, 1.0,1.0])\n print(reverse_power(utils, w, alpha))\n\n >>> [0.2 0.8 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) if (type(alpha) is np.ndarray): if (alpha.ndim == 1): alpha = np.tile(alpha, (utils.shape[1], 1)).T out = (1.0 - np.power(np.sum(np.power((w * (1.0 - utils)), alpha), axis=1), (1.0 / alpha[(:, 0)]))) else: _msg = 'alpha has to be scalar or 1D array, got {0}'.format(alpha.ndim) raise ValueError(_msg) else: out = (1.0 - np.power(np.sum(np.power((w * (1.0 - utils)), alpha), axis=1), (1.0 / alpha))) return out
def reverse_power(utils, w, alpha, w_norm=True, *args, **kwargs): 'Reverse power utility aggregation function\n\n Aggregate preferences using the reverse power aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional, default True\n If True, the sum of the weights will be equal to 1\n alpha : float, ndarray [n], default 1.0\n power coefficient. If passed as a float, the values will remain the\n same over the whole computation. Otherwise, it is possible to pass a\n vector with a value for each random sample\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. 
code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n alpha = 1.0\n print(reverse_power(utils, w, alpha))\n\n >>> [0.2]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n alpha = np.array([1.0, 1.0, 1.0])\n print(reverse_power(utils, w, alpha))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n alpha = np.array([1.0, 1.0,1.0])\n print(reverse_power(utils, w, alpha))\n\n >>> [0.2 0.8 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) if (type(alpha) is np.ndarray): if (alpha.ndim == 1): alpha = np.tile(alpha, (utils.shape[1], 1)).T out = (1.0 - np.power(np.sum(np.power((w * (1.0 - utils)), alpha), axis=1), (1.0 / alpha[(:, 0)]))) else: _msg = 'alpha has to be scalar or 1D array, got {0}'.format(alpha.ndim) raise ValueError(_msg) else: out = (1.0 - np.power(np.sum(np.power((w * (1.0 - utils)), alpha), axis=1), (1.0 / alpha))) return out<|docstring|>Reverse power utility aggregation function Aggregate preferences using the reverse power aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional, default True If True, the sum of the weights will be equal to 1 alpha : float, ndarray [n], default 1.0 power coefficient. If passed as a float, the values will remain the same over the whole computation. 
Otherwise, it is possible to pass a vector with a value for each random sample Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) alpha = 1.0 print(reverse_power(utils, w, alpha)) >>> [0.2] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) alpha = np.array([1.0, 1.0, 1.0]) print(reverse_power(utils, w, alpha)) >>> [0.2 0.8 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) alpha = np.array([1.0, 1.0,1.0]) print(reverse_power(utils, w, alpha)) >>> [0.2 0.8 0.5]<|endoftext|>
9a1494d8c60e55228045ae6b190bef341ce3008ae2cc909b2783ebc6f1ee08a0
def split_power(utils, w, alpha, s, w_norm=True, *args, **kwargs):
    """Split power utility aggregation function.

    Each utility is passed through a piecewise ("split") power transform
    ``g`` with inflection point `s`, the weighted sum is taken, and the
    result is mapped back through the inverse transform.

    Parameters
    ----------
    utils : ndarray [n, u]
        Utilities to aggregate; rows are random samples (n), columns are
        individual utilities (u). A 1D array is treated as a single sample.
    w : ndarray [u] or [n, u]
        Weights for the utilities. A 1D array applies the same weights to
        every sample; a 2D array must match the shape of `utils`.
    alpha : float or ndarray [n]
        Power parameter of the split power function. A scalar applies to
        every sample; a 1D array supplies one value per sample.
    s : float or ndarray [n]
        Inflection point of the split power function; scalar or one value
        per sample.
    w_norm : bool, optional
        When True, the weights are normalized so they sum to 1.

    Returns
    -------
    ndarray [n]
        Aggregated value for each random sample.

    Raises
    ------
    ValueError
        If `alpha` or `s` is an ndarray with more than one dimension.
    """
    if utils.ndim == 1:
        utils = np.reshape(utils, [1, -1])
    _dimcheck(utils, w)
    if w_norm:
        _w_normalize(w)

    @np.vectorize
    def _g(u, s, alpha):
        # Piecewise power transform with inflection at s.
        if u <= s:
            return s * (u / s) ** alpha
        return 1.0 - (1.0 - s) * (((1.0 - u) / (1.0 - s)) ** alpha)

    @np.vectorize
    def _g_inv(u, s, alpha):
        # Inverse of _g: power 1/alpha on each branch.
        if u <= s:
            return s * (u / s) ** (1.0 / alpha)
        return 1.0 - (1.0 - s) * (((1.0 - u) / (1.0 - s)) ** (1.0 / alpha))

    if isinstance(alpha, np.ndarray):
        if alpha.ndim != 1:
            raise ValueError('alpha has to be scalar or 1D array, got {0}'.format(alpha.ndim))
        _alpha = np.tile(alpha, (utils.shape[1], 1)).T
    else:
        _alpha = alpha

    if isinstance(s, np.ndarray):
        if s.ndim != 1:
            # Bug fix: the original formatted alpha.ndim into this message,
            # which reports the wrong variable and raises AttributeError
            # (instead of ValueError) whenever alpha is a plain float.
            raise ValueError('s has to be scalar or 1D array, got {0}'.format(s.ndim))
        _s = np.tile(s, (utils.shape[1], 1)).T
    else:
        _s = s

    # Aggregate in transformed space, then invert with the per-sample
    # (un-tiled) s and alpha: the inner sum is already shape [n].
    return _g_inv(np.sum(w * _g(utils, _s, _alpha), axis=1), s, alpha)
Split power utility aggregation function Aggregate preferences using the split power aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` alpha : float, ndarray[n] Alpha parameter of the power function. In case a float value is used, it will be constant for all of the random samples s : float, ndarray[n] s parameter of the power function. In case a float value is used, it will be constant for all of the random samples w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) alpha = 1.0 s = 1.0 print(split_power(utils, w, alpha, s)) >>> [0.2] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) alpha = np.array([1.0, 1.0, 1.0]) s = 1.0 print(split_power(utils, w, alpha, s)) >>> [0.2 0.8 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) alpha = np.array([1.0, 1.0, 1.0]) s = np.array([1.0, 1.0, 1.0]) print(split_power(utils, w, alpha, s)) >>> [0.2 0.8 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) alpha = 1.0 s = np.array([1.0, 1.0, 1.0]) print(split_power(utils, w, alpha, s)) >>> [0.2 0.8 0.5]
decisiorama/pda_gpu/aggregate.py
split_power
j-chacon/Hartmann_contaminants
0
python
def split_power(utils, w, alpha, s, w_norm=True, *args, **kwargs): 'Split power utility aggregation function\n\n Aggregate preferences using the split power aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n alpha : float, ndarray[n]\n Alpha parameter of the power function. In case a float value is used,\n it will be constant for all of the random samples\n s : float, ndarray[n]\n s parameter of the power function. In case a float value is used,\n it will be constant for all of the random samples\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. 
code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n alpha = 1.0\n s = 1.0\n print(split_power(utils, w, alpha, s))\n\n >>> [0.2]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n alpha = np.array([1.0, 1.0, 1.0])\n s = 1.0\n print(split_power(utils, w, alpha, s))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n alpha = np.array([1.0, 1.0, 1.0])\n s = np.array([1.0, 1.0, 1.0])\n print(split_power(utils, w, alpha, s))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n alpha = 1.0\n s = np.array([1.0, 1.0, 1.0])\n print(split_power(utils, w, alpha, s))\n\n >>> [0.2 0.8 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) @np.vectorize def _g(u, s, alpha): if (u <= s): out = (s * ((u / s) ** alpha)) else: out = (1.0 - ((1.0 - s) * (((1.0 - u) / (1.0 - s)) ** alpha))) return out @np.vectorize def _g_inv(u, s, alpha): if (u <= s): out = (s * ((u / s) ** (1.0 / alpha))) else: out = (1.0 - ((1.0 - s) * (((1.0 - u) / (1.0 - s)) ** (1.0 / alpha)))) return out if (type(alpha) is np.ndarray): if (alpha.ndim == 1): _alpha = np.tile(alpha, (utils.shape[1], 1)).T else: _msg = 'alpha has to be scalar or 1D array, got {0}'.format(alpha.ndim) raise ValueError(_msg) else: _alpha = alpha if (type(s) is np.ndarray): if (s.ndim == 1): _s = np.tile(s, (utils.shape[1], 1)).T else: _msg = 's has to be scalar or 1D array, got {0}'.format(alpha.ndim) raise ValueError(_msg) else: _s = s out = _g_inv(np.sum((w * _g(utils, _s, _alpha)), axis=1), s, alpha) return out
def split_power(utils, w, alpha, s, w_norm=True, *args, **kwargs): 'Split power utility aggregation function\n\n Aggregate preferences using the split power aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n alpha : float, ndarray[n]\n Alpha parameter of the power function. In case a float value is used,\n it will be constant for all of the random samples\n s : float, ndarray[n]\n s parameter of the power function. In case a float value is used,\n it will be constant for all of the random samples\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. 
code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n alpha = 1.0\n s = 1.0\n print(split_power(utils, w, alpha, s))\n\n >>> [0.2]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n alpha = np.array([1.0, 1.0, 1.0])\n s = 1.0\n print(split_power(utils, w, alpha, s))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n alpha = np.array([1.0, 1.0, 1.0])\n s = np.array([1.0, 1.0, 1.0])\n print(split_power(utils, w, alpha, s))\n\n >>> [0.2 0.8 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n alpha = 1.0\n s = np.array([1.0, 1.0, 1.0])\n print(split_power(utils, w, alpha, s))\n\n >>> [0.2 0.8 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) @np.vectorize def _g(u, s, alpha): if (u <= s): out = (s * ((u / s) ** alpha)) else: out = (1.0 - ((1.0 - s) * (((1.0 - u) / (1.0 - s)) ** alpha))) return out @np.vectorize def _g_inv(u, s, alpha): if (u <= s): out = (s * ((u / s) ** (1.0 / alpha))) else: out = (1.0 - ((1.0 - s) * (((1.0 - u) / (1.0 - s)) ** (1.0 / alpha)))) return out if (type(alpha) is np.ndarray): if (alpha.ndim == 1): _alpha = np.tile(alpha, (utils.shape[1], 1)).T else: _msg = 'alpha has to be scalar or 1D array, got {0}'.format(alpha.ndim) raise ValueError(_msg) else: _alpha = alpha if (type(s) is np.ndarray): if (s.ndim == 1): _s = np.tile(s, (utils.shape[1], 1)).T else: _msg = 's has to be scalar or 1D array, got {0}'.format(alpha.ndim) raise ValueError(_msg) else: _s = s out = _g_inv(np.sum((w * _g(utils, _s, _alpha)), axis=1), s, alpha) return out<|docstring|>Split power utility aggregation function Aggregate preferences using the split power aggregation function. 
Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` alpha : float, ndarray[n] Alpha parameter of the power function. In case a float value is used, it will be constant for all of the random samples s : float, ndarray[n] s parameter of the power function. In case a float value is used, it will be constant for all of the random samples w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) alpha = 1.0 s = 1.0 print(split_power(utils, w, alpha, s)) >>> [0.2] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) alpha = np.array([1.0, 1.0, 1.0]) s = 1.0 print(split_power(utils, w, alpha, s)) >>> [0.2 0.8 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) alpha = np.array([1.0, 1.0, 1.0]) s = np.array([1.0, 1.0, 1.0]) print(split_power(utils, w, alpha, s)) >>> [0.2 0.8 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) alpha = 1.0 s = np.array([1.0, 1.0, 1.0]) print(split_power(utils, w, alpha, s)) >>> [0.2 0.8 0.5]<|endoftext|>
cce7f0efe71a186d42cf3e2fc3c331decffa4ada58e833f057d86e4749e04602
def harmonic(utils, w, w_norm=True, rerange=False, *args, **kwargs): 'Harmonic utility aggregation function\n\n Aggregate preferences using the reverse power aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n rerange : Bool, optional\n Changes the range of utils to be in the open interval (0,1), defined\n by the offset value (defined at a library level as OFFSET, 1e-6).\n By default is set to False.\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=True))\n\n >>> [1.24999969e-06]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=True))\n\n >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(harmonic(utils, w, rerange=True))\n\n >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01]\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0.]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0. 0. 
0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) if rerange: utils = _rerange(utils, OFFSET) return (1.0 / np.sum((w / utils), axis=1))
Harmonic utility aggregation function Aggregate preferences using the reverse power aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional If True, the sum of the weights will be equal to 1 rerange : Bool, optional Changes the range of utils to be in the open interval (0,1), defined by the offset value (defined at a library level as OFFSET, 1e-6). By default is set to False. Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(harmonic(utils, w, rerange=True)) >>> [1.24999969e-06] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(harmonic(utils, w, rerange=True)) >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(harmonic(utils, w, rerange=True)) >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01] utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(harmonic(utils, w, rerange=False)) >>> [0.] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(harmonic(utils, w, rerange=False)) >>> [0. 0. 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(harmonic(utils, w, rerange=False)) >>> [0. 0. 0.5]
decisiorama/pda_gpu/aggregate.py
harmonic
j-chacon/Hartmann_contaminants
0
python
def harmonic(utils, w, w_norm=True, rerange=False, *args, **kwargs): 'Harmonic utility aggregation function\n\n Aggregate preferences using the reverse power aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n rerange : Bool, optional\n Changes the range of utils to be in the open interval (0,1), defined\n by the offset value (defined at a library level as OFFSET, 1e-6).\n By default is set to False.\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=True))\n\n >>> [1.24999969e-06]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=True))\n\n >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(harmonic(utils, w, rerange=True))\n\n >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01]\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0.]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0. 0. 
0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) if rerange: utils = _rerange(utils, OFFSET) return (1.0 / np.sum((w / utils), axis=1))
def harmonic(utils, w, w_norm=True, rerange=False, *args, **kwargs): 'Harmonic utility aggregation function\n\n Aggregate preferences using the reverse power aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n rerange : Bool, optional\n Changes the range of utils to be in the open interval (0,1), defined\n by the offset value (defined at a library level as OFFSET, 1e-6).\n By default is set to False.\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=True))\n\n >>> [1.24999969e-06]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=True))\n\n >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(harmonic(utils, w, rerange=True))\n\n >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01]\n\n utils = np.array([0.0, 1.0])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0.]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([0.8, 0.2])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0. 0. 
0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n print(harmonic(utils, w, rerange=False))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) _dimcheck(utils, w) if w_norm: _w_normalize(w) if rerange: utils = _rerange(utils, OFFSET) return (1.0 / np.sum((w / utils), axis=1))<|docstring|>Harmonic utility aggregation function Aggregate preferences using the reverse power aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` w_norm : Bool, optional If True, the sum of the weights will be equal to 1 rerange : Bool, optional Changes the range of utils to be in the open interval (0,1), defined by the offset value (defined at a library level as OFFSET, 1e-6). By default is set to False. Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(harmonic(utils, w, rerange=True)) >>> [1.24999969e-06] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(harmonic(utils, w, rerange=True)) >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(harmonic(utils, w, rerange=True)) >>>[1.24999969e-06 4.99998000e-06 5.00000000e-01] utils = np.array([0.0, 1.0]) w = np.array([0.8, 0.2]) print(harmonic(utils, w, rerange=False)) >>> [0.] 
utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([0.8, 0.2]) print(harmonic(utils, w, rerange=False)) >>> [0. 0. 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) print(harmonic(utils, w, rerange=False)) >>> [0. 0. 0.5]<|endoftext|>
c30883e69ae5c900604352206366ca49b4b55b9eb3ebfc5fab8c91aa5bd953e2
def maximum(utils, *args, **kwargs): 'Maximum utility aggregation function\n\n Aggregate preferences using the maximum aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n print(maximum(utils))\n\n >>> [1. 1. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) return np.max(utils, axis=1)
Maximum utility aggregation function Aggregate preferences using the maximum aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) print(maximum(utils)) >>> [1. 1. 0.5]
decisiorama/pda_gpu/aggregate.py
maximum
j-chacon/Hartmann_contaminants
0
python
def maximum(utils, *args, **kwargs): 'Maximum utility aggregation function\n\n Aggregate preferences using the maximum aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n print(maximum(utils))\n\n >>> [1. 1. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) return np.max(utils, axis=1)
def maximum(utils, *args, **kwargs): 'Maximum utility aggregation function\n\n Aggregate preferences using the maximum aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n print(maximum(utils))\n\n >>> [1. 1. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) return np.max(utils, axis=1)<|docstring|>Maximum utility aggregation function Aggregate preferences using the maximum aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) print(maximum(utils)) >>> [1. 1. 0.5]<|endoftext|>
b9ea2803ee20e5711d01c870b40f21460d52fec0c88492d6b0d74dd82f4a4b28
def minimum(utils, *args, **kwargs): 'Minimum utility aggregation function\n\n Aggregate preferences using the minimum aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n print(minimum(utils))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) return np.min(utils, axis=1)
Minimum utility aggregation function Aggregate preferences using the minimum aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) print(minimum(utils)) >>> [0. 0. 0.5]
decisiorama/pda_gpu/aggregate.py
minimum
j-chacon/Hartmann_contaminants
0
python
def minimum(utils, *args, **kwargs): 'Minimum utility aggregation function\n\n Aggregate preferences using the minimum aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n print(minimum(utils))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) return np.min(utils, axis=1)
def minimum(utils, *args, **kwargs): 'Minimum utility aggregation function\n\n Aggregate preferences using the minimum aggregation function.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n print(minimum(utils))\n\n >>> [0. 0. 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) return np.min(utils, axis=1)<|docstring|>Minimum utility aggregation function Aggregate preferences using the minimum aggregation function. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. code-block:: python utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) print(minimum(utils)) >>> [0. 0. 0.5]<|endoftext|>
688c049945ba4a001444b153354344cdb13b992265abbe765ad8bb9e32fc9f08
def mix(utils, w, methods, w_methods, mix_fun, w_norm=True, methods_args=None, mix_args=None, *args, **kwargs): 'mixed utility aggregation function\n\n Aggregate preferences using a mix of aggregation functions.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n methods : list [m]\n a list of functions that will create each individual member of the\n model mixture\n w_methods : ndarray [m], [n, m]\n An array for the weights that will be used to mix each of the methods\n mix_fun : function\n Function that will be used to aggregate each of the members of the\n methods\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. 
code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n w_methods = np.array([0.5, 0.5])\n mix_fun = additive\n print(mix(utils, w, methods, w_methods, mix_fun))\n\n >>> [0.1 0.4 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n split_power,]\n methods_args = [{},\n dict(alpha = 1.0, s = 1.0)]\n w_methods = np.array([0.5, 0.5])\n mix_fun = additive\n print(mix(utils, w, methods, w_methods, mix_fun,\n methods_args=methods_args))\n >>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = 1.0, s = 1.0)\n w_methods = np.array([0.5, 0.5])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n #>>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = 1.0, s = 1.0)\n w_methods = np.array([[0.5, 0.5],\n [0.5, 0.5],\n [0.5, 0.5]])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n >>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = np.array([1.0, 1.0, 1.0]), s = 1.0)\n w_methods = np.array([[0.5, 0.5],\n [0.5, 0.5],\n [0.5, 0.5]])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n >>> [0.1 0.4 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) if (w_methods.ndim == 1): _dim_w_methods = w_methods.shape[0] elif (w_methods.ndim == 2): _dim_w_methods = 
w_methods.shape[1] _dimcheck(utils, w) if (len(methods) != _dim_w_methods): _msg = 'length of methods ({0}) and w_methods ({1}) are not the same'.format(len(methods), len(w_methods)) raise ValueError(_msg) if w_norm: _w_normalize(w) _w_normalize(w_methods) if (methods_args is None): methods_args = ([{}] * len(methods)) if (mix_args is None): mix_args = {} agg_util = [m(utils, w, **methods_args[i]) for (i, m) in enumerate(methods)] agg_util = np.array(agg_util).T return mix_fun(agg_util, w_methods, **mix_args)
mixed utility aggregation function Aggregate preferences using a mix of aggregation functions. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` methods : list [m] a list of functions that will create each individual member of the model mixture w_methods : ndarray [m], [n, m] An array for the weights that will be used to mix each of the methods mix_fun : function Function that will be used to aggregate each of the members of the methods w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. 
code-block:: python utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, additive,] w_methods = np.array([0.5, 0.5]) mix_fun = additive print(mix(utils, w, methods, w_methods, mix_fun)) >>> [0.1 0.4 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, split_power,] methods_args = [{}, dict(alpha = 1.0, s = 1.0)] w_methods = np.array([0.5, 0.5]) mix_fun = additive print(mix(utils, w, methods, w_methods, mix_fun, methods_args=methods_args)) >>> [0.1 0.4 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, additive,] mix_args = dict(alpha = 1.0, s = 1.0) w_methods = np.array([0.5, 0.5]) mix_fun = split_power print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args)) #>>> [0.1 0.4 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, additive,] mix_args = dict(alpha = 1.0, s = 1.0) w_methods = np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]) mix_fun = split_power print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args)) >>> [0.1 0.4 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, additive,] mix_args = dict(alpha = np.array([1.0, 1.0, 1.0]), s = 1.0) w_methods = np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]) mix_fun = split_power print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args)) >>> [0.1 0.4 0.5]
decisiorama/pda_gpu/aggregate.py
mix
j-chacon/Hartmann_contaminants
0
python
def mix(utils, w, methods, w_methods, mix_fun, w_norm=True, methods_args=None, mix_args=None, *args, **kwargs): 'mixed utility aggregation function\n\n Aggregate preferences using a mix of aggregation functions.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n methods : list [m]\n a list of functions that will create each individual member of the\n model mixture\n w_methods : ndarray [m], [n, m]\n An array for the weights that will be used to mix each of the methods\n mix_fun : function\n Function that will be used to aggregate each of the members of the\n methods\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. 
code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n w_methods = np.array([0.5, 0.5])\n mix_fun = additive\n print(mix(utils, w, methods, w_methods, mix_fun))\n\n >>> [0.1 0.4 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n split_power,]\n methods_args = [{},\n dict(alpha = 1.0, s = 1.0)]\n w_methods = np.array([0.5, 0.5])\n mix_fun = additive\n print(mix(utils, w, methods, w_methods, mix_fun,\n methods_args=methods_args))\n >>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = 1.0, s = 1.0)\n w_methods = np.array([0.5, 0.5])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n #>>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = 1.0, s = 1.0)\n w_methods = np.array([[0.5, 0.5],\n [0.5, 0.5],\n [0.5, 0.5]])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n >>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = np.array([1.0, 1.0, 1.0]), s = 1.0)\n w_methods = np.array([[0.5, 0.5],\n [0.5, 0.5],\n [0.5, 0.5]])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n >>> [0.1 0.4 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) if (w_methods.ndim == 1): _dim_w_methods = w_methods.shape[0] elif (w_methods.ndim == 2): _dim_w_methods = 
w_methods.shape[1] _dimcheck(utils, w) if (len(methods) != _dim_w_methods): _msg = 'length of methods ({0}) and w_methods ({1}) are not the same'.format(len(methods), len(w_methods)) raise ValueError(_msg) if w_norm: _w_normalize(w) _w_normalize(w_methods) if (methods_args is None): methods_args = ([{}] * len(methods)) if (mix_args is None): mix_args = {} agg_util = [m(utils, w, **methods_args[i]) for (i, m) in enumerate(methods)] agg_util = np.array(agg_util).T return mix_fun(agg_util, w_methods, **mix_args)
def mix(utils, w, methods, w_methods, mix_fun, w_norm=True, methods_args=None, mix_args=None, *args, **kwargs): 'mixed utility aggregation function\n\n Aggregate preferences using a mix of aggregation functions.\n\n Parameters\n ----------\n utils : ndarray [n, u]\n Two-dimensional array with the provided utilities to aggregate. The\n dimensions corresponds to the number of random samples (n) and the\n number of utilities (u)\n w : ndarray [u], [n, u]\n Array with the provided weights to each of the utilities. If passed\n as a 1D-array, the same weights are used for of all the random samples.\n In case it is a 2D-array, w requires the same dimensions as `utils`\n methods : list [m]\n a list of functions that will create each individual member of the\n model mixture\n w_methods : ndarray [m], [n, m]\n An array for the weights that will be used to mix each of the methods\n mix_fun : function\n Function that will be used to aggregate each of the members of the\n methods\n w_norm : Bool, optional\n If True, the sum of the weights will be equal to 1\n\n Returns\n -------\n out : ndarray [n]\n Vector with the aggregated values\n\n Example\n -------\n .. highlight:: python\n .. 
code-block:: python\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n w_methods = np.array([0.5, 0.5])\n mix_fun = additive\n print(mix(utils, w, methods, w_methods, mix_fun))\n\n >>> [0.1 0.4 0.5]\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n split_power,]\n methods_args = [{},\n dict(alpha = 1.0, s = 1.0)]\n w_methods = np.array([0.5, 0.5])\n mix_fun = additive\n print(mix(utils, w, methods, w_methods, mix_fun,\n methods_args=methods_args))\n >>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = 1.0, s = 1.0)\n w_methods = np.array([0.5, 0.5])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n #>>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = 1.0, s = 1.0)\n w_methods = np.array([[0.5, 0.5],\n [0.5, 0.5],\n [0.5, 0.5]])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n >>> [0.1 0.4 0.5]\n\n\n utils = np.array([[0.0, 1.0],\n [1.0, 0.0],\n [0.5, 0.5]])\n w = np.array([[0.8, 0.2],\n [0.8, 0.2],\n [0.8, 0.2]])\n methods = [cobb_douglas,\n additive,]\n mix_args = dict(alpha = np.array([1.0, 1.0, 1.0]), s = 1.0)\n w_methods = np.array([[0.5, 0.5],\n [0.5, 0.5],\n [0.5, 0.5]])\n mix_fun = split_power\n print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args))\n >>> [0.1 0.4 0.5]\n\n ' if (utils.ndim == 1): utils = np.reshape(utils, [1, (- 1)]) if (w_methods.ndim == 1): _dim_w_methods = w_methods.shape[0] elif (w_methods.ndim == 2): _dim_w_methods = 
w_methods.shape[1] _dimcheck(utils, w) if (len(methods) != _dim_w_methods): _msg = 'length of methods ({0}) and w_methods ({1}) are not the same'.format(len(methods), len(w_methods)) raise ValueError(_msg) if w_norm: _w_normalize(w) _w_normalize(w_methods) if (methods_args is None): methods_args = ([{}] * len(methods)) if (mix_args is None): mix_args = {} agg_util = [m(utils, w, **methods_args[i]) for (i, m) in enumerate(methods)] agg_util = np.array(agg_util).T return mix_fun(agg_util, w_methods, **mix_args)<|docstring|>mixed utility aggregation function Aggregate preferences using a mix of aggregation functions. Parameters ---------- utils : ndarray [n, u] Two-dimensional array with the provided utilities to aggregate. The dimensions corresponds to the number of random samples (n) and the number of utilities (u) w : ndarray [u], [n, u] Array with the provided weights to each of the utilities. If passed as a 1D-array, the same weights are used for of all the random samples. In case it is a 2D-array, w requires the same dimensions as `utils` methods : list [m] a list of functions that will create each individual member of the model mixture w_methods : ndarray [m], [n, m] An array for the weights that will be used to mix each of the methods mix_fun : function Function that will be used to aggregate each of the members of the methods w_norm : Bool, optional If True, the sum of the weights will be equal to 1 Returns ------- out : ndarray [n] Vector with the aggregated values Example ------- .. highlight:: python .. 
code-block:: python utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, additive,] w_methods = np.array([0.5, 0.5]) mix_fun = additive print(mix(utils, w, methods, w_methods, mix_fun)) >>> [0.1 0.4 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, split_power,] methods_args = [{}, dict(alpha = 1.0, s = 1.0)] w_methods = np.array([0.5, 0.5]) mix_fun = additive print(mix(utils, w, methods, w_methods, mix_fun, methods_args=methods_args)) >>> [0.1 0.4 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, additive,] mix_args = dict(alpha = 1.0, s = 1.0) w_methods = np.array([0.5, 0.5]) mix_fun = split_power print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args)) #>>> [0.1 0.4 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, additive,] mix_args = dict(alpha = 1.0, s = 1.0) w_methods = np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]) mix_fun = split_power print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args)) >>> [0.1 0.4 0.5] utils = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]) w = np.array([[0.8, 0.2], [0.8, 0.2], [0.8, 0.2]]) methods = [cobb_douglas, additive,] mix_args = dict(alpha = np.array([1.0, 1.0, 1.0]), s = 1.0) w_methods = np.array([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]) mix_fun = split_power print(mix(utils, w, methods, w_methods, mix_fun, mix_args=mix_args)) >>> [0.1 0.4 0.5]<|endoftext|>
ffdc4e860239c26e7f085b75b7c80cf9463b90ca88afb691a30649a7b81a1922
def __init__(self, inputdimension, outputdimension): '\n Instantiates an embedding function object\n\n Parameters\n ----------\n inputdimension : int\n Expected dimension of the input points\n outputdimension : int\n Dimension of the output image points\n ' self.inputdimension = inputdimension self.outputdimension = outputdimension
Instantiates an embedding function object Parameters ---------- inputdimension : int Expected dimension of the input points outputdimension : int Dimension of the output image points
tmrc/embedding_functions.py
__init__
abittracher/pytmrc
0
python
def __init__(self, inputdimension, outputdimension): '\n Instantiates an embedding function object\n\n Parameters\n ----------\n inputdimension : int\n Expected dimension of the input points\n outputdimension : int\n Dimension of the output image points\n ' self.inputdimension = inputdimension self.outputdimension = outputdimension
def __init__(self, inputdimension, outputdimension): '\n Instantiates an embedding function object\n\n Parameters\n ----------\n inputdimension : int\n Expected dimension of the input points\n outputdimension : int\n Dimension of the output image points\n ' self.inputdimension = inputdimension self.outputdimension = outputdimension<|docstring|>Instantiates an embedding function object Parameters ---------- inputdimension : int Expected dimension of the input points outputdimension : int Dimension of the output image points<|endoftext|>
88af075020c3aa76bb8941439baf5c1777e7fc23a8f1d76488e237e5bd16d187
def __init__(self, inputdimension, outputdimension, seed, orthonormalize=False): '\n Instantiates a random linear embedding function object\n\n Parameters\n ----------\n inputdimension : int\n Expected dimension of the input points\n outputdimension : int\n Dimension of the output image points\n seed : Random seed for coefficient generfunctionation\n orthogonalize : bool\n Toggle to orthonormalize columns of the coefficient matrix\n ' super().__init__(inputdimension, outputdimension) self.seed = seed np.random.seed(self.seed) A = np.random.uniform(0, 1, (self.inputdimension, self.outputdimension)) if orthonormalize: (self.A, _) = np.linalg.qr(A, mode='complete') self.A = A
Instantiates a random linear embedding function object Parameters ---------- inputdimension : int Expected dimension of the input points outputdimension : int Dimension of the output image points seed : Random seed for coefficient generfunctionation orthogonalize : bool Toggle to orthonormalize columns of the coefficient matrix
tmrc/embedding_functions.py
__init__
abittracher/pytmrc
0
python
def __init__(self, inputdimension, outputdimension, seed, orthonormalize=False): '\n Instantiates a random linear embedding function object\n\n Parameters\n ----------\n inputdimension : int\n Expected dimension of the input points\n outputdimension : int\n Dimension of the output image points\n seed : Random seed for coefficient generfunctionation\n orthogonalize : bool\n Toggle to orthonormalize columns of the coefficient matrix\n ' super().__init__(inputdimension, outputdimension) self.seed = seed np.random.seed(self.seed) A = np.random.uniform(0, 1, (self.inputdimension, self.outputdimension)) if orthonormalize: (self.A, _) = np.linalg.qr(A, mode='complete') self.A = A
def __init__(self, inputdimension, outputdimension, seed, orthonormalize=False): '\n Instantiates a random linear embedding function object\n\n Parameters\n ----------\n inputdimension : int\n Expected dimension of the input points\n outputdimension : int\n Dimension of the output image points\n seed : Random seed for coefficient generfunctionation\n orthogonalize : bool\n Toggle to orthonormalize columns of the coefficient matrix\n ' super().__init__(inputdimension, outputdimension) self.seed = seed np.random.seed(self.seed) A = np.random.uniform(0, 1, (self.inputdimension, self.outputdimension)) if orthonormalize: (self.A, _) = np.linalg.qr(A, mode='complete') self.A = A<|docstring|>Instantiates a random linear embedding function object Parameters ---------- inputdimension : int Expected dimension of the input points outputdimension : int Dimension of the output image points seed : Random seed for coefficient generfunctionation orthogonalize : bool Toggle to orthonormalize columns of the coefficient matrix<|endoftext|>
bbe88d448ee1a3af56e71a47b486a43f21d4d357589448d4f427a0df814506b8
def evaluate(self, x): '\n Evaluates embedding function at specified points\n\n Parameters\n ----------\n x : np.array of shape [# points, inputdimension]\n Array of evaluation points\n\n Returns\n -------\n np.array of shape [# points, outputdimension]\n array of image points\n ' y = x.dot(self.A) return y
Evaluates embedding function at specified points Parameters ---------- x : np.array of shape [# points, inputdimension] Array of evaluation points Returns ------- np.array of shape [# points, outputdimension] array of image points
tmrc/embedding_functions.py
evaluate
abittracher/pytmrc
0
python
def evaluate(self, x): '\n Evaluates embedding function at specified points\n\n Parameters\n ----------\n x : np.array of shape [# points, inputdimension]\n Array of evaluation points\n\n Returns\n -------\n np.array of shape [# points, outputdimension]\n array of image points\n ' y = x.dot(self.A) return y
def evaluate(self, x): '\n Evaluates embedding function at specified points\n\n Parameters\n ----------\n x : np.array of shape [# points, inputdimension]\n Array of evaluation points\n\n Returns\n -------\n np.array of shape [# points, outputdimension]\n array of image points\n ' y = x.dot(self.A) return y<|docstring|>Evaluates embedding function at specified points Parameters ---------- x : np.array of shape [# points, inputdimension] Array of evaluation points Returns ------- np.array of shape [# points, outputdimension] array of image points<|endoftext|>
ef43a0db0d1200e8f28c66c02e65df82c10bd97a749c8f3110e01a2268af3354
def find(l, e): '\n\n :param l: List of sorted unique integers possibly rotated by an unknown amount\n :param e: Element in the list to find the index of\n :return: Index of e in l\n ' return search(l, start=0, end=(len(l) - 1), e=e)
:param l: List of sorted unique integers possibly rotated by an unknown amount :param e: Element in the list to find the index of :return: Index of e in l
rot_bin_search.py
find
greglever/rotated_binary_search_example
0
python
def find(l, e): '\n\n :param l: List of sorted unique integers possibly rotated by an unknown amount\n :param e: Element in the list to find the index of\n :return: Index of e in l\n ' return search(l, start=0, end=(len(l) - 1), e=e)
def find(l, e): '\n\n :param l: List of sorted unique integers possibly rotated by an unknown amount\n :param e: Element in the list to find the index of\n :return: Index of e in l\n ' return search(l, start=0, end=(len(l) - 1), e=e)<|docstring|>:param l: List of sorted unique integers possibly rotated by an unknown amount :param e: Element in the list to find the index of :return: Index of e in l<|endoftext|>
3bbfc8501414653dd91bd6a45ed1247673df4afe1fe25d3ecafaaea11bff64b2
def rotate(l, x): '\n rotate array l by x elements to the right\n :param l: list of integers\n :param x: number of elements to rotate l by to the right\n :return: rotated list\n ' return (l[((- x) % len(l)):] + l[:((- x) % len(l))])
rotate array l by x elements to the right :param l: list of integers :param x: number of elements to rotate l by to the right :return: rotated list
rot_bin_search.py
rotate
greglever/rotated_binary_search_example
0
python
def rotate(l, x): '\n rotate array l by x elements to the right\n :param l: list of integers\n :param x: number of elements to rotate l by to the right\n :return: rotated list\n ' return (l[((- x) % len(l)):] + l[:((- x) % len(l))])
def rotate(l, x): '\n rotate array l by x elements to the right\n :param l: list of integers\n :param x: number of elements to rotate l by to the right\n :return: rotated list\n ' return (l[((- x) % len(l)):] + l[:((- x) % len(l))])<|docstring|>rotate array l by x elements to the right :param l: list of integers :param x: number of elements to rotate l by to the right :return: rotated list<|endoftext|>
6760cb24525ea474a8e432a4bc298adf624d51e44b42923f46f9576587204911
def check_network(host='8.8.8.8', port=53, timeout=3): '\n Check for a valid network connection by attempting to contact the Google DNS server.\n :return:\n ' try: socket.setdefaulttimeout(timeout) socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port)) return True except socket.error as err: print('\n[ERROR] No network connection detected. Error {}\n'.format(err)) return False
Check for a valid network connection by attempting to contact the Google DNS server. :return:
system-setup/windows/get-public-ip.py
check_network
Cashiuus/penprep
3
python
def check_network(host='8.8.8.8', port=53, timeout=3): '\n Check for a valid network connection by attempting to contact the Google DNS server.\n :return:\n ' try: socket.setdefaulttimeout(timeout) socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port)) return True except socket.error as err: print('\n[ERROR] No network connection detected. Error {}\n'.format(err)) return False
def check_network(host='8.8.8.8', port=53, timeout=3): '\n Check for a valid network connection by attempting to contact the Google DNS server.\n :return:\n ' try: socket.setdefaulttimeout(timeout) socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port)) return True except socket.error as err: print('\n[ERROR] No network connection detected. Error {}\n'.format(err)) return False<|docstring|>Check for a valid network connection by attempting to contact the Google DNS server. :return:<|endoftext|>
3a0d5e38fa1fc256f6e9c94931f09927e1442801be94f0ccfaadd85bae8c0d86
def __init__(self, joints=None, motions=None): '\n Display motion sequence in 3D.\n\n Parameter\n ---------\n joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and\n values are instance of Joint class.\n\n motions: List returned from `amc_parser.parse_amc. Each element is a dict\n with joint names as keys and relative rotation degree as values.\n\n ' self.joints = joints self.motions = motions self.frame = 0 self.playing = False self.fps = 120 self.rotate_dragging = False self.translate_dragging = False self.old_x = 0 self.old_y = 0 self.global_rx = 0 self.global_ry = 0 self.rotation_R = np.eye(3) self.speed_rx = (np.pi / 90) self.speed_ry = (np.pi / 90) self.speed_trans = 0.25 self.speed_zoom = 0.5 self.done = False self.default_translate = np.array([0, (- 20), (- 100)], dtype=np.float32) self.translate = np.copy(self.default_translate) pygame.init() self.screen_size = (1024, 768) self.screen = pygame.display.set_mode(self.screen_size, (pygame.DOUBLEBUF | pygame.OPENGL)) pygame.display.set_caption(('AMC Parser - frame %d / %d' % (self.frame, len(self.motions)))) self.clock = pygame.time.Clock() glClearColor(0, 0, 0, 0) glShadeModel(GL_SMOOTH) glMaterialfv(GL_FRONT, GL_SPECULAR, np.array([1, 1, 1, 1], dtype=np.float32)) glMaterialfv(GL_FRONT, GL_SHININESS, np.array([100.0], dtype=np.float32)) glMaterialfv(GL_FRONT, GL_AMBIENT, np.array([0.7, 0.7, 0.7, 0.7], dtype=np.float32)) glEnable(GL_POINT_SMOOTH) glLightfv(GL_LIGHT0, GL_POSITION, np.array([1, 1, 1, 0], dtype=np.float32)) glEnable(GL_LIGHT0) glEnable(GL_LIGHTING) glEnable(GL_DEPTH_TEST) gluPerspective(45, (self.screen_size[0] / self.screen_size[1]), 0.1, 500.0) glPointSize(10) glLineWidth(2.5)
Display motion sequence in 3D. Parameter --------- joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and values are instance of Joint class. motions: List returned from `amc_parser.parse_amc. Each element is a dict with joint names as keys and relative rotation degree as values.
3Dviewer.py
__init__
cjearls/AMCParser
93
python
def __init__(self, joints=None, motions=None): '\n Display motion sequence in 3D.\n\n Parameter\n ---------\n joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and\n values are instance of Joint class.\n\n motions: List returned from `amc_parser.parse_amc. Each element is a dict\n with joint names as keys and relative rotation degree as values.\n\n ' self.joints = joints self.motions = motions self.frame = 0 self.playing = False self.fps = 120 self.rotate_dragging = False self.translate_dragging = False self.old_x = 0 self.old_y = 0 self.global_rx = 0 self.global_ry = 0 self.rotation_R = np.eye(3) self.speed_rx = (np.pi / 90) self.speed_ry = (np.pi / 90) self.speed_trans = 0.25 self.speed_zoom = 0.5 self.done = False self.default_translate = np.array([0, (- 20), (- 100)], dtype=np.float32) self.translate = np.copy(self.default_translate) pygame.init() self.screen_size = (1024, 768) self.screen = pygame.display.set_mode(self.screen_size, (pygame.DOUBLEBUF | pygame.OPENGL)) pygame.display.set_caption(('AMC Parser - frame %d / %d' % (self.frame, len(self.motions)))) self.clock = pygame.time.Clock() glClearColor(0, 0, 0, 0) glShadeModel(GL_SMOOTH) glMaterialfv(GL_FRONT, GL_SPECULAR, np.array([1, 1, 1, 1], dtype=np.float32)) glMaterialfv(GL_FRONT, GL_SHININESS, np.array([100.0], dtype=np.float32)) glMaterialfv(GL_FRONT, GL_AMBIENT, np.array([0.7, 0.7, 0.7, 0.7], dtype=np.float32)) glEnable(GL_POINT_SMOOTH) glLightfv(GL_LIGHT0, GL_POSITION, np.array([1, 1, 1, 0], dtype=np.float32)) glEnable(GL_LIGHT0) glEnable(GL_LIGHTING) glEnable(GL_DEPTH_TEST) gluPerspective(45, (self.screen_size[0] / self.screen_size[1]), 0.1, 500.0) glPointSize(10) glLineWidth(2.5)
def __init__(self, joints=None, motions=None): '\n Display motion sequence in 3D.\n\n Parameter\n ---------\n joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and\n values are instance of Joint class.\n\n motions: List returned from `amc_parser.parse_amc. Each element is a dict\n with joint names as keys and relative rotation degree as values.\n\n ' self.joints = joints self.motions = motions self.frame = 0 self.playing = False self.fps = 120 self.rotate_dragging = False self.translate_dragging = False self.old_x = 0 self.old_y = 0 self.global_rx = 0 self.global_ry = 0 self.rotation_R = np.eye(3) self.speed_rx = (np.pi / 90) self.speed_ry = (np.pi / 90) self.speed_trans = 0.25 self.speed_zoom = 0.5 self.done = False self.default_translate = np.array([0, (- 20), (- 100)], dtype=np.float32) self.translate = np.copy(self.default_translate) pygame.init() self.screen_size = (1024, 768) self.screen = pygame.display.set_mode(self.screen_size, (pygame.DOUBLEBUF | pygame.OPENGL)) pygame.display.set_caption(('AMC Parser - frame %d / %d' % (self.frame, len(self.motions)))) self.clock = pygame.time.Clock() glClearColor(0, 0, 0, 0) glShadeModel(GL_SMOOTH) glMaterialfv(GL_FRONT, GL_SPECULAR, np.array([1, 1, 1, 1], dtype=np.float32)) glMaterialfv(GL_FRONT, GL_SHININESS, np.array([100.0], dtype=np.float32)) glMaterialfv(GL_FRONT, GL_AMBIENT, np.array([0.7, 0.7, 0.7, 0.7], dtype=np.float32)) glEnable(GL_POINT_SMOOTH) glLightfv(GL_LIGHT0, GL_POSITION, np.array([1, 1, 1, 0], dtype=np.float32)) glEnable(GL_LIGHT0) glEnable(GL_LIGHTING) glEnable(GL_DEPTH_TEST) gluPerspective(45, (self.screen_size[0] / self.screen_size[1]), 0.1, 500.0) glPointSize(10) glLineWidth(2.5)<|docstring|>Display motion sequence in 3D. Parameter --------- joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and values are instance of Joint class. motions: List returned from `amc_parser.parse_amc. 
Each element is a dict with joint names as keys and relative rotation degree as values.<|endoftext|>
6d22e8f29072d0370985c595f2f1108b75aa02f394c568a8ab010859246f393e
def process_event(self): '\n Handle user interface events: keydown, close, dragging.\n\n ' for event in pygame.event.get(): if (event.type == pygame.QUIT): self.done = True elif (event.type == pygame.KEYDOWN): if (event.key == pygame.K_RETURN): self.translate = self.default_translate self.global_rx = 0 self.global_ry = 0 elif (event.key == pygame.K_SPACE): self.playing = (not self.playing) elif (event.type == pygame.MOUSEBUTTONDOWN): if (event.button == 1): self.rotate_dragging = True else: self.translate_dragging = True (self.old_x, self.old_y) = event.pos elif (event.type == pygame.MOUSEBUTTONUP): if (event.button == 1): self.rotate_dragging = False else: self.translate_dragging = False elif (event.type == pygame.MOUSEMOTION): if self.translate_dragging: pass elif self.rotate_dragging: (new_x, new_y) = event.pos self.global_ry -= (((new_x - self.old_x) / self.screen_size[0]) * np.pi) self.global_rx -= (((new_y - self.old_y) / self.screen_size[1]) * np.pi) (self.old_x, self.old_y) = (new_x, new_y) pressed = pygame.key.get_pressed() if pressed[pygame.K_DOWN]: self.global_rx -= self.speed_rx if pressed[pygame.K_UP]: self.global_rx += self.speed_rx if pressed[pygame.K_LEFT]: self.global_ry += self.speed_ry if pressed[pygame.K_RIGHT]: self.global_ry -= self.speed_ry if pressed[pygame.K_a]: self.translate[0] -= self.speed_trans if pressed[pygame.K_d]: self.translate[0] += self.speed_trans if pressed[pygame.K_w]: self.translate[1] += self.speed_trans if pressed[pygame.K_s]: self.translate[1] -= self.speed_trans if pressed[pygame.K_q]: self.translate[2] += self.speed_zoom if pressed[pygame.K_e]: self.translate[2] -= self.speed_zoom if pressed[pygame.K_COMMA]: self.frame -= 1 if (self.frame < 0): self.frame = (len(self.motions) - 1) if pressed[pygame.K_PERIOD]: self.frame += 1 if (self.frame >= len(self.motions)): self.frame = 0 grx = euler.euler2mat(self.global_rx, 0, 0) gry = euler.euler2mat(0, self.global_ry, 0) self.rotation_R = grx.dot(gry)
Handle user interface events: keydown, close, dragging.
3Dviewer.py
process_event
cjearls/AMCParser
93
python
def process_event(self): '\n \n\n ' for event in pygame.event.get(): if (event.type == pygame.QUIT): self.done = True elif (event.type == pygame.KEYDOWN): if (event.key == pygame.K_RETURN): self.translate = self.default_translate self.global_rx = 0 self.global_ry = 0 elif (event.key == pygame.K_SPACE): self.playing = (not self.playing) elif (event.type == pygame.MOUSEBUTTONDOWN): if (event.button == 1): self.rotate_dragging = True else: self.translate_dragging = True (self.old_x, self.old_y) = event.pos elif (event.type == pygame.MOUSEBUTTONUP): if (event.button == 1): self.rotate_dragging = False else: self.translate_dragging = False elif (event.type == pygame.MOUSEMOTION): if self.translate_dragging: pass elif self.rotate_dragging: (new_x, new_y) = event.pos self.global_ry -= (((new_x - self.old_x) / self.screen_size[0]) * np.pi) self.global_rx -= (((new_y - self.old_y) / self.screen_size[1]) * np.pi) (self.old_x, self.old_y) = (new_x, new_y) pressed = pygame.key.get_pressed() if pressed[pygame.K_DOWN]: self.global_rx -= self.speed_rx if pressed[pygame.K_UP]: self.global_rx += self.speed_rx if pressed[pygame.K_LEFT]: self.global_ry += self.speed_ry if pressed[pygame.K_RIGHT]: self.global_ry -= self.speed_ry if pressed[pygame.K_a]: self.translate[0] -= self.speed_trans if pressed[pygame.K_d]: self.translate[0] += self.speed_trans if pressed[pygame.K_w]: self.translate[1] += self.speed_trans if pressed[pygame.K_s]: self.translate[1] -= self.speed_trans if pressed[pygame.K_q]: self.translate[2] += self.speed_zoom if pressed[pygame.K_e]: self.translate[2] -= self.speed_zoom if pressed[pygame.K_COMMA]: self.frame -= 1 if (self.frame < 0): self.frame = (len(self.motions) - 1) if pressed[pygame.K_PERIOD]: self.frame += 1 if (self.frame >= len(self.motions)): self.frame = 0 grx = euler.euler2mat(self.global_rx, 0, 0) gry = euler.euler2mat(0, self.global_ry, 0) self.rotation_R = grx.dot(gry)
def process_event(self): '\n \n\n ' for event in pygame.event.get(): if (event.type == pygame.QUIT): self.done = True elif (event.type == pygame.KEYDOWN): if (event.key == pygame.K_RETURN): self.translate = self.default_translate self.global_rx = 0 self.global_ry = 0 elif (event.key == pygame.K_SPACE): self.playing = (not self.playing) elif (event.type == pygame.MOUSEBUTTONDOWN): if (event.button == 1): self.rotate_dragging = True else: self.translate_dragging = True (self.old_x, self.old_y) = event.pos elif (event.type == pygame.MOUSEBUTTONUP): if (event.button == 1): self.rotate_dragging = False else: self.translate_dragging = False elif (event.type == pygame.MOUSEMOTION): if self.translate_dragging: pass elif self.rotate_dragging: (new_x, new_y) = event.pos self.global_ry -= (((new_x - self.old_x) / self.screen_size[0]) * np.pi) self.global_rx -= (((new_y - self.old_y) / self.screen_size[1]) * np.pi) (self.old_x, self.old_y) = (new_x, new_y) pressed = pygame.key.get_pressed() if pressed[pygame.K_DOWN]: self.global_rx -= self.speed_rx if pressed[pygame.K_UP]: self.global_rx += self.speed_rx if pressed[pygame.K_LEFT]: self.global_ry += self.speed_ry if pressed[pygame.K_RIGHT]: self.global_ry -= self.speed_ry if pressed[pygame.K_a]: self.translate[0] -= self.speed_trans if pressed[pygame.K_d]: self.translate[0] += self.speed_trans if pressed[pygame.K_w]: self.translate[1] += self.speed_trans if pressed[pygame.K_s]: self.translate[1] -= self.speed_trans if pressed[pygame.K_q]: self.translate[2] += self.speed_zoom if pressed[pygame.K_e]: self.translate[2] -= self.speed_zoom if pressed[pygame.K_COMMA]: self.frame -= 1 if (self.frame < 0): self.frame = (len(self.motions) - 1) if pressed[pygame.K_PERIOD]: self.frame += 1 if (self.frame >= len(self.motions)): self.frame = 0 grx = euler.euler2mat(self.global_rx, 0, 0) gry = euler.euler2mat(0, self.global_ry, 0) self.rotation_R = grx.dot(gry)<|docstring|>Handle user interface events: keydown, close, dragging.<|endoftext|>
0f7cf5d87cb25938617577f5d6d50edd369c4e2fa8bf21925bf3e839025a4986
def set_joints(self, joints): '\n Set joints for viewer.\n\n Parameter\n ---------\n joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and\n values are instance of Joint class.\n\n ' self.joints = joints
Set joints for viewer. Parameter --------- joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and values are instance of Joint class.
3Dviewer.py
set_joints
cjearls/AMCParser
93
python
def set_joints(self, joints): '\n Set joints for viewer.\n\n Parameter\n ---------\n joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and\n values are instance of Joint class.\n\n ' self.joints = joints
def set_joints(self, joints): '\n Set joints for viewer.\n\n Parameter\n ---------\n joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and\n values are instance of Joint class.\n\n ' self.joints = joints<|docstring|>Set joints for viewer. Parameter --------- joints: Dict returned from `amc_parser.parse_asf`. Keys are joint names and values are instance of Joint class.<|endoftext|>
25c05b9ef9c98d30a5e493b0c2b5eabb2fe2e7db8deaa510e5f1cfda82279af0
def set_motion(self, motions): '\n Set motion sequence for viewer.\n\n Paramter\n --------\n motions: List returned from `amc_parser.parse_amc. Each element is a dict\n with joint names as keys and relative rotation degree as values.\n\n ' self.motions = motions
Set motion sequence for viewer. Paramter -------- motions: List returned from `amc_parser.parse_amc. Each element is a dict with joint names as keys and relative rotation degree as values.
3Dviewer.py
set_motion
cjearls/AMCParser
93
python
def set_motion(self, motions): '\n Set motion sequence for viewer.\n\n Paramter\n --------\n motions: List returned from `amc_parser.parse_amc. Each element is a dict\n with joint names as keys and relative rotation degree as values.\n\n ' self.motions = motions
def set_motion(self, motions): '\n Set motion sequence for viewer.\n\n Paramter\n --------\n motions: List returned from `amc_parser.parse_amc. Each element is a dict\n with joint names as keys and relative rotation degree as values.\n\n ' self.motions = motions<|docstring|>Set motion sequence for viewer. Paramter -------- motions: List returned from `amc_parser.parse_amc. Each element is a dict with joint names as keys and relative rotation degree as values.<|endoftext|>
e2afe16f57713764f9aeee179a5a79035cb15d922c1eef1b13050ccb52938fe9
def draw(self): '\n Draw the skeleton with balls and sticks.\n\n ' glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)) glBegin(GL_POINTS) for j in self.joints.values(): coord = np.array((np.squeeze(j.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) glVertex3f(*coord) glEnd() glBegin(GL_LINES) for j in self.joints.values(): child = j parent = j.parent if (parent is not None): coord_x = np.array((np.squeeze(child.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) coord_y = np.array((np.squeeze(parent.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) glVertex3f(*coord_x) glVertex3f(*coord_y) glEnd()
Draw the skeleton with balls and sticks.
3Dviewer.py
draw
cjearls/AMCParser
93
python
def draw(self): '\n \n\n ' glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)) glBegin(GL_POINTS) for j in self.joints.values(): coord = np.array((np.squeeze(j.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) glVertex3f(*coord) glEnd() glBegin(GL_LINES) for j in self.joints.values(): child = j parent = j.parent if (parent is not None): coord_x = np.array((np.squeeze(child.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) coord_y = np.array((np.squeeze(parent.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) glVertex3f(*coord_x) glVertex3f(*coord_y) glEnd()
def draw(self): '\n \n\n ' glClear((GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)) glBegin(GL_POINTS) for j in self.joints.values(): coord = np.array((np.squeeze(j.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) glVertex3f(*coord) glEnd() glBegin(GL_LINES) for j in self.joints.values(): child = j parent = j.parent if (parent is not None): coord_x = np.array((np.squeeze(child.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) coord_y = np.array((np.squeeze(parent.coordinate).dot(self.rotation_R) + self.translate), dtype=np.float32) glVertex3f(*coord_x) glVertex3f(*coord_y) glEnd()<|docstring|>Draw the skeleton with balls and sticks.<|endoftext|>
8b9d6ef64032679b3616a4710aff16aa8f772a6626d2d4e69322fcf29cc54c7f
def run(self): '\n Main loop.\n\n ' while (not self.done): self.process_event() self.joints['root'].set_motion(self.motions[self.frame]) if self.playing: self.frame += 1 if (self.frame >= len(self.motions)): self.frame = 0 self.draw() pygame.display.set_caption(('AMC Parser - frame %d / %d' % (self.frame, len(self.motions)))) pygame.display.flip() self.clock.tick(self.fps) pygame.quit()
Main loop.
3Dviewer.py
run
cjearls/AMCParser
93
python
def run(self): '\n \n\n ' while (not self.done): self.process_event() self.joints['root'].set_motion(self.motions[self.frame]) if self.playing: self.frame += 1 if (self.frame >= len(self.motions)): self.frame = 0 self.draw() pygame.display.set_caption(('AMC Parser - frame %d / %d' % (self.frame, len(self.motions)))) pygame.display.flip() self.clock.tick(self.fps) pygame.quit()
def run(self): '\n \n\n ' while (not self.done): self.process_event() self.joints['root'].set_motion(self.motions[self.frame]) if self.playing: self.frame += 1 if (self.frame >= len(self.motions)): self.frame = 0 self.draw() pygame.display.set_caption(('AMC Parser - frame %d / %d' % (self.frame, len(self.motions)))) pygame.display.flip() self.clock.tick(self.fps) pygame.quit()<|docstring|>Main loop.<|endoftext|>
4833fab7201a3a32d66ecddaff71a855f8862f69374465a1ff3c71a49eb9c7bc
def step(self, closure: OptLossClosure=None) -> OptFloat: 'Performs a single optimization step.\n\n Arguments:\n closure: A closure that reevaluates the model and returns the loss.\n ' loss = None if (closure is not None): loss = closure() for group in self.param_groups: for w in group['params']: if (w.grad is None): continue grad = w.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] state = self.state[w] if (len(state) == 0): state['step'] = 0 state['exp_avg'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) state['exp_avg_sq'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) state['exp_avg2'] = w.new(1).fill_(0) if amsgrad: state['max_exp_avg_sq'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) (exp_avg, exp_avg2, exp_avg_sq) = (state['exp_avg'], state['exp_avg2'], state['exp_avg_sq']) if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] (beta1, beta2) = group['betas'] state['step'] += 1 if (group['weight_decay'] != 0): grad.add_(w.data, alpha=group['weight_decay']) if (group['phase'] == 'SGD'): if ('momentum_buffer' not in state): buf = state['momentum_buffer'] = torch.clone(grad).detach() else: buf = state['momentum_buffer'] buf.mul_(beta1).add_(grad) grad = buf grad.mul_((1 - beta1)) if group['nesterov']: grad.add_(buf, alpha=beta1) w.data.add_(grad, alpha=(- group['lr'])) continue exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1)) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2)) if amsgrad: torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) denom = max_exp_avg_sq.sqrt().add_(group['eps']) else: denom = exp_avg_sq.sqrt().add_(group['eps']) bias_correction1 = (1 - (beta1 ** state['step'])) bias_correction2 = (1 - (beta2 ** state['step'])) step_size = ((group['lr'] * (bias_correction2 ** 0.5)) / bias_correction1) p = ((- step_size) * (exp_avg / denom)) w.data.add_(p) p_view = p.view((- 1)) pg = 
p_view.dot(grad.view((- 1))) if (pg != 0): scaling = (p_view.dot(p_view) / (- pg)) exp_avg2.mul_(beta2).add_(scaling, alpha=(1 - beta2)) corrected_exp_avg = (exp_avg2 / bias_correction2) if ((state['step'] > 1) and corrected_exp_avg.allclose(scaling, rtol=1e-06) and (corrected_exp_avg > 0)): group['phase'] = 'SGD' group['lr'] = corrected_exp_avg.item() return loss
Performs a single optimization step. Arguments: closure: A closure that reevaluates the model and returns the loss.
torch_optimizer/swats.py
step
lipovsek/pytorch-optimizer
2,418
python
def step(self, closure: OptLossClosure=None) -> OptFloat: 'Performs a single optimization step.\n\n Arguments:\n closure: A closure that reevaluates the model and returns the loss.\n ' loss = None if (closure is not None): loss = closure() for group in self.param_groups: for w in group['params']: if (w.grad is None): continue grad = w.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] state = self.state[w] if (len(state) == 0): state['step'] = 0 state['exp_avg'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) state['exp_avg_sq'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) state['exp_avg2'] = w.new(1).fill_(0) if amsgrad: state['max_exp_avg_sq'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) (exp_avg, exp_avg2, exp_avg_sq) = (state['exp_avg'], state['exp_avg2'], state['exp_avg_sq']) if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] (beta1, beta2) = group['betas'] state['step'] += 1 if (group['weight_decay'] != 0): grad.add_(w.data, alpha=group['weight_decay']) if (group['phase'] == 'SGD'): if ('momentum_buffer' not in state): buf = state['momentum_buffer'] = torch.clone(grad).detach() else: buf = state['momentum_buffer'] buf.mul_(beta1).add_(grad) grad = buf grad.mul_((1 - beta1)) if group['nesterov']: grad.add_(buf, alpha=beta1) w.data.add_(grad, alpha=(- group['lr'])) continue exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1)) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2)) if amsgrad: torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) denom = max_exp_avg_sq.sqrt().add_(group['eps']) else: denom = exp_avg_sq.sqrt().add_(group['eps']) bias_correction1 = (1 - (beta1 ** state['step'])) bias_correction2 = (1 - (beta2 ** state['step'])) step_size = ((group['lr'] * (bias_correction2 ** 0.5)) / bias_correction1) p = ((- step_size) * (exp_avg / denom)) w.data.add_(p) p_view = p.view((- 1)) pg = 
p_view.dot(grad.view((- 1))) if (pg != 0): scaling = (p_view.dot(p_view) / (- pg)) exp_avg2.mul_(beta2).add_(scaling, alpha=(1 - beta2)) corrected_exp_avg = (exp_avg2 / bias_correction2) if ((state['step'] > 1) and corrected_exp_avg.allclose(scaling, rtol=1e-06) and (corrected_exp_avg > 0)): group['phase'] = 'SGD' group['lr'] = corrected_exp_avg.item() return loss
def step(self, closure: OptLossClosure=None) -> OptFloat: 'Performs a single optimization step.\n\n Arguments:\n closure: A closure that reevaluates the model and returns the loss.\n ' loss = None if (closure is not None): loss = closure() for group in self.param_groups: for w in group['params']: if (w.grad is None): continue grad = w.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') amsgrad = group['amsgrad'] state = self.state[w] if (len(state) == 0): state['step'] = 0 state['exp_avg'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) state['exp_avg_sq'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) state['exp_avg2'] = w.new(1).fill_(0) if amsgrad: state['max_exp_avg_sq'] = torch.zeros_like(w.data, memory_format=torch.preserve_format) (exp_avg, exp_avg2, exp_avg_sq) = (state['exp_avg'], state['exp_avg2'], state['exp_avg_sq']) if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] (beta1, beta2) = group['betas'] state['step'] += 1 if (group['weight_decay'] != 0): grad.add_(w.data, alpha=group['weight_decay']) if (group['phase'] == 'SGD'): if ('momentum_buffer' not in state): buf = state['momentum_buffer'] = torch.clone(grad).detach() else: buf = state['momentum_buffer'] buf.mul_(beta1).add_(grad) grad = buf grad.mul_((1 - beta1)) if group['nesterov']: grad.add_(buf, alpha=beta1) w.data.add_(grad, alpha=(- group['lr'])) continue exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1)) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2)) if amsgrad: torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) denom = max_exp_avg_sq.sqrt().add_(group['eps']) else: denom = exp_avg_sq.sqrt().add_(group['eps']) bias_correction1 = (1 - (beta1 ** state['step'])) bias_correction2 = (1 - (beta2 ** state['step'])) step_size = ((group['lr'] * (bias_correction2 ** 0.5)) / bias_correction1) p = ((- step_size) * (exp_avg / denom)) w.data.add_(p) p_view = p.view((- 1)) pg = 
p_view.dot(grad.view((- 1))) if (pg != 0): scaling = (p_view.dot(p_view) / (- pg)) exp_avg2.mul_(beta2).add_(scaling, alpha=(1 - beta2)) corrected_exp_avg = (exp_avg2 / bias_correction2) if ((state['step'] > 1) and corrected_exp_avg.allclose(scaling, rtol=1e-06) and (corrected_exp_avg > 0)): group['phase'] = 'SGD' group['lr'] = corrected_exp_avg.item() return loss<|docstring|>Performs a single optimization step. Arguments: closure: A closure that reevaluates the model and returns the loss.<|endoftext|>
abadcd5df6ff52860a7dd1655d121709caf70b1459e677832da018be54de5cb2
def get_fileinfo(filespec, cache_dir='/tmp', download=False): '\n Parse a file specification and return information about it.\n ' numhdu = None match = re.match('^(.+)\\[(\\d+)\\]$', filespec) if match: filespec = match.group(1) numhdu = int(match.group(2)) else: filespec = filespec url = filespec filepath = None match = re.match('^(\\w+)://(.+)$', filespec) if match: urlinfo = urllib_parse.urlparse(filespec) if (urlinfo.scheme == 'file'): filepath = urlinfo.path match = re.match('^/(\\w+\\:)', filepath) if match: filepath = filepath[1:] else: (path, filename) = os.path.split(urlinfo.path) filepath = os.path.join(cache_dir, filename) else: filepath = filespec url = ('file://' + filepath) ondisk = os.path.exists(filepath) res = Bunch.Bunch(filepath=filepath, url=url, numhdu=numhdu, ondisk=ondisk) return res
Parse a file specification and return information about it.
ginga/util/iohelper.py
get_fileinfo
sosey/ginga
0
python
def get_fileinfo(filespec, cache_dir='/tmp', download=False): '\n \n ' numhdu = None match = re.match('^(.+)\\[(\\d+)\\]$', filespec) if match: filespec = match.group(1) numhdu = int(match.group(2)) else: filespec = filespec url = filespec filepath = None match = re.match('^(\\w+)://(.+)$', filespec) if match: urlinfo = urllib_parse.urlparse(filespec) if (urlinfo.scheme == 'file'): filepath = urlinfo.path match = re.match('^/(\\w+\\:)', filepath) if match: filepath = filepath[1:] else: (path, filename) = os.path.split(urlinfo.path) filepath = os.path.join(cache_dir, filename) else: filepath = filespec url = ('file://' + filepath) ondisk = os.path.exists(filepath) res = Bunch.Bunch(filepath=filepath, url=url, numhdu=numhdu, ondisk=ondisk) return res
def get_fileinfo(filespec, cache_dir='/tmp', download=False): '\n \n ' numhdu = None match = re.match('^(.+)\\[(\\d+)\\]$', filespec) if match: filespec = match.group(1) numhdu = int(match.group(2)) else: filespec = filespec url = filespec filepath = None match = re.match('^(\\w+)://(.+)$', filespec) if match: urlinfo = urllib_parse.urlparse(filespec) if (urlinfo.scheme == 'file'): filepath = urlinfo.path match = re.match('^/(\\w+\\:)', filepath) if match: filepath = filepath[1:] else: (path, filename) = os.path.split(urlinfo.path) filepath = os.path.join(cache_dir, filename) else: filepath = filespec url = ('file://' + filepath) ondisk = os.path.exists(filepath) res = Bunch.Bunch(filepath=filepath, url=url, numhdu=numhdu, ondisk=ondisk) return res<|docstring|>Parse a file specification and return information about it.<|endoftext|>
0b4421b25bfbf2360e7c84e9bd6c0ef73fad4ed6eacc44a1ca1addf02b6ce8d2
def _shap_explain_cme(cme_model, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `const_marginal_effect` function using shap Explainer().\n\n Parameters\n ----------\n cme_models: function\n const_marginal_effect function.\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n\n ' (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for i in range(dy): def cmd_func(X): return cme_model(X).reshape((- 1), dy, dt)[(:, i, :)] explainer = shap.Explainer(cmd_func, background, feature_names=feature_names) shap_out = explainer(X) if (dt > 1): for j in range(dt): base_values = shap_out.base_values[(..., j)] values = shap_out.values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., j)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[i]][treatment_names[j]] = shap_out_new else: base_values = shap_out.base_values[(..., 0)] shap_out_new = shap.Explanation(shap_out.values, base_values=base_values, data=shap_out.data, main_effects=shap_out.main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[i]][treatment_names[0]] = shap_out_new return shap_outs
Method to explain `const_marginal_effect` function using shap Explainer(). Parameters ---------- cme_models: function const_marginal_effect function. X: (m, d_x) matrix Features for each sample. Should be in the same shape of fitted X in final stage. d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario). d_y: tuple of int Tuple of number of outcome. feature_names: optional None or list of strings of length X.shape[1] (Default=None) The names of input features. treatment_names: optional None or list (Default=None) The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: optional None or list (Default=None) The name of the outcome. input_names: dictionary or None The parsed names of variables at fit input time of cate estimators background_samples: int or None, (Default=100) How many samples to use to compute the baseline effect. If None then all samples are used. Returns ------- shap_outs: nested dictionary of Explanation object A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and each treatment name (e.g. "T0" when `treatment_names=None`) as key and the shap_values explanation object as value.
econml/_shap.py
_shap_explain_cme
Jimmy-INL/EconML
1
python
def _shap_explain_cme(cme_model, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `const_marginal_effect` function using shap Explainer().\n\n Parameters\n ----------\n cme_models: function\n const_marginal_effect function.\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n\n ' (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for i in range(dy): def cmd_func(X): return cme_model(X).reshape((- 1), dy, dt)[(:, i, :)] explainer = shap.Explainer(cmd_func, background, feature_names=feature_names) shap_out = explainer(X) if (dt > 1): for j in range(dt): base_values = shap_out.base_values[(..., j)] values = shap_out.values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., j)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[i]][treatment_names[j]] = shap_out_new else: base_values = shap_out.base_values[(..., 0)] shap_out_new = shap.Explanation(shap_out.values, base_values=base_values, data=shap_out.data, main_effects=shap_out.main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[i]][treatment_names[0]] = shap_out_new return shap_outs
def _shap_explain_cme(cme_model, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `const_marginal_effect` function using shap Explainer().\n\n Parameters\n ----------\n cme_models: function\n const_marginal_effect function.\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n\n ' (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for i in range(dy): def cmd_func(X): return cme_model(X).reshape((- 1), dy, dt)[(:, i, :)] explainer = shap.Explainer(cmd_func, background, feature_names=feature_names) shap_out = explainer(X) if (dt > 1): for j in range(dt): base_values = shap_out.base_values[(..., j)] values = shap_out.values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., j)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[i]][treatment_names[j]] = shap_out_new else: base_values = shap_out.base_values[(..., 0)] shap_out_new = shap.Explanation(shap_out.values, base_values=base_values, data=shap_out.data, main_effects=shap_out.main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[i]][treatment_names[0]] = shap_out_new return shap_outs<|docstring|>Method to explain `const_marginal_effect` function using shap Explainer(). Parameters ---------- cme_models: function const_marginal_effect function. X: (m, d_x) matrix Features for each sample. Should be in the same shape of fitted X in final stage. d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario). d_y: tuple of int Tuple of number of outcome. feature_names: optional None or list of strings of length X.shape[1] (Default=None) The names of input features. treatment_names: optional None or list (Default=None) The name of treatment. 
In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: optional None or list (Default=None) The name of the outcome. input_names: dictionary or None The parsed names of variables at fit input time of cate estimators background_samples: int or None, (Default=100) How many samples to use to compute the baseline effect. If None then all samples are used. Returns ------- shap_outs: nested dictionary of Explanation object A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and each treatment name (e.g. "T0" when `treatment_names=None`) as key and the shap_values explanation object as value.<|endoftext|>
d233e0e7bbb4657faef26b70439dcd5476ea3b63f7b0d16279191cc6d3b72cec
def _shap_explain_model_cate(cme_model, models, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `model_cate` using shap Explainer(), will instead explain `const_marignal_effect`\n if `model_cate` can\'t be parsed. Models should be a list of length d_t. Each element in the list of\n models represents the const_marginal_effect associated with each treatments and for all outcomes, i.e.\n the outcome of the predict method of each model should be of length d_y.\n\n Parameters\n ----------\n cme_models: function\n const_marginal_effect function.\n models: a single estimator or a list of estimators with one estimator per treatment\n models for the model\'s final stage model.\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario.\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t_, d_y_) = (d_t, d_y) (feature_names_, treatment_names_) = (feature_names, treatment_names) (output_names_, input_names_) = (output_names, input_names) (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) if (not isinstance(models, list)): models = [models] assert (len(models) == dt), "Number of final stage models don't equals to number of treatments!" bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for i in range(dt): try: explainer = shap.Explainer(models[i], background, feature_names=feature_names) except Exception as e: print("Final model can't be parsed, explain const_marginal_effect() instead!", repr(e)) return _shap_explain_cme(cme_model, X, d_t_, d_y_, feature_names=feature_names_, treatment_names=treatment_names_, output_names=output_names_, input_names=input_names_, background_samples=background_samples) shap_out = explainer(X) if (dy > 1): for j in range(dy): base_values = shap_out.base_values[(..., j)] values = shap_out.values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., j)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: shap_outs[output_names[0]][treatment_names[i]] = shap_out return shap_outs
Method to explain `model_cate` using shap Explainer(), will instead explain `const_marignal_effect` if `model_cate` can't be parsed. Models should be a list of length d_t. Each element in the list of models represents the const_marginal_effect associated with each treatments and for all outcomes, i.e. the outcome of the predict method of each model should be of length d_y. Parameters ---------- cme_models: function const_marginal_effect function. models: a single estimator or a list of estimators with one estimator per treatment models for the model's final stage model. X: (m, d_x) matrix Features for each sample. Should be in the same shape of fitted X in final stage. d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario. d_y: tuple of int Tuple of number of outcome. feature_names: optional None or list of strings of length X.shape[1] (Default=None) The names of input features. treatment_names: optional None or list (Default=None) The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: optional None or list (Default=None) The name of the outcome. input_names: dictionary or None The parsed names of variables at fit input time of cate estimators background_samples: int or None, (Default=100) How many samples to use to compute the baseline effect. If None then all samples are used. Returns ------- shap_outs: nested dictionary of Explanation object A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and each treatment name (e.g. "T0" when `treatment_names=None`) as key and the shap_values explanation object as value.
econml/_shap.py
_shap_explain_model_cate
Jimmy-INL/EconML
1
python
def _shap_explain_model_cate(cme_model, models, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `model_cate` using shap Explainer(), will instead explain `const_marignal_effect`\n if `model_cate` can\'t be parsed. Models should be a list of length d_t. Each element in the list of\n models represents the const_marginal_effect associated with each treatments and for all outcomes, i.e.\n the outcome of the predict method of each model should be of length d_y.\n\n Parameters\n ----------\n cme_models: function\n const_marginal_effect function.\n models: a single estimator or a list of estimators with one estimator per treatment\n models for the model\'s final stage model.\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario.\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t_, d_y_) = (d_t, d_y) (feature_names_, treatment_names_) = (feature_names, treatment_names) (output_names_, input_names_) = (output_names, input_names) (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) if (not isinstance(models, list)): models = [models] assert (len(models) == dt), "Number of final stage models don't equals to number of treatments!" bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for i in range(dt): try: explainer = shap.Explainer(models[i], background, feature_names=feature_names) except Exception as e: print("Final model can't be parsed, explain const_marginal_effect() instead!", repr(e)) return _shap_explain_cme(cme_model, X, d_t_, d_y_, feature_names=feature_names_, treatment_names=treatment_names_, output_names=output_names_, input_names=input_names_, background_samples=background_samples) shap_out = explainer(X) if (dy > 1): for j in range(dy): base_values = shap_out.base_values[(..., j)] values = shap_out.values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., j)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: shap_outs[output_names[0]][treatment_names[i]] = shap_out return shap_outs
def _shap_explain_model_cate(cme_model, models, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `model_cate` using shap Explainer(), will instead explain `const_marignal_effect`\n if `model_cate` can\'t be parsed. Models should be a list of length d_t. Each element in the list of\n models represents the const_marginal_effect associated with each treatments and for all outcomes, i.e.\n the outcome of the predict method of each model should be of length d_y.\n\n Parameters\n ----------\n cme_models: function\n const_marginal_effect function.\n models: a single estimator or a list of estimators with one estimator per treatment\n models for the model\'s final stage model.\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario.\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t_, d_y_) = (d_t, d_y) (feature_names_, treatment_names_) = (feature_names, treatment_names) (output_names_, input_names_) = (output_names, input_names) (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) if (not isinstance(models, list)): models = [models] assert (len(models) == dt), "Number of final stage models don't equals to number of treatments!" bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for i in range(dt): try: explainer = shap.Explainer(models[i], background, feature_names=feature_names) except Exception as e: print("Final model can't be parsed, explain const_marginal_effect() instead!", repr(e)) return _shap_explain_cme(cme_model, X, d_t_, d_y_, feature_names=feature_names_, treatment_names=treatment_names_, output_names=output_names_, input_names=input_names_, background_samples=background_samples) shap_out = explainer(X) if (dy > 1): for j in range(dy): base_values = shap_out.base_values[(..., j)] values = shap_out.values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., j)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: shap_outs[output_names[0]][treatment_names[i]] = shap_out return shap_outs<|docstring|>Method to explain `model_cate` using shap Explainer(), will instead explain `const_marignal_effect` if `model_cate` can't be parsed. Models should be a list of length d_t. Each element in the list of models represents the const_marginal_effect associated with each treatments and for all outcomes, i.e. 
the outcome of the predict method of each model should be of length d_y. Parameters ---------- cme_models: function const_marginal_effect function. models: a single estimator or a list of estimators with one estimator per treatment models for the model's final stage model. X: (m, d_x) matrix Features for each sample. Should be in the same shape of fitted X in final stage. d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario. d_y: tuple of int Tuple of number of outcome. feature_names: optional None or list of strings of length X.shape[1] (Default=None) The names of input features. treatment_names: optional None or list (Default=None) The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: optional None or list (Default=None) The name of the outcome. input_names: dictionary or None The parsed names of variables at fit input time of cate estimators background_samples: int or None, (Default=100) How many samples to use to compute the baseline effect. If None then all samples are used. Returns ------- shap_outs: nested dictionary of Explanation object A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and each treatment name (e.g. "T0" when `treatment_names=None`) as key and the shap_values explanation object as value.<|endoftext|>
a0cdb686834b5715180689f2a0797102698e070e4f3fe75aae1407f48167078f
def _shap_explain_joint_linear_model_cate(model_final, X, d_t, d_y, fit_cate_intercept, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `model_cate` of parametric final stage that was fitted on the cross product of\n `featurizer(X)` and T.\n\n Parameters\n ----------\n model_final: a single estimator\n the model\'s final stage model.\n X: matrix\n Featurized X\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n fit_cate_intercept: bool\n Whether the first entry of the coefficient of the joint linear model associated with\n each treatment, is an intercept.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t, d_y, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) (X, T) = broadcast_unit_treatments(X, d_t) X = cross_product(X, T) d_x = X.shape[1] ind_x = np.arange(d_x).reshape(d_t, (- 1)) if fit_cate_intercept: ind_x = ind_x[(:, 1:)] shap_outs = defaultdict(dict) for i in range(d_t): X_sub = X[(T[(:, i)] == 1)] bg_samples = (X_sub.shape[0] if (background_samples is None) else min(background_samples, X_sub.shape[0])) background = shap.maskers.Independent(X_sub, max_samples=bg_samples) explainer = shap.Explainer(model_final, background) shap_out = explainer(X_sub) data = shap_out.data[(:, ind_x[i])] if (d_y > 1): for j in range(d_y): base_values = shap_out.base_values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., ind_x[i], j)]) values = shap_out.values[(..., ind_x[i], j)] shap_out_new = shap.Explanation(values, base_values=base_values, data=data, main_effects=main_effects, feature_names=feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: values = shap_out.values[(..., ind_x[i])] main_effects = shap_out.main_effects[(..., ind_x[i], 0)] shap_out_new = shap.Explanation(values, base_values=shap_out.base_values, data=data, main_effects=main_effects, feature_names=feature_names) shap_outs[output_names[0]][treatment_names[i]] = shap_out_new return shap_outs
Method to explain `model_cate` of parametric final stage that was fitted on the cross product of `featurizer(X)` and T. Parameters ---------- model_final: a single estimator the model's final stage model. X: matrix Featurized X d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario). d_y: tuple of int Tuple of number of outcome. fit_cate_intercept: bool Whether the first entry of the coefficient of the joint linear model associated with each treatment, is an intercept. feature_names: optional None or list of strings of length X.shape[1] (Default=None) The names of input features. treatment_names: optional None or list (Default=None) The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: optional None or list (Default=None) The name of the outcome. input_names: dictionary or None The parsed names of variables at fit input time of cate estimators background_samples: int or None, (Default=100) How many samples to use to compute the baseline effect. If None then all samples are used. Returns ------- shap_outs: nested dictionary of Explanation object A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and each treatment name (e.g. "T0" when `treatment_names=None`) as key and the shap_values explanation object as value.
econml/_shap.py
_shap_explain_joint_linear_model_cate
Jimmy-INL/EconML
1
python
def _shap_explain_joint_linear_model_cate(model_final, X, d_t, d_y, fit_cate_intercept, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `model_cate` of parametric final stage that was fitted on the cross product of\n `featurizer(X)` and T.\n\n Parameters\n ----------\n model_final: a single estimator\n the model\'s final stage model.\n X: matrix\n Featurized X\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n fit_cate_intercept: bool\n Whether the first entry of the coefficient of the joint linear model associated with\n each treatment, is an intercept.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t, d_y, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) (X, T) = broadcast_unit_treatments(X, d_t) X = cross_product(X, T) d_x = X.shape[1] ind_x = np.arange(d_x).reshape(d_t, (- 1)) if fit_cate_intercept: ind_x = ind_x[(:, 1:)] shap_outs = defaultdict(dict) for i in range(d_t): X_sub = X[(T[(:, i)] == 1)] bg_samples = (X_sub.shape[0] if (background_samples is None) else min(background_samples, X_sub.shape[0])) background = shap.maskers.Independent(X_sub, max_samples=bg_samples) explainer = shap.Explainer(model_final, background) shap_out = explainer(X_sub) data = shap_out.data[(:, ind_x[i])] if (d_y > 1): for j in range(d_y): base_values = shap_out.base_values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., ind_x[i], j)]) values = shap_out.values[(..., ind_x[i], j)] shap_out_new = shap.Explanation(values, base_values=base_values, data=data, main_effects=main_effects, feature_names=feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: values = shap_out.values[(..., ind_x[i])] main_effects = shap_out.main_effects[(..., ind_x[i], 0)] shap_out_new = shap.Explanation(values, base_values=shap_out.base_values, data=data, main_effects=main_effects, feature_names=feature_names) shap_outs[output_names[0]][treatment_names[i]] = shap_out_new return shap_outs
def _shap_explain_joint_linear_model_cate(model_final, X, d_t, d_y, fit_cate_intercept, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain `model_cate` of parametric final stage that was fitted on the cross product of\n `featurizer(X)` and T.\n\n Parameters\n ----------\n model_final: a single estimator\n the model\'s final stage model.\n X: matrix\n Featurized X\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n fit_cate_intercept: bool\n Whether the first entry of the coefficient of the joint linear model associated with\n each treatment, is an intercept.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t, d_y, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) (X, T) = broadcast_unit_treatments(X, d_t) X = cross_product(X, T) d_x = X.shape[1] ind_x = np.arange(d_x).reshape(d_t, (- 1)) if fit_cate_intercept: ind_x = ind_x[(:, 1:)] shap_outs = defaultdict(dict) for i in range(d_t): X_sub = X[(T[(:, i)] == 1)] bg_samples = (X_sub.shape[0] if (background_samples is None) else min(background_samples, X_sub.shape[0])) background = shap.maskers.Independent(X_sub, max_samples=bg_samples) explainer = shap.Explainer(model_final, background) shap_out = explainer(X_sub) data = shap_out.data[(:, ind_x[i])] if (d_y > 1): for j in range(d_y): base_values = shap_out.base_values[(..., j)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., ind_x[i], j)]) values = shap_out.values[(..., ind_x[i], j)] shap_out_new = shap.Explanation(values, base_values=base_values, data=data, main_effects=main_effects, feature_names=feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: values = shap_out.values[(..., ind_x[i])] main_effects = shap_out.main_effects[(..., ind_x[i], 0)] shap_out_new = shap.Explanation(values, base_values=shap_out.base_values, data=data, main_effects=main_effects, feature_names=feature_names) shap_outs[output_names[0]][treatment_names[i]] = shap_out_new return shap_outs<|docstring|>Method to explain `model_cate` of parametric final stage that was fitted on the cross product of `featurizer(X)` and T. Parameters ---------- model_final: a single estimator the model's final stage model. X: matrix Featurized X d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario). d_y: tuple of int Tuple of number of outcome. 
fit_cate_intercept: bool Whether the first entry of the coefficient of the joint linear model associated with each treatment, is an intercept. feature_names: optional None or list of strings of length X.shape[1] (Default=None) The names of input features. treatment_names: optional None or list (Default=None) The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: optional None or list (Default=None) The name of the outcome. input_names: dictionary or None The parsed names of variables at fit input time of cate estimators background_samples: int or None, (Default=100) How many samples to use to compute the baseline effect. If None then all samples are used. Returns ------- shap_outs: nested dictionary of Explanation object A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and each treatment name (e.g. "T0" when `treatment_names=None`) as key and the shap_values explanation object as value.<|endoftext|>
9af0b69b51b0997fe84330c8e7e125b74c65c1bfcbc69af2476317ff411e3894
def _shap_explain_multitask_model_cate(cme_model, multitask_model_cate, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain a final cate model that is represented in a multi-task manner, i.e. the prediction\n of the method is of dimension equal to the number of treatments and represents the const_marginal_effect\n vector for all treatments.\n\n Parameters\n ----------\n cme_model: function\n const_marginal_effect function.\n multitask_model_cate: a single estimator or a list of estimators of length d_y if d_y > 1\n the model\'s final stage model whose predict represents the const_marginal_effect for\n all treatments (or list of models, one for each outcome)\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t_, d_y_) = (d_t, d_y) (feature_names_, treatment_names_) = (feature_names, treatment_names) (output_names_, input_names_) = (output_names, input_names) (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) if ((dy == 1) and (not isinstance(multitask_model_cate, list))): multitask_model_cate = [multitask_model_cate] bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for j in range(dy): try: explainer = shap.Explainer(multitask_model_cate[j], background, feature_names=feature_names) except Exception as e: print("Final model can't be parsed, explain const_marginal_effect() instead!", repr(e)) return _shap_explain_cme(cme_model, X, d_t_, d_y_, feature_names=feature_names_, treatment_names=treatment_names_, output_names=output_names_, input_names=input_names_, background_samples=background_samples) shap_out = explainer(X) if (dt > 1): for i in range(dt): base_values = shap_out.base_values[(..., i)] values = shap_out.values[(..., i)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., i)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: shap_outs[output_names[j]][treatment_names[0]] = shap_out return shap_outs
Method to explain a final cate model that is represented in a multi-task manner, i.e. the prediction of the method is of dimension equal to the number of treatments and represents the const_marginal_effect vector for all treatments. Parameters ---------- cme_model: function const_marginal_effect function. multitask_model_cate: a single estimator or a list of estimators of length d_y if d_y > 1 the model's final stage model whose predict represents the const_marginal_effect for all treatments (or list of models, one for each outcome) X: (m, d_x) matrix Features for each sample. Should be in the same shape of fitted X in final stage. d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario). d_y: tuple of int Tuple of number of outcome. feature_names: optional None or list of strings of length X.shape[1] (Default=None) The names of input features. treatment_names: optional None or list (Default=None) The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: optional None or list (Default=None) The name of the outcome. input_names: dictionary or None The parsed names of variables at fit input time of cate estimators background_samples: int or None, (Default=100) How many samples to use to compute the baseline effect. If None then all samples are used. Returns ------- shap_outs: nested dictionary of Explanation object A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and each treatment name (e.g. "T0" when `treatment_names=None`) as key and the shap_values explanation object as value.
econml/_shap.py
_shap_explain_multitask_model_cate
Jimmy-INL/EconML
1
python
def _shap_explain_multitask_model_cate(cme_model, multitask_model_cate, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain a final cate model that is represented in a multi-task manner, i.e. the prediction\n of the method is of dimension equal to the number of treatments and represents the const_marginal_effect\n vector for all treatments.\n\n Parameters\n ----------\n cme_model: function\n const_marginal_effect function.\n multitask_model_cate: a single estimator or a list of estimators of length d_y if d_y > 1\n the model\'s final stage model whose predict represents the const_marginal_effect for\n all treatments (or list of models, one for each outcome)\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t_, d_y_) = (d_t, d_y) (feature_names_, treatment_names_) = (feature_names, treatment_names) (output_names_, input_names_) = (output_names, input_names) (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) if ((dy == 1) and (not isinstance(multitask_model_cate, list))): multitask_model_cate = [multitask_model_cate] bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for j in range(dy): try: explainer = shap.Explainer(multitask_model_cate[j], background, feature_names=feature_names) except Exception as e: print("Final model can't be parsed, explain const_marginal_effect() instead!", repr(e)) return _shap_explain_cme(cme_model, X, d_t_, d_y_, feature_names=feature_names_, treatment_names=treatment_names_, output_names=output_names_, input_names=input_names_, background_samples=background_samples) shap_out = explainer(X) if (dt > 1): for i in range(dt): base_values = shap_out.base_values[(..., i)] values = shap_out.values[(..., i)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., i)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: shap_outs[output_names[j]][treatment_names[0]] = shap_out return shap_outs
def _shap_explain_multitask_model_cate(cme_model, multitask_model_cate, X, d_t, d_y, feature_names=None, treatment_names=None, output_names=None, input_names=None, background_samples=100): '\n Method to explain a final cate model that is represented in a multi-task manner, i.e. the prediction\n of the method is of dimension equal to the number of treatments and represents the const_marginal_effect\n vector for all treatments.\n\n Parameters\n ----------\n cme_model: function\n const_marginal_effect function.\n multitask_model_cate: a single estimator or a list of estimators of length d_y if d_y > 1\n the model\'s final stage model whose predict represents the const_marginal_effect for\n all treatments (or list of models, one for each outcome)\n X: (m, d_x) matrix\n Features for each sample. Should be in the same shape of fitted X in final stage.\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n feature_names: optional None or list of strings of length X.shape[1] (Default=None)\n The names of input features.\n treatment_names: optional None or list (Default=None)\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: optional None or list (Default=None)\n The name of the outcome.\n input_names: dictionary or None\n The parsed names of variables at fit input time of cate estimators\n background_samples: int or None, (Default=100)\n How many samples to use to compute the baseline effect. If None then all samples are used.\n\n Returns\n -------\n shap_outs: nested dictionary of Explanation object\n A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and\n each treatment name (e.g. 
"T0" when `treatment_names=None`) as key\n and the shap_values explanation object as value.\n ' (d_t_, d_y_) = (d_t, d_y) (feature_names_, treatment_names_) = (feature_names, treatment_names) (output_names_, input_names_) = (output_names, input_names) (dt, dy, treatment_names, output_names, feature_names) = _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names) if ((dy == 1) and (not isinstance(multitask_model_cate, list))): multitask_model_cate = [multitask_model_cate] bg_samples = (X.shape[0] if (background_samples is None) else min(background_samples, X.shape[0])) background = shap.maskers.Independent(X, max_samples=bg_samples) shap_outs = defaultdict(dict) for j in range(dy): try: explainer = shap.Explainer(multitask_model_cate[j], background, feature_names=feature_names) except Exception as e: print("Final model can't be parsed, explain const_marginal_effect() instead!", repr(e)) return _shap_explain_cme(cme_model, X, d_t_, d_y_, feature_names=feature_names_, treatment_names=treatment_names_, output_names=output_names_, input_names=input_names_, background_samples=background_samples) shap_out = explainer(X) if (dt > 1): for i in range(dt): base_values = shap_out.base_values[(..., i)] values = shap_out.values[(..., i)] main_effects = (None if (shap_out.main_effects is None) else shap_out.main_effects[(..., i)]) shap_out_new = shap.Explanation(values, base_values=base_values, data=shap_out.data, main_effects=main_effects, feature_names=shap_out.feature_names) shap_outs[output_names[j]][treatment_names[i]] = shap_out_new else: shap_outs[output_names[j]][treatment_names[0]] = shap_out return shap_outs<|docstring|>Method to explain a final cate model that is represented in a multi-task manner, i.e. the prediction of the method is of dimension equal to the number of treatments and represents the const_marginal_effect vector for all treatments. Parameters ---------- cme_model: function const_marginal_effect function. 
multitask_model_cate: a single estimator or a list of estimators of length d_y if d_y > 1 the model's final stage model whose predict represents the const_marginal_effect for all treatments (or list of models, one for each outcome) X: (m, d_x) matrix Features for each sample. Should be in the same shape of fitted X in final stage. d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario). d_y: tuple of int Tuple of number of outcome. feature_names: optional None or list of strings of length X.shape[1] (Default=None) The names of input features. treatment_names: optional None or list (Default=None) The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: optional None or list (Default=None) The name of the outcome. input_names: dictionary or None The parsed names of variables at fit input time of cate estimators background_samples: int or None, (Default=100) How many samples to use to compute the baseline effect. If None then all samples are used. Returns ------- shap_outs: nested dictionary of Explanation object A nested dictionary by using each output name (e.g. "Y0" when `output_names=None`) and each treatment name (e.g. "T0" when `treatment_names=None`) as key and the shap_values explanation object as value.<|endoftext|>
c542a604de95dca86f44f37bc27478b0b85255a9e1e3b18cdb2ca516b28e8b7c
def _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names): '\n Helper function to get treatment and output names\n\n Parameters\n ----------\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n treatment_names: None or list\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: None or list\n The name of the outcome.\n feature_names: None or list\n The user provided names of the features\n input_names: dicitionary\n The names of the features, outputs and treatments parsed from the fit input at fit time.\n\n Returns\n -------\n d_t: int\n d_y: int\n treament_names: List\n output_names: List\n feature_names: List or None\n ' d_t = (d_t[0] if d_t else 1) d_y = (d_y[0] if d_y else 1) if (treatment_names is None): if ((input_names is None) or (input_names['treatment_names'] is None)): treatment_names = [f'T{i}' for i in range(d_t)] else: treatment_names = input_names['treatment_names'] if (output_names is None): if ((input_names is None) or (input_names['output_names'] is None)): output_names = [f'Y{i}' for i in range(d_y)] else: output_names = input_names['output_names'] if ((feature_names is None) and (input_names is not None)): feature_names = input_names['feature_names'] return (d_t, d_y, treatment_names, output_names, feature_names)
Helper function to get treatment and output names Parameters ---------- d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario). d_y: tuple of int Tuple of number of outcome. treatment_names: None or list The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller) output_names: None or list The name of the outcome. feature_names: None or list The user provided names of the features input_names: dicitionary The names of the features, outputs and treatments parsed from the fit input at fit time. Returns ------- d_t: int d_y: int treament_names: List output_names: List feature_names: List or None
econml/_shap.py
_define_names
Jimmy-INL/EconML
1
python
def _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names): '\n Helper function to get treatment and output names\n\n Parameters\n ----------\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n treatment_names: None or list\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: None or list\n The name of the outcome.\n feature_names: None or list\n The user provided names of the features\n input_names: dicitionary\n The names of the features, outputs and treatments parsed from the fit input at fit time.\n\n Returns\n -------\n d_t: int\n d_y: int\n treament_names: List\n output_names: List\n feature_names: List or None\n ' d_t = (d_t[0] if d_t else 1) d_y = (d_y[0] if d_y else 1) if (treatment_names is None): if ((input_names is None) or (input_names['treatment_names'] is None)): treatment_names = [f'T{i}' for i in range(d_t)] else: treatment_names = input_names['treatment_names'] if (output_names is None): if ((input_names is None) or (input_names['output_names'] is None)): output_names = [f'Y{i}' for i in range(d_y)] else: output_names = input_names['output_names'] if ((feature_names is None) and (input_names is not None)): feature_names = input_names['feature_names'] return (d_t, d_y, treatment_names, output_names, feature_names)
def _define_names(d_t, d_y, treatment_names, output_names, feature_names, input_names): '\n Helper function to get treatment and output names\n\n Parameters\n ----------\n d_t: tuple of int\n Tuple of number of treatment (exclude control in discrete treatment scenario).\n d_y: tuple of int\n Tuple of number of outcome.\n treatment_names: None or list\n The name of treatment. In discrete treatment scenario, the name should not include the name of\n the baseline treatment (i.e. the control treatment, which by default is the alphabetically smaller)\n output_names: None or list\n The name of the outcome.\n feature_names: None or list\n The user provided names of the features\n input_names: dicitionary\n The names of the features, outputs and treatments parsed from the fit input at fit time.\n\n Returns\n -------\n d_t: int\n d_y: int\n treament_names: List\n output_names: List\n feature_names: List or None\n ' d_t = (d_t[0] if d_t else 1) d_y = (d_y[0] if d_y else 1) if (treatment_names is None): if ((input_names is None) or (input_names['treatment_names'] is None)): treatment_names = [f'T{i}' for i in range(d_t)] else: treatment_names = input_names['treatment_names'] if (output_names is None): if ((input_names is None) or (input_names['output_names'] is None)): output_names = [f'Y{i}' for i in range(d_y)] else: output_names = input_names['output_names'] if ((feature_names is None) and (input_names is not None)): feature_names = input_names['feature_names'] return (d_t, d_y, treatment_names, output_names, feature_names)<|docstring|>Helper function to get treatment and output names Parameters ---------- d_t: tuple of int Tuple of number of treatment (exclude control in discrete treatment scenario). d_y: tuple of int Tuple of number of outcome. treatment_names: None or list The name of treatment. In discrete treatment scenario, the name should not include the name of the baseline treatment (i.e. 
the control treatment, which by default is the alphabetically smaller) output_names: None or list The name of the outcome. feature_names: None or list The user provided names of the features input_names: dicitionary The names of the features, outputs and treatments parsed from the fit input at fit time. Returns ------- d_t: int d_y: int treament_names: List output_names: List feature_names: List or None<|endoftext|>
f46ded5764798c7b9eb465312d9d1a6446b03a6c01af17c0fc3b7ae8c7df19f9
def get_shape(feature): 'Get shape geometry from feature\n\n Parameters\n ----------\n feature : dict\n Feature as read from Fiona\n\n Returns\n -------\n shapely.geometry.BaseGeometry\n\n ' geom = feature['geometry'] try: return shape(geom) except Exception as err: _logger.warn('Failed to get shape from feature %s: %s', feature, err) return None
Get shape geometry from feature Parameters ---------- feature : dict Feature as read from Fiona Returns ------- shapely.geometry.BaseGeometry
src/satproc/chips.py
get_shape
dymaxionlabs/satproc
22
python
def get_shape(feature): 'Get shape geometry from feature\n\n Parameters\n ----------\n feature : dict\n Feature as read from Fiona\n\n Returns\n -------\n shapely.geometry.BaseGeometry\n\n ' geom = feature['geometry'] try: return shape(geom) except Exception as err: _logger.warn('Failed to get shape from feature %s: %s', feature, err) return None
def get_shape(feature): 'Get shape geometry from feature\n\n Parameters\n ----------\n feature : dict\n Feature as read from Fiona\n\n Returns\n -------\n shapely.geometry.BaseGeometry\n\n ' geom = feature['geometry'] try: return shape(geom) except Exception as err: _logger.warn('Failed to get shape from feature %s: %s', feature, err) return None<|docstring|>Get shape geometry from feature Parameters ---------- feature : dict Feature as read from Fiona Returns ------- shapely.geometry.BaseGeometry<|endoftext|>
650e77a351334706a34b7a7c5846a6fd794884efed28e1de4b1e384dd45db502
@registry.register_hparams def svg_decoder(): 'Basic hparams for SVG decoder.' hparams = common_hparams.basic_params1() hparams.daisy_chain_variables = False hparams.batch_size = 128 hparams.hidden_size = 1024 hparams.num_hidden_layers = 2 hparams.initializer = 'uniform_unit_scaling' hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.num_hidden_layers = 4 hparams.force_full_predict = True hparams.dropout = 0.5 hparams.learning_rate_warmup_steps = 100000 hparams.add_hparam('vocab_size', None) hparams.add_hparam('bottleneck_bits', 32) hparams.add_hparam('kl_beta', 300) hparams.add_hparam('free_bits_div', 4) hparams.add_hparam('soft_k', 10) hparams.add_hparam('mdn_k', 1) hparams.add_hparam('layer_norm', False) hparams.add_hparam('ff_dropout', True) hparams.add_hparam('rec_dropout', 0.3) hparams.add_hparam('twice_decoder', False) hparams.add_hparam('sg_bottleneck', True) hparams.add_hparam('condition_on_sln', False) hparams.add_hparam('use_cls', True) hparams.add_hparam('num_mixture', 50) hparams.add_hparam('mix_temperature', 0.0001) hparams.add_hparam('gauss_temperature', 0.0001) hparams.add_hparam('dont_reduce_loss', False) hparams.add_hparam('vae_ckpt_dir', '') hparams.add_hparam('vae_hparams', '') hparams.add_hparam('vae_data_dir', '') hparams.add_hparam('vae_hparam_set', 'image_vae') hparams.add_hparam('vae_problem', 'glyph_azzn_problem') hparams.add_hparam('num_categories', 62) hparams.add_hparam('absolute', False) hparams.add_hparam('just_render', False) hparams.add_hparam('plus_render', False) hparams.bottom = {'inputs': svg_decoder_loss.real_svg_bottom, 'targets': svg_decoder_loss.real_svg_bottom} hparams.top = {'targets': svg_decoder_loss.real_svg_top} hparams.loss = {'targets': svg_decoder_loss.real_svg_loss} return hparams
Basic hparams for SVG decoder.
magenta/models/svg_vae/svg_decoder.py
svg_decoder
sandutsar/magenta
16,143
python
@registry.register_hparams def svg_decoder(): hparams = common_hparams.basic_params1() hparams.daisy_chain_variables = False hparams.batch_size = 128 hparams.hidden_size = 1024 hparams.num_hidden_layers = 2 hparams.initializer = 'uniform_unit_scaling' hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.num_hidden_layers = 4 hparams.force_full_predict = True hparams.dropout = 0.5 hparams.learning_rate_warmup_steps = 100000 hparams.add_hparam('vocab_size', None) hparams.add_hparam('bottleneck_bits', 32) hparams.add_hparam('kl_beta', 300) hparams.add_hparam('free_bits_div', 4) hparams.add_hparam('soft_k', 10) hparams.add_hparam('mdn_k', 1) hparams.add_hparam('layer_norm', False) hparams.add_hparam('ff_dropout', True) hparams.add_hparam('rec_dropout', 0.3) hparams.add_hparam('twice_decoder', False) hparams.add_hparam('sg_bottleneck', True) hparams.add_hparam('condition_on_sln', False) hparams.add_hparam('use_cls', True) hparams.add_hparam('num_mixture', 50) hparams.add_hparam('mix_temperature', 0.0001) hparams.add_hparam('gauss_temperature', 0.0001) hparams.add_hparam('dont_reduce_loss', False) hparams.add_hparam('vae_ckpt_dir', ) hparams.add_hparam('vae_hparams', ) hparams.add_hparam('vae_data_dir', ) hparams.add_hparam('vae_hparam_set', 'image_vae') hparams.add_hparam('vae_problem', 'glyph_azzn_problem') hparams.add_hparam('num_categories', 62) hparams.add_hparam('absolute', False) hparams.add_hparam('just_render', False) hparams.add_hparam('plus_render', False) hparams.bottom = {'inputs': svg_decoder_loss.real_svg_bottom, 'targets': svg_decoder_loss.real_svg_bottom} hparams.top = {'targets': svg_decoder_loss.real_svg_top} hparams.loss = {'targets': svg_decoder_loss.real_svg_loss} return hparams
@registry.register_hparams def svg_decoder(): hparams = common_hparams.basic_params1() hparams.daisy_chain_variables = False hparams.batch_size = 128 hparams.hidden_size = 1024 hparams.num_hidden_layers = 2 hparams.initializer = 'uniform_unit_scaling' hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.num_hidden_layers = 4 hparams.force_full_predict = True hparams.dropout = 0.5 hparams.learning_rate_warmup_steps = 100000 hparams.add_hparam('vocab_size', None) hparams.add_hparam('bottleneck_bits', 32) hparams.add_hparam('kl_beta', 300) hparams.add_hparam('free_bits_div', 4) hparams.add_hparam('soft_k', 10) hparams.add_hparam('mdn_k', 1) hparams.add_hparam('layer_norm', False) hparams.add_hparam('ff_dropout', True) hparams.add_hparam('rec_dropout', 0.3) hparams.add_hparam('twice_decoder', False) hparams.add_hparam('sg_bottleneck', True) hparams.add_hparam('condition_on_sln', False) hparams.add_hparam('use_cls', True) hparams.add_hparam('num_mixture', 50) hparams.add_hparam('mix_temperature', 0.0001) hparams.add_hparam('gauss_temperature', 0.0001) hparams.add_hparam('dont_reduce_loss', False) hparams.add_hparam('vae_ckpt_dir', ) hparams.add_hparam('vae_hparams', ) hparams.add_hparam('vae_data_dir', ) hparams.add_hparam('vae_hparam_set', 'image_vae') hparams.add_hparam('vae_problem', 'glyph_azzn_problem') hparams.add_hparam('num_categories', 62) hparams.add_hparam('absolute', False) hparams.add_hparam('just_render', False) hparams.add_hparam('plus_render', False) hparams.bottom = {'inputs': svg_decoder_loss.real_svg_bottom, 'targets': svg_decoder_loss.real_svg_bottom} hparams.top = {'targets': svg_decoder_loss.real_svg_top} hparams.loss = {'targets': svg_decoder_loss.real_svg_loss} return hparams<|docstring|>Basic hparams for SVG decoder.<|endoftext|>
5cb018427674cd73cb76e9ead95d01d91e60525a4a2d98fbfa0d71beeb1f1f99
def infer_step(logits_so_far, current_hidden): 'Inference step of LSTM while loop.' current_hidden = tuple((rnn.LSTMStateTuple(c=s[0], h=s[1]) for s in current_hidden)) tm = self._problem_hparams.modality['targets'] reset_scope = tf.variable_scope(tf.VariableScope(tf.AUTO_REUSE, ''), reuse=tf.AUTO_REUSE, auxiliary_name_scope=False) top_scope = tf.variable_scope('svg_decoder/{}_modality'.format(tm), reuse=tf.AUTO_REUSE) with reset_scope, top_scope: samples_so_far = self.hparams.top['targets'](logits_so_far, None, self.hparams, self.problem_hparams.vocab_size) samples_so_far = tf.concat([zero_pad, samples_so_far], axis=1) shifted_targets = common_layers.flatten4d3d(samples_so_far) shifted_targets = shifted_targets[(:, (- 1):, :)] sln_offset = 0 if hparams.condition_on_sln: sln_offset = 51 pre_tile_y = tf.reshape(bottleneck, [common_layers.shape_list(bottleneck)[0], 1, ((hparams.bottleneck_bits + hparams.num_categories) + sln_offset)]) overlay_x = tf.tile(pre_tile_y, [1, common_layers.shape_list(shifted_targets)[1], 1]) inputs = tf.concat([shifted_targets, overlay_x], (- 1)) seq_len_batch = tf.ones([common_layers.shape_list(inputs)[0]]) with tf.variable_scope('pre_decoder', reuse=tf.AUTO_REUSE): inputs = tf.layers.dense(inputs, hparams.hidden_size, name='bottom') inputs = tf.nn.tanh(inputs) with tf.variable_scope('lstm_decoder', reuse=tf.AUTO_REUSE): (next_step, next_state) = tf.nn.dynamic_rnn(layers, inputs, seq_len_batch, initial_state=current_hidden, dtype=tf.float32, time_major=False) next_step = tf.expand_dims(next_step, [1]) logits_so_far = tf.concat([logits_so_far, next_step], 1) next_state = tuple(((s.c, s.h) for s in next_state)) return (logits_so_far, next_state)
Inference step of LSTM while loop.
magenta/models/svg_vae/svg_decoder.py
infer_step
sandutsar/magenta
16,143
python
def infer_step(logits_so_far, current_hidden): current_hidden = tuple((rnn.LSTMStateTuple(c=s[0], h=s[1]) for s in current_hidden)) tm = self._problem_hparams.modality['targets'] reset_scope = tf.variable_scope(tf.VariableScope(tf.AUTO_REUSE, ), reuse=tf.AUTO_REUSE, auxiliary_name_scope=False) top_scope = tf.variable_scope('svg_decoder/{}_modality'.format(tm), reuse=tf.AUTO_REUSE) with reset_scope, top_scope: samples_so_far = self.hparams.top['targets'](logits_so_far, None, self.hparams, self.problem_hparams.vocab_size) samples_so_far = tf.concat([zero_pad, samples_so_far], axis=1) shifted_targets = common_layers.flatten4d3d(samples_so_far) shifted_targets = shifted_targets[(:, (- 1):, :)] sln_offset = 0 if hparams.condition_on_sln: sln_offset = 51 pre_tile_y = tf.reshape(bottleneck, [common_layers.shape_list(bottleneck)[0], 1, ((hparams.bottleneck_bits + hparams.num_categories) + sln_offset)]) overlay_x = tf.tile(pre_tile_y, [1, common_layers.shape_list(shifted_targets)[1], 1]) inputs = tf.concat([shifted_targets, overlay_x], (- 1)) seq_len_batch = tf.ones([common_layers.shape_list(inputs)[0]]) with tf.variable_scope('pre_decoder', reuse=tf.AUTO_REUSE): inputs = tf.layers.dense(inputs, hparams.hidden_size, name='bottom') inputs = tf.nn.tanh(inputs) with tf.variable_scope('lstm_decoder', reuse=tf.AUTO_REUSE): (next_step, next_state) = tf.nn.dynamic_rnn(layers, inputs, seq_len_batch, initial_state=current_hidden, dtype=tf.float32, time_major=False) next_step = tf.expand_dims(next_step, [1]) logits_so_far = tf.concat([logits_so_far, next_step], 1) next_state = tuple(((s.c, s.h) for s in next_state)) return (logits_so_far, next_state)
def infer_step(logits_so_far, current_hidden): current_hidden = tuple((rnn.LSTMStateTuple(c=s[0], h=s[1]) for s in current_hidden)) tm = self._problem_hparams.modality['targets'] reset_scope = tf.variable_scope(tf.VariableScope(tf.AUTO_REUSE, ), reuse=tf.AUTO_REUSE, auxiliary_name_scope=False) top_scope = tf.variable_scope('svg_decoder/{}_modality'.format(tm), reuse=tf.AUTO_REUSE) with reset_scope, top_scope: samples_so_far = self.hparams.top['targets'](logits_so_far, None, self.hparams, self.problem_hparams.vocab_size) samples_so_far = tf.concat([zero_pad, samples_so_far], axis=1) shifted_targets = common_layers.flatten4d3d(samples_so_far) shifted_targets = shifted_targets[(:, (- 1):, :)] sln_offset = 0 if hparams.condition_on_sln: sln_offset = 51 pre_tile_y = tf.reshape(bottleneck, [common_layers.shape_list(bottleneck)[0], 1, ((hparams.bottleneck_bits + hparams.num_categories) + sln_offset)]) overlay_x = tf.tile(pre_tile_y, [1, common_layers.shape_list(shifted_targets)[1], 1]) inputs = tf.concat([shifted_targets, overlay_x], (- 1)) seq_len_batch = tf.ones([common_layers.shape_list(inputs)[0]]) with tf.variable_scope('pre_decoder', reuse=tf.AUTO_REUSE): inputs = tf.layers.dense(inputs, hparams.hidden_size, name='bottom') inputs = tf.nn.tanh(inputs) with tf.variable_scope('lstm_decoder', reuse=tf.AUTO_REUSE): (next_step, next_state) = tf.nn.dynamic_rnn(layers, inputs, seq_len_batch, initial_state=current_hidden, dtype=tf.float32, time_major=False) next_step = tf.expand_dims(next_step, [1]) logits_so_far = tf.concat([logits_so_far, next_step], 1) next_state = tuple(((s.c, s.h) for s in next_state)) return (logits_so_far, next_state)<|docstring|>Inference step of LSTM while loop.<|endoftext|>
a078ab46f0b0641eebc142486964b218bde239cebf0d90f71bcb091cfbae5321
def isValidEncoding(encoding): 'Determine if a string is a supported encoding' return ((encoding is not None) and (type(encoding) == types.StringType) and (encoding.lower().strip() in encodings))
Determine if a string is a supported encoding
planet/vendor/html5lib/inputstream.py
isValidEncoding
moztw/planet.moztw.org
1
python
def isValidEncoding(encoding): return ((encoding is not None) and (type(encoding) == types.StringType) and (encoding.lower().strip() in encodings))
def isValidEncoding(encoding): return ((encoding is not None) and (type(encoding) == types.StringType) and (encoding.lower().strip() in encodings))<|docstring|>Determine if a string is a supported encoding<|endoftext|>
1a0d6398295203c47e9e68e193a3ddc22fc7771ab51d94d2e633994dd62f60e3
def __init__(self, source, encoding=None, parseMeta=True, chardet=True): 'Initialises the HTMLInputStream.\n\n HTMLInputStream(source, [encoding]) -> Normalized stream from source\n for use by the HTML5Lib.\n\n source can be either a file-object, local filename or a string.\n\n The optional encoding parameter must be a string that indicates\n the encoding. If specified, that encoding will be used,\n regardless of any BOM or later declaration (such as in a meta\n element)\n \n parseMeta - Look for a <meta> element containing encoding information\n\n ' self.newLines = [0] self.charEncoding = encoding self.rawStream = self.openStream(source) self.numBytesMeta = 512 self.numBytesChardet = 100 self.defaultEncoding = 'windows-1252' if ((self.charEncoding is None) or (not isValidEncoding(self.charEncoding))): self.charEncoding = self.detectEncoding(parseMeta, chardet) self.dataStream = codecs.getreader(self.charEncoding)(self.rawStream, 'replace') self.queue = [] self.errors = [] self.line = self.col = 0 self.lineLengths = [] self._lastChunkEndsWithCR = False
Initialises the HTMLInputStream. HTMLInputStream(source, [encoding]) -> Normalized stream from source for use by the HTML5Lib. source can be either a file-object, local filename or a string. The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) parseMeta - Look for a <meta> element containing encoding information
planet/vendor/html5lib/inputstream.py
__init__
moztw/planet.moztw.org
1
python
def __init__(self, source, encoding=None, parseMeta=True, chardet=True): 'Initialises the HTMLInputStream.\n\n HTMLInputStream(source, [encoding]) -> Normalized stream from source\n for use by the HTML5Lib.\n\n source can be either a file-object, local filename or a string.\n\n The optional encoding parameter must be a string that indicates\n the encoding. If specified, that encoding will be used,\n regardless of any BOM or later declaration (such as in a meta\n element)\n \n parseMeta - Look for a <meta> element containing encoding information\n\n ' self.newLines = [0] self.charEncoding = encoding self.rawStream = self.openStream(source) self.numBytesMeta = 512 self.numBytesChardet = 100 self.defaultEncoding = 'windows-1252' if ((self.charEncoding is None) or (not isValidEncoding(self.charEncoding))): self.charEncoding = self.detectEncoding(parseMeta, chardet) self.dataStream = codecs.getreader(self.charEncoding)(self.rawStream, 'replace') self.queue = [] self.errors = [] self.line = self.col = 0 self.lineLengths = [] self._lastChunkEndsWithCR = False
def __init__(self, source, encoding=None, parseMeta=True, chardet=True): 'Initialises the HTMLInputStream.\n\n HTMLInputStream(source, [encoding]) -> Normalized stream from source\n for use by the HTML5Lib.\n\n source can be either a file-object, local filename or a string.\n\n The optional encoding parameter must be a string that indicates\n the encoding. If specified, that encoding will be used,\n regardless of any BOM or later declaration (such as in a meta\n element)\n \n parseMeta - Look for a <meta> element containing encoding information\n\n ' self.newLines = [0] self.charEncoding = encoding self.rawStream = self.openStream(source) self.numBytesMeta = 512 self.numBytesChardet = 100 self.defaultEncoding = 'windows-1252' if ((self.charEncoding is None) or (not isValidEncoding(self.charEncoding))): self.charEncoding = self.detectEncoding(parseMeta, chardet) self.dataStream = codecs.getreader(self.charEncoding)(self.rawStream, 'replace') self.queue = [] self.errors = [] self.line = self.col = 0 self.lineLengths = [] self._lastChunkEndsWithCR = False<|docstring|>Initialises the HTMLInputStream. HTMLInputStream(source, [encoding]) -> Normalized stream from source for use by the HTML5Lib. source can be either a file-object, local filename or a string. The optional encoding parameter must be a string that indicates the encoding. If specified, that encoding will be used, regardless of any BOM or later declaration (such as in a meta element) parseMeta - Look for a <meta> element containing encoding information<|endoftext|>
a2cd42ba663c8a3431315328b43c8677d282cf5b36c03aadcaf1e36ab156159d
def openStream(self, source): 'Produces a file object from source.\n\n source can be either a file object, local filename or a string.\n\n ' if hasattr(source, 'read'): stream = source else: if isinstance(source, unicode): source = source.encode('utf-8') self.charEncoding = 'utf-8' import cStringIO stream = cStringIO.StringIO(str(source)) return stream
Produces a file object from source. source can be either a file object, local filename or a string.
planet/vendor/html5lib/inputstream.py
openStream
moztw/planet.moztw.org
1
python
def openStream(self, source): 'Produces a file object from source.\n\n source can be either a file object, local filename or a string.\n\n ' if hasattr(source, 'read'): stream = source else: if isinstance(source, unicode): source = source.encode('utf-8') self.charEncoding = 'utf-8' import cStringIO stream = cStringIO.StringIO(str(source)) return stream
def openStream(self, source): 'Produces a file object from source.\n\n source can be either a file object, local filename or a string.\n\n ' if hasattr(source, 'read'): stream = source else: if isinstance(source, unicode): source = source.encode('utf-8') self.charEncoding = 'utf-8' import cStringIO stream = cStringIO.StringIO(str(source)) return stream<|docstring|>Produces a file object from source. source can be either a file object, local filename or a string.<|endoftext|>
40ce9f16b0a0db2c66325e813dc684658d34e9aa647f64b4bf17101f84f03ccb
def detectBOM(self): 'Attempts to detect at BOM at the start of the stream. If\n an encoding can be determined from the BOM return the name of the\n encoding otherwise return None' bomDict = {codecs.BOM_UTF8: 'utf-8', codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be', codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'} string = self.rawStream.read(4) encoding = bomDict.get(string[:3]) seek = 3 if (not encoding): encoding = bomDict.get(string) seek = 4 if (not encoding): encoding = bomDict.get(string[:2]) seek = 2 self.seek(string, ((encoding and seek) or 0)) return encoding
Attempts to detect at BOM at the start of the stream. If an encoding can be determined from the BOM return the name of the encoding otherwise return None
planet/vendor/html5lib/inputstream.py
detectBOM
moztw/planet.moztw.org
1
python
def detectBOM(self): 'Attempts to detect at BOM at the start of the stream. If\n an encoding can be determined from the BOM return the name of the\n encoding otherwise return None' bomDict = {codecs.BOM_UTF8: 'utf-8', codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be', codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'} string = self.rawStream.read(4) encoding = bomDict.get(string[:3]) seek = 3 if (not encoding): encoding = bomDict.get(string) seek = 4 if (not encoding): encoding = bomDict.get(string[:2]) seek = 2 self.seek(string, ((encoding and seek) or 0)) return encoding
def detectBOM(self): 'Attempts to detect at BOM at the start of the stream. If\n an encoding can be determined from the BOM return the name of the\n encoding otherwise return None' bomDict = {codecs.BOM_UTF8: 'utf-8', codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be', codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'} string = self.rawStream.read(4) encoding = bomDict.get(string[:3]) seek = 3 if (not encoding): encoding = bomDict.get(string) seek = 4 if (not encoding): encoding = bomDict.get(string[:2]) seek = 2 self.seek(string, ((encoding and seek) or 0)) return encoding<|docstring|>Attempts to detect at BOM at the start of the stream. If an encoding can be determined from the BOM return the name of the encoding otherwise return None<|endoftext|>
1fcff55b85b28d7ad44196793443894bca7d86391fe916ba05362ecc0d0ddc2d
def seek(self, buffer, n): 'Unget buffer[n:]' if hasattr(self.rawStream, 'unget'): self.rawStream.unget(buffer[n:]) return if hasattr(self.rawStream, 'seek'): try: self.rawStream.seek(n) return except IOError: pass class BufferedStream(): def __init__(self, data, stream): self.data = data self.stream = stream def read(self, chars=(- 1)): if ((chars == (- 1)) or (chars > len(self.data))): result = self.data self.data = '' if (chars == (- 1)): return (result + self.stream.read()) else: return (result + self.stream.read((chars - len(result)))) elif (not self.data): return self.stream.read(chars) else: result = self.data[:chars] self.data = self.data[chars:] return result def unget(self, data): if self.data: self.data += data else: self.data = data self.rawStream = BufferedStream(buffer[n:], self.rawStream)
Unget buffer[n:]
planet/vendor/html5lib/inputstream.py
seek
moztw/planet.moztw.org
1
python
def seek(self, buffer, n): if hasattr(self.rawStream, 'unget'): self.rawStream.unget(buffer[n:]) return if hasattr(self.rawStream, 'seek'): try: self.rawStream.seek(n) return except IOError: pass class BufferedStream(): def __init__(self, data, stream): self.data = data self.stream = stream def read(self, chars=(- 1)): if ((chars == (- 1)) or (chars > len(self.data))): result = self.data self.data = if (chars == (- 1)): return (result + self.stream.read()) else: return (result + self.stream.read((chars - len(result)))) elif (not self.data): return self.stream.read(chars) else: result = self.data[:chars] self.data = self.data[chars:] return result def unget(self, data): if self.data: self.data += data else: self.data = data self.rawStream = BufferedStream(buffer[n:], self.rawStream)
def seek(self, buffer, n): if hasattr(self.rawStream, 'unget'): self.rawStream.unget(buffer[n:]) return if hasattr(self.rawStream, 'seek'): try: self.rawStream.seek(n) return except IOError: pass class BufferedStream(): def __init__(self, data, stream): self.data = data self.stream = stream def read(self, chars=(- 1)): if ((chars == (- 1)) or (chars > len(self.data))): result = self.data self.data = if (chars == (- 1)): return (result + self.stream.read()) else: return (result + self.stream.read((chars - len(result)))) elif (not self.data): return self.stream.read(chars) else: result = self.data[:chars] self.data = self.data[chars:] return result def unget(self, data): if self.data: self.data += data else: self.data = data self.rawStream = BufferedStream(buffer[n:], self.rawStream)<|docstring|>Unget buffer[n:]<|endoftext|>
f42aa2337cb92ad34849743b8179e355c2f9f8be61c659c366d67c88d29a0a88
def detectEncodingMeta(self): 'Report the encoding declared by the meta element\n ' buffer = self.rawStream.read(self.numBytesMeta) parser = EncodingParser(buffer) self.seek(buffer, 0) return parser.getEncoding()
Report the encoding declared by the meta element
planet/vendor/html5lib/inputstream.py
detectEncodingMeta
moztw/planet.moztw.org
1
python
def detectEncodingMeta(self): '\n ' buffer = self.rawStream.read(self.numBytesMeta) parser = EncodingParser(buffer) self.seek(buffer, 0) return parser.getEncoding()
def detectEncodingMeta(self): '\n ' buffer = self.rawStream.read(self.numBytesMeta) parser = EncodingParser(buffer) self.seek(buffer, 0) return parser.getEncoding()<|docstring|>Report the encoding declared by the meta element<|endoftext|>
796957185e278d426368be435844b8b02a42a390c7af396f16d4e30b6dc79cef
def position(self): 'Returns (line, col) of the current position in the stream.' (line, col) = (self.line, self.col) return ((line + 1), col)
Returns (line, col) of the current position in the stream.
planet/vendor/html5lib/inputstream.py
position
moztw/planet.moztw.org
1
python
def position(self): (line, col) = (self.line, self.col) return ((line + 1), col)
def position(self): (line, col) = (self.line, self.col) return ((line + 1), col)<|docstring|>Returns (line, col) of the current position in the stream.<|endoftext|>
860ee199a73f7540c48f0c3a7d7afeba8def34c442ae37e094aaa57e68be2864
def char(self): ' Read one character from the stream or queue if available. Return\n EOF when EOF is reached.\n ' if (not self.queue): self.readChunk() if (not self.queue): return EOF char = self.queue.pop(0) if (char == '\n'): self.lineLengths.append(self.col) self.line += 1 self.col = 0 else: self.col += 1 return char
Read one character from the stream or queue if available. Return EOF when EOF is reached.
planet/vendor/html5lib/inputstream.py
char
moztw/planet.moztw.org
1
python
def char(self): ' Read one character from the stream or queue if available. Return\n EOF when EOF is reached.\n ' if (not self.queue): self.readChunk() if (not self.queue): return EOF char = self.queue.pop(0) if (char == '\n'): self.lineLengths.append(self.col) self.line += 1 self.col = 0 else: self.col += 1 return char
def char(self): ' Read one character from the stream or queue if available. Return\n EOF when EOF is reached.\n ' if (not self.queue): self.readChunk() if (not self.queue): return EOF char = self.queue.pop(0) if (char == '\n'): self.lineLengths.append(self.col) self.line += 1 self.col = 0 else: self.col += 1 return char<|docstring|>Read one character from the stream or queue if available. Return EOF when EOF is reached.<|endoftext|>
71d6581317377c523ae813a73ef8b1fbe39ed3056673088424455e667a1425b2
def charsUntil(self, characters, opposite=False): ' Returns a string of characters from the stream up to but not\n including any character in characters or EOF. characters can be\n any container that supports the in method being called on it.\n ' if (not self.queue): self.readChunk() if ((not self.queue) or (self.queue[0] == None)): return u'' i = 0 while ((self.queue[i] in characters) == opposite): i += 1 if (i == len(self.queue)): self.readChunk() if ((i == len(self.queue)) or (self.queue[i] is EOF)): break if (self.queue[i] == '\n'): self.lineLengths.append(self.col) self.line += 1 self.col = 0 else: self.col += 1 rv = u''.join(self.queue[:i]) self.queue = self.queue[i:] return rv
Returns a string of characters from the stream up to but not including any character in characters or EOF. characters can be any container that supports the in method being called on it.
planet/vendor/html5lib/inputstream.py
charsUntil
moztw/planet.moztw.org
1
python
def charsUntil(self, characters, opposite=False): ' Returns a string of characters from the stream up to but not\n including any character in characters or EOF. characters can be\n any container that supports the in method being called on it.\n ' if (not self.queue): self.readChunk() if ((not self.queue) or (self.queue[0] == None)): return u i = 0 while ((self.queue[i] in characters) == opposite): i += 1 if (i == len(self.queue)): self.readChunk() if ((i == len(self.queue)) or (self.queue[i] is EOF)): break if (self.queue[i] == '\n'): self.lineLengths.append(self.col) self.line += 1 self.col = 0 else: self.col += 1 rv = u.join(self.queue[:i]) self.queue = self.queue[i:] return rv
def charsUntil(self, characters, opposite=False): ' Returns a string of characters from the stream up to but not\n including any character in characters or EOF. characters can be\n any container that supports the in method being called on it.\n ' if (not self.queue): self.readChunk() if ((not self.queue) or (self.queue[0] == None)): return u i = 0 while ((self.queue[i] in characters) == opposite): i += 1 if (i == len(self.queue)): self.readChunk() if ((i == len(self.queue)) or (self.queue[i] is EOF)): break if (self.queue[i] == '\n'): self.lineLengths.append(self.col) self.line += 1 self.col = 0 else: self.col += 1 rv = u.join(self.queue[:i]) self.queue = self.queue[i:] return rv<|docstring|>Returns a string of characters from the stream up to but not including any character in characters or EOF. characters can be any container that supports the in method being called on it.<|endoftext|>
96f7b202e2e488de8dbb7bad41480b16ede7967d904744a68b0716197a30f659
def skip(self, chars=spaceCharacters): 'Skip past a list of characters' while (self.currentByte in chars): self.position += 1
Skip past a list of characters
planet/vendor/html5lib/inputstream.py
skip
moztw/planet.moztw.org
1
python
def skip(self, chars=spaceCharacters): while (self.currentByte in chars): self.position += 1
def skip(self, chars=spaceCharacters): while (self.currentByte in chars): self.position += 1<|docstring|>Skip past a list of characters<|endoftext|>
058cf803b459cfebff16f99dcebe915c8d1dd10841ea5f480bb78cafe3e14d90
def matchBytes(self, bytes, lower=False): 'Look for a sequence of bytes at the start of a string. If the bytes \n are found return True and advance the position to the byte after the \n match. Otherwise return False and leave the position alone' data = self[self.position:(self.position + len(bytes))] if lower: data = data.lower() rv = data.startswith(bytes) if (rv == True): self.position += len(bytes) return rv
Look for a sequence of bytes at the start of a string. If the bytes are found return True and advance the position to the byte after the match. Otherwise return False and leave the position alone
planet/vendor/html5lib/inputstream.py
matchBytes
moztw/planet.moztw.org
1
python
def matchBytes(self, bytes, lower=False): 'Look for a sequence of bytes at the start of a string. If the bytes \n are found return True and advance the position to the byte after the \n match. Otherwise return False and leave the position alone' data = self[self.position:(self.position + len(bytes))] if lower: data = data.lower() rv = data.startswith(bytes) if (rv == True): self.position += len(bytes) return rv
def matchBytes(self, bytes, lower=False): 'Look for a sequence of bytes at the start of a string. If the bytes \n are found return True and advance the position to the byte after the \n match. Otherwise return False and leave the position alone' data = self[self.position:(self.position + len(bytes))] if lower: data = data.lower() rv = data.startswith(bytes) if (rv == True): self.position += len(bytes) return rv<|docstring|>Look for a sequence of bytes at the start of a string. If the bytes are found return True and advance the position to the byte after the match. Otherwise return False and leave the position alone<|endoftext|>
9fee19ed8c88c18281486dc74c228172754a74ec6ba30eddae7ce76ae0d3c56e
def jumpTo(self, bytes): 'Look for the next sequence of bytes matching a given sequence. If\n a match is found advance the position to the last byte of the match' newPosition = self[self.position:].find(bytes) if (newPosition > (- 1)): self._position += ((newPosition + len(bytes)) - 1) return True else: raise StopIteration
Look for the next sequence of bytes matching a given sequence. If a match is found advance the position to the last byte of the match
planet/vendor/html5lib/inputstream.py
jumpTo
moztw/planet.moztw.org
1
python
def jumpTo(self, bytes): 'Look for the next sequence of bytes matching a given sequence. If\n a match is found advance the position to the last byte of the match' newPosition = self[self.position:].find(bytes) if (newPosition > (- 1)): self._position += ((newPosition + len(bytes)) - 1) return True else: raise StopIteration
def jumpTo(self, bytes): 'Look for the next sequence of bytes matching a given sequence. If\n a match is found advance the position to the last byte of the match' newPosition = self[self.position:].find(bytes) if (newPosition > (- 1)): self._position += ((newPosition + len(bytes)) - 1) return True else: raise StopIteration<|docstring|>Look for the next sequence of bytes matching a given sequence. If a match is found advance the position to the last byte of the match<|endoftext|>
79943ed1b04a37a94669aa23a69693611c16e8820edd2265eabaafbbeebb548c
def findNext(self, byteList): 'Move the pointer so it points to the next byte in a set of possible\n bytes' while (self.currentByte not in byteList): self.position += 1
Move the pointer so it points to the next byte in a set of possible bytes
planet/vendor/html5lib/inputstream.py
findNext
moztw/planet.moztw.org
1
python
def findNext(self, byteList): 'Move the pointer so it points to the next byte in a set of possible\n bytes' while (self.currentByte not in byteList): self.position += 1
def findNext(self, byteList): 'Move the pointer so it points to the next byte in a set of possible\n bytes' while (self.currentByte not in byteList): self.position += 1<|docstring|>Move the pointer so it points to the next byte in a set of possible bytes<|endoftext|>
f0cbc4c10cd90378cd54e403e37614446735f03430b4d63c2593bc74ece1c465
def __init__(self, data): 'string - the data to work on for encoding detection' self.data = EncodingBytes(data) self.encoding = None
string - the data to work on for encoding detection
planet/vendor/html5lib/inputstream.py
__init__
moztw/planet.moztw.org
1
python
def __init__(self, data): self.data = EncodingBytes(data) self.encoding = None
def __init__(self, data): self.data = EncodingBytes(data) self.encoding = None<|docstring|>string - the data to work on for encoding detection<|endoftext|>
8ce9b84c55c5fcb362ef0fa7c77b1b3c958ac4bb3386a6a2b297f5a28a950cc2
def handleComment(self): 'Skip over comments' return self.data.jumpTo('-->')
Skip over comments
planet/vendor/html5lib/inputstream.py
handleComment
moztw/planet.moztw.org
1
python
def handleComment(self): return self.data.jumpTo('-->')
def handleComment(self): return self.data.jumpTo('-->')<|docstring|>Skip over comments<|endoftext|>
f59cc8e93b7e0942b8a52638ba2715fc2ccbd6b5d4f9fb4c3ddb84fbfefe37f0
def getAttribute(self): 'Return a name,value pair for the next attribute in the stream, \n if one is found, or None' self.data.skip((list(spaceCharacters) + ['/'])) if (self.data.currentByte == '<'): self.data.position -= 1 return None elif (self.data.currentByte == '>'): return None attrName = [] attrValue = [] spaceFound = False while True: if ((self.data.currentByte == '=') and attrName): break elif (self.data.currentByte in spaceCharacters): spaceFound = True break elif (self.data.currentByte in ('/', '<', '>')): return (''.join(attrName), '') elif (self.data.currentByte in asciiUppercase): attrName.extend(self.data.currentByte.lower()) else: attrName.extend(self.data.currentByte) self.data.position += 1 if spaceFound: self.data.skip() if (self.data.currentByte != '='): self.data.position -= 1 return (''.join(attrName), '') self.data.position += 1 self.data.skip() if (self.data.currentByte in ("'", '"')): quoteChar = self.data.currentByte while True: self.data.position += 1 if (self.data.currentByte == quoteChar): self.data.position += 1 return (''.join(attrName), ''.join(attrValue)) elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte) elif (self.data.currentByte in ('>', '<')): return (''.join(attrName), '') elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte) while True: self.data.position += 1 if (self.data.currentByte in (list(spaceCharacters) + ['>', '<'])): return (''.join(attrName), ''.join(attrValue)) elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte)
Return a name,value pair for the next attribute in the stream, if one is found, or None
planet/vendor/html5lib/inputstream.py
getAttribute
moztw/planet.moztw.org
1
python
def getAttribute(self): 'Return a name,value pair for the next attribute in the stream, \n if one is found, or None' self.data.skip((list(spaceCharacters) + ['/'])) if (self.data.currentByte == '<'): self.data.position -= 1 return None elif (self.data.currentByte == '>'): return None attrName = [] attrValue = [] spaceFound = False while True: if ((self.data.currentByte == '=') and attrName): break elif (self.data.currentByte in spaceCharacters): spaceFound = True break elif (self.data.currentByte in ('/', '<', '>')): return (.join(attrName), ) elif (self.data.currentByte in asciiUppercase): attrName.extend(self.data.currentByte.lower()) else: attrName.extend(self.data.currentByte) self.data.position += 1 if spaceFound: self.data.skip() if (self.data.currentByte != '='): self.data.position -= 1 return (.join(attrName), ) self.data.position += 1 self.data.skip() if (self.data.currentByte in ("'", '"')): quoteChar = self.data.currentByte while True: self.data.position += 1 if (self.data.currentByte == quoteChar): self.data.position += 1 return (.join(attrName), .join(attrValue)) elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte) elif (self.data.currentByte in ('>', '<')): return (.join(attrName), ) elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte) while True: self.data.position += 1 if (self.data.currentByte in (list(spaceCharacters) + ['>', '<'])): return (.join(attrName), .join(attrValue)) elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte)
def getAttribute(self): 'Return a name,value pair for the next attribute in the stream, \n if one is found, or None' self.data.skip((list(spaceCharacters) + ['/'])) if (self.data.currentByte == '<'): self.data.position -= 1 return None elif (self.data.currentByte == '>'): return None attrName = [] attrValue = [] spaceFound = False while True: if ((self.data.currentByte == '=') and attrName): break elif (self.data.currentByte in spaceCharacters): spaceFound = True break elif (self.data.currentByte in ('/', '<', '>')): return (.join(attrName), ) elif (self.data.currentByte in asciiUppercase): attrName.extend(self.data.currentByte.lower()) else: attrName.extend(self.data.currentByte) self.data.position += 1 if spaceFound: self.data.skip() if (self.data.currentByte != '='): self.data.position -= 1 return (.join(attrName), ) self.data.position += 1 self.data.skip() if (self.data.currentByte in ("'", '"')): quoteChar = self.data.currentByte while True: self.data.position += 1 if (self.data.currentByte == quoteChar): self.data.position += 1 return (.join(attrName), .join(attrValue)) elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte) elif (self.data.currentByte in ('>', '<')): return (.join(attrName), ) elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte) while True: self.data.position += 1 if (self.data.currentByte in (list(spaceCharacters) + ['>', '<'])): return (.join(attrName), .join(attrValue)) elif (self.data.currentByte in asciiUppercase): attrValue.extend(self.data.currentByte.lower()) else: attrValue.extend(self.data.currentByte)<|docstring|>Return a name,value pair for the next attribute in the stream, if one is found, or None<|endoftext|>
814f8186863371457bddb12f3bfa37b6eb7f73a9014a35436477f4d0cc3b3abb
@classmethod def args_to_add(cls, index=None) -> [Argument]: ' list arguments to add to argparse when this class (or a child class) is chosen ' return (super().args_to_add(index) + [Argument('count_only_trainable', default='True', type=str, help='ignore buffers etc', is_bool=True)])
list arguments to add to argparse when this class (or a child class) is chosen
uninas/optimization/estimators/net.py
args_to_add
cogsys-tuebingen/uninas
18
python
@classmethod def args_to_add(cls, index=None) -> [Argument]: ' ' return (super().args_to_add(index) + [Argument('count_only_trainable', default='True', type=str, help='ignore buffers etc', is_bool=True)])
@classmethod def args_to_add(cls, index=None) -> [Argument]: ' ' return (super().args_to_add(index) + [Argument('count_only_trainable', default='True', type=str, help='ignore buffers etc', is_bool=True)])<|docstring|>list arguments to add to argparse when this class (or a child class) is chosen<|endoftext|>
79cda0af96368c3727fc13124a410ae8d0748ea2a0a1e33086361f165669c9da
@classmethod def args_to_add(cls, index=None) -> [Argument]: ' list arguments to add to argparse when this class (or a child class) is chosen ' return (super().args_to_add(index) + [Argument('load', default='False', type=str, help='load the cached weights or continue', is_bool=True), Argument('batches_forward', default=0, type=int, help='num batches to forward the network, to adapt bn'), Argument('batches_train', default=0, type=int, help='num batches to train the network, -1 for an epoch'), Argument('batches_eval', default=(- 1), type=int, help='num batches to train the network, -1 for an epoch'), Argument('value', default='val/accuracy/1', type=str, help='which top k value to optimize')])
list arguments to add to argparse when this class (or a child class) is chosen
uninas/optimization/estimators/net.py
args_to_add
cogsys-tuebingen/uninas
18
python
@classmethod def args_to_add(cls, index=None) -> [Argument]: ' ' return (super().args_to_add(index) + [Argument('load', default='False', type=str, help='load the cached weights or continue', is_bool=True), Argument('batches_forward', default=0, type=int, help='num batches to forward the network, to adapt bn'), Argument('batches_train', default=0, type=int, help='num batches to train the network, -1 for an epoch'), Argument('batches_eval', default=(- 1), type=int, help='num batches to train the network, -1 for an epoch'), Argument('value', default='val/accuracy/1', type=str, help='which top k value to optimize')])
@classmethod def args_to_add(cls, index=None) -> [Argument]: ' ' return (super().args_to_add(index) + [Argument('load', default='False', type=str, help='load the cached weights or continue', is_bool=True), Argument('batches_forward', default=0, type=int, help='num batches to forward the network, to adapt bn'), Argument('batches_train', default=0, type=int, help='num batches to train the network, -1 for an epoch'), Argument('batches_eval', default=(- 1), type=int, help='num batches to train the network, -1 for an epoch'), Argument('value', default='val/accuracy/1', type=str, help='which top k value to optimize')])<|docstring|>list arguments to add to argparse when this class (or a child class) is chosen<|endoftext|>
2157a7d6a05bf1beee1be9dc4565f11dfcff3f7b3768b2d7ac6632f81c9c2db0
def __generate_coco_json(self, cate2img_count, need_img, drop_or_others, test_size, random_state): '\n\n :param cate2img_count:\n :param need_img:\n :param drop_or_others:\n :param test_size:\n :param random_state:\n :return:\n ' if drop_or_others: model_categories = [x for x in cate2img_count.keys() if (cate2img_count[x] >= need_img)] data_convert = Voc2Coco(self.sample_root, self.xml_img_in_same_folder) data_convert.get_train_test_json(test_size=test_size, random_state=random_state, categories=model_categories) self.model_classes_num = len(model_categories) else: data_convert = Voc2Coco(self.sample_root, self.xml_img_in_same_folder) data_convert.get_train_test_json(test_size=test_size, random_state=random_state) new2olds = dict() for cate in cate2img_count.keys(): if (cate2img_count[cate] < need_img): if (main_info.others_code not in new2olds.keys()): new2olds[main_info.others_code] = [cate] else: new2olds[main_info.others_code].append(cate) else: new2olds[cate] = cate self.model_classes_num = len(new2olds.keys()) redefine_code = RenameCategory(new2olds, os.path.join(self.sample_root, 'train.json')) redefine_code.convert(os.path.join(self.sample_root, 'train_others.json')) redefine_code = RenameCategory(new2olds, os.path.join(self.sample_root, 'test.json')) redefine_code.convert(os.path.join(self.sample_root, 'test_others.json'))
:param cate2img_count: :param need_img: :param drop_or_others: :param test_size: :param random_state: :return:
tools_2/work_flow/train_flow_kFold.py
__generate_coco_json
hukefei/chongqing_contest
1
python
def __generate_coco_json(self, cate2img_count, need_img, drop_or_others, test_size, random_state): '\n\n :param cate2img_count:\n :param need_img:\n :param drop_or_others:\n :param test_size:\n :param random_state:\n :return:\n ' if drop_or_others: model_categories = [x for x in cate2img_count.keys() if (cate2img_count[x] >= need_img)] data_convert = Voc2Coco(self.sample_root, self.xml_img_in_same_folder) data_convert.get_train_test_json(test_size=test_size, random_state=random_state, categories=model_categories) self.model_classes_num = len(model_categories) else: data_convert = Voc2Coco(self.sample_root, self.xml_img_in_same_folder) data_convert.get_train_test_json(test_size=test_size, random_state=random_state) new2olds = dict() for cate in cate2img_count.keys(): if (cate2img_count[cate] < need_img): if (main_info.others_code not in new2olds.keys()): new2olds[main_info.others_code] = [cate] else: new2olds[main_info.others_code].append(cate) else: new2olds[cate] = cate self.model_classes_num = len(new2olds.keys()) redefine_code = RenameCategory(new2olds, os.path.join(self.sample_root, 'train.json')) redefine_code.convert(os.path.join(self.sample_root, 'train_others.json')) redefine_code = RenameCategory(new2olds, os.path.join(self.sample_root, 'test.json')) redefine_code.convert(os.path.join(self.sample_root, 'test_others.json'))
def __generate_coco_json(self, cate2img_count, need_img, drop_or_others, test_size, random_state): '\n\n :param cate2img_count:\n :param need_img:\n :param drop_or_others:\n :param test_size:\n :param random_state:\n :return:\n ' if drop_or_others: model_categories = [x for x in cate2img_count.keys() if (cate2img_count[x] >= need_img)] data_convert = Voc2Coco(self.sample_root, self.xml_img_in_same_folder) data_convert.get_train_test_json(test_size=test_size, random_state=random_state, categories=model_categories) self.model_classes_num = len(model_categories) else: data_convert = Voc2Coco(self.sample_root, self.xml_img_in_same_folder) data_convert.get_train_test_json(test_size=test_size, random_state=random_state) new2olds = dict() for cate in cate2img_count.keys(): if (cate2img_count[cate] < need_img): if (main_info.others_code not in new2olds.keys()): new2olds[main_info.others_code] = [cate] else: new2olds[main_info.others_code].append(cate) else: new2olds[cate] = cate self.model_classes_num = len(new2olds.keys()) redefine_code = RenameCategory(new2olds, os.path.join(self.sample_root, 'train.json')) redefine_code.convert(os.path.join(self.sample_root, 'train_others.json')) redefine_code = RenameCategory(new2olds, os.path.join(self.sample_root, 'test.json')) redefine_code.convert(os.path.join(self.sample_root, 'test_others.json'))<|docstring|>:param cate2img_count: :param need_img: :param drop_or_others: :param test_size: :param random_state: :return:<|endoftext|>
286abee341b6e4cb1184228036b00bc75c4681236ae63cbbd9e0a1c5e2c994af
def climbStairs(self, n): '\n :type n: int\n :rtype: int\n ' if (n == 1): return 1 dp = [0 for x in range(n)] dp[0] = 1 dp[1] = 2 for i in range(2, n): dp[i] = (dp[(i - 1)] + dp[(i - 2)]) return dp[(n - 1)]
:type n: int :rtype: int
Leetcode/Python Solutions/Dynamic Programming/ClimbingStairs.py
climbStairs
Mostofa-Najmus-Sakib/Applied-Algorithm
1
python
def climbStairs(self, n): '\n :type n: int\n :rtype: int\n ' if (n == 1): return 1 dp = [0 for x in range(n)] dp[0] = 1 dp[1] = 2 for i in range(2, n): dp[i] = (dp[(i - 1)] + dp[(i - 2)]) return dp[(n - 1)]
def climbStairs(self, n): '\n :type n: int\n :rtype: int\n ' if (n == 1): return 1 dp = [0 for x in range(n)] dp[0] = 1 dp[1] = 2 for i in range(2, n): dp[i] = (dp[(i - 1)] + dp[(i - 2)]) return dp[(n - 1)]<|docstring|>:type n: int :rtype: int<|endoftext|>
7476bb8d5e6223496de83041ad46febbf2bbcbb9dd2b25f9e0dd919177d0f811
def tokenize(self, line=None): '\n Tokenizes based on regex\n @param line:\n @return:\n ' if ((line is None) or (line.strip() == '')): raise ValueError('Line not provided') line = line.strip() array = re.split(self.regex, str(line)) return array
Tokenizes based on regex @param line: @return:
network_insight_sdk_generic_datasources/parsers/common/line_parser.py
tokenize
lukaswinn/network-insight-sdk-generic-datasources
11
python
def tokenize(self, line=None): '\n Tokenizes based on regex\n @param line:\n @return:\n ' if ((line is None) or (line.strip() == )): raise ValueError('Line not provided') line = line.strip() array = re.split(self.regex, str(line)) return array
def tokenize(self, line=None): '\n Tokenizes based on regex\n @param line:\n @return:\n ' if ((line is None) or (line.strip() == )): raise ValueError('Line not provided') line = line.strip() array = re.split(self.regex, str(line)) return array<|docstring|>Tokenizes based on regex @param line: @return:<|endoftext|>
5a8512a3d41f4e3c4e6eda7bc319371baa015fc266b970ee5e418aef894b48de
@staticmethod def analyze_race_data(sire_name: str, horse_id: str, birth_year: int) -> Series: '\n 対象の種牡馬の対象の産駒の成長曲線(年毎の賞金ベース)を描くためのデータを集計\n\n Parameters\n ----------\n sire_name : str\n 種牡馬名\n horse_id : str\n 対象の競走馬を識別するためのhorse_id\n birth_year : int\n 対象の競走馬の生年\n ' horse_detail_csv = FilePathUtil.get_horse_detail_csv_path(sire_name, horse_id) if (not os.path.isfile(horse_detail_csv)): raise OSError('file not exist {}'.format(horse_detail_csv)) race_data = pd.read_csv(horse_detail_csv, thousands=',') race_data['race_day'] = pd.to_datetime(race_data['日付']) race_data.fillna(value={'賞金': '0'}, inplace=True) race_data['賞金'] = race_data['賞金'].astype(float) total_prize_money = race_data['賞金'].sum() if (total_prize_money == 0): raise ValueError('total_prize_money is zero') prize_money_year = race_data.groupby(race_data['race_day'].dt.strftime('%Y'))['賞金'].sum() prize_money_year.index = prize_money_year.index.astype(int) target_age = 2 prize_money_percentage = {} for year in range((birth_year + RaceAnalyzer.START_AGE), (birth_year + RaceAnalyzer.END_AGE)): try: prize_money_percentage[target_age] = ((prize_money_year[year] * 100) / total_prize_money) except KeyError: prize_money_percentage[target_age] = float(0) finally: target_age += 1 return pd.Series(data=prize_money_percentage, name=horse_id)
対象の種牡馬の対象の産駒の成長曲線(年毎の賞金ベース)を描くためのデータを集計 Parameters ---------- sire_name : str 種牡馬名 horse_id : str 対象の競走馬を識別するためのhorse_id birth_year : int 対象の競走馬の生年
getmodule/race_analyzer.py
analyze_race_data
small-java-world/growth_curve_of_horse
0
python
@staticmethod def analyze_race_data(sire_name: str, horse_id: str, birth_year: int) -> Series: '\n 対象の種牡馬の対象の産駒の成長曲線(年毎の賞金ベース)を描くためのデータを集計\n\n Parameters\n ----------\n sire_name : str\n 種牡馬名\n horse_id : str\n 対象の競走馬を識別するためのhorse_id\n birth_year : int\n 対象の競走馬の生年\n ' horse_detail_csv = FilePathUtil.get_horse_detail_csv_path(sire_name, horse_id) if (not os.path.isfile(horse_detail_csv)): raise OSError('file not exist {}'.format(horse_detail_csv)) race_data = pd.read_csv(horse_detail_csv, thousands=',') race_data['race_day'] = pd.to_datetime(race_data['日付']) race_data.fillna(value={'賞金': '0'}, inplace=True) race_data['賞金'] = race_data['賞金'].astype(float) total_prize_money = race_data['賞金'].sum() if (total_prize_money == 0): raise ValueError('total_prize_money is zero') prize_money_year = race_data.groupby(race_data['race_day'].dt.strftime('%Y'))['賞金'].sum() prize_money_year.index = prize_money_year.index.astype(int) target_age = 2 prize_money_percentage = {} for year in range((birth_year + RaceAnalyzer.START_AGE), (birth_year + RaceAnalyzer.END_AGE)): try: prize_money_percentage[target_age] = ((prize_money_year[year] * 100) / total_prize_money) except KeyError: prize_money_percentage[target_age] = float(0) finally: target_age += 1 return pd.Series(data=prize_money_percentage, name=horse_id)
@staticmethod def analyze_race_data(sire_name: str, horse_id: str, birth_year: int) -> Series: '\n 対象の種牡馬の対象の産駒の成長曲線(年毎の賞金ベース)を描くためのデータを集計\n\n Parameters\n ----------\n sire_name : str\n 種牡馬名\n horse_id : str\n 対象の競走馬を識別するためのhorse_id\n birth_year : int\n 対象の競走馬の生年\n ' horse_detail_csv = FilePathUtil.get_horse_detail_csv_path(sire_name, horse_id) if (not os.path.isfile(horse_detail_csv)): raise OSError('file not exist {}'.format(horse_detail_csv)) race_data = pd.read_csv(horse_detail_csv, thousands=',') race_data['race_day'] = pd.to_datetime(race_data['日付']) race_data.fillna(value={'賞金': '0'}, inplace=True) race_data['賞金'] = race_data['賞金'].astype(float) total_prize_money = race_data['賞金'].sum() if (total_prize_money == 0): raise ValueError('total_prize_money is zero') prize_money_year = race_data.groupby(race_data['race_day'].dt.strftime('%Y'))['賞金'].sum() prize_money_year.index = prize_money_year.index.astype(int) target_age = 2 prize_money_percentage = {} for year in range((birth_year + RaceAnalyzer.START_AGE), (birth_year + RaceAnalyzer.END_AGE)): try: prize_money_percentage[target_age] = ((prize_money_year[year] * 100) / total_prize_money) except KeyError: prize_money_percentage[target_age] = float(0) finally: target_age += 1 return pd.Series(data=prize_money_percentage, name=horse_id)<|docstring|>対象の種牡馬の対象の産駒の成長曲線(年毎の賞金ベース)を描くためのデータを集計 Parameters ---------- sire_name : str 種牡馬名 horse_id : str 対象の競走馬を識別するためのhorse_id birth_year : int 対象の競走馬の生年<|endoftext|>
d6bc81f4c21aa9c23ff77584169910b02bfd825d77b853bf337fb30e38799c38
@staticmethod def analyze_race_montly_data(sire_name: str, horse_id: str, birth_year: str, accumulate_flag: bool) -> Series: '\n 対象の種牡馬の対象の産駒の成長曲線(年月毎の賞金ベース)を描くためのデータを集計\n\n Parameters\n ----------\n sire_name : str\n 種牡馬名\n horse_id : str\n 対象の競走馬を識別するためのhorse_id\n birth_year : int\n 対象の競走馬の生年\n accumulate_flag : bool\n 累積値で集計する場合にTrueを指定\n ' horse_detail_csv = FilePathUtil.get_horse_detail_csv_path(sire_name, horse_id) if (not os.path.isfile(horse_detail_csv)): raise OSError('file not exist {}'.format(horse_detail_csv)) birth_year_int: int = int(birth_year) race_data = pd.read_csv(horse_detail_csv, thousands=',') race_data.head() race_data['race_day'] = pd.to_datetime(race_data['日付']) race_data.fillna(value={'賞金': '0'}, inplace=True) race_data['賞金'] = race_data['賞金'].astype(float) total_prize_money = race_data['賞金'].sum() if (total_prize_money == 0): raise ValueError('total_prize_money is zero') prize_money_year_month = race_data.groupby(race_data['race_day'].dt.strftime('%Y/%m'))['賞金'].sum() sum_value: float = 0 target_age = 2 prize_money_percentage = {} for year in range((birth_year_int + RaceAnalyzer.START_AGE), (birth_year_int + RaceAnalyzer.END_AGE)): for month in range(1, 13): target_age_month_key_1 = '{}_{:02}'.format(target_age, month) target_age_month_key_2 = '{}/{:02}'.format(year, month) try: current_value = ((prize_money_year_month[target_age_month_key_2] * 100) / total_prize_money) prize_money_percentage[target_age_month_key_1] = (sum_value + current_value) if accumulate_flag: sum_value = (sum_value + current_value) except KeyError: prize_money_percentage[target_age_month_key_1] = sum_value target_age += 1 result = pd.Series(data=prize_money_percentage, name=horse_id) return result
対象の種牡馬の対象の産駒の成長曲線(年月毎の賞金ベース)を描くためのデータを集計 Parameters ---------- sire_name : str 種牡馬名 horse_id : str 対象の競走馬を識別するためのhorse_id birth_year : int 対象の競走馬の生年 accumulate_flag : bool 累積値で集計する場合にTrueを指定
getmodule/race_analyzer.py
analyze_race_montly_data
small-java-world/growth_curve_of_horse
0
python
@staticmethod def analyze_race_montly_data(sire_name: str, horse_id: str, birth_year: str, accumulate_flag: bool) -> Series: '\n 対象の種牡馬の対象の産駒の成長曲線(年月毎の賞金ベース)を描くためのデータを集計\n\n Parameters\n ----------\n sire_name : str\n 種牡馬名\n horse_id : str\n 対象の競走馬を識別するためのhorse_id\n birth_year : int\n 対象の競走馬の生年\n accumulate_flag : bool\n 累積値で集計する場合にTrueを指定\n ' horse_detail_csv = FilePathUtil.get_horse_detail_csv_path(sire_name, horse_id) if (not os.path.isfile(horse_detail_csv)): raise OSError('file not exist {}'.format(horse_detail_csv)) birth_year_int: int = int(birth_year) race_data = pd.read_csv(horse_detail_csv, thousands=',') race_data.head() race_data['race_day'] = pd.to_datetime(race_data['日付']) race_data.fillna(value={'賞金': '0'}, inplace=True) race_data['賞金'] = race_data['賞金'].astype(float) total_prize_money = race_data['賞金'].sum() if (total_prize_money == 0): raise ValueError('total_prize_money is zero') prize_money_year_month = race_data.groupby(race_data['race_day'].dt.strftime('%Y/%m'))['賞金'].sum() sum_value: float = 0 target_age = 2 prize_money_percentage = {} for year in range((birth_year_int + RaceAnalyzer.START_AGE), (birth_year_int + RaceAnalyzer.END_AGE)): for month in range(1, 13): target_age_month_key_1 = '{}_{:02}'.format(target_age, month) target_age_month_key_2 = '{}/{:02}'.format(year, month) try: current_value = ((prize_money_year_month[target_age_month_key_2] * 100) / total_prize_money) prize_money_percentage[target_age_month_key_1] = (sum_value + current_value) if accumulate_flag: sum_value = (sum_value + current_value) except KeyError: prize_money_percentage[target_age_month_key_1] = sum_value target_age += 1 result = pd.Series(data=prize_money_percentage, name=horse_id) return result
@staticmethod def analyze_race_montly_data(sire_name: str, horse_id: str, birth_year: str, accumulate_flag: bool) -> Series: '\n 対象の種牡馬の対象の産駒の成長曲線(年月毎の賞金ベース)を描くためのデータを集計\n\n Parameters\n ----------\n sire_name : str\n 種牡馬名\n horse_id : str\n 対象の競走馬を識別するためのhorse_id\n birth_year : int\n 対象の競走馬の生年\n accumulate_flag : bool\n 累積値で集計する場合にTrueを指定\n ' horse_detail_csv = FilePathUtil.get_horse_detail_csv_path(sire_name, horse_id) if (not os.path.isfile(horse_detail_csv)): raise OSError('file not exist {}'.format(horse_detail_csv)) birth_year_int: int = int(birth_year) race_data = pd.read_csv(horse_detail_csv, thousands=',') race_data.head() race_data['race_day'] = pd.to_datetime(race_data['日付']) race_data.fillna(value={'賞金': '0'}, inplace=True) race_data['賞金'] = race_data['賞金'].astype(float) total_prize_money = race_data['賞金'].sum() if (total_prize_money == 0): raise ValueError('total_prize_money is zero') prize_money_year_month = race_data.groupby(race_data['race_day'].dt.strftime('%Y/%m'))['賞金'].sum() sum_value: float = 0 target_age = 2 prize_money_percentage = {} for year in range((birth_year_int + RaceAnalyzer.START_AGE), (birth_year_int + RaceAnalyzer.END_AGE)): for month in range(1, 13): target_age_month_key_1 = '{}_{:02}'.format(target_age, month) target_age_month_key_2 = '{}/{:02}'.format(year, month) try: current_value = ((prize_money_year_month[target_age_month_key_2] * 100) / total_prize_money) prize_money_percentage[target_age_month_key_1] = (sum_value + current_value) if accumulate_flag: sum_value = (sum_value + current_value) except KeyError: prize_money_percentage[target_age_month_key_1] = sum_value target_age += 1 result = pd.Series(data=prize_money_percentage, name=horse_id) return result<|docstring|>対象の種牡馬の対象の産駒の成長曲線(年月毎の賞金ベース)を描くためのデータを集計 Parameters ---------- sire_name : str 種牡馬名 horse_id : str 対象の競走馬を識別するためのhorse_id birth_year : int 対象の競走馬の生年 accumulate_flag : bool 累積値で集計する場合にTrueを指定<|endoftext|>
441b12fc5989afa4ca05d08c5b3d0d1a0749362fc92ff419685cf527386e57fc
def arg_pairs(args): 'Return overlapping pairs of the input args ([1,2,3] yields [1,2],[2,3]).' (a, b) = itertools.tee((list(args) + [None])) next(b, None) return zip(a, b)
Return overlapping pairs of the input args ([1,2,3] yields [1,2],[2,3]).
bindit/__init__.py
arg_pairs
jooh/bindit
1
python
def arg_pairs(args): (a, b) = itertools.tee((list(args) + [None])) next(b, None) return zip(a, b)
def arg_pairs(args): (a, b) = itertools.tee((list(args) + [None])) next(b, None) return zip(a, b)<|docstring|>Return overlapping pairs of the input args ([1,2,3] yields [1,2],[2,3]).<|endoftext|>
031acdd05dc8d03d2612b12c0e94d5720f05616a1285ebf7517d1f2beee9479c
def remove_redundant_binds(binds): 'Remove entries in the dict binds that are sub-directories of another key.\n Operates in-place.\n ' sources = set(binds.keys()) for candidate in sources: remaining = (sources ^ set([candidate])) if any([(test in candidate.parents) for test in remaining]): del binds[candidate] return
Remove entries in the dict binds that are sub-directories of another key. Operates in-place.
bindit/__init__.py
remove_redundant_binds
jooh/bindit
1
python
def remove_redundant_binds(binds): 'Remove entries in the dict binds that are sub-directories of another key.\n Operates in-place.\n ' sources = set(binds.keys()) for candidate in sources: remaining = (sources ^ set([candidate])) if any([(test in candidate.parents) for test in remaining]): del binds[candidate] return
def remove_redundant_binds(binds): 'Remove entries in the dict binds that are sub-directories of another key.\n Operates in-place.\n ' sources = set(binds.keys()) for candidate in sources: remaining = (sources ^ set([candidate])) if any([(test in candidate.parents) for test in remaining]): del binds[candidate] return<|docstring|>Remove entries in the dict binds that are sub-directories of another key. Operates in-place.<|endoftext|>
7c04061ccd119da07a5bf1a16e77951d16a00934506ff0ccdb6eb09bc94066d8
def bind_dict_to_arg(mapper, new_binds): 'Return a generator that converts new_binds to valid container-runner bind\n arguments.\n\n Args:\n mapper (callable): Function that returns a key, val argument pair when called\n with mapper(source, dest). For example, see docker.volume_bind_args\n new_binds (dict): binds specified in source:destination format\n\n Returns:\n generator: returns a [key, val] argument pair for each key in new_binds\n\n ' return itertools.chain.from_iterable((mapper(source, dest) for (source, dest) in new_binds.items()))
Return a generator that converts new_binds to valid container-runner bind arguments. Args: mapper (callable): Function that returns a key, val argument pair when called with mapper(source, dest). For example, see docker.volume_bind_args new_binds (dict): binds specified in source:destination format Returns: generator: returns a [key, val] argument pair for each key in new_binds
bindit/__init__.py
bind_dict_to_arg
jooh/bindit
1
python
def bind_dict_to_arg(mapper, new_binds): 'Return a generator that converts new_binds to valid container-runner bind\n arguments.\n\n Args:\n mapper (callable): Function that returns a key, val argument pair when called\n with mapper(source, dest). For example, see docker.volume_bind_args\n new_binds (dict): binds specified in source:destination format\n\n Returns:\n generator: returns a [key, val] argument pair for each key in new_binds\n\n ' return itertools.chain.from_iterable((mapper(source, dest) for (source, dest) in new_binds.items()))
def bind_dict_to_arg(mapper, new_binds): 'Return a generator that converts new_binds to valid container-runner bind\n arguments.\n\n Args:\n mapper (callable): Function that returns a key, val argument pair when called\n with mapper(source, dest). For example, see docker.volume_bind_args\n new_binds (dict): binds specified in source:destination format\n\n Returns:\n generator: returns a [key, val] argument pair for each key in new_binds\n\n ' return itertools.chain.from_iterable((mapper(source, dest) for (source, dest) in new_binds.items()))<|docstring|>Return a generator that converts new_binds to valid container-runner bind arguments. Args: mapper (callable): Function that returns a key, val argument pair when called with mapper(source, dest). For example, see docker.volume_bind_args new_binds (dict): binds specified in source:destination format Returns: generator: returns a [key, val] argument pair for each key in new_binds<|endoftext|>
a30111df6c11f54b8c08c77746a22bd03ce90ac662e90914fb853bff4dab128c
def parse_container_args(args_iter, bind_parser=None, valid_args=None, valid_letters=None): 'Parse arguments to container runner (e.g., docker run). Typically used as the\n first pass of a CLI application (e.g., bindit.docker.docker).\n\n Args:\n args_iter (iterator): arg_pairs iterator of arguments\n bind_parser (dict): keys as bind mount flags and values as handles to functions\n that parse such flags into a {source: dest} dict. See e.g.\n bindit.docker.BIND_PARSER\n valid_args (dict) : keys as valid container runner arguments and values as the\n expected type of the argument (or non for boolean flags). See e.g.\n bindit.docker.ARGS\n valid_letters (set): single-letter boolean flags. Used to detect arbitrary\n combinations of letters (e.g., docker run -it)\n\n Returns:\n tuple: (list: detected args to the container runner (DOES NOT include any new\n binds at this stage), dict: defines user-provided bind mounts\n (manual_binds[source] = dest), str: detected container image)\n\n ' container_args = [] manual_binds = {} gotkv = False for (key, val) in args_iter: if (key in bind_parser): user_bind = bind_parser[key](val) manual_binds.update(user_bind) LOGGER.debug(f'added user-defined bind to manual_binds: {user_bind}') if (key in valid_args): if valid_args[key]: gotkv = True container_args += [key, val] LOGGER.debug(f'new key-val arg: {key}={val}') else: gotkv = False container_args += [key] LOGGER.debug(f'new flag: {key}') elif gotkv: gotkv = False continue elif (valid_letters and (not (set(key) - valid_letters))): gotkv = False container_args += [key] LOGGER.debug(f'multi-letter flag: {key}') else: container_name = key LOGGER.debug(f'identified container as: {key}') break return (container_args, manual_binds, container_name)
Parse arguments to container runner (e.g., docker run). Typically used as the first pass of a CLI application (e.g., bindit.docker.docker). Args: args_iter (iterator): arg_pairs iterator of arguments bind_parser (dict): keys as bind mount flags and values as handles to functions that parse such flags into a {source: dest} dict. See e.g. bindit.docker.BIND_PARSER valid_args (dict) : keys as valid container runner arguments and values as the expected type of the argument (or non for boolean flags). See e.g. bindit.docker.ARGS valid_letters (set): single-letter boolean flags. Used to detect arbitrary combinations of letters (e.g., docker run -it) Returns: tuple: (list: detected args to the container runner (DOES NOT include any new binds at this stage), dict: defines user-provided bind mounts (manual_binds[source] = dest), str: detected container image)
bindit/__init__.py
parse_container_args
jooh/bindit
1
python
def parse_container_args(args_iter, bind_parser=None, valid_args=None, valid_letters=None): 'Parse arguments to container runner (e.g., docker run). Typically used as the\n first pass of a CLI application (e.g., bindit.docker.docker).\n\n Args:\n args_iter (iterator): arg_pairs iterator of arguments\n bind_parser (dict): keys as bind mount flags and values as handles to functions\n that parse such flags into a {source: dest} dict. See e.g.\n bindit.docker.BIND_PARSER\n valid_args (dict) : keys as valid container runner arguments and values as the\n expected type of the argument (or non for boolean flags). See e.g.\n bindit.docker.ARGS\n valid_letters (set): single-letter boolean flags. Used to detect arbitrary\n combinations of letters (e.g., docker run -it)\n\n Returns:\n tuple: (list: detected args to the container runner (DOES NOT include any new\n binds at this stage), dict: defines user-provided bind mounts\n (manual_binds[source] = dest), str: detected container image)\n\n ' container_args = [] manual_binds = {} gotkv = False for (key, val) in args_iter: if (key in bind_parser): user_bind = bind_parser[key](val) manual_binds.update(user_bind) LOGGER.debug(f'added user-defined bind to manual_binds: {user_bind}') if (key in valid_args): if valid_args[key]: gotkv = True container_args += [key, val] LOGGER.debug(f'new key-val arg: {key}={val}') else: gotkv = False container_args += [key] LOGGER.debug(f'new flag: {key}') elif gotkv: gotkv = False continue elif (valid_letters and (not (set(key) - valid_letters))): gotkv = False container_args += [key] LOGGER.debug(f'multi-letter flag: {key}') else: container_name = key LOGGER.debug(f'identified container as: {key}') break return (container_args, manual_binds, container_name)
def parse_container_args(args_iter, bind_parser=None, valid_args=None, valid_letters=None): 'Parse arguments to container runner (e.g., docker run). Typically used as the\n first pass of a CLI application (e.g., bindit.docker.docker).\n\n Args:\n args_iter (iterator): arg_pairs iterator of arguments\n bind_parser (dict): keys as bind mount flags and values as handles to functions\n that parse such flags into a {source: dest} dict. See e.g.\n bindit.docker.BIND_PARSER\n valid_args (dict) : keys as valid container runner arguments and values as the\n expected type of the argument (or non for boolean flags). See e.g.\n bindit.docker.ARGS\n valid_letters (set): single-letter boolean flags. Used to detect arbitrary\n combinations of letters (e.g., docker run -it)\n\n Returns:\n tuple: (list: detected args to the container runner (DOES NOT include any new\n binds at this stage), dict: defines user-provided bind mounts\n (manual_binds[source] = dest), str: detected container image)\n\n ' container_args = [] manual_binds = {} gotkv = False for (key, val) in args_iter: if (key in bind_parser): user_bind = bind_parser[key](val) manual_binds.update(user_bind) LOGGER.debug(f'added user-defined bind to manual_binds: {user_bind}') if (key in valid_args): if valid_args[key]: gotkv = True container_args += [key, val] LOGGER.debug(f'new key-val arg: {key}={val}') else: gotkv = False container_args += [key] LOGGER.debug(f'new flag: {key}') elif gotkv: gotkv = False continue elif (valid_letters and (not (set(key) - valid_letters))): gotkv = False container_args += [key] LOGGER.debug(f'multi-letter flag: {key}') else: container_name = key LOGGER.debug(f'identified container as: {key}') break return (container_args, manual_binds, container_name)<|docstring|>Parse arguments to container runner (e.g., docker run). Typically used as the first pass of a CLI application (e.g., bindit.docker.docker). 
Args: args_iter (iterator): arg_pairs iterator of arguments bind_parser (dict): keys as bind mount flags and values as handles to functions that parse such flags into a {source: dest} dict. See e.g. bindit.docker.BIND_PARSER valid_args (dict) : keys as valid container runner arguments and values as the expected type of the argument (or non for boolean flags). See e.g. bindit.docker.ARGS valid_letters (set): single-letter boolean flags. Used to detect arbitrary combinations of letters (e.g., docker run -it) Returns: tuple: (list: detected args to the container runner (DOES NOT include any new binds at this stage), dict: defines user-provided bind mounts (manual_binds[source] = dest), str: detected container image)<|endoftext|>
7b55a070b71988d93d0e66e6f28bda153f0eadb709b3aad0cc3c4c578bf49344
def arg_to_file_paths(arg): 'Generator that returns valid file paths in the input arg, splitting according to\n shell characters (with shlex.split) and on ARG_SPLIT_PATTERN. Paths are valid if\n they exist, are absolute (if ABS_ONLY), and do not have any IGNORE_PATH as\n parents.\n\n ' for candidate in shlex.split(arg): for this_split in re.split(ARG_SPLIT_PATTERN, candidate): if (not this_split): continue this_path = pathlib.Path(this_split) abs_ok = (this_path.is_absolute() or (not ABS_ONLY)) resolved_path = this_path.resolve() ignore_ok = all([((not (this_ignore == resolved_path)) and (this_ignore not in resolved_path.parents)) for this_ignore in IGNORE_PATH]) exist_ok = (this_path.is_absolute() or resolved_path.exists()) if exist_ok: LOGGER.debug(f'detected path {this_path}') LOGGER.debug(f'absolute path pass={abs_ok}') LOGGER.debug(f'ignore path pass={ignore_ok}') if (exist_ok and abs_ok and ignore_ok): (yield this_path)
Generator that returns valid file paths in the input arg, splitting according to shell characters (with shlex.split) and on ARG_SPLIT_PATTERN. Paths are valid if they exist, are absolute (if ABS_ONLY), and do not have any IGNORE_PATH as parents.
bindit/__init__.py
arg_to_file_paths
jooh/bindit
1
python
def arg_to_file_paths(arg): 'Generator that returns valid file paths in the input arg, splitting according to\n shell characters (with shlex.split) and on ARG_SPLIT_PATTERN. Paths are valid if\n they exist, are absolute (if ABS_ONLY), and do not have any IGNORE_PATH as\n parents.\n\n ' for candidate in shlex.split(arg): for this_split in re.split(ARG_SPLIT_PATTERN, candidate): if (not this_split): continue this_path = pathlib.Path(this_split) abs_ok = (this_path.is_absolute() or (not ABS_ONLY)) resolved_path = this_path.resolve() ignore_ok = all([((not (this_ignore == resolved_path)) and (this_ignore not in resolved_path.parents)) for this_ignore in IGNORE_PATH]) exist_ok = (this_path.is_absolute() or resolved_path.exists()) if exist_ok: LOGGER.debug(f'detected path {this_path}') LOGGER.debug(f'absolute path pass={abs_ok}') LOGGER.debug(f'ignore path pass={ignore_ok}') if (exist_ok and abs_ok and ignore_ok): (yield this_path)
def arg_to_file_paths(arg): 'Generator that returns valid file paths in the input arg, splitting according to\n shell characters (with shlex.split) and on ARG_SPLIT_PATTERN. Paths are valid if\n they exist, are absolute (if ABS_ONLY), and do not have any IGNORE_PATH as\n parents.\n\n ' for candidate in shlex.split(arg): for this_split in re.split(ARG_SPLIT_PATTERN, candidate): if (not this_split): continue this_path = pathlib.Path(this_split) abs_ok = (this_path.is_absolute() or (not ABS_ONLY)) resolved_path = this_path.resolve() ignore_ok = all([((not (this_ignore == resolved_path)) and (this_ignore not in resolved_path.parents)) for this_ignore in IGNORE_PATH]) exist_ok = (this_path.is_absolute() or resolved_path.exists()) if exist_ok: LOGGER.debug(f'detected path {this_path}') LOGGER.debug(f'absolute path pass={abs_ok}') LOGGER.debug(f'ignore path pass={ignore_ok}') if (exist_ok and abs_ok and ignore_ok): (yield this_path)<|docstring|>Generator that returns valid file paths in the input arg, splitting according to shell characters (with shlex.split) and on ARG_SPLIT_PATTERN. Paths are valid if they exist, are absolute (if ABS_ONLY), and do not have any IGNORE_PATH as parents.<|endoftext|>
b15772f3f99ea5d9b8fee57eebe0578c91112532c3d0fda3cb18dc7f73d243aa
def parse_image_args(args_iter, manual_binds): "Parse arguments to the container image, rebasing binds as necessary to make paths\n available inside the container. Typically used as the second pass of a CLI\n application (following parse_container_args, see e.g., bindit.docker.docker).\n\n Args:\n args_iter (iterator): arg_pairs iterator of arguments (generally the same you\n would use in parse_container_args to make sure you're in the right place)\n manual_binds (dict): defines user-provided bind mounts\n (manual_binds[source] = dest)\n\n Returns:\n tuple: (list: args to the image (DOES include rebasing of any args that are\n deemed file paths according to new_binds), dict: defines new bind mounts\n (new_binds[source] = dest))\n\n " image_args = [] new_binds = {} for (in_arg, _) in args_iter: if (in_arg is None): continue for this_path in arg_to_file_paths(in_arg): full_path = this_path.resolve() this_dir = full_path.parent if full_path.is_dir(): this_dir = full_path try: manual_parent = next((this_manual_bind for this_manual_bind in manual_binds.keys() if ((this_manual_bind == this_dir) or (this_manual_bind in this_dir.parents)))) new_base = (manual_binds[manual_parent] / this_dir.relative_to(manual_parent)) LOGGER.debug(f'rebasing on manual bind: {new_base}') except StopIteration: LOGGER.debug(f'none of these manual binds match: {manual_binds.keys()}') if (this_dir not in new_binds): new_binds[this_dir] = (pathlib.PosixPath('/bindit') / this_dir.relative_to(this_dir.anchor)) LOGGER.debug(f'creating new bind: {new_binds[this_dir]}') new_base = new_binds[this_dir] except: raise new_path = (new_base / full_path.name) if full_path.is_dir(): new_path = new_base LOGGER.debug(f'rebasing in_arg path: {this_path}:{new_path}') in_arg = in_arg.replace(str(this_path), str(new_path)) image_args.append(in_arg) remove_redundant_binds(new_binds) return (image_args, new_binds)
Parse arguments to the container image, rebasing binds as necessary to make paths available inside the container. Typically used as the second pass of a CLI application (following parse_container_args, see e.g., bindit.docker.docker). Args: args_iter (iterator): arg_pairs iterator of arguments (generally the same you would use in parse_container_args to make sure you're in the right place) manual_binds (dict): defines user-provided bind mounts (manual_binds[source] = dest) Returns: tuple: (list: args to the image (DOES include rebasing of any args that are deemed file paths according to new_binds), dict: defines new bind mounts (new_binds[source] = dest))
bindit/__init__.py
parse_image_args
jooh/bindit
1
python
def parse_image_args(args_iter, manual_binds): "Parse arguments to the container image, rebasing binds as necessary to make paths\n available inside the container. Typically used as the second pass of a CLI\n application (following parse_container_args, see e.g., bindit.docker.docker).\n\n Args:\n args_iter (iterator): arg_pairs iterator of arguments (generally the same you\n would use in parse_container_args to make sure you're in the right place)\n manual_binds (dict): defines user-provided bind mounts\n (manual_binds[source] = dest)\n\n Returns:\n tuple: (list: args to the image (DOES include rebasing of any args that are\n deemed file paths according to new_binds), dict: defines new bind mounts\n (new_binds[source] = dest))\n\n " image_args = [] new_binds = {} for (in_arg, _) in args_iter: if (in_arg is None): continue for this_path in arg_to_file_paths(in_arg): full_path = this_path.resolve() this_dir = full_path.parent if full_path.is_dir(): this_dir = full_path try: manual_parent = next((this_manual_bind for this_manual_bind in manual_binds.keys() if ((this_manual_bind == this_dir) or (this_manual_bind in this_dir.parents)))) new_base = (manual_binds[manual_parent] / this_dir.relative_to(manual_parent)) LOGGER.debug(f'rebasing on manual bind: {new_base}') except StopIteration: LOGGER.debug(f'none of these manual binds match: {manual_binds.keys()}') if (this_dir not in new_binds): new_binds[this_dir] = (pathlib.PosixPath('/bindit') / this_dir.relative_to(this_dir.anchor)) LOGGER.debug(f'creating new bind: {new_binds[this_dir]}') new_base = new_binds[this_dir] except: raise new_path = (new_base / full_path.name) if full_path.is_dir(): new_path = new_base LOGGER.debug(f'rebasing in_arg path: {this_path}:{new_path}') in_arg = in_arg.replace(str(this_path), str(new_path)) image_args.append(in_arg) remove_redundant_binds(new_binds) return (image_args, new_binds)
def parse_image_args(args_iter, manual_binds): "Parse arguments to the container image, rebasing binds as necessary to make paths\n available inside the container. Typically used as the second pass of a CLI\n application (following parse_container_args, see e.g., bindit.docker.docker).\n\n Args:\n args_iter (iterator): arg_pairs iterator of arguments (generally the same you\n would use in parse_container_args to make sure you're in the right place)\n manual_binds (dict): defines user-provided bind mounts\n (manual_binds[source] = dest)\n\n Returns:\n tuple: (list: args to the image (DOES include rebasing of any args that are\n deemed file paths according to new_binds), dict: defines new bind mounts\n (new_binds[source] = dest))\n\n " image_args = [] new_binds = {} for (in_arg, _) in args_iter: if (in_arg is None): continue for this_path in arg_to_file_paths(in_arg): full_path = this_path.resolve() this_dir = full_path.parent if full_path.is_dir(): this_dir = full_path try: manual_parent = next((this_manual_bind for this_manual_bind in manual_binds.keys() if ((this_manual_bind == this_dir) or (this_manual_bind in this_dir.parents)))) new_base = (manual_binds[manual_parent] / this_dir.relative_to(manual_parent)) LOGGER.debug(f'rebasing on manual bind: {new_base}') except StopIteration: LOGGER.debug(f'none of these manual binds match: {manual_binds.keys()}') if (this_dir not in new_binds): new_binds[this_dir] = (pathlib.PosixPath('/bindit') / this_dir.relative_to(this_dir.anchor)) LOGGER.debug(f'creating new bind: {new_binds[this_dir]}') new_base = new_binds[this_dir] except: raise new_path = (new_base / full_path.name) if full_path.is_dir(): new_path = new_base LOGGER.debug(f'rebasing in_arg path: {this_path}:{new_path}') in_arg = in_arg.replace(str(this_path), str(new_path)) image_args.append(in_arg) remove_redundant_binds(new_binds) return (image_args, new_binds)<|docstring|>Parse arguments to the container image, rebasing binds as necessary to make paths available 
inside the container. Typically used as the second pass of a CLI application (following parse_container_args, see e.g., bindit.docker.docker). Args: args_iter (iterator): arg_pairs iterator of arguments (generally the same you would use in parse_container_args to make sure you're in the right place) manual_binds (dict): defines user-provided bind mounts (manual_binds[source] = dest) Returns: tuple: (list: args to the image (DOES include rebasing of any args that are deemed file paths according to new_binds), dict: defines new bind mounts (new_binds[source] = dest))<|endoftext|>
7b0e7ba9fcb158ce9531283c97e2bbaaac08ddb821958fe637e3717513f1862a
def _find_dataframes(a: ast.AST) -> ast_DataFrame: 'Find the asts that represent dataframes. Limit to one or failure for now' class df_scanner(ast.NodeVisitor): def __init__(self): self.found_frames: List[ast_DataFrame] = [] def visit_ast_DataFrame(self, a: ast_DataFrame): self.found_frames.append(a) scanner = df_scanner() scanner.visit(a) assert (len(scanner.found_frames) > 0), 'All expressions must start with a dataframe' assert all(((f == scanner.found_frames[0]) for f in scanner.found_frames)), 'Only a single dataframe is supported in any expression' return scanner.found_frames[0]
Find the asts that represent dataframes. Limit to one or failure for now
hep_tables/utils.py
_find_dataframes
gordonwatts/hep_tables
2
python
def _find_dataframes(a: ast.AST) -> ast_DataFrame: class df_scanner(ast.NodeVisitor): def __init__(self): self.found_frames: List[ast_DataFrame] = [] def visit_ast_DataFrame(self, a: ast_DataFrame): self.found_frames.append(a) scanner = df_scanner() scanner.visit(a) assert (len(scanner.found_frames) > 0), 'All expressions must start with a dataframe' assert all(((f == scanner.found_frames[0]) for f in scanner.found_frames)), 'Only a single dataframe is supported in any expression' return scanner.found_frames[0]
def _find_dataframes(a: ast.AST) -> ast_DataFrame: class df_scanner(ast.NodeVisitor): def __init__(self): self.found_frames: List[ast_DataFrame] = [] def visit_ast_DataFrame(self, a: ast_DataFrame): self.found_frames.append(a) scanner = df_scanner() scanner.visit(a) assert (len(scanner.found_frames) > 0), 'All expressions must start with a dataframe' assert all(((f == scanner.found_frames[0]) for f in scanner.found_frames)), 'Only a single dataframe is supported in any expression' return scanner.found_frames[0]<|docstring|>Find the asts that represent dataframes. Limit to one or failure for now<|endoftext|>
2a0e739ea6844a1bf29b56dd6df5f92d8aa3ece8f9c8e64b42ed20a7191c62cd
def new_var_name(): '\n Returns the string for a new variable name. Each one is unique.\n ' global _var_name_counter assert (_var_name_counter < 10000) v = f'e{_var_name_counter:04}' _var_name_counter = (_var_name_counter + 1) return v
Returns the string for a new variable name. Each one is unique.
hep_tables/utils.py
new_var_name
gordonwatts/hep_tables
2
python
def new_var_name(): '\n \n ' global _var_name_counter assert (_var_name_counter < 10000) v = f'e{_var_name_counter:04}' _var_name_counter = (_var_name_counter + 1) return v
def new_var_name(): '\n \n ' global _var_name_counter assert (_var_name_counter < 10000) v = f'e{_var_name_counter:04}' _var_name_counter = (_var_name_counter + 1) return v<|docstring|>Returns the string for a new variable name. Each one is unique.<|endoftext|>
278cd7ad02e0ff7262097835b3d3ada3adc0c54b77731415b56d5be43c59d1cc
def new_term(t: Type): 'Return a new term of type t with a random name' from .render import term_info return term_info(new_var_name(), t)
Return a new term of type t with a random name
hep_tables/utils.py
new_term
gordonwatts/hep_tables
2
python
def new_term(t: Type): from .render import term_info return term_info(new_var_name(), t)
def new_term(t: Type): from .render import term_info return term_info(new_var_name(), t)<|docstring|>Return a new term of type t with a random name<|endoftext|>
0964ba90d1747b453e1b26f3c2c5521f10af041cc2b8a5e5f6e93407de213c4b
def to_ast(o: object) -> ast.AST: '\n Convert an object to an ast\n ' r = ast.parse(str(o)).body[0] assert isinstance(r, ast.Expr) return r.value
Convert an object to an ast
hep_tables/utils.py
to_ast
gordonwatts/hep_tables
2
python
def to_ast(o: object) -> ast.AST: '\n \n ' r = ast.parse(str(o)).body[0] assert isinstance(r, ast.Expr) return r.value
def to_ast(o: object) -> ast.AST: '\n \n ' r = ast.parse(str(o)).body[0] assert isinstance(r, ast.Expr) return r.value<|docstring|>Convert an object to an ast<|endoftext|>
12ab5a2da82103e18ff37f966aa72af28b4171a94ba7e315f7e2845c3ef2cf37
def to_args_from_keywords(kws: List[ast.keyword]) -> Dict[(str, Optional[object])]: "\n Given keywords return a dict of those ast's converted to something useful.\n " return {k.arg: to_object(k.value) for k in kws if isinstance(k.arg, str)}
Given keywords return a dict of those ast's converted to something useful.
hep_tables/utils.py
to_args_from_keywords
gordonwatts/hep_tables
2
python
def to_args_from_keywords(kws: List[ast.keyword]) -> Dict[(str, Optional[object])]: "\n \n " return {k.arg: to_object(k.value) for k in kws if isinstance(k.arg, str)}
def to_args_from_keywords(kws: List[ast.keyword]) -> Dict[(str, Optional[object])]: "\n \n " return {k.arg: to_object(k.value) for k in kws if isinstance(k.arg, str)}<|docstring|>Given keywords return a dict of those ast's converted to something useful.<|endoftext|>
10b214a8af0df7b586f9441c0dc35df719641b3f4e28777c931cee63a2e374c0
def _find_root_expr(expr: ast.AST, possible_root: ast.AST) -> Optional[ast.AST]: '\n Look to see if we can find the root expression for this ast. It will either be `a` or\n it will be an `ast_DataFrame` - return whichever one it is.\n\n Arguments:\n expr Expression to find a root\n possible_root Root\n\n Result:\n expr First hit in the standard ast.NodeVisitor algorithm that is\n either the a object or an instance of type `ast_DataFrame`.\n\n ## Notes:\n\n Logic is a bit subtle. Say that `possible_root` is df.jets.\n\n df.jets.pt --> df.jets\n df.eles.pt --> df\n sin(df.jets.pt) --> df.jets\n df.eles.DeltaR(df.jets) --> df\n\n ' class root_finder(ast.NodeVisitor): def __init__(self, possible_root: ast.AST): ast.NodeVisitor.__init__(self) self._possible = possible_root self.found: Optional[ast.AST] = None def visit(self, a: ast.AST): if (a is self._possible): if (self.found is None): self.found = a elif isinstance(a, ast_DataFrame): self.found = a else: ast.NodeVisitor.visit(self, a) r = root_finder(possible_root) r.visit(expr) return r.found
Look to see if we can find the root expression for this ast. It will either be `a` or it will be an `ast_DataFrame` - return whichever one it is. Arguments: expr Expression to find a root possible_root Root Result: expr First hit in the standard ast.NodeVisitor algorithm that is either the a object or an instance of type `ast_DataFrame`. ## Notes: Logic is a bit subtle. Say that `possible_root` is df.jets. df.jets.pt --> df.jets df.eles.pt --> df sin(df.jets.pt) --> df.jets df.eles.DeltaR(df.jets) --> df
hep_tables/utils.py
_find_root_expr
gordonwatts/hep_tables
2
python
def _find_root_expr(expr: ast.AST, possible_root: ast.AST) -> Optional[ast.AST]: '\n Look to see if we can find the root expression for this ast. It will either be `a` or\n it will be an `ast_DataFrame` - return whichever one it is.\n\n Arguments:\n expr Expression to find a root\n possible_root Root\n\n Result:\n expr First hit in the standard ast.NodeVisitor algorithm that is\n either the a object or an instance of type `ast_DataFrame`.\n\n ## Notes:\n\n Logic is a bit subtle. Say that `possible_root` is df.jets.\n\n df.jets.pt --> df.jets\n df.eles.pt --> df\n sin(df.jets.pt) --> df.jets\n df.eles.DeltaR(df.jets) --> df\n\n ' class root_finder(ast.NodeVisitor): def __init__(self, possible_root: ast.AST): ast.NodeVisitor.__init__(self) self._possible = possible_root self.found: Optional[ast.AST] = None def visit(self, a: ast.AST): if (a is self._possible): if (self.found is None): self.found = a elif isinstance(a, ast_DataFrame): self.found = a else: ast.NodeVisitor.visit(self, a) r = root_finder(possible_root) r.visit(expr) return r.found
def _find_root_expr(expr: ast.AST, possible_root: ast.AST) -> Optional[ast.AST]: '\n Look to see if we can find the root expression for this ast. It will either be `a` or\n it will be an `ast_DataFrame` - return whichever one it is.\n\n Arguments:\n expr Expression to find a root\n possible_root Root\n\n Result:\n expr First hit in the standard ast.NodeVisitor algorithm that is\n either the a object or an instance of type `ast_DataFrame`.\n\n ## Notes:\n\n Logic is a bit subtle. Say that `possible_root` is df.jets.\n\n df.jets.pt --> df.jets\n df.eles.pt --> df\n sin(df.jets.pt) --> df.jets\n df.eles.DeltaR(df.jets) --> df\n\n ' class root_finder(ast.NodeVisitor): def __init__(self, possible_root: ast.AST): ast.NodeVisitor.__init__(self) self._possible = possible_root self.found: Optional[ast.AST] = None def visit(self, a: ast.AST): if (a is self._possible): if (self.found is None): self.found = a elif isinstance(a, ast_DataFrame): self.found = a else: ast.NodeVisitor.visit(self, a) r = root_finder(possible_root) r.visit(expr) return r.found<|docstring|>Look to see if we can find the root expression for this ast. It will either be `a` or it will be an `ast_DataFrame` - return whichever one it is. Arguments: expr Expression to find a root possible_root Root Result: expr First hit in the standard ast.NodeVisitor algorithm that is either the a object or an instance of type `ast_DataFrame`. ## Notes: Logic is a bit subtle. Say that `possible_root` is df.jets. df.jets.pt --> df.jets df.eles.pt --> df sin(df.jets.pt) --> df.jets df.eles.DeltaR(df.jets) --> df<|endoftext|>
62cfecc34eecad7b33e81638638871510d89aefe7e0033be72c8cb23c59f9883
def _parse_elements(s: str) -> List[str]: '\n Return comma separated strings at the top level\n ' if ((s[0] != '(') and (s[1] != ')')): return [s] def parse_for_commas(part_list: str) -> Tuple[(List[int], int)]: result = [] ignore_before = 0 for (i, c) in enumerate(part_list): if (i >= ignore_before): if (c == ','): result.append((i + 1)) if (c == ')'): return (result, (i + 1)) if (c == '('): (r, pos) = parse_for_commas(part_list[(i + 1):]) ignore_before = ((i + pos) + 1) return (result, len(part_list)) (commas, _) = parse_for_commas(s[1:(- 1)]) bounds = (([1] + [(c + 1) for c in commas]) + [len(s)]) segments = [s[i:(j - 1)] for (i, j) in zip(bounds, bounds[1:])] return segments
Return comma separated strings at the top level
hep_tables/utils.py
_parse_elements
gordonwatts/hep_tables
2
python
def _parse_elements(s: str) -> List[str]: '\n \n ' if ((s[0] != '(') and (s[1] != ')')): return [s] def parse_for_commas(part_list: str) -> Tuple[(List[int], int)]: result = [] ignore_before = 0 for (i, c) in enumerate(part_list): if (i >= ignore_before): if (c == ','): result.append((i + 1)) if (c == ')'): return (result, (i + 1)) if (c == '('): (r, pos) = parse_for_commas(part_list[(i + 1):]) ignore_before = ((i + pos) + 1) return (result, len(part_list)) (commas, _) = parse_for_commas(s[1:(- 1)]) bounds = (([1] + [(c + 1) for c in commas]) + [len(s)]) segments = [s[i:(j - 1)] for (i, j) in zip(bounds, bounds[1:])] return segments
def _parse_elements(s: str) -> List[str]: '\n \n ' if ((s[0] != '(') and (s[1] != ')')): return [s] def parse_for_commas(part_list: str) -> Tuple[(List[int], int)]: result = [] ignore_before = 0 for (i, c) in enumerate(part_list): if (i >= ignore_before): if (c == ','): result.append((i + 1)) if (c == ')'): return (result, (i + 1)) if (c == '('): (r, pos) = parse_for_commas(part_list[(i + 1):]) ignore_before = ((i + pos) + 1) return (result, len(part_list)) (commas, _) = parse_for_commas(s[1:(- 1)]) bounds = (([1] + [(c + 1) for c in commas]) + [len(s)]) segments = [s[i:(j - 1)] for (i, j) in zip(bounds, bounds[1:])] return segments<|docstring|>Return comma separated strings at the top level<|endoftext|>
df182c3c01a519f001e3dc801099e1a2e293e61a95f70ebba5db5b544b7a68ba
def _index_text_tuple(s: str, index: int) -> str: "\n If s is a tuple, then return the index'th item\n " splits = _parse_elements(s) if (len(splits) == 1): return f'{s}[{index}]' if (len(splits) < index): raise Exception(f'Internal Error: attempt to index tuple fail: {s} - index {index}') return splits[index]
If s is a tuple, then return the index'th item
hep_tables/utils.py
_index_text_tuple
gordonwatts/hep_tables
2
python
def _index_text_tuple(s: str, index: int) -> str: "\n \n " splits = _parse_elements(s) if (len(splits) == 1): return f'{s}[{index}]' if (len(splits) < index): raise Exception(f'Internal Error: attempt to index tuple fail: {s} - index {index}') return splits[index]
def _index_text_tuple(s: str, index: int) -> str: "\n \n " splits = _parse_elements(s) if (len(splits) == 1): return f'{s}[{index}]' if (len(splits) < index): raise Exception(f'Internal Error: attempt to index tuple fail: {s} - index {index}') return splits[index]<|docstring|>If s is a tuple, then return the index'th item<|endoftext|>
f100e022461fa9dabb7d015660fa0ffece0a993685fc1d884c0bf38e37737f22
def _is_of_type(t1: Type, t2: Type) -> bool: '\n Returns true if t1 is of type t2\n ' if (t1 == t2): return True if ((t2 == object) and (not _is_list(t1))): return True if (not _same_generic_type(t1, t2)): return False for (a_t1, a_t2) in zip(t1.__args__, t2.__args__): if (not _is_of_type(a_t1, a_t2)): return False return True
Returns true if t1 is of type t2
hep_tables/utils.py
_is_of_type
gordonwatts/hep_tables
2
python
def _is_of_type(t1: Type, t2: Type) -> bool: '\n \n ' if (t1 == t2): return True if ((t2 == object) and (not _is_list(t1))): return True if (not _same_generic_type(t1, t2)): return False for (a_t1, a_t2) in zip(t1.__args__, t2.__args__): if (not _is_of_type(a_t1, a_t2)): return False return True
def _is_of_type(t1: Type, t2: Type) -> bool: '\n \n ' if (t1 == t2): return True if ((t2 == object) and (not _is_list(t1))): return True if (not _same_generic_type(t1, t2)): return False for (a_t1, a_t2) in zip(t1.__args__, t2.__args__): if (not _is_of_type(a_t1, a_t2)): return False return True<|docstring|>Returns true if t1 is of type t2<|endoftext|>
13970919b4041f672031862b583e60997369917419dca4bec66ccb7d1cfbe622
def _type_replace(source_type: Type, find: Type, replace: Type) -> Optional[Type]: "\n Find `find` as deeply in `source_type` as possible, and replace it with `replace'.\n\n `_type_replace(List[List[float]], List[object], int) -> List[int]`\n\n If source_type contains no `find`, then return None\n " from typing import _GenericAlias if isinstance(source_type, _GenericAlias): if (source_type._name == 'List'): r = _type_replace(source_type.__args__[0], find, replace) if (r is not None): return List[r] if _is_of_type(source_type, find): return replace return None
Find `find` as deeply in `source_type` as possible, and replace it with `replace'. `_type_replace(List[List[float]], List[object], int) -> List[int]` If source_type contains no `find`, then return None
hep_tables/utils.py
_type_replace
gordonwatts/hep_tables
2
python
def _type_replace(source_type: Type, find: Type, replace: Type) -> Optional[Type]: "\n Find `find` as deeply in `source_type` as possible, and replace it with `replace'.\n\n `_type_replace(List[List[float]], List[object], int) -> List[int]`\n\n If source_type contains no `find`, then return None\n " from typing import _GenericAlias if isinstance(source_type, _GenericAlias): if (source_type._name == 'List'): r = _type_replace(source_type.__args__[0], find, replace) if (r is not None): return List[r] if _is_of_type(source_type, find): return replace return None
def _type_replace(source_type: Type, find: Type, replace: Type) -> Optional[Type]: "\n Find `find` as deeply in `source_type` as possible, and replace it with `replace'.\n\n `_type_replace(List[List[float]], List[object], int) -> List[int]`\n\n If source_type contains no `find`, then return None\n " from typing import _GenericAlias if isinstance(source_type, _GenericAlias): if (source_type._name == 'List'): r = _type_replace(source_type.__args__[0], find, replace) if (r is not None): return List[r] if _is_of_type(source_type, find): return replace return None<|docstring|>Find `find` as deeply in `source_type` as possible, and replace it with `replace'. `_type_replace(List[List[float]], List[object], int) -> List[int]` If source_type contains no `find`, then return None<|endoftext|>
0b3a18cdd46863d4001cfc76da80b3628355518c1e4bed67a447dd0ffaa3aa86
def _count_list(t: Type) -> int: 'Count number of List in a nested List' from typing import _GenericAlias if (not isinstance(t, _GenericAlias)): return 0 if (t._name != 'List'): return 0 return (1 + _count_list(t.__args__[0]))
Count number of List in a nested List
hep_tables/utils.py
_count_list
gordonwatts/hep_tables
2
python
def _count_list(t: Type) -> int: from typing import _GenericAlias if (not isinstance(t, _GenericAlias)): return 0 if (t._name != 'List'): return 0 return (1 + _count_list(t.__args__[0]))
def _count_list(t: Type) -> int: from typing import _GenericAlias if (not isinstance(t, _GenericAlias)): return 0 if (t._name != 'List'): return 0 return (1 + _count_list(t.__args__[0]))<|docstring|>Count number of List in a nested List<|endoftext|>