body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
2c175ea563c91e47434fbbcb726056a96386b4d0b77d6e78f1d16477c2930699
|
def write_unsigned(self, value: int) -> None:
'Set the stored value as a 256-bit unsigned value'
raise NotImplementedError()
|
Set the stored value as a 256-bit unsigned value
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
write_unsigned
|
sha-ron/opentitan
| 1
|
python
|
def write_unsigned(self, value: int) -> None:
raise NotImplementedError()
|
def write_unsigned(self, value: int) -> None:
raise NotImplementedError()<|docstring|>Set the stored value as a 256-bit unsigned value<|endoftext|>
|
9d2b9ed9e6b87238732f6d74077d43c554e9ea865f67393702d9faabb5ba6f58
|
def read_signed(self) -> int:
'Get the stored value as a 256-bit signed value'
uval = self.read_unsigned()
return (uval - ((1 << 256) if (uval >> 255) else 0))
|
Get the stored value as a 256-bit signed value
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
read_signed
|
sha-ron/opentitan
| 1
|
python
|
def read_signed(self) -> int:
uval = self.read_unsigned()
return (uval - ((1 << 256) if (uval >> 255) else 0))
|
def read_signed(self) -> int:
uval = self.read_unsigned()
return (uval - ((1 << 256) if (uval >> 255) else 0))<|docstring|>Get the stored value as a 256-bit signed value<|endoftext|>
|
a52ed978b8e26c1704cc64eace3967d49a37698b7e6b6b72f4702c80b60766d7
|
def write_signed(self, value: int) -> None:
'Set the stored value as a 256-bit signed value'
assert ((- (1 << 255)) <= value < (1 << 255))
uval = (((1 << 256) + value) if (value < 0) else value)
self.write_unsigned(uval)
|
Set the stored value as a 256-bit signed value
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
write_signed
|
sha-ron/opentitan
| 1
|
python
|
def write_signed(self, value: int) -> None:
assert ((- (1 << 255)) <= value < (1 << 255))
uval = (((1 << 256) + value) if (value < 0) else value)
self.write_unsigned(uval)
|
def write_signed(self, value: int) -> None:
assert ((- (1 << 255)) <= value < (1 << 255))
uval = (((1 << 256) + value) if (value < 0) else value)
self.write_unsigned(uval)<|docstring|>Set the stored value as a 256-bit signed value<|endoftext|>
|
814e04470c444075c6fc350c0f9ec17a959a224b5619dbf8671df2cd1808c665
|
def commit(self) -> None:
'Commit pending changes'
return
|
Commit pending changes
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
commit
|
sha-ron/opentitan
| 1
|
python
|
def commit(self) -> None:
return
|
def commit(self) -> None:
return<|docstring|>Commit pending changes<|endoftext|>
|
455303d1951211a785dcd195a4919028ee8c74e8ae7ba280a0b1f2efdd3724a3
|
def abort(self) -> None:
'Abort pending changes'
return
|
Abort pending changes
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
abort
|
sha-ron/opentitan
| 1
|
python
|
def abort(self) -> None:
return
|
def abort(self) -> None:
return<|docstring|>Abort pending changes<|endoftext|>
|
347977b5afb7495e1d5414d9f18e0a496b9ffbfe4a592f70d4fff7c6147ba73c
|
def changes(self) -> List[TraceWSR]:
'Return list of pending architectural changes'
return []
|
Return list of pending architectural changes
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
changes
|
sha-ron/opentitan
| 1
|
python
|
def changes(self) -> List[TraceWSR]:
return []
|
def changes(self) -> List[TraceWSR]:
return []<|docstring|>Return list of pending architectural changes<|endoftext|>
|
a7c355b4df450a8b27bbdb51949fb156dbb3fb87ab797fadca4240a1110a7abe
|
def read_u32(self) -> int:
'Read a 32-bit unsigned result'
return (self._random_value & ((1 << 32) - 1))
|
Read a 32-bit unsigned result
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
read_u32
|
sha-ron/opentitan
| 1
|
python
|
def read_u32(self) -> int:
return (self._random_value & ((1 << 32) - 1))
|
def read_u32(self) -> int:
return (self._random_value & ((1 << 32) - 1))<|docstring|>Read a 32-bit unsigned result<|endoftext|>
|
932ffd85638300bcf29d6cf8dec90ee84bec696228415e84a22ca85cfa680509
|
def read_at_idx(self, idx: int) -> int:
'Read the WSR at idx as an unsigned 256-bit value'
return self._wsr_for_idx(idx).read_unsigned()
|
Read the WSR at idx as an unsigned 256-bit value
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
read_at_idx
|
sha-ron/opentitan
| 1
|
python
|
def read_at_idx(self, idx: int) -> int:
return self._wsr_for_idx(idx).read_unsigned()
|
def read_at_idx(self, idx: int) -> int:
return self._wsr_for_idx(idx).read_unsigned()<|docstring|>Read the WSR at idx as an unsigned 256-bit value<|endoftext|>
|
04d26e21bd29d42e91546980fc0c8733a09c206c12df6ee6ad28591095553856
|
def write_at_idx(self, idx: int, value: int) -> None:
'Write the WSR at idx as an unsigned 256-bit value'
return self._wsr_for_idx(idx).write_unsigned(value)
|
Write the WSR at idx as an unsigned 256-bit value
|
hw/ip/otbn/dv/otbnsim/sim/wsr.py
|
write_at_idx
|
sha-ron/opentitan
| 1
|
python
|
def write_at_idx(self, idx: int, value: int) -> None:
return self._wsr_for_idx(idx).write_unsigned(value)
|
def write_at_idx(self, idx: int, value: int) -> None:
return self._wsr_for_idx(idx).write_unsigned(value)<|docstring|>Write the WSR at idx as an unsigned 256-bit value<|endoftext|>
|
1fb07f7e72901eb8574bc1d33e9a7116b27a4a277310a93f68f7f5842185d549
|
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('-b', '--bold', is_flag=True, help='Print text in bold?')
@click.option('-c', '--color', required=True, type=click.Choice(['green', 'yellow', 'red']), help='Text color.')
@click.option('-t', '--text', required=True, help='Text to print.')
@click.version_option()
def main(bold, color, text):
' print text in color and optionally in bold '
click.secho(text, fg=color, bold=bold)
|
print text in color and optionally in bold
|
example_pkg_cloos/cli.py
|
main
|
cloos/python_example_pkg_cloos
| 0
|
python
|
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('-b', '--bold', is_flag=True, help='Print text in bold?')
@click.option('-c', '--color', required=True, type=click.Choice(['green', 'yellow', 'red']), help='Text color.')
@click.option('-t', '--text', required=True, help='Text to print.')
@click.version_option()
def main(bold, color, text):
' '
click.secho(text, fg=color, bold=bold)
|
@click.command(context_settings=CONTEXT_SETTINGS)
@click.option('-b', '--bold', is_flag=True, help='Print text in bold?')
@click.option('-c', '--color', required=True, type=click.Choice(['green', 'yellow', 'red']), help='Text color.')
@click.option('-t', '--text', required=True, help='Text to print.')
@click.version_option()
def main(bold, color, text):
' '
click.secho(text, fg=color, bold=bold)<|docstring|>print text in color and optionally in bold<|endoftext|>
|
343a3695031716df85d0da40f9b552fa72709dae4e310d41736b26f7eba21c6c
|
def transform_tensor(self, input: torch.Tensor) -> torch.Tensor:
'Convert any incoming (H, W), (C, H, W) and (B, C, H, W) into (B, C, H, W).'
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
return _transform_input(input)
|
Convert any incoming (H, W), (C, H, W) and (B, C, H, W) into (B, C, H, W).
|
kornia/augmentation/_2d/mix/base.py
|
transform_tensor
|
twsl/kornia
| 418
|
python
|
def transform_tensor(self, input: torch.Tensor) -> torch.Tensor:
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
return _transform_input(input)
|
def transform_tensor(self, input: torch.Tensor) -> torch.Tensor:
_validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
return _transform_input(input)<|docstring|>Convert any incoming (H, W), (C, H, W) and (B, C, H, W) into (B, C, H, W).<|endoftext|>
|
e9fee1bd575c5bd81b9ba0768086a88af6f4465a934ee98ad42cc362ede565f7
|
def log_decorator(f):
'Add logging for method of the model.'
patch_dict = {'function': f.__name__, 'line': inspect.getsourcelines(f)[1], 'name': inspect.getmodule(f).__name__}
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
tslogger.log(f'Calling method {f.__name__} of {self.__class__.__name__}', **patch_dict)
result = f(self, *args, **kwargs)
return result
return wrapper
|
Add logging for method of the model.
|
etna/models/base.py
|
log_decorator
|
Pacman1984/etna
| 96
|
python
|
def log_decorator(f):
patch_dict = {'function': f.__name__, 'line': inspect.getsourcelines(f)[1], 'name': inspect.getmodule(f).__name__}
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
tslogger.log(f'Calling method {f.__name__} of {self.__class__.__name__}', **patch_dict)
result = f(self, *args, **kwargs)
return result
return wrapper
|
def log_decorator(f):
patch_dict = {'function': f.__name__, 'line': inspect.getsourcelines(f)[1], 'name': inspect.getmodule(f).__name__}
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
tslogger.log(f'Calling method {f.__name__} of {self.__class__.__name__}', **patch_dict)
result = f(self, *args, **kwargs)
return result
return wrapper<|docstring|>Add logging for method of the model.<|endoftext|>
|
96d32052f58b1bd90e10a04fa12e853ac508f0ab1e458197caccdeed3ace2dc4
|
@abstractmethod
def fit(self, ts: TSDataset) -> 'Model':
'Fit model.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n Returns\n -------\n '
pass
|
Fit model.
Parameters
----------
ts:
Dataframe with features
Returns
-------
|
etna/models/base.py
|
fit
|
Pacman1984/etna
| 96
|
python
|
@abstractmethod
def fit(self, ts: TSDataset) -> 'Model':
'Fit model.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n Returns\n -------\n '
pass
|
@abstractmethod
def fit(self, ts: TSDataset) -> 'Model':
'Fit model.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n Returns\n -------\n '
pass<|docstring|>Fit model.
Parameters
----------
ts:
Dataframe with features
Returns
-------<|endoftext|>
|
51285ecaa02ff5009185a5f32e3c7de78a0c5f027516718de64ccdd8cae903d9
|
@abstractmethod
def forecast(self, ts: TSDataset, prediction_interval: bool=False, quantiles: Sequence[float]=(0.025, 0.975)) -> TSDataset:
'Make predictions.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n prediction_interval:\n If True returns prediction interval for forecast\n quantiles:\n Levels of prediction distribution. By default 2.5% and 97.5% taken to form a 95% prediction interval\n\n Returns\n -------\n TSDataset\n Models result\n '
pass
|
Make predictions.
Parameters
----------
ts:
Dataframe with features
prediction_interval:
If True returns prediction interval for forecast
quantiles:
Levels of prediction distribution. By default 2.5% and 97.5% taken to form a 95% prediction interval
Returns
-------
TSDataset
Models result
|
etna/models/base.py
|
forecast
|
Pacman1984/etna
| 96
|
python
|
@abstractmethod
def forecast(self, ts: TSDataset, prediction_interval: bool=False, quantiles: Sequence[float]=(0.025, 0.975)) -> TSDataset:
'Make predictions.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n prediction_interval:\n If True returns prediction interval for forecast\n quantiles:\n Levels of prediction distribution. By default 2.5% and 97.5% taken to form a 95% prediction interval\n\n Returns\n -------\n TSDataset\n Models result\n '
pass
|
@abstractmethod
def forecast(self, ts: TSDataset, prediction_interval: bool=False, quantiles: Sequence[float]=(0.025, 0.975)) -> TSDataset:
'Make predictions.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n prediction_interval:\n If True returns prediction interval for forecast\n quantiles:\n Levels of prediction distribution. By default 2.5% and 97.5% taken to form a 95% prediction interval\n\n Returns\n -------\n TSDataset\n Models result\n '
pass<|docstring|>Make predictions.
Parameters
----------
ts:
Dataframe with features
prediction_interval:
If True returns prediction interval for forecast
quantiles:
Levels of prediction distribution. By default 2.5% and 97.5% taken to form a 95% prediction interval
Returns
-------
TSDataset
Models result<|endoftext|>
|
1a76038ba8c42d5ea7c879550d53b79202f9365be270fe437a6f09483e304ec4
|
@log_decorator
def fit(self, ts: TSDataset) -> 'PerSegmentModel':
'Fit model.'
self._segments = ts.segments
self._build_models()
for segment in self._segments:
model = self._models[segment]
segment_features = ts[(:, segment, :)]
segment_features = segment_features.dropna()
segment_features = segment_features.droplevel('segment', axis=1)
segment_features = segment_features.reset_index()
model.fit(df=segment_features)
return self
|
Fit model.
|
etna/models/base.py
|
fit
|
Pacman1984/etna
| 96
|
python
|
@log_decorator
def fit(self, ts: TSDataset) -> 'PerSegmentModel':
self._segments = ts.segments
self._build_models()
for segment in self._segments:
model = self._models[segment]
segment_features = ts[(:, segment, :)]
segment_features = segment_features.dropna()
segment_features = segment_features.droplevel('segment', axis=1)
segment_features = segment_features.reset_index()
model.fit(df=segment_features)
return self
|
@log_decorator
def fit(self, ts: TSDataset) -> 'PerSegmentModel':
self._segments = ts.segments
self._build_models()
for segment in self._segments:
model = self._models[segment]
segment_features = ts[(:, segment, :)]
segment_features = segment_features.dropna()
segment_features = segment_features.droplevel('segment', axis=1)
segment_features = segment_features.reset_index()
model.fit(df=segment_features)
return self<|docstring|>Fit model.<|endoftext|>
|
7201dc276df37dbc863e2425dcdf7d6e34aa2bbfaf9370e3bfe9d7dc86018ab2
|
@log_decorator
def forecast(self, ts: TSDataset) -> TSDataset:
'Make predictions.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n Returns\n -------\n DataFrame\n Models result\n '
if (self._segments is None):
raise ValueError('The model is not fitted yet, use fit() to train it')
result_list = list()
for segment in self._segments:
model = self._models[segment]
segment_predict = self._forecast_segment(model, segment, ts)
result_list.append(segment_predict)
result_df = pd.concat(result_list, ignore_index=True)
result_df = result_df.set_index(['timestamp', 'segment'])
df = ts.to_pandas(flatten=True)
df = df.set_index(['timestamp', 'segment'])
df = df.combine_first(result_df).reset_index()
df = TSDataset.to_dataset(df)
ts.df = df
ts.inverse_transform()
return ts
|
Make predictions.
Parameters
----------
ts:
Dataframe with features
Returns
-------
DataFrame
Models result
|
etna/models/base.py
|
forecast
|
Pacman1984/etna
| 96
|
python
|
@log_decorator
def forecast(self, ts: TSDataset) -> TSDataset:
'Make predictions.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n Returns\n -------\n DataFrame\n Models result\n '
if (self._segments is None):
raise ValueError('The model is not fitted yet, use fit() to train it')
result_list = list()
for segment in self._segments:
model = self._models[segment]
segment_predict = self._forecast_segment(model, segment, ts)
result_list.append(segment_predict)
result_df = pd.concat(result_list, ignore_index=True)
result_df = result_df.set_index(['timestamp', 'segment'])
df = ts.to_pandas(flatten=True)
df = df.set_index(['timestamp', 'segment'])
df = df.combine_first(result_df).reset_index()
df = TSDataset.to_dataset(df)
ts.df = df
ts.inverse_transform()
return ts
|
@log_decorator
def forecast(self, ts: TSDataset) -> TSDataset:
'Make predictions.\n\n Parameters\n ----------\n ts:\n Dataframe with features\n Returns\n -------\n DataFrame\n Models result\n '
if (self._segments is None):
raise ValueError('The model is not fitted yet, use fit() to train it')
result_list = list()
for segment in self._segments:
model = self._models[segment]
segment_predict = self._forecast_segment(model, segment, ts)
result_list.append(segment_predict)
result_df = pd.concat(result_list, ignore_index=True)
result_df = result_df.set_index(['timestamp', 'segment'])
df = ts.to_pandas(flatten=True)
df = df.set_index(['timestamp', 'segment'])
df = df.combine_first(result_df).reset_index()
df = TSDataset.to_dataset(df)
ts.df = df
ts.inverse_transform()
return ts<|docstring|>Make predictions.
Parameters
----------
ts:
Dataframe with features
Returns
-------
DataFrame
Models result<|endoftext|>
|
445a8032df95ff81c319da282b959f4afe8f0ec71755847d7e7583348dfe7f2c
|
def _build_models(self):
'Create a dict with models for each segment (if required).'
self._models = {}
for segment in self._segments:
self._models[segment] = deepcopy(self._base_model)
|
Create a dict with models for each segment (if required).
|
etna/models/base.py
|
_build_models
|
Pacman1984/etna
| 96
|
python
|
def _build_models(self):
self._models = {}
for segment in self._segments:
self._models[segment] = deepcopy(self._base_model)
|
def _build_models(self):
self._models = {}
for segment in self._segments:
self._models[segment] = deepcopy(self._base_model)<|docstring|>Create a dict with models for each segment (if required).<|endoftext|>
|
cfc90d910b49897135ba00166920b8867fe1db374589919f0a0077a6d69c6084
|
async def startup(self) -> None:
'Causes on_startup signal\n\n Should be called in the event loop along with the request handler.\n '
(await self.on_startup.send(self))
|
Causes on_startup signal
Should be called in the event loop along with the request handler.
|
aiohttp/web_app.py
|
startup
|
danielgtaylor/aiohttp
| 0
|
python
|
async def startup(self) -> None:
'Causes on_startup signal\n\n Should be called in the event loop along with the request handler.\n '
(await self.on_startup.send(self))
|
async def startup(self) -> None:
'Causes on_startup signal\n\n Should be called in the event loop along with the request handler.\n '
(await self.on_startup.send(self))<|docstring|>Causes on_startup signal
Should be called in the event loop along with the request handler.<|endoftext|>
|
4a43b4f829f9c8d0f75c12a42bf22fed8d091a311798f0d83483321affcc5ad4
|
async def shutdown(self) -> None:
'Causes on_shutdown signal\n\n Should be called before cleanup()\n '
(await self.on_shutdown.send(self))
|
Causes on_shutdown signal
Should be called before cleanup()
|
aiohttp/web_app.py
|
shutdown
|
danielgtaylor/aiohttp
| 0
|
python
|
async def shutdown(self) -> None:
'Causes on_shutdown signal\n\n Should be called before cleanup()\n '
(await self.on_shutdown.send(self))
|
async def shutdown(self) -> None:
'Causes on_shutdown signal\n\n Should be called before cleanup()\n '
(await self.on_shutdown.send(self))<|docstring|>Causes on_shutdown signal
Should be called before cleanup()<|endoftext|>
|
9188c5390f7e78a8f625cd7063968be8259cacac97fd00e848062dd9b349a0e2
|
async def cleanup(self) -> None:
'Causes on_cleanup signal\n\n Should be called after shutdown()\n '
(await self.on_cleanup.send(self))
|
Causes on_cleanup signal
Should be called after shutdown()
|
aiohttp/web_app.py
|
cleanup
|
danielgtaylor/aiohttp
| 0
|
python
|
async def cleanup(self) -> None:
'Causes on_cleanup signal\n\n Should be called after shutdown()\n '
(await self.on_cleanup.send(self))
|
async def cleanup(self) -> None:
'Causes on_cleanup signal\n\n Should be called after shutdown()\n '
(await self.on_cleanup.send(self))<|docstring|>Causes on_cleanup signal
Should be called after shutdown()<|endoftext|>
|
6a72a7a20e6f8f3b65254ee459b6a7a0a5d7acaeeb168149bac2593e90ec143d
|
def __call__(self):
'gunicorn compatibility'
return self
|
gunicorn compatibility
|
aiohttp/web_app.py
|
__call__
|
danielgtaylor/aiohttp
| 0
|
python
|
def __call__(self):
return self
|
def __call__(self):
return self<|docstring|>gunicorn compatibility<|endoftext|>
|
1c29957cbf5bfa05489f99a066c16417881597f5d7760c02fb5bcc8f09ebbf1e
|
def neighbours_update(self, beliefs, inf_graph, **kwargs):
"Applies the classic update function as matrix multiplication.\n \n For each agent, update their beliefs factoring the authority bias and\n the beliefs of all the agents' neighbors.\n\n Equivalent to:\n\n [blf_ai + np.mean([inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]\n\n "
neighbours = [np.count_nonzero(inf_graph[(:, i)]) for (i, _) in enumerate(beliefs)]
return ((((beliefs @ inf_graph) - (np.add.reduce(inf_graph) * beliefs)) / neighbours) + beliefs)
|
Applies the classic update function as matrix multiplication.
For each agent, update their beliefs factoring the authority bias and
the beliefs of all the agents' neighbors.
Equivalent to:
[blf_ai + np.mean([inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]
|
update_functions.py
|
neighbours_update
|
bolaabcd/Polarization
| 0
|
python
|
def neighbours_update(self, beliefs, inf_graph, **kwargs):
"Applies the classic update function as matrix multiplication.\n \n For each agent, update their beliefs factoring the authority bias and\n the beliefs of all the agents' neighbors.\n\n Equivalent to:\n\n [blf_ai + np.mean([inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]\n\n "
neighbours = [np.count_nonzero(inf_graph[(:, i)]) for (i, _) in enumerate(beliefs)]
return ((((beliefs @ inf_graph) - (np.add.reduce(inf_graph) * beliefs)) / neighbours) + beliefs)
|
def neighbours_update(self, beliefs, inf_graph, **kwargs):
"Applies the classic update function as matrix multiplication.\n \n For each agent, update their beliefs factoring the authority bias and\n the beliefs of all the agents' neighbors.\n\n Equivalent to:\n\n [blf_ai + np.mean([inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]\n\n "
neighbours = [np.count_nonzero(inf_graph[(:, i)]) for (i, _) in enumerate(beliefs)]
return ((((beliefs @ inf_graph) - (np.add.reduce(inf_graph) * beliefs)) / neighbours) + beliefs)<|docstring|>Applies the classic update function as matrix multiplication.
For each agent, update their beliefs factoring the authority bias and
the beliefs of all the agents' neighbors.
Equivalent to:
[blf_ai + np.mean([inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]<|endoftext|>
|
4319b48fabe10a261b1d40d0899f1bfc340515a75b0b8de2b0925d666bd246a8
|
def neighbours_cb_update(self, beliefs, inf_graph, **kwargs):
"Applies the confirmation-bias update function as matrix multiplication.\n \n For each agent, update their beliefs factoring the authority bias,\n confirmation-bias and the beliefs of all the agents' neighbors.\n\n Equivalent to:\n [blf_ai + np.mean([(1 - np.abs(blf_aj - blf_ai)) * inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]\n "
neighbours = [np.count_nonzero(inf_graph[(:, i)]) for (i, _) in enumerate(beliefs)]
diff = (np.ones((len(beliefs), 1)) @ np.asarray(beliefs)[np.newaxis])
diff = (np.transpose(diff) - diff)
infs = ((inf_graph * (1 - np.abs(diff))) * diff)
return ((np.add.reduce(infs) / neighbours) + beliefs)
|
Applies the confirmation-bias update function as matrix multiplication.
For each agent, update their beliefs factoring the authority bias,
confirmation-bias and the beliefs of all the agents' neighbors.
Equivalent to:
[blf_ai + np.mean([(1 - np.abs(blf_aj - blf_ai)) * inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]
|
update_functions.py
|
neighbours_cb_update
|
bolaabcd/Polarization
| 0
|
python
|
def neighbours_cb_update(self, beliefs, inf_graph, **kwargs):
"Applies the confirmation-bias update function as matrix multiplication.\n \n For each agent, update their beliefs factoring the authority bias,\n confirmation-bias and the beliefs of all the agents' neighbors.\n\n Equivalent to:\n [blf_ai + np.mean([(1 - np.abs(blf_aj - blf_ai)) * inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]\n "
neighbours = [np.count_nonzero(inf_graph[(:, i)]) for (i, _) in enumerate(beliefs)]
diff = (np.ones((len(beliefs), 1)) @ np.asarray(beliefs)[np.newaxis])
diff = (np.transpose(diff) - diff)
infs = ((inf_graph * (1 - np.abs(diff))) * diff)
return ((np.add.reduce(infs) / neighbours) + beliefs)
|
def neighbours_cb_update(self, beliefs, inf_graph, **kwargs):
"Applies the confirmation-bias update function as matrix multiplication.\n \n For each agent, update their beliefs factoring the authority bias,\n confirmation-bias and the beliefs of all the agents' neighbors.\n\n Equivalent to:\n [blf_ai + np.mean([(1 - np.abs(blf_aj - blf_ai)) * inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]\n "
neighbours = [np.count_nonzero(inf_graph[(:, i)]) for (i, _) in enumerate(beliefs)]
diff = (np.ones((len(beliefs), 1)) @ np.asarray(beliefs)[np.newaxis])
diff = (np.transpose(diff) - diff)
infs = ((inf_graph * (1 - np.abs(diff))) * diff)
return ((np.add.reduce(infs) / neighbours) + beliefs)<|docstring|>Applies the confirmation-bias update function as matrix multiplication.
For each agent, update their beliefs factoring the authority bias,
confirmation-bias and the beliefs of all the agents' neighbors.
Equivalent to:
[blf_ai + np.mean([(1 - np.abs(blf_aj - blf_ai)) * inf_graph[other, agent] * (blf_aj - blf_ai) for other, blf_aj in enumerate(beliefs) if inf_graph[other, agent] > 0]) for agent, blf_ai in enumerate(beliefs)]<|endoftext|>
|
dbf455bdb54788dd0e15c6889f81e3bf4580b911b102ba904b3c5b1230c93186
|
async def async_setup_platform(hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None:
'Set up the season sensor platform.'
hass.async_create_task(hass.config_entries.flow.async_init(DOMAIN, context={'source': SOURCE_IMPORT}, data=config))
|
Set up the season sensor platform.
|
homeassistant/components/season/sensor.py
|
async_setup_platform
|
a-p-z/core
| 30,023
|
python
|
async def async_setup_platform(hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None:
hass.async_create_task(hass.config_entries.flow.async_init(DOMAIN, context={'source': SOURCE_IMPORT}, data=config))
|
async def async_setup_platform(hass: HomeAssistant, config: ConfigType, async_add_entities: AddEntitiesCallback, discovery_info: (DiscoveryInfoType | None)=None) -> None:
hass.async_create_task(hass.config_entries.flow.async_init(DOMAIN, context={'source': SOURCE_IMPORT}, data=config))<|docstring|>Set up the season sensor platform.<|endoftext|>
|
ae58710922a964ccc384188fdd193ef4c843e332b1040957a14ac5744296c231
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None:
'Set up the platform from config entry.'
hemisphere = EQUATOR
if (hass.config.latitude < 0):
hemisphere = SOUTHERN
elif (hass.config.latitude > 0):
hemisphere = NORTHERN
async_add_entities([SeasonSensorEntity(entry, hemisphere)], True)
|
Set up the platform from config entry.
|
homeassistant/components/season/sensor.py
|
async_setup_entry
|
a-p-z/core
| 30,023
|
python
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None:
hemisphere = EQUATOR
if (hass.config.latitude < 0):
hemisphere = SOUTHERN
elif (hass.config.latitude > 0):
hemisphere = NORTHERN
async_add_entities([SeasonSensorEntity(entry, hemisphere)], True)
|
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None:
hemisphere = EQUATOR
if (hass.config.latitude < 0):
hemisphere = SOUTHERN
elif (hass.config.latitude > 0):
hemisphere = NORTHERN
async_add_entities([SeasonSensorEntity(entry, hemisphere)], True)<|docstring|>Set up the platform from config entry.<|endoftext|>
|
b7028c8ce54c3e886d0721ea4e04082469bcadd174e5ee2a26b77f1d73db92b8
|
def get_season(current_date: date, hemisphere: str, season_tracking_type: str) -> (str | None):
'Calculate the current season.'
if (hemisphere == 'equator'):
return None
if (season_tracking_type == TYPE_ASTRONOMICAL):
spring_start = ephem.next_equinox(str(current_date.year)).datetime()
summer_start = ephem.next_solstice(str(current_date.year)).datetime()
autumn_start = ephem.next_equinox(spring_start).datetime()
winter_start = ephem.next_solstice(summer_start).datetime()
else:
spring_start = datetime(2017, 3, 1).replace(year=current_date.year)
summer_start = spring_start.replace(month=6)
autumn_start = spring_start.replace(month=9)
winter_start = spring_start.replace(month=12)
season = STATE_WINTER
if (spring_start <= current_date < summer_start):
season = STATE_SPRING
elif (summer_start <= current_date < autumn_start):
season = STATE_SUMMER
elif (autumn_start <= current_date < winter_start):
season = STATE_AUTUMN
if (hemisphere == NORTHERN):
return season
return HEMISPHERE_SEASON_SWAP.get(season)
|
Calculate the current season.
|
homeassistant/components/season/sensor.py
|
get_season
|
a-p-z/core
| 30,023
|
python
|
def get_season(current_date: date, hemisphere: str, season_tracking_type: str) -> (str | None):
if (hemisphere == 'equator'):
return None
if (season_tracking_type == TYPE_ASTRONOMICAL):
spring_start = ephem.next_equinox(str(current_date.year)).datetime()
summer_start = ephem.next_solstice(str(current_date.year)).datetime()
autumn_start = ephem.next_equinox(spring_start).datetime()
winter_start = ephem.next_solstice(summer_start).datetime()
else:
spring_start = datetime(2017, 3, 1).replace(year=current_date.year)
summer_start = spring_start.replace(month=6)
autumn_start = spring_start.replace(month=9)
winter_start = spring_start.replace(month=12)
season = STATE_WINTER
if (spring_start <= current_date < summer_start):
season = STATE_SPRING
elif (summer_start <= current_date < autumn_start):
season = STATE_SUMMER
elif (autumn_start <= current_date < winter_start):
season = STATE_AUTUMN
if (hemisphere == NORTHERN):
return season
return HEMISPHERE_SEASON_SWAP.get(season)
|
def get_season(current_date: date, hemisphere: str, season_tracking_type: str) -> (str | None):
if (hemisphere == 'equator'):
return None
if (season_tracking_type == TYPE_ASTRONOMICAL):
spring_start = ephem.next_equinox(str(current_date.year)).datetime()
summer_start = ephem.next_solstice(str(current_date.year)).datetime()
autumn_start = ephem.next_equinox(spring_start).datetime()
winter_start = ephem.next_solstice(summer_start).datetime()
else:
spring_start = datetime(2017, 3, 1).replace(year=current_date.year)
summer_start = spring_start.replace(month=6)
autumn_start = spring_start.replace(month=9)
winter_start = spring_start.replace(month=12)
season = STATE_WINTER
if (spring_start <= current_date < summer_start):
season = STATE_SPRING
elif (summer_start <= current_date < autumn_start):
season = STATE_SUMMER
elif (autumn_start <= current_date < winter_start):
season = STATE_AUTUMN
if (hemisphere == NORTHERN):
return season
return HEMISPHERE_SEASON_SWAP.get(season)<|docstring|>Calculate the current season.<|endoftext|>
|
b83ae235bba875fdad83f0690d84be4f1b7ceff5fe43996764528dffb1df87c0
|
def __init__(self, entry: ConfigEntry, hemisphere: str) -> None:
'Initialize the season.'
self._attr_name = entry.title
self._attr_unique_id = entry.entry_id
self.hemisphere = hemisphere
self.type = entry.data[CONF_TYPE]
|
Initialize the season.
|
homeassistant/components/season/sensor.py
|
__init__
|
a-p-z/core
| 30,023
|
python
|
def __init__(self, entry: ConfigEntry, hemisphere: str) -> None:
self._attr_name = entry.title
self._attr_unique_id = entry.entry_id
self.hemisphere = hemisphere
self.type = entry.data[CONF_TYPE]
|
def __init__(self, entry: ConfigEntry, hemisphere: str) -> None:
self._attr_name = entry.title
self._attr_unique_id = entry.entry_id
self.hemisphere = hemisphere
self.type = entry.data[CONF_TYPE]<|docstring|>Initialize the season.<|endoftext|>
|
0387c8f4d9cdd614e0658d1ad353b2a4dab40c7b051be1b4915bcf006627d9cb
|
def update(self) -> None:
    """Recompute the current season and the matching icon."""
    season = get_season(utcnow().replace(tzinfo=None), self.hemisphere, self.type)
    self._attr_native_value = season
    # Generic cloud icon when no season applies (e.g. equator).
    self._attr_icon = SEASON_ICONS[season] if season else 'mdi:cloud'
|
Update season.
|
homeassistant/components/season/sensor.py
|
update
|
a-p-z/core
| 30,023
|
python
|
def update(self) -> None:
self._attr_native_value = get_season(utcnow().replace(tzinfo=None), self.hemisphere, self.type)
self._attr_icon = 'mdi:cloud'
if self._attr_native_value:
self._attr_icon = SEASON_ICONS[self._attr_native_value]
|
def update(self) -> None:
self._attr_native_value = get_season(utcnow().replace(tzinfo=None), self.hemisphere, self.type)
self._attr_icon = 'mdi:cloud'
if self._attr_native_value:
self._attr_icon = SEASON_ICONS[self._attr_native_value]<|docstring|>Update season.<|endoftext|>
|
58f4edf11c33c0c1311511d5c211f69778dd62b8e874b63bf0a1aba06c9a6487
|
def test_rank_estimation(self):
    """rank_estimate should recover the true rank of a masked low-rank matrix."""
    n_rows, n_cols, true_rank = 100, 5000, 3
    # Build a rank-3 matrix from two random factors.
    left = np.random.standard_normal(size=(n_rows, true_rank))
    right = np.random.standard_normal(size=(true_rank, n_cols))
    observed = (left @ right)
    # Zero out roughly half the entries at random.
    observed[np.random.random(size=(n_rows, n_cols)) > 0.5] = 0
    eps = (np.count_nonzero(observed) / np.sqrt(n_rows * n_cols))
    self.assertEqual(true_rank, rank_estimate(observed, eps))
|
Test rank estimation is accurate.
|
gemelli/tests/test_optspace.py
|
test_rank_estimation
|
ElDeveloper/gemelli
| 32
|
python
|
def test_rank_estimation(self):
N = 100
D = 5000
k = 3
U = np.random.standard_normal(size=(N, k))
V = np.random.standard_normal(size=(k, D))
Y = (U @ V)
mask = np.random.random(size=(N, D))
Y[(mask > 0.5)] = 0
total_nonzeros = np.count_nonzero(Y)
eps = (total_nonzeros / np.sqrt((N * D)))
self.assertEqual(k, rank_estimate(Y, eps))
|
def test_rank_estimation(self):
N = 100
D = 5000
k = 3
U = np.random.standard_normal(size=(N, k))
V = np.random.standard_normal(size=(k, D))
Y = (U @ V)
mask = np.random.random(size=(N, D))
Y[(mask > 0.5)] = 0
total_nonzeros = np.count_nonzero(Y)
eps = (total_nonzeros / np.sqrt((N * D)))
self.assertEqual(k, rank_estimate(Y, eps))<|docstring|>Test rank estimation is accurate.<|endoftext|>
|
e2ee4f2a4387ed06f4731ab4e612b18da46a97c8595200018c667f29437eb0b3
|
def test_G(self):
    """grassmann_manifold_one should run on an all-ones matrix."""
    result = grassmann_manifold_one(np.ones((10, 10)), 2, 2)
    self.assertAlmostEqual(result, 0.644944589179)
|
Test first grassmann manifold runs.
|
gemelli/tests/test_optspace.py
|
test_G
|
ElDeveloper/gemelli
| 32
|
python
|
def test_G(self):
X = np.ones((10, 10))
m0 = 2
r = 2
exp = grassmann_manifold_one(X, m0, r)
self.assertAlmostEqual(exp, 0.644944589179)
|
def test_G(self):
X = np.ones((10, 10))
m0 = 2
r = 2
exp = grassmann_manifold_one(X, m0, r)
self.assertAlmostEqual(exp, 0.644944589179)<|docstring|>Test first grassmann manifold runs.<|endoftext|>
|
fe761e82e602c0e0d3fa455f5f5d1d1ab4f6f2aa21160556b44d94f93e852fca
|
def test_G_z_0(self):
    """grassmann_manifold_one should converge on a small integer matrix."""
    matrix = np.array([[1, 3], [4, 1], [2, 1]])
    self.assertAlmostEqual(grassmann_manifold_one(matrix, 2, 2), 2.60980232)
|
Test first grassmann manifold converges.
|
gemelli/tests/test_optspace.py
|
test_G_z_0
|
ElDeveloper/gemelli
| 32
|
python
|
def test_G_z_0(self):
X = np.array([[1, 3], [4, 1], [2, 1]])
m0 = 2
r = 2
exp = grassmann_manifold_one(X, m0, r)
self.assertAlmostEqual(exp, 2.60980232)
|
def test_G_z_0(self):
X = np.array([[1, 3], [4, 1], [2, 1]])
m0 = 2
r = 2
exp = grassmann_manifold_one(X, m0, r)
self.assertAlmostEqual(exp, 2.60980232)<|docstring|>Test first grassmann manifold converges.<|endoftext|>
|
19626d32cfb585ad152e0c9d78980e4471e68bd375e18f1cafeeebb9bdb87584
|
def test_F_t(self):
    """cost_function should evaluate to 1 on this fixed configuration."""
    X = np.ones((5, 5))
    Y = np.ones((5, 5))
    # Mask with exactly two observed positions.
    mask = np.zeros((5, 5))
    mask[0, 1] = mask[1, 1] = 1
    observed = np.full((5, 5), 6.0)
    observed[0, 0] = 1
    result = cost_function(X, Y, np.eye(5), observed, mask, 2, 0.5)
    assert_array_almost_equal(result, 1, decimal=3)
|
Test cost function coverages.
|
gemelli/tests/test_optspace.py
|
test_F_t
|
ElDeveloper/gemelli
| 32
|
python
|
def test_F_t(self):
X = np.ones((5, 5))
Y = np.ones((5, 5))
E = np.zeros((5, 5))
E[(0, 1)] = 1
E[(1, 1)] = 1
S = np.eye(5)
M_E = (np.ones((5, 5)) * 6)
M_E[(0, 0)] = 1
m0 = 2
rho = 0.5
res = cost_function(X, Y, S, M_E, E, m0, rho)
exp = 1
assert_array_almost_equal(res, exp, decimal=3)
|
def test_F_t(self):
X = np.ones((5, 5))
Y = np.ones((5, 5))
E = np.zeros((5, 5))
E[(0, 1)] = 1
E[(1, 1)] = 1
S = np.eye(5)
M_E = (np.ones((5, 5)) * 6)
M_E[(0, 0)] = 1
m0 = 2
rho = 0.5
res = cost_function(X, Y, S, M_E, E, m0, rho)
exp = 1
assert_array_almost_equal(res, exp, decimal=3)<|docstring|>Test cost function coverages.<|endoftext|>
|
5840cb32195e4089349a1e146e3d4b04bbd8a2412b9200817807e116153b3205
|
def test_F_t_random(self):
    """cost_function on a seeded random observation mask."""
    np.random.seed(0)
    X = np.ones((5, 5))
    Y = np.ones((5, 5))
    mask = np.random.choice([0, 1], size=(5, 5))
    observed = np.full((5, 5), 6.0)
    observed[0, 0] = 1
    result = cost_function(X, Y, np.eye(5), observed, mask, 2, 0.5)
    self.assertAlmostEqual(result, 6.5)
|
Test cost function on random values.
|
gemelli/tests/test_optspace.py
|
test_F_t_random
|
ElDeveloper/gemelli
| 32
|
python
|
def test_F_t_random(self):
np.random.seed(0)
X = np.ones((5, 5))
Y = np.ones((5, 5))
E = np.random.choice([0, 1], size=(5, 5))
S = np.eye(5)
M_E = (np.ones((5, 5)) * 6)
M_E[(0, 0)] = 1
m0 = 2
rho = 0.5
res = cost_function(X, Y, S, M_E, E, m0, rho)
self.assertAlmostEqual(res, 6.5)
|
def test_F_t_random(self):
np.random.seed(0)
X = np.ones((5, 5))
Y = np.ones((5, 5))
E = np.random.choice([0, 1], size=(5, 5))
S = np.eye(5)
M_E = (np.ones((5, 5)) * 6)
M_E[(0, 0)] = 1
m0 = 2
rho = 0.5
res = cost_function(X, Y, S, M_E, E, m0, rho)
self.assertAlmostEqual(res, 6.5)<|docstring|>Test cost function on random values.<|endoftext|>
|
f4aebad85966d0a461c259fb03979acfff0e5a426a5f4dd4264bec71b50ef1dd
|
def test_gradF_t(self):
    """gradient_decent should produce the expected pair of gradients."""
    X = np.ones((5, 5))
    Y = np.ones((5, 5))
    mask = np.zeros((5, 5))
    mask[0, 1] = mask[1, 1] = 1
    observed = np.full((5, 5), 6.0)
    observed[0, 0] = 1
    result = gradient_decent(X, Y, np.eye(5), observed, mask, 2, 0.5)
    # Expected gradients expressed as constant rows.
    expected = np.array([
        [[1.0] * 5, [1.0] * 5, [2.0] * 5, [2.0] * 5, [2.0] * 5],
        [[2.0] * 5, [0.0] * 5, [2.0] * 5, [2.0] * 5, [2.0] * 5],
    ])
    npt.assert_allclose(expected, result)
|
Test gradient decent converges.
|
gemelli/tests/test_optspace.py
|
test_gradF_t
|
ElDeveloper/gemelli
| 32
|
python
|
def test_gradF_t(self):
X = np.ones((5, 5))
Y = np.ones((5, 5))
E = np.zeros((5, 5))
E[(0, 1)] = 1
E[(1, 1)] = 1
S = np.eye(5)
M_E = (np.ones((5, 5)) * 6)
M_E[(0, 0)] = 1
m0 = 2
rho = 0.5
res = gradient_decent(X, Y, S, M_E, E, m0, rho)
exp = np.array([[[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0]], [[2.0, 2.0, 2.0, 2.0, 2.0], [0.0, 0.0, 0.0, 0.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0]]])
npt.assert_allclose(exp, res)
|
def test_gradF_t(self):
X = np.ones((5, 5))
Y = np.ones((5, 5))
E = np.zeros((5, 5))
E[(0, 1)] = 1
E[(1, 1)] = 1
S = np.eye(5)
M_E = (np.ones((5, 5)) * 6)
M_E[(0, 0)] = 1
m0 = 2
rho = 0.5
res = gradient_decent(X, Y, S, M_E, E, m0, rho)
exp = np.array([[[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0]], [[2.0, 2.0, 2.0, 2.0, 2.0], [0.0, 0.0, 0.0, 0.0, 0.0], [2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0], [2.0, 2.0, 2.0, 2.0, 2.0]]])
npt.assert_allclose(exp, res)<|docstring|>Test gradient decent converges.<|endoftext|>
|
421d21338bdafe05d506179db54bad9f3b4c3cd782a455a3ae1cb855bae994f9
|
def test_Gp(self):
    """grassmann_manifold_two should converge on a near-constant matrix."""
    matrix = np.full((5, 5), 3.0)
    matrix[0, 0] = 2
    result = grassmann_manifold_two(matrix, 2, 5)
    # First row differs because of the perturbed [0, 0] entry.
    first_row = [1.08731273, 1.6309691, 1.6309691, 1.6309691, 1.6309691]
    other_row = [3.57804989] * 5
    expected = np.array([first_row] + [other_row] * 4)
    npt.assert_allclose(expected, result)
|
Test second grassmann manifold converges.
|
gemelli/tests/test_optspace.py
|
test_Gp
|
ElDeveloper/gemelli
| 32
|
python
|
def test_Gp(self):
X = (np.ones((5, 5)) * 3)
X[(0, 0)] = 2
m0 = 2
r = 5
res = grassmann_manifold_two(X, m0, r)
exp = np.array([[1.08731273, 1.6309691, 1.6309691, 1.6309691, 1.6309691], [3.57804989, 3.57804989, 3.57804989, 3.57804989, 3.57804989], [3.57804989, 3.57804989, 3.57804989, 3.57804989, 3.57804989], [3.57804989, 3.57804989, 3.57804989, 3.57804989, 3.57804989], [3.57804989, 3.57804989, 3.57804989, 3.57804989, 3.57804989]])
npt.assert_allclose(exp, res)
|
def test_Gp(self):
X = (np.ones((5, 5)) * 3)
X[(0, 0)] = 2
m0 = 2
r = 5
res = grassmann_manifold_two(X, m0, r)
exp = np.array([[1.08731273, 1.6309691, 1.6309691, 1.6309691, 1.6309691], [3.57804989, 3.57804989, 3.57804989, 3.57804989, 3.57804989], [3.57804989, 3.57804989, 3.57804989, 3.57804989, 3.57804989], [3.57804989, 3.57804989, 3.57804989, 3.57804989, 3.57804989], [3.57804989, 3.57804989, 3.57804989, 3.57804989, 3.57804989]])
npt.assert_allclose(exp, res)<|docstring|>Test second grassmann manifold converges.<|endoftext|>
|
a729e4af81cd51d101e90b7c4d47f68a34efac6da7254a56f925c0d52cf98181
|
def test_getoptT(self):
    """line_search should return the expected step size."""
    X = np.ones((5, 5))
    Y = np.ones((5, 5))
    mask = np.zeros((5, 5))
    mask[0, 1] = mask[1, 1] = 1
    S = np.eye(5)
    observed = np.full((5, 5), 6.0)
    observed[0, 0] = 1
    # Gradients feed directly into the line search.
    W, Z = gradient_decent(X, Y, S, observed, mask, 2, 0.5)
    result = line_search(X, W, Y, Z, S, observed, mask, 2, 0.5)
    npt.assert_allclose(-9.5367431640625e-08, result)
|
Test gradient decent line search.
|
gemelli/tests/test_optspace.py
|
test_getoptT
|
ElDeveloper/gemelli
| 32
|
python
|
def test_getoptT(self):
X = np.ones((5, 5))
Y = np.ones((5, 5))
E = np.zeros((5, 5))
E[(0, 1)] = 1
E[(1, 1)] = 1
S = np.eye(5)
M_E = (np.ones((5, 5)) * 6)
M_E[(0, 0)] = 1
m0 = 2
rho = 0.5
(W, Z) = gradient_decent(X, Y, S, M_E, E, m0, rho)
res = line_search(X, W, Y, Z, S, M_E, E, m0, rho)
exp = (- 9.5367431640625e-08)
npt.assert_allclose(exp, res)
|
def test_getoptT(self):
X = np.ones((5, 5))
Y = np.ones((5, 5))
E = np.zeros((5, 5))
E[(0, 1)] = 1
E[(1, 1)] = 1
S = np.eye(5)
M_E = (np.ones((5, 5)) * 6)
M_E[(0, 0)] = 1
m0 = 2
rho = 0.5
(W, Z) = gradient_decent(X, Y, S, M_E, E, m0, rho)
res = line_search(X, W, Y, Z, S, M_E, E, m0, rho)
exp = (- 9.5367431640625e-08)
npt.assert_allclose(exp, res)<|docstring|>Test gradient decent line search.<|endoftext|>
|
84b0e60020bad6901f9fc9b2877994b0d5bde59a0431b2776b33ffeb24742c3d
|
def test_getoptS_small(self):
    """singular_values should match precomputed values on the small test set."""
    data = loadmat(get_data_path('small_test.mat'))
    dense = np.array(data['M_E'].todense())
    result = singular_values(data['x'], data['y'], dense, data['E'])
    expected = np.array([
        [0.93639499, 0.07644197, -0.02828782],
        [-0.03960841, 0.60787383, 0.00521257],
        [0.00729038, 0.00785834, 0.67853083],
    ])
    npt.assert_allclose(result, expected, atol=1e-05)
|
Test singular values from U and V.
|
gemelli/tests/test_optspace.py
|
test_getoptS_small
|
ElDeveloper/gemelli
| 32
|
python
|
def test_getoptS_small(self):
data = loadmat(get_data_path('small_test.mat'))
M_E = np.array(data['M_E'].todense())
E = data['E']
x = data['x']
y = data['y']
res = singular_values(x, y, M_E, E)
exp = np.array([[0.93639499, 0.07644197, (- 0.02828782)], [(- 0.03960841), 0.60787383, 0.00521257], [0.00729038, 0.00785834, 0.67853083]])
npt.assert_allclose(res, exp, atol=1e-05)
|
def test_getoptS_small(self):
data = loadmat(get_data_path('small_test.mat'))
M_E = np.array(data['M_E'].todense())
E = data['E']
x = data['x']
y = data['y']
res = singular_values(x, y, M_E, E)
exp = np.array([[0.93639499, 0.07644197, (- 0.02828782)], [(- 0.03960841), 0.60787383, 0.00521257], [0.00729038, 0.00785834, 0.67853083]])
npt.assert_allclose(res, exp, atol=1e-05)<|docstring|>Test singular values from U and V.<|endoftext|>
|
c582611bbdbd8ec8428a197bb0267f936ec92f6b4aa353eb4f81c9ba0b3f4431
|
def test_optspace_original(self):
    """Test OptSpace converges on the large test dataset.

    Fixes over the original: the ``.mat`` file is loaded once instead of
    twice, the ``np.float`` alias (deprecated in NumPy 1.20, removed in
    1.24) is replaced with the builtin ``float``, and the column-reversal
    slice is written in valid Python syntax.
    """
    data = loadmat(get_data_path('large_test.mat'))
    M0 = data['M0'].astype(float)
    M_E = np.array(data['M_E'].todense()).astype(float)
    (X, S, Y) = OptSpace(n_components=3, max_iterations=11, tol=1e-08).solve(M_E)
    # Reconstruct with the factor columns reversed before comparing to M0.
    err = (X[:, ::-1].dot(S).dot(Y[:, ::-1].T) - M0)
    (n, m) = M0.shape
    # Normalized Frobenius reconstruction error.
    res = (norm(err, 'fro') / np.sqrt(m * n))
    assert_array_almost_equal(res, 0.179, decimal=1)
|
Test OptSpace converges on test dataset.
|
gemelli/tests/test_optspace.py
|
test_optspace_original
|
ElDeveloper/gemelli
| 32
|
python
|
def test_optspace_original(self):
M0 = loadmat(get_data_path('large_test.mat'))['M0']
M_E = loadmat(get_data_path('large_test.mat'))['M_E']
M0 = M0.astype(np.float)
M_E = np.array(M_E.todense()).astype(np.float)
(X, S, Y) = OptSpace(n_components=3, max_iterations=11, tol=1e-08).solve(M_E)
err = (X[(:, ::(- 1))].dot(S).dot(Y[(:, ::(- 1))].T) - M0)
(n, m) = M0.shape
res = (norm(err, 'fro') / np.sqrt((m * n)))
exp = 0.179
assert_array_almost_equal(res, exp, decimal=1)
|
def test_optspace_original(self):
M0 = loadmat(get_data_path('large_test.mat'))['M0']
M_E = loadmat(get_data_path('large_test.mat'))['M_E']
M0 = M0.astype(np.float)
M_E = np.array(M_E.todense()).astype(np.float)
(X, S, Y) = OptSpace(n_components=3, max_iterations=11, tol=1e-08).solve(M_E)
err = (X[(:, ::(- 1))].dot(S).dot(Y[(:, ::(- 1))].T) - M0)
(n, m) = M0.shape
res = (norm(err, 'fro') / np.sqrt((m * n)))
exp = 0.179
assert_array_almost_equal(res, exp, decimal=1)<|docstring|>Test OptSpace converges on test dataset.<|endoftext|>
|
c7f17993c74edc17c4bfe73d5ba877bc400f6567556ad49d04653fffe3ad3531
|
def test_optspace_ordering(self):
    """svd_sort should reorder factor columns into a reproducible order."""
    s_in = np.array([[5, 1, 4], [7, 2, 9], [8, 0, 3]])
    U_in = np.array([[6, 0, 3], [7, 1, 4], [8, 2, 5]])
    V_in = U_in.copy()
    s_want = np.array([[5, 4, 1], [8, 3, 0], [7, 9, 2]])
    U_want = np.array([[6, 3, 0], [7, 4, 1], [8, 5, 2]])
    V_want = U_want.copy()
    U_out, s_out, V_out = svd_sort(U_in, s_in, V_in)
    assert_array_almost_equal(U_out, U_want, decimal=3)
    assert_array_almost_equal(s_out, s_want, decimal=3)
    assert_array_almost_equal(V_out, V_want, decimal=3)
|
Test OptSpace produces reproducible loadings.
|
gemelli/tests/test_optspace.py
|
test_optspace_ordering
|
ElDeveloper/gemelli
| 32
|
python
|
def test_optspace_ordering(self):
s_exp = np.array([[5, 4, 1], [8, 3, 0], [7, 9, 2]])
U_exp = np.array([[6, 3, 0], [7, 4, 1], [8, 5, 2]])
V_exp = np.array([[6, 3, 0], [7, 4, 1], [8, 5, 2]])
s_test = np.array([[5, 1, 4], [7, 2, 9], [8, 0, 3]])
U_test = np.array([[6, 0, 3], [7, 1, 4], [8, 2, 5]])
V_test = np.array([[6, 0, 3], [7, 1, 4], [8, 2, 5]])
(U_res, s_res, V_res) = svd_sort(U_test, s_test, V_test)
assert_array_almost_equal(U_res, U_exp, decimal=3)
assert_array_almost_equal(s_res, s_exp, decimal=3)
assert_array_almost_equal(V_res, V_exp, decimal=3)
|
def test_optspace_ordering(self):
s_exp = np.array([[5, 4, 1], [8, 3, 0], [7, 9, 2]])
U_exp = np.array([[6, 3, 0], [7, 4, 1], [8, 5, 2]])
V_exp = np.array([[6, 3, 0], [7, 4, 1], [8, 5, 2]])
s_test = np.array([[5, 1, 4], [7, 2, 9], [8, 0, 3]])
U_test = np.array([[6, 0, 3], [7, 1, 4], [8, 2, 5]])
V_test = np.array([[6, 0, 3], [7, 1, 4], [8, 2, 5]])
(U_res, s_res, V_res) = svd_sort(U_test, s_test, V_test)
assert_array_almost_equal(U_res, U_exp, decimal=3)
assert_array_almost_equal(s_res, s_exp, decimal=3)
assert_array_almost_equal(V_res, V_exp, decimal=3)<|docstring|>Test OptSpace produces reproducible loadings.<|endoftext|>
|
2a108025845837c9c1028125e0867bbf499f1b07ec050dc78965f5eba330321e
|
@jit
def eval_polynomial(P, x):
    """Evaluate the polynomial with coefficient vector ``P`` at ``x``.

    ``P[0]`` holds the highest-order coefficient. Uses Horner's method:
    one multiply and one add per coefficient.
    """
    acc = 0
    for c in P:
        acc = (acc * x) + c
    return acc
|
Compute polynomial P(x) where P is a vector of coefficients, highest
order coefficient at P[0]. Uses Horner's Method.
|
utils/fixbad.py
|
eval_polynomial
|
emit-sds/emit-sds-l1b
| 0
|
python
|
@jit
def eval_polynomial(P, x):
"\n Compute polynomial P(x) where P is a vector of coefficients, highest\n order coefficient at P[0]. Uses Horner's Method.\n "
result = 0
for coeff in P:
result = ((x * result) + coeff)
return result
|
@jit
def eval_polynomial(P, x):
"\n Compute polynomial P(x) where P is a vector of coefficients, highest\n order coefficient at P[0]. Uses Horner's Method.\n "
result = 0
for coeff in P:
result = ((x * result) + coeff)
return result<|docstring|>Compute polynomial P(x) where P is a vector of coefficients, highest
order coefficient at P[0]. Uses Horner's Method.<|endoftext|>
|
a6a7641d0897be8d7908b4e5501c448376065a8b207208b72f00f0e64140ec76
|
def experiment(save_key, model):
    """Shared driver for all experiments: train, evaluate, report."""
    output_dir = core.experiment_output_path()
    # get_paths returns a 6-tuple; only the first two entries are needed here.
    save_path, model_save_path, _, _, _, _ = evaluate.get_paths(output_dir, save_key)
    model, history, dat = train(model, model_save_path)
    run_evaluation(output_dir, save_key, history, dat, model)
    print('done!')
    print('Results saved to {}'.format(save_path))
|
common code for all experiments
|
deepsalience/experiment.py
|
experiment
|
gdoras/ismir2017-deepsalience
| 67
|
python
|
def experiment(save_key, model):
'\n '
exper_dir = core.experiment_output_path()
(save_path, model_save_path, _, _, _, _) = evaluate.get_paths(exper_dir, save_key)
(model, history, dat) = train(model, model_save_path)
run_evaluation(exper_dir, save_key, history, dat, model)
print('done!')
print('Results saved to {}'.format(save_path))
|
def experiment(save_key, model):
'\n '
exper_dir = core.experiment_output_path()
(save_path, model_save_path, _, _, _, _) = evaluate.get_paths(exper_dir, save_key)
(model, history, dat) = train(model, model_save_path)
run_evaluation(exper_dir, save_key, history, dat, model)
print('done!')
print('Results saved to {}'.format(save_path))<|docstring|>common code for all experiments<|endoftext|>
|
7010a30ea905ff780096ed90cce8151a5c1675ec9817e77a8c33db1ee2b4137f
|
def run_command(self, name, *args, **options):
    """
    Run a management command, capturing its stdout/stderr along with any
    exception it raises.
    """
    command_runner = options.pop('command_runner', call_command)
    stdin_fileobj = options.pop('stdin_fileobj', None)
    options.setdefault('verbosity', 1)
    options.setdefault('interactive', False)
    # Remember the real streams so they can be restored afterwards.
    saved_streams = (sys.stdin, sys.stdout, sys.stderr)
    if stdin_fileobj:
        sys.stdin = stdin_fileobj
    sys.stdout = StringIO.StringIO()
    sys.stderr = StringIO.StringIO()
    result = None
    try:
        result = command_runner(name, *args, **options)
    except Exception as e:
        # The exception itself becomes the result for the caller to inspect.
        result = e
    finally:
        captured_stdout = sys.stdout.getvalue()
        captured_stderr = sys.stderr.getvalue()
        (sys.stdin, sys.stdout, sys.stderr) = saved_streams
    return (result, captured_stdout, captured_stderr)
|
Run a management command and capture its stdout/stderr along with any
exceptions.
|
awx/main/tests/old/commands/commands_monolithic.py
|
run_command
|
tota45/awx
| 1
|
python
|
def run_command(self, name, *args, **options):
'\n Run a management command and capture its stdout/stderr along with any\n exceptions.\n '
command_runner = options.pop('command_runner', call_command)
stdin_fileobj = options.pop('stdin_fileobj', None)
options.setdefault('verbosity', 1)
options.setdefault('interactive', False)
original_stdin = sys.stdin
original_stdout = sys.stdout
original_stderr = sys.stderr
if stdin_fileobj:
sys.stdin = stdin_fileobj
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
result = None
try:
result = command_runner(name, *args, **options)
except Exception as e:
result = e
finally:
captured_stdout = sys.stdout.getvalue()
captured_stderr = sys.stderr.getvalue()
sys.stdin = original_stdin
sys.stdout = original_stdout
sys.stderr = original_stderr
return (result, captured_stdout, captured_stderr)
|
def run_command(self, name, *args, **options):
'\n Run a management command and capture its stdout/stderr along with any\n exceptions.\n '
command_runner = options.pop('command_runner', call_command)
stdin_fileobj = options.pop('stdin_fileobj', None)
options.setdefault('verbosity', 1)
options.setdefault('interactive', False)
original_stdin = sys.stdin
original_stdout = sys.stdout
original_stderr = sys.stderr
if stdin_fileobj:
sys.stdin = stdin_fileobj
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
result = None
try:
result = command_runner(name, *args, **options)
except Exception as e:
result = e
finally:
captured_stdout = sys.stdout.getvalue()
captured_stderr = sys.stderr.getvalue()
sys.stdin = original_stdin
sys.stdout = original_stdout
sys.stderr = original_stderr
return (result, captured_stdout, captured_stderr)<|docstring|>Run a management command and capture its stdout/stderr along with any
exceptions.<|endoftext|>
|
255813e30c0b6190e06cbfe76a8ab3f8350eaae189285eef04ab82f5602da823
|
def write_default(config_path: Path) -> None:
    """Write the default configuration to ``config_path``."""
    cfg = configparser.ConfigParser()
    cfg.read_dict(DEFAULT_CONFIG)
    with config_path.open('w') as handle:
        cfg.write(handle)
|
Write the default config to file.
|
rofi_tpb/config.py
|
write_default
|
loiccoyle/rofi-tpb
| 3
|
python
|
def write_default(config_path: Path) -> None:
parser = configparser.ConfigParser()
parser.read_dict(DEFAULT_CONFIG)
with config_path.open('w') as fp:
parser.write(fp)
|
def write_default(config_path: Path) -> None:
parser = configparser.ConfigParser()
parser.read_dict(DEFAULT_CONFIG)
with config_path.open('w') as fp:
parser.write(fp)<|docstring|>Write the default config to file.<|endoftext|>
|
5768b6a0c09be98e6ff6db75ebed69f0d7ce7815768ee8c229a173e872f9d4f9
|
def load_config(config_path: Path) -> configparser.ConfigParser:
    """Parse the user config file on top of the defaults.

    Returns:
        The populated ConfigParser instance.
    """
    cfg = configparser.ConfigParser()
    # Defaults first, then user values override them.
    cfg.read_dict(DEFAULT_CONFIG)
    cfg.read(config_path)
    return cfg
|
Parse the config and return the ConfigParser instance.
Returns:
Parsed ConfigPraser instance.
|
rofi_tpb/config.py
|
load_config
|
loiccoyle/rofi-tpb
| 3
|
python
|
def load_config(config_path: Path) -> configparser.ConfigParser:
'Parse the config and return the ConfigParser instance.\n\n Returns:\n Parsed ConfigPraser instance.\n '
parser = configparser.ConfigParser()
parser.read_dict(DEFAULT_CONFIG)
parser.read(config_path)
return parser
|
def load_config(config_path: Path) -> configparser.ConfigParser:
'Parse the config and return the ConfigParser instance.\n\n Returns:\n Parsed ConfigPraser instance.\n '
parser = configparser.ConfigParser()
parser.read_dict(DEFAULT_CONFIG)
parser.read(config_path)
return parser<|docstring|>Parse the config and return the ConfigParser instance.
Returns:
Parsed ConfigPraser instance.<|endoftext|>
|
25cc1e207d8e763f510015eed831492f59c5e02c2f34918a99d2a54289217a41
|
def save_pairs(filename, pairs, verbose=True):
    """Save pairwise ranking indexes to a compressed ``.npz`` file.

    pairs - N-by-2 numpy array of index values
    """
    # type() check deliberately rejects ndarray subclasses as well.
    assert (type(pairs) is np.ndarray)
    assert ((pairs.ndim == 2) and (pairs.shape[1] == 2))
    np.savez(filename, pairs=pairs)
    if verbose:
        print('{} pairwise ranking indexes saved to {}'.format(pairs.shape[0], filename))
|
Save pairwise ranking indexes to a file
pairs - N-by-2 numpy array of index values
|
code/rsir.py
|
save_pairs
|
joebockhorst/ecml2017
| 1
|
python
|
def save_pairs(filename, pairs, verbose=True):
' \n Save pairwise ranking indexes to a file\n\n pairs - N-by-2 numpy array of index values \n '
assert (type(pairs) is np.ndarray)
assert ((pairs.ndim == 2) and (pairs.shape[1] == 2))
d = {'pairs': pairs}
np.savez(filename, **d)
if verbose:
print('{} pairwise ranking indexes saved to {}'.format(pairs.shape[0], filename))
|
def save_pairs(filename, pairs, verbose=True):
' \n Save pairwise ranking indexes to a file\n\n pairs - N-by-2 numpy array of index values \n '
assert (type(pairs) is np.ndarray)
assert ((pairs.ndim == 2) and (pairs.shape[1] == 2))
d = {'pairs': pairs}
np.savez(filename, **d)
if verbose:
print('{} pairwise ranking indexes saved to {}'.format(pairs.shape[0], filename))<|docstring|>Save pairwise ranking indexes to a file
pairs - N-by-2 numpy array of index values<|endoftext|>
|
1eea5ad20af5f1a9c2d5f9b24c1eecf3503ed45e473a67853735eaae9473d038
|
def load_pairs(filename, verbose=False):
    """Load ranking indexes previously saved with save_pairs()."""
    if verbose:
        print('loading pairs from {}'.format(filename))
    return np.load(filename)['pairs']
|
Load ranking indexes, previously saved with save_pairs(), from file
|
code/rsir.py
|
load_pairs
|
joebockhorst/ecml2017
| 1
|
python
|
def load_pairs(filename, verbose=False):
' '
if verbose:
print('loading pairs from {}'.format(filename))
result = np.load(filename)['pairs']
return result
|
def load_pairs(filename, verbose=False):
' '
if verbose:
print('loading pairs from {}'.format(filename))
result = np.load(filename)['pairs']
return result<|docstring|>Load ranking indexes, previously saved with save_pairs(), from file<|endoftext|>
|
476236ec02c131ff19d1da0a749092ca49d335d9232a7e4b59e9624c881bb5e4
|
def create_ranking_pairs(y, elgible=None, verbose=False):
    """Build index pairs for training a pairwise ranking classifier.

    Parameters
    ----------
    y : listlike, shape = [n_examples]
        Target labels.
    elgible : function(y1, y2), optional
        Returns True when labels y1 and y2 may form a ranking pair.
        Defaults to ``lambda y1, y2: y1 != y2``.
    verbose : bool, optional
        Print progress every million candidate pairs.

    Returns
    -------
    array : shape = [-1, 2]
        Rows of index pairs; a pair (i, j) means y[i] < y[j].

    Example
    -------
    >>> create_ranking_pairs([0, 1, 1, 0]).tolist()
    [[0, 1], [0, 2], [3, 1], [3, 2]]

    >>> create_ranking_pairs([False, True, False]).tolist()
    [[0, 1], [2, 1]]
    """
    if verbose:
        print('creating_ranking_pairs()')
    y = np.array(y)
    if ((y.ndim > 2) or ((y.ndim == 2) and (y.shape[1] != 1))):
        raise ValueError('y should be 1-dim or N-by-1')
    if (elgible is None):
        elgible = (lambda y1, y2: (y1 != y2))
    pairs = []
    N_check = ((y.size * (y.size - 1)) / 2)
    candidates = itertools.combinations(range(y.size), 2)
    for (idx, (i, j)) in enumerate(candidates):
        if (verbose and ((idx % 1000000.0) == 0)):
            print('checking pair {} of {} ({:.2f}%) : ({}, {})'.format(idx, N_check, ((100.0 * idx) / N_check), i, j))
        (yi, yj) = (y[i], y[j])
        if elgible(yi, yj):
            # Orient the pair so the smaller label comes first.
            pairs.append((i, j) if (yi < yj) else (j, i))
    return np.array(pairs)
|
Return pairs of indexes elgible for training a pairwise ranking classifier.
Parameters
----------
y : listlike, shape = [n_examples]
elgible : function(y1, y2), optional
Elgibility function that returns True when a valid ranking
pair can be made from examples with labels y1 and y2.
Default is lambda y1, y2: y1 != y2
Return
------
array : shape = [-1, 2]
An array with pairs in the rows. Returned pair (i, j) means
that y[i] < y[j]
Example
-------
>>> create_ranking_pairs([0,1,1,0])
[(0, 1), (0, 2), (3, 1), (3, 2)]
>>> create_ranking_pairs([False, True, False])
[(0, 1), (2, 1)]
|
code/rsir.py
|
create_ranking_pairs
|
joebockhorst/ecml2017
| 1
|
python
|
def create_ranking_pairs(y, elgible=None, verbose=False):
'Return pairs of indexes elgible for training a pairwise ranking classifier.\n \n Parameters\n ----------\n y : listlike, shape = [n_examples]\n elgible : function(y1, y2), optional\n Elgibility function that returns True when a valid ranking\n pair can be made from examples with labels y1 and y2. \n Default is lambda y1, y2: y1 != y2\n \n \n Return\n ------\n array : shape = [-1, 2]\n An array with pairs in the rows. Returned pair (i, j) means \n that y[i] < y[j]\n \n\n Example\n -------\n >>> create_ranking_pairs([0,1,1,0])\n [(0, 1), (0, 2), (3, 1), (3, 2)]\n \n >>> create_ranking_pairs([False, True, False])\n [(0, 1), (2, 1)]\n '
if verbose:
print('creating_ranking_pairs()')
y = np.array(y)
if ((y.ndim > 2) or ((y.ndim == 2) and (y.shape[1] != 1))):
raise ValueError('y should be 1-dim or N-by-1')
elgible = (elgible if (not (elgible is None)) else (lambda y1, y2: (y1 != y2)))
result = []
comb = itertools.combinations(range(y.size), 2)
N_check = ((y.size * (y.size - 1)) / 2)
for (idx, (i, j)) in enumerate(comb):
if (verbose and ((idx % 1000000.0) == 0)):
print('checking pair {} of {} ({:.2f}%) : ({}, {})'.format(idx, N_check, ((100.0 * idx) / N_check), i, j))
(yi, yj) = (y[i], y[j])
if elgible(yi, yj):
if (yi < yj):
result.append((i, j))
else:
result.append((j, i))
return np.array(result)
|
def create_ranking_pairs(y, elgible=None, verbose=False):
'Return pairs of indexes elgible for training a pairwise ranking classifier.\n \n Parameters\n ----------\n y : listlike, shape = [n_examples]\n elgible : function(y1, y2), optional\n Elgibility function that returns True when a valid ranking\n pair can be made from examples with labels y1 and y2. \n Default is lambda y1, y2: y1 != y2\n \n \n Return\n ------\n array : shape = [-1, 2]\n An array with pairs in the rows. Returned pair (i, j) means \n that y[i] < y[j]\n \n\n Example\n -------\n >>> create_ranking_pairs([0,1,1,0])\n [(0, 1), (0, 2), (3, 1), (3, 2)]\n \n >>> create_ranking_pairs([False, True, False])\n [(0, 1), (2, 1)]\n '
if verbose:
print('creating_ranking_pairs()')
y = np.array(y)
if ((y.ndim > 2) or ((y.ndim == 2) and (y.shape[1] != 1))):
raise ValueError('y should be 1-dim or N-by-1')
elgible = (elgible if (not (elgible is None)) else (lambda y1, y2: (y1 != y2)))
result = []
comb = itertools.combinations(range(y.size), 2)
N_check = ((y.size * (y.size - 1)) / 2)
for (idx, (i, j)) in enumerate(comb):
if (verbose and ((idx % 1000000.0) == 0)):
print('checking pair {} of {} ({:.2f}%) : ({}, {})'.format(idx, N_check, ((100.0 * idx) / N_check), i, j))
(yi, yj) = (y[i], y[j])
if elgible(yi, yj):
if (yi < yj):
result.append((i, j))
else:
result.append((j, i))
return np.array(result)<|docstring|>Return pairs of indexes elgible for training a pairwise ranking classifier.
Parameters
----------
y : listlike, shape = [n_examples]
elgible : function(y1, y2), optional
Elgibility function that returns True when a valid ranking
pair can be made from examples with labels y1 and y2.
Default is lambda y1, y2: y1 != y2
Return
------
array : shape = [-1, 2]
An array with pairs in the rows. Returned pair (i, j) means
that y[i] < y[j]
Example
-------
>>> create_ranking_pairs([0,1,1,0])
[(0, 1), (0, 2), (3, 1), (3, 2)]
>>> create_ranking_pairs([False, True, False])
[(0, 1), (2, 1)]<|endoftext|>
|
3af7da155b7a4a6d8ccc730b9aa5e0adbf54453feb384f2dca1f69e32237e380
|
def sample_pairwise_examples(n, X, pairs, with_replacement=True, whitelist=None, seed=None):
'Sample pairwise examples\n \n Parameters\n ----------\n n : int, number of pairwise examples to sample\n X : array, shape = [n_examples, n_features] \n array of original feature values\n pairs : array, shape = [n_pairs, 2]\n Pairs to sample from. For format see create_ranking_pairs()\n with_replacement : bool, optional\n If True sample with replacement\n whitelist : Iterable, optional\n If set provides a list of elgible example indexes. A pair (i, j) will\n only be returned if both i and j are in the elgible list. Helpful when\n splitting examples X for cross-validation purposes.\n seed : int, default=None\n If not None, np.random.seed(seed) is called prior to sampling\n \n Returns\n -------\n X_pairwise : array, shape = [n, num_features]\n The pairwise examples. \n Y_pairwise : array, shape = [n]\n Class values Y_out will be approximately balanced.\n sampled_pairs : array, shape = [n, 2]\n The list of sample example indexes. If kth element in sample_pairs \n is (i, j) means X_pairwise[k,:] = X[i, :] - X[j, :] \n \n '
if (whitelist is None):
pairs = pairs
else:
whitelist = set(whitelist)
pairs = np.array([(p[0], p[1]) for p in pairs if ((p[0] in whitelist) and (p[1] in whitelist))])
if (not (seed is None)):
np.random.seed(seed)
N = pairs.shape[0]
if with_replacement:
indexes = np.random.randint(N, size=n)
else:
if (N > n):
raise ValueError('Cannot sample n times without replacement from set smaller than n')
indexes = np.random.permutation(N)[:n]
X_pairwise = (np.zeros((n, X.shape[1])) + np.nan)
Y_pairwise = (np.zeros((n,)) + np.nan)
sampled_pairs = np.zeros((n, 2), dtype=int)
for (ii, idx) in enumerate(indexes):
(i, j) = pairs[(idx, :)]
sampled_pairs[(ii, :)] = (i, j)
if ((ii % 2) == 0):
X_pairwise[(ii, :)] = (X[(j, :)] - X[(i, :)])
Y_pairwise[ii] = 1
sampled_pairs[(ii, :)] = (i, j)
else:
X_pairwise[(ii, :)] = (X[(i, :)] - X[(j, :)])
Y_pairwise[ii] = (- 1)
sampled_pairs[(ii, :)] = (j, i)
assert (np.isnan(X_pairwise).sum() == 0)
assert (np.isnan(Y_pairwise).sum() == 0)
return (X_pairwise, Y_pairwise, sampled_pairs)
|
Sample pairwise examples
Parameters
----------
n : int, number of pairwise examples to sample
X : array, shape = [n_examples, n_features]
array of original feature values
pairs : array, shape = [n_pairs, 2]
Pairs to sample from. For format see create_ranking_pairs()
with_replacement : bool, optional
If True sample with replacement
whitelist : Iterable, optional
If set provides a list of elgible example indexes. A pair (i, j) will
only be returned if both i and j are in the elgible list. Helpful when
splitting examples X for cross-validation purposes.
seed : int, default=None
If not None, np.random.seed(seed) is called prior to sampling
Returns
-------
X_pairwise : array, shape = [n, num_features]
The pairwise examples.
Y_pairwise : array, shape = [n]
Class values Y_out will be approximately balanced.
sampled_pairs : array, shape = [n, 2]
The list of sample example indexes. If kth element in sample_pairs
is (i, j) means X_pairwise[k,:] = X[i, :] - X[j, :]
|
code/rsir.py
|
sample_pairwise_examples
|
joebockhorst/ecml2017
| 1
|
python
|
def sample_pairwise_examples(n, X, pairs, with_replacement=True, whitelist=None, seed=None):
'Sample pairwise examples\n \n Parameters\n ----------\n n : int, number of pairwise examples to sample\n X : array, shape = [n_examples, n_features] \n array of original feature values\n pairs : array, shape = [n_pairs, 2]\n Pairs to sample from. For format see create_ranking_pairs()\n with_replacement : bool, optional\n If True sample with replacement\n whitelist : Iterable, optional\n If set provides a list of elgible example indexes. A pair (i, j) will\n only be returned if both i and j are in the elgible list. Helpful when\n splitting examples X for cross-validation purposes.\n seed : int, default=None\n If not None, np.random.seed(seed) is called prior to sampling\n \n Returns\n -------\n X_pairwise : array, shape = [n, num_features]\n The pairwise examples. \n Y_pairwise : array, shape = [n]\n Class values Y_out will be approximately balanced.\n sampled_pairs : array, shape = [n, 2]\n The list of sample example indexes. If kth element in sample_pairs \n is (i, j) means X_pairwise[k,:] = X[i, :] - X[j, :] \n \n '
if (whitelist is None):
pairs = pairs
else:
whitelist = set(whitelist)
pairs = np.array([(p[0], p[1]) for p in pairs if ((p[0] in whitelist) and (p[1] in whitelist))])
if (not (seed is None)):
np.random.seed(seed)
N = pairs.shape[0]
if with_replacement:
indexes = np.random.randint(N, size=n)
else:
if (N > n):
raise ValueError('Cannot sample n times without replacement from set smaller than n')
indexes = np.random.permutation(N)[:n]
X_pairwise = (np.zeros((n, X.shape[1])) + np.nan)
Y_pairwise = (np.zeros((n,)) + np.nan)
sampled_pairs = np.zeros((n, 2), dtype=int)
for (ii, idx) in enumerate(indexes):
(i, j) = pairs[(idx, :)]
sampled_pairs[(ii, :)] = (i, j)
if ((ii % 2) == 0):
X_pairwise[(ii, :)] = (X[(j, :)] - X[(i, :)])
Y_pairwise[ii] = 1
sampled_pairs[(ii, :)] = (i, j)
else:
X_pairwise[(ii, :)] = (X[(i, :)] - X[(j, :)])
Y_pairwise[ii] = (- 1)
sampled_pairs[(ii, :)] = (j, i)
assert (np.isnan(X_pairwise).sum() == 0)
assert (np.isnan(Y_pairwise).sum() == 0)
return (X_pairwise, Y_pairwise, sampled_pairs)
|
def sample_pairwise_examples(n, X, pairs, with_replacement=True, whitelist=None, seed=None):
'Sample pairwise examples\n \n Parameters\n ----------\n n : int, number of pairwise examples to sample\n X : array, shape = [n_examples, n_features] \n array of original feature values\n pairs : array, shape = [n_pairs, 2]\n Pairs to sample from. For format see create_ranking_pairs()\n with_replacement : bool, optional\n If True sample with replacement\n whitelist : Iterable, optional\n If set provides a list of elgible example indexes. A pair (i, j) will\n only be returned if both i and j are in the elgible list. Helpful when\n splitting examples X for cross-validation purposes.\n seed : int, default=None\n If not None, np.random.seed(seed) is called prior to sampling\n \n Returns\n -------\n X_pairwise : array, shape = [n, num_features]\n The pairwise examples. \n Y_pairwise : array, shape = [n]\n Class values Y_out will be approximately balanced.\n sampled_pairs : array, shape = [n, 2]\n The list of sample example indexes. If kth element in sample_pairs \n is (i, j) means X_pairwise[k,:] = X[i, :] - X[j, :] \n \n '
if (whitelist is None):
pairs = pairs
else:
whitelist = set(whitelist)
pairs = np.array([(p[0], p[1]) for p in pairs if ((p[0] in whitelist) and (p[1] in whitelist))])
if (not (seed is None)):
np.random.seed(seed)
N = pairs.shape[0]
if with_replacement:
indexes = np.random.randint(N, size=n)
else:
if (N > n):
raise ValueError('Cannot sample n times without replacement from set smaller than n')
indexes = np.random.permutation(N)[:n]
X_pairwise = (np.zeros((n, X.shape[1])) + np.nan)
Y_pairwise = (np.zeros((n,)) + np.nan)
sampled_pairs = np.zeros((n, 2), dtype=int)
for (ii, idx) in enumerate(indexes):
(i, j) = pairs[(idx, :)]
sampled_pairs[(ii, :)] = (i, j)
if ((ii % 2) == 0):
X_pairwise[(ii, :)] = (X[(j, :)] - X[(i, :)])
Y_pairwise[ii] = 1
sampled_pairs[(ii, :)] = (i, j)
else:
X_pairwise[(ii, :)] = (X[(i, :)] - X[(j, :)])
Y_pairwise[ii] = (- 1)
sampled_pairs[(ii, :)] = (j, i)
assert (np.isnan(X_pairwise).sum() == 0)
assert (np.isnan(Y_pairwise).sum() == 0)
return (X_pairwise, Y_pairwise, sampled_pairs)<|docstring|>Sample pairwise examples
Parameters
----------
n : int, number of pairwise examples to sample
X : array, shape = [n_examples, n_features]
array of original feature values
pairs : array, shape = [n_pairs, 2]
Pairs to sample from. For format see create_ranking_pairs()
with_replacement : bool, optional
If True sample with replacement
whitelist : Iterable, optional
If set provides a list of elgible example indexes. A pair (i, j) will
only be returned if both i and j are in the elgible list. Helpful when
splitting examples X for cross-validation purposes.
seed : int, default=None
If not None, np.random.seed(seed) is called prior to sampling
Returns
-------
X_pairwise : array, shape = [n, num_features]
The pairwise examples.
Y_pairwise : array, shape = [n]
Class values Y_out will be approximately balanced.
sampled_pairs : array, shape = [n, 2]
The list of sample example indexes. If kth element in sample_pairs
is (i, j) means X_pairwise[k,:] = X[i, :] - X[j, :]<|endoftext|>
|
78fb02b9e8dacda1bb0ce0bde0131263bffcb84bfa10bdb2690d3068e35b12f8
|
def __init__(self, mask_size=100, pr_args={}, ir_args={'out_of_bounds': 'clip'}):
'\n Parameters\n ----------\n mask_size : int, (default=100)\n Length of the mask for smoothing rank scores\n pr_args : dict, (default={})\n Keyword arguments to PairwiseRankClf constructor\n ir_args : dict, (default={"out_of_bounds":"clip"})\n Keyword arguments to IsotonicRegression constructor\n\n '
self.ir_args = ir_args
self.pr_args = pr_args
self.pr_clf = PairwiseRankClf(**pr_args)
self.ir_model = IsotonicRegression(**ir_args)
self.mask_size = mask_size
|
Parameters
----------
mask_size : int, (default=100)
Length of the mask for smoothing rank scores
pr_args : dict, (default={})
Keyword arguments to PairwiseRankClf constructor
ir_args : dict, (default={"out_of_bounds":"clip"})
Keyword arguments to IsotonicRegression constructor
|
code/rsir.py
|
__init__
|
joebockhorst/ecml2017
| 1
|
python
|
def __init__(self, mask_size=100, pr_args={}, ir_args={'out_of_bounds': 'clip'}):
'\n Parameters\n ----------\n mask_size : int, (default=100)\n Length of the mask for smoothing rank scores\n pr_args : dict, (default={})\n Keyword arguments to PairwiseRankClf constructor\n ir_args : dict, (default={"out_of_bounds":"clip"})\n Keyword arguments to IsotonicRegression constructor\n\n '
self.ir_args = ir_args
self.pr_args = pr_args
self.pr_clf = PairwiseRankClf(**pr_args)
self.ir_model = IsotonicRegression(**ir_args)
self.mask_size = mask_size
|
def __init__(self, mask_size=100, pr_args={}, ir_args={'out_of_bounds': 'clip'}):
'\n Parameters\n ----------\n mask_size : int, (default=100)\n Length of the mask for smoothing rank scores\n pr_args : dict, (default={})\n Keyword arguments to PairwiseRankClf constructor\n ir_args : dict, (default={"out_of_bounds":"clip"})\n Keyword arguments to IsotonicRegression constructor\n\n '
self.ir_args = ir_args
self.pr_args = pr_args
self.pr_clf = PairwiseRankClf(**pr_args)
self.ir_model = IsotonicRegression(**ir_args)
self.mask_size = mask_size<|docstring|>Parameters
----------
mask_size : int, (default=100)
Length of the mask for smoothing rank scores
pr_args : dict, (default={})
Keyword arguments to PairwiseRankClf constructor
ir_args : dict, (default={"out_of_bounds":"clip"})
Keyword arguments to IsotonicRegression constructor<|endoftext|>
|
1de7aadf11043ba6949a68444062e78bb75c6396bef3b79e87be90aeb7788451
|
def __init__(self, param_grid=None, tuning_fraction=0.75, pairs_filename=None, n_tuning_training_samples=10000, n_tuning_eval_samples=10000, n_training_samples=10000, scoring='roc_auc', seed=None, verbose=False):
"\n Parameters\n ----------\n param_grid : dict, optional\n parameter grid for tuning hyperparameters. Default is \n PairwiseRankClf.CLF_DEFAULT_ARGS\n tuning_fraction : float, (default=0.75)\n Fraction of training examples passed to fit to use as training set \n while tuning hyper-parameters. The remainder of the examples are used \n for estimating performance.\n pairs_filename : string, optional\n If set, pairs are read from the specified file if it exists or \n written to that file otherwise. Caching pairs in a file can help\n speed up training when training sets are large. '.npz' is appended\n (by numpy) to the filename if it does not end with '.npz'\n n_tuning_training_samples : int, (default=10000)\n The number of pairwise samples to use for training models\n during hyperpararmeter tuning. That is, the size of the train_prime set.\n n_tuning_eval_samples : int, (default=10000)\n The number of pairwise samples used to evaluate trained models\n during hyperparameter tuning. That is, the size of the tuning set.\n n_training_samples : int, (default=10000)\n The number of pairwise samples to use for training the underlying \n classifier after hyperparametrs have been tuned. \n scoring : string or callable, (default='roc_auc')\n the scoring parameter for GridSearchCV()\n seed : int, optional\n if set np.rand(seed) is called at the start of fit()\n "
self.param_grid = (param_grid if (not (param_grid is None)) else PairwiseRankClf.CLF_DEFAULT_ARGS)
self._pairwise_clf = None
self.tuning_fraction = tuning_fraction
self.seed = seed
self.pairs_filename = pairs_filename
self.n_tuning_training_samples = n_tuning_training_samples
self.n_tuning_eval_samples = n_tuning_eval_samples
self.n_training_samples = n_training_samples
self.scoring = scoring
self.verbose = verbose
if ((not (pairs_filename is None)) and (not pairs_filename.endswith('.npz'))):
self.pairs_filename = (pairs_filename + '.npz')
|
Parameters
----------
param_grid : dict, optional
parameter grid for tuning hyperparameters. Default is
PairwiseRankClf.CLF_DEFAULT_ARGS
tuning_fraction : float, (default=0.75)
Fraction of training examples passed to fit to use as training set
while tuning hyper-parameters. The remainder of the examples are used
for estimating performance.
pairs_filename : string, optional
If set, pairs are read from the specified file if it exists or
written to that file otherwise. Caching pairs in a file can help
speed up training when training sets are large. '.npz' is appended
(by numpy) to the filename if it does not end with '.npz'
n_tuning_training_samples : int, (default=10000)
The number of pairwise samples to use for training models
during hyperpararmeter tuning. That is, the size of the train_prime set.
n_tuning_eval_samples : int, (default=10000)
The number of pairwise samples used to evaluate trained models
during hyperparameter tuning. That is, the size of the tuning set.
n_training_samples : int, (default=10000)
The number of pairwise samples to use for training the underlying
classifier after hyperparametrs have been tuned.
scoring : string or callable, (default='roc_auc')
the scoring parameter for GridSearchCV()
seed : int, optional
if set np.rand(seed) is called at the start of fit()
|
code/rsir.py
|
__init__
|
joebockhorst/ecml2017
| 1
|
python
|
def __init__(self, param_grid=None, tuning_fraction=0.75, pairs_filename=None, n_tuning_training_samples=10000, n_tuning_eval_samples=10000, n_training_samples=10000, scoring='roc_auc', seed=None, verbose=False):
"\n Parameters\n ----------\n param_grid : dict, optional\n parameter grid for tuning hyperparameters. Default is \n PairwiseRankClf.CLF_DEFAULT_ARGS\n tuning_fraction : float, (default=0.75)\n Fraction of training examples passed to fit to use as training set \n while tuning hyper-parameters. The remainder of the examples are used \n for estimating performance.\n pairs_filename : string, optional\n If set, pairs are read from the specified file if it exists or \n written to that file otherwise. Caching pairs in a file can help\n speed up training when training sets are large. '.npz' is appended\n (by numpy) to the filename if it does not end with '.npz'\n n_tuning_training_samples : int, (default=10000)\n The number of pairwise samples to use for training models\n during hyperpararmeter tuning. That is, the size of the train_prime set.\n n_tuning_eval_samples : int, (default=10000)\n The number of pairwise samples used to evaluate trained models\n during hyperparameter tuning. That is, the size of the tuning set.\n n_training_samples : int, (default=10000)\n The number of pairwise samples to use for training the underlying \n classifier after hyperparametrs have been tuned. \n scoring : string or callable, (default='roc_auc')\n the scoring parameter for GridSearchCV()\n seed : int, optional\n if set np.rand(seed) is called at the start of fit()\n "
self.param_grid = (param_grid if (not (param_grid is None)) else PairwiseRankClf.CLF_DEFAULT_ARGS)
self._pairwise_clf = None
self.tuning_fraction = tuning_fraction
self.seed = seed
self.pairs_filename = pairs_filename
self.n_tuning_training_samples = n_tuning_training_samples
self.n_tuning_eval_samples = n_tuning_eval_samples
self.n_training_samples = n_training_samples
self.scoring = scoring
self.verbose = verbose
if ((not (pairs_filename is None)) and (not pairs_filename.endswith('.npz'))):
self.pairs_filename = (pairs_filename + '.npz')
|
def __init__(self, param_grid=None, tuning_fraction=0.75, pairs_filename=None, n_tuning_training_samples=10000, n_tuning_eval_samples=10000, n_training_samples=10000, scoring='roc_auc', seed=None, verbose=False):
"\n Parameters\n ----------\n param_grid : dict, optional\n parameter grid for tuning hyperparameters. Default is \n PairwiseRankClf.CLF_DEFAULT_ARGS\n tuning_fraction : float, (default=0.75)\n Fraction of training examples passed to fit to use as training set \n while tuning hyper-parameters. The remainder of the examples are used \n for estimating performance.\n pairs_filename : string, optional\n If set, pairs are read from the specified file if it exists or \n written to that file otherwise. Caching pairs in a file can help\n speed up training when training sets are large. '.npz' is appended\n (by numpy) to the filename if it does not end with '.npz'\n n_tuning_training_samples : int, (default=10000)\n The number of pairwise samples to use for training models\n during hyperpararmeter tuning. That is, the size of the train_prime set.\n n_tuning_eval_samples : int, (default=10000)\n The number of pairwise samples used to evaluate trained models\n during hyperparameter tuning. That is, the size of the tuning set.\n n_training_samples : int, (default=10000)\n The number of pairwise samples to use for training the underlying \n classifier after hyperparametrs have been tuned. \n scoring : string or callable, (default='roc_auc')\n the scoring parameter for GridSearchCV()\n seed : int, optional\n if set np.rand(seed) is called at the start of fit()\n "
self.param_grid = (param_grid if (not (param_grid is None)) else PairwiseRankClf.CLF_DEFAULT_ARGS)
self._pairwise_clf = None
self.tuning_fraction = tuning_fraction
self.seed = seed
self.pairs_filename = pairs_filename
self.n_tuning_training_samples = n_tuning_training_samples
self.n_tuning_eval_samples = n_tuning_eval_samples
self.n_training_samples = n_training_samples
self.scoring = scoring
self.verbose = verbose
if ((not (pairs_filename is None)) and (not pairs_filename.endswith('.npz'))):
self.pairs_filename = (pairs_filename + '.npz')<|docstring|>Parameters
----------
param_grid : dict, optional
parameter grid for tuning hyperparameters. Default is
PairwiseRankClf.CLF_DEFAULT_ARGS
tuning_fraction : float, (default=0.75)
Fraction of training examples passed to fit to use as training set
while tuning hyper-parameters. The remainder of the examples are used
for estimating performance.
pairs_filename : string, optional
If set, pairs are read from the specified file if it exists or
written to that file otherwise. Caching pairs in a file can help
speed up training when training sets are large. '.npz' is appended
(by numpy) to the filename if it does not end with '.npz'
n_tuning_training_samples : int, (default=10000)
The number of pairwise samples to use for training models
during hyperpararmeter tuning. That is, the size of the train_prime set.
n_tuning_eval_samples : int, (default=10000)
The number of pairwise samples used to evaluate trained models
during hyperparameter tuning. That is, the size of the tuning set.
n_training_samples : int, (default=10000)
The number of pairwise samples to use for training the underlying
classifier after hyperparametrs have been tuned.
scoring : string or callable, (default='roc_auc')
the scoring parameter for GridSearchCV()
seed : int, optional
if set np.rand(seed) is called at the start of fit()<|endoftext|>
|
046b2572892cd1ee5331b31bcd031f395974bbbc730d0ab1d9a5efc875d9df41
|
def fit(self, X, y):
'Train the model\n \n Parameters\n ----------\n X : array, shape=[n_examples, n_features]\n y : array, shape=[n_examples]\n '
if (not (self.seed is None)):
np.random.seed(self.seed)
n_tr_prime = int((X.shape[0] * self.tuning_fraction))
self.rand_idx = np.random.permutation(X.shape[0])
self.tr_prime_idx = self.rand_idx[:n_tr_prime]
self.tu_idx = self.rand_idx[n_tr_prime:]
pairs = self._get_pairs(y)
(X_pairwise_tr_prime, y_pairwise_tr_prime, sp_tr_prime) = sample_pairwise_examples(n=self.n_tuning_training_samples, pairs=pairs, whitelist=self.tr_prime_idx, X=X, seed=self.seed)
(X_pairwise_tune, y_pairwise_tune, sp_tune) = sample_pairwise_examples(n=self.n_tuning_eval_samples, pairs=pairs, whitelist=self.tu_idx, X=X, seed=self.seed)
test_idx = (([(- 1)] * self.n_tuning_training_samples) + ([0] * self.n_tuning_eval_samples))
cv = PredefinedSplit(test_idx)
self._gridsearch = GridSearchCV(LinearSVC(), param_grid=self.param_grid, cv=cv, scoring=self.scoring, refit=False)
X_tmp = np.concatenate((X_pairwise_tr_prime, X_pairwise_tune))
y_tmp = np.concatenate((y_pairwise_tr_prime, y_pairwise_tune))
self._gridsearch.fit(X_tmp, y_tmp)
if self.verbose:
print('Training final model with best_params: {}'.format(self._gridsearch.best_params_))
(X_pairwise_tr, Y_pairwise_tr, _) = sample_pairwise_examples(n=self.n_training_samples, pairs=pairs, X=X, seed=self.seed)
self._pairwise_clf = LinearSVC(**self._gridsearch.best_params_)
self._pairwise_clf.fit(X_pairwise_tr, Y_pairwise_tr)
|
Train the model
Parameters
----------
X : array, shape=[n_examples, n_features]
y : array, shape=[n_examples]
|
code/rsir.py
|
fit
|
joebockhorst/ecml2017
| 1
|
python
|
def fit(self, X, y):
'Train the model\n \n Parameters\n ----------\n X : array, shape=[n_examples, n_features]\n y : array, shape=[n_examples]\n '
if (not (self.seed is None)):
np.random.seed(self.seed)
n_tr_prime = int((X.shape[0] * self.tuning_fraction))
self.rand_idx = np.random.permutation(X.shape[0])
self.tr_prime_idx = self.rand_idx[:n_tr_prime]
self.tu_idx = self.rand_idx[n_tr_prime:]
pairs = self._get_pairs(y)
(X_pairwise_tr_prime, y_pairwise_tr_prime, sp_tr_prime) = sample_pairwise_examples(n=self.n_tuning_training_samples, pairs=pairs, whitelist=self.tr_prime_idx, X=X, seed=self.seed)
(X_pairwise_tune, y_pairwise_tune, sp_tune) = sample_pairwise_examples(n=self.n_tuning_eval_samples, pairs=pairs, whitelist=self.tu_idx, X=X, seed=self.seed)
test_idx = (([(- 1)] * self.n_tuning_training_samples) + ([0] * self.n_tuning_eval_samples))
cv = PredefinedSplit(test_idx)
self._gridsearch = GridSearchCV(LinearSVC(), param_grid=self.param_grid, cv=cv, scoring=self.scoring, refit=False)
X_tmp = np.concatenate((X_pairwise_tr_prime, X_pairwise_tune))
y_tmp = np.concatenate((y_pairwise_tr_prime, y_pairwise_tune))
self._gridsearch.fit(X_tmp, y_tmp)
if self.verbose:
print('Training final model with best_params: {}'.format(self._gridsearch.best_params_))
(X_pairwise_tr, Y_pairwise_tr, _) = sample_pairwise_examples(n=self.n_training_samples, pairs=pairs, X=X, seed=self.seed)
self._pairwise_clf = LinearSVC(**self._gridsearch.best_params_)
self._pairwise_clf.fit(X_pairwise_tr, Y_pairwise_tr)
|
def fit(self, X, y):
'Train the model\n \n Parameters\n ----------\n X : array, shape=[n_examples, n_features]\n y : array, shape=[n_examples]\n '
if (not (self.seed is None)):
np.random.seed(self.seed)
n_tr_prime = int((X.shape[0] * self.tuning_fraction))
self.rand_idx = np.random.permutation(X.shape[0])
self.tr_prime_idx = self.rand_idx[:n_tr_prime]
self.tu_idx = self.rand_idx[n_tr_prime:]
pairs = self._get_pairs(y)
(X_pairwise_tr_prime, y_pairwise_tr_prime, sp_tr_prime) = sample_pairwise_examples(n=self.n_tuning_training_samples, pairs=pairs, whitelist=self.tr_prime_idx, X=X, seed=self.seed)
(X_pairwise_tune, y_pairwise_tune, sp_tune) = sample_pairwise_examples(n=self.n_tuning_eval_samples, pairs=pairs, whitelist=self.tu_idx, X=X, seed=self.seed)
test_idx = (([(- 1)] * self.n_tuning_training_samples) + ([0] * self.n_tuning_eval_samples))
cv = PredefinedSplit(test_idx)
self._gridsearch = GridSearchCV(LinearSVC(), param_grid=self.param_grid, cv=cv, scoring=self.scoring, refit=False)
X_tmp = np.concatenate((X_pairwise_tr_prime, X_pairwise_tune))
y_tmp = np.concatenate((y_pairwise_tr_prime, y_pairwise_tune))
self._gridsearch.fit(X_tmp, y_tmp)
if self.verbose:
print('Training final model with best_params: {}'.format(self._gridsearch.best_params_))
(X_pairwise_tr, Y_pairwise_tr, _) = sample_pairwise_examples(n=self.n_training_samples, pairs=pairs, X=X, seed=self.seed)
self._pairwise_clf = LinearSVC(**self._gridsearch.best_params_)
self._pairwise_clf.fit(X_pairwise_tr, Y_pairwise_tr)<|docstring|>Train the model
Parameters
----------
X : array, shape=[n_examples, n_features]
y : array, shape=[n_examples]<|endoftext|>
|
2b866bfe27fff2879daa57427613518269231393ca7f6401b561a361912e775e
|
def _compute_priority(self, priority):
' p = (p + 𝝐)**𝛼 '
priority = ((self._per_eta * tf.math.reduce_max(priority, axis=1)) + ((1 - self._per_eta) * tf.math.reduce_mean(priority, axis=1)))
priority += self._per_epsilon
priority **= self._per_alpha
return priority
|
p = (p + 𝝐)**𝛼
|
algo/mrdqn/base.py
|
_compute_priority
|
xlnwel/grl
| 5
|
python
|
def _compute_priority(self, priority):
' '
priority = ((self._per_eta * tf.math.reduce_max(priority, axis=1)) + ((1 - self._per_eta) * tf.math.reduce_mean(priority, axis=1)))
priority += self._per_epsilon
priority **= self._per_alpha
return priority
|
def _compute_priority(self, priority):
' '
priority = ((self._per_eta * tf.math.reduce_max(priority, axis=1)) + ((1 - self._per_eta) * tf.math.reduce_mean(priority, axis=1)))
priority += self._per_epsilon
priority **= self._per_alpha
return priority<|docstring|>p = (p + 𝝐)**𝛼<|endoftext|>
|
ba547b4b55f1306d21389fd38444b16d1a7410869af8254ac5e36b2f721d7238
|
def host_passes(self, host_state, filter_properties):
'Returns True for only active compute nodes.'
service = host_state.service
if service['disabled']:
LOG.debug('%(host_state)s is disabled, reason: %(reason)s', {'host_state': host_state, 'reason': service.get('disabled_reason')})
return False
elif (not self.servicegroup_api.service_is_up(service)):
LOG.warning(_LW('%(host_state)s has not been heard from in a while'), {'host_state': host_state})
return False
return True
|
Returns True for only active compute nodes.
|
patron/scheduler/filters/compute_filter.py
|
host_passes
|
casbin/openstack-patron
| 0
|
python
|
def host_passes(self, host_state, filter_properties):
service = host_state.service
if service['disabled']:
LOG.debug('%(host_state)s is disabled, reason: %(reason)s', {'host_state': host_state, 'reason': service.get('disabled_reason')})
return False
elif (not self.servicegroup_api.service_is_up(service)):
LOG.warning(_LW('%(host_state)s has not been heard from in a while'), {'host_state': host_state})
return False
return True
|
def host_passes(self, host_state, filter_properties):
service = host_state.service
if service['disabled']:
LOG.debug('%(host_state)s is disabled, reason: %(reason)s', {'host_state': host_state, 'reason': service.get('disabled_reason')})
return False
elif (not self.servicegroup_api.service_is_up(service)):
LOG.warning(_LW('%(host_state)s has not been heard from in a while'), {'host_state': host_state})
return False
return True<|docstring|>Returns True for only active compute nodes.<|endoftext|>
|
20f9f1e9f49a12ad42c40f7dc5dfc79d6cf6484ed66f88562ce1d4cc20919520
|
def add_label(self, lab=None):
'\n Add a forward vertex to the pattern\n :param lab: label\n :return: int id of the new vertex Graph.__init__(self, data=data,name=name,**attr)\n\n '
vid = self.number_of_nodes()
self.add_node(vid)
self.node[vid][NodeLab] = lab
return vid
|
Add a forward vertex to the pattern
:param lab: label
:return: int id of the new vertex Graph.__init__(self, data=data,name=name,**attr)
|
miner/DS/pattern.py
|
add_label
|
PranayAnchuri/probgraphminer
| 0
|
python
|
def add_label(self, lab=None):
'\n Add a forward vertex to the pattern\n :param lab: label\n :return: int id of the new vertex Graph.__init__(self, data=data,name=name,**attr)\n\n '
vid = self.number_of_nodes()
self.add_node(vid)
self.node[vid][NodeLab] = lab
return vid
|
def add_label(self, lab=None):
'\n Add a forward vertex to the pattern\n :param lab: label\n :return: int id of the new vertex Graph.__init__(self, data=data,name=name,**attr)\n\n '
vid = self.number_of_nodes()
self.add_node(vid)
self.node[vid][NodeLab] = lab
return vid<|docstring|>Add a forward vertex to the pattern
:param lab: label
:return: int id of the new vertex Graph.__init__(self, data=data,name=name,**attr)<|endoftext|>
|
74427325b5b95f849796aebe2128f2428409919332cf46fe730c51beb83295d1
|
def add_single_edge(self, l1, l2):
'\n Make a single edge from pair of labels, call this method only on empty graph\n :param l1:\n :param l2:\n :return:\n '
if (not self):
vid1 = self.add_label(l1)
vid2 = self.add_label(l2)
self.add_edge(vid1, vid2)
else:
raise RuntimeError('Cannot call add_single_edge method on non emtpy graph')
|
Make a single edge from pair of labels, call this method only on empty graph
:param l1:
:param l2:
:return:
|
miner/DS/pattern.py
|
add_single_edge
|
PranayAnchuri/probgraphminer
| 0
|
python
|
def add_single_edge(self, l1, l2):
'\n Make a single edge from pair of labels, call this method only on empty graph\n :param l1:\n :param l2:\n :return:\n '
if (not self):
vid1 = self.add_label(l1)
vid2 = self.add_label(l2)
self.add_edge(vid1, vid2)
else:
raise RuntimeError('Cannot call add_single_edge method on non emtpy graph')
|
def add_single_edge(self, l1, l2):
'\n Make a single edge from pair of labels, call this method only on empty graph\n :param l1:\n :param l2:\n :return:\n '
if (not self):
vid1 = self.add_label(l1)
vid2 = self.add_label(l2)
self.add_edge(vid1, vid2)
else:
raise RuntimeError('Cannot call add_single_edge method on non emtpy graph')<|docstring|>Make a single edge from pair of labels, call this method only on empty graph
:param l1:
:param l2:
:return:<|endoftext|>
|
ec7ae82f2396a3f7fe7614c9de6a68520e9b71fc0a35a79c44338da861c8eee0
|
def edit_mode(self):
"Switches edit_mode on/off.\n\n When switching edit mode on this function first makes mouse\n cursor visible when on top of this module and makes background\n highlight visible by changing it's color to yellow, then\n appropriate event handlers are bind to left mouse button click\n (<Button-1>) and mouse motion with left mouse button pressed\n (<B1-Motion>) for every component.\n\n When switching edit mode off cursor is first hidden, highlight\n made invisible by changing it's color to black, then mouse\n input event handlers are unbound from all components.\n "
if (not self._frame_in_edit_mode):
self._frame_in_edit_mode = True
self.config(highlightbackground='yellow', cursor='arrow')
for label in self.winfo_children():
label.bind('<Button-1>', self._mouse_left_button_click)
label.bind('<B1-Motion>', self._mouse_left_button_motion)
label.bind('<ButtonRelease-1>', self._mouse_left_button_release)
else:
self._frame_in_edit_mode = False
self.config(highlightbackground='black', cursor='none')
for label in self.winfo_children():
label.unbind('<B1-Motion>')
label.unbind('<Button-1>')
label.unbind('<ButtonRelease-1>')
|
Switches edit_mode on/off.
When switching edit mode on this function first makes mouse
cursor visible when on top of this module and makes background
highlight visible by changing it's color to yellow, then
appropriate event handlers are bind to left mouse button click
(<Button-1>) and mouse motion with left mouse button pressed
(<B1-Motion>) for every component.
When switching edit mode off cursor is first hidden, highlight
made invisible by changing it's color to black, then mouse
input event handlers are unbound from all components.
|
smartmirror/clock.py
|
edit_mode
|
bbialoskorski/SmartMirror
| 0
|
python
|
def edit_mode(self):
"Switches edit_mode on/off.\n\n When switching edit mode on this function first makes mouse\n cursor visible when on top of this module and makes background\n highlight visible by changing it's color to yellow, then\n appropriate event handlers are bind to left mouse button click\n (<Button-1>) and mouse motion with left mouse button pressed\n (<B1-Motion>) for every component.\n\n When switching edit mode off cursor is first hidden, highlight\n made invisible by changing it's color to black, then mouse\n input event handlers are unbound from all components.\n "
if (not self._frame_in_edit_mode):
self._frame_in_edit_mode = True
self.config(highlightbackground='yellow', cursor='arrow')
for label in self.winfo_children():
label.bind('<Button-1>', self._mouse_left_button_click)
label.bind('<B1-Motion>', self._mouse_left_button_motion)
label.bind('<ButtonRelease-1>', self._mouse_left_button_release)
else:
self._frame_in_edit_mode = False
self.config(highlightbackground='black', cursor='none')
for label in self.winfo_children():
label.unbind('<B1-Motion>')
label.unbind('<Button-1>')
label.unbind('<ButtonRelease-1>')
|
def edit_mode(self):
"Switches edit_mode on/off.\n\n When switching edit mode on this function first makes mouse\n cursor visible when on top of this module and makes background\n highlight visible by changing it's color to yellow, then\n appropriate event handlers are bind to left mouse button click\n (<Button-1>) and mouse motion with left mouse button pressed\n (<B1-Motion>) for every component.\n\n When switching edit mode off cursor is first hidden, highlight\n made invisible by changing it's color to black, then mouse\n input event handlers are unbound from all components.\n "
if (not self._frame_in_edit_mode):
self._frame_in_edit_mode = True
self.config(highlightbackground='yellow', cursor='arrow')
for label in self.winfo_children():
label.bind('<Button-1>', self._mouse_left_button_click)
label.bind('<B1-Motion>', self._mouse_left_button_motion)
label.bind('<ButtonRelease-1>', self._mouse_left_button_release)
else:
self._frame_in_edit_mode = False
self.config(highlightbackground='black', cursor='none')
for label in self.winfo_children():
label.unbind('<B1-Motion>')
label.unbind('<Button-1>')
label.unbind('<ButtonRelease-1>')<|docstring|>Switches edit_mode on/off.
When switching edit mode on this function first makes mouse
cursor visible when on top of this module and makes background
highlight visible by changing it's color to yellow, then
appropriate event handlers are bind to left mouse button click
(<Button-1>) and mouse motion with left mouse button pressed
(<B1-Motion>) for every component.
When switching edit mode off cursor is first hidden, highlight
made invisible by changing it's color to black, then mouse
input event handlers are unbound from all components.<|endoftext|>
|
0bc41982ce087d0f16ed1060c874592856c9e73ff33beef520a8e44fb7dda799
|
def _mouse_left_button_click(self, event):
"Saves coordinates of left mouse button click relative to\n this class's frame."
self._mouse_left_button_click_x_cord = (event.widget.winfo_x() + event.x)
self._mouse_left_button_click_y_cord = (event.widget.winfo_y() + event.y)
|
Saves coordinates of left mouse button click relative to
this class's frame.
|
smartmirror/clock.py
|
_mouse_left_button_click
|
bbialoskorski/SmartMirror
| 0
|
python
|
def _mouse_left_button_click(self, event):
"Saves coordinates of left mouse button click relative to\n this class's frame."
self._mouse_left_button_click_x_cord = (event.widget.winfo_x() + event.x)
self._mouse_left_button_click_y_cord = (event.widget.winfo_y() + event.y)
|
def _mouse_left_button_click(self, event):
"Saves coordinates of left mouse button click relative to\n this class's frame."
self._mouse_left_button_click_x_cord = (event.widget.winfo_x() + event.x)
self._mouse_left_button_click_y_cord = (event.widget.winfo_y() + event.y)<|docstring|>Saves coordinates of left mouse button click relative to
this class's frame.<|endoftext|>
|
86b6bdea1d76154ae31f6235e5c02cffc164815392e3353ea78650dc501beb54
|
def _mouse_left_button_motion(self, event):
'Repositions frame according to mouse cursor movement while\n left button is pressed.'
self.place(x=(event.x_root - self._mouse_left_button_click_x_cord), y=(event.y_root - self._mouse_left_button_click_y_cord))
self._framename_coords_dict['Clock'] = ((event.x_root - self._mouse_left_button_click_x_cord), (event.y_root - self._mouse_left_button_click_y_cord))
|
Repositions frame according to mouse cursor movement while
left button is pressed.
|
smartmirror/clock.py
|
_mouse_left_button_motion
|
bbialoskorski/SmartMirror
| 0
|
python
|
def _mouse_left_button_motion(self, event):
'Repositions frame according to mouse cursor movement while\n left button is pressed.'
self.place(x=(event.x_root - self._mouse_left_button_click_x_cord), y=(event.y_root - self._mouse_left_button_click_y_cord))
self._framename_coords_dict['Clock'] = ((event.x_root - self._mouse_left_button_click_x_cord), (event.y_root - self._mouse_left_button_click_y_cord))
|
def _mouse_left_button_motion(self, event):
'Repositions frame according to mouse cursor movement while\n left button is pressed.'
self.place(x=(event.x_root - self._mouse_left_button_click_x_cord), y=(event.y_root - self._mouse_left_button_click_y_cord))
self._framename_coords_dict['Clock'] = ((event.x_root - self._mouse_left_button_click_x_cord), (event.y_root - self._mouse_left_button_click_y_cord))<|docstring|>Repositions frame according to mouse cursor movement while
left button is pressed.<|endoftext|>
|
cc6905d72f83c073c3697c177580a4415ee001cc0d18255e2839fc10a64a4be1
|
def _mouse_left_button_release(self, event):
'Saves new position to json file.'
with open('../resources/dicts/framename_coords_dict.json', 'w') as dict_json:
json.dump(self._framename_coords_dict, dict_json)
|
Saves new position to json file.
|
smartmirror/clock.py
|
_mouse_left_button_release
|
bbialoskorski/SmartMirror
| 0
|
python
|
def _mouse_left_button_release(self, event):
with open('../resources/dicts/framename_coords_dict.json', 'w') as dict_json:
json.dump(self._framename_coords_dict, dict_json)
|
def _mouse_left_button_release(self, event):
with open('../resources/dicts/framename_coords_dict.json', 'w') as dict_json:
json.dump(self._framename_coords_dict, dict_json)<|docstring|>Saves new position to json file.<|endoftext|>
|
8249aa9ea31c6a9eb4f462549a5b7445b6438194508e056b9cec28e54965231c
|
def _display_time(self):
'Updates labels with current time.'
time = dt.datetime.time(dt.datetime.now())
hour = str(time.hour)
minute = time.strftime('%M')
if (len(hour) == 1):
hour = ('0' + hour)
self._hours_label.config(text=hour)
self._minutes_label.config(text=minute)
self.after(200, self._display_time)
|
Updates labels with current time.
|
smartmirror/clock.py
|
_display_time
|
bbialoskorski/SmartMirror
| 0
|
python
|
def _display_time(self):
time = dt.datetime.time(dt.datetime.now())
hour = str(time.hour)
minute = time.strftime('%M')
if (len(hour) == 1):
hour = ('0' + hour)
self._hours_label.config(text=hour)
self._minutes_label.config(text=minute)
self.after(200, self._display_time)
|
def _display_time(self):
time = dt.datetime.time(dt.datetime.now())
hour = str(time.hour)
minute = time.strftime('%M')
if (len(hour) == 1):
hour = ('0' + hour)
self._hours_label.config(text=hour)
self._minutes_label.config(text=minute)
self.after(200, self._display_time)<|docstring|>Updates labels with current time.<|endoftext|>
|
748cd5f706034892095203e49db1d2e7a8b5c9e836b92211c57ec864f7afb9de
|
def _display_colon(self):
'Displays blinking colon animation.'
self.after(500, self._display_colon)
next_color = 'black'
if (self._colon_label.cget('foreground') == 'black'):
next_color = 'white'
self._colon_label.config(fg=next_color)
|
Displays blinking colon animation.
|
smartmirror/clock.py
|
_display_colon
|
bbialoskorski/SmartMirror
| 0
|
python
|
def _display_colon(self):
self.after(500, self._display_colon)
next_color = 'black'
if (self._colon_label.cget('foreground') == 'black'):
next_color = 'white'
self._colon_label.config(fg=next_color)
|
def _display_colon(self):
self.after(500, self._display_colon)
next_color = 'black'
if (self._colon_label.cget('foreground') == 'black'):
next_color = 'white'
self._colon_label.config(fg=next_color)<|docstring|>Displays blinking colon animation.<|endoftext|>
|
d8cd22efabfcf94514d2d48a755ffa4a80f6b20138779b10e66832e44f3daa9c
|
def _display_date(self):
'Updates label with date everyday at midnight.'
current_date = dt.datetime.now()
time = dt.datetime.time(current_date)
month = current_date.strftime('%b')
weekday = current_date.strftime('%a')
day = str(current_date.date().day)
date = ((((weekday + ', ') + month) + ' ') + day)
self._date_label.config(text=date)
callback_time = ((((23 - time.hour) * 3600000) + ((60 - time.minute) * 60000)) + 100)
self.after(callback_time, self._display_date)
|
Updates label with date everyday at midnight.
|
smartmirror/clock.py
|
_display_date
|
bbialoskorski/SmartMirror
| 0
|
python
|
def _display_date(self):
current_date = dt.datetime.now()
time = dt.datetime.time(current_date)
month = current_date.strftime('%b')
weekday = current_date.strftime('%a')
day = str(current_date.date().day)
date = ((((weekday + ', ') + month) + ' ') + day)
self._date_label.config(text=date)
callback_time = ((((23 - time.hour) * 3600000) + ((60 - time.minute) * 60000)) + 100)
self.after(callback_time, self._display_date)
|
def _display_date(self):
current_date = dt.datetime.now()
time = dt.datetime.time(current_date)
month = current_date.strftime('%b')
weekday = current_date.strftime('%a')
day = str(current_date.date().day)
date = ((((weekday + ', ') + month) + ' ') + day)
self._date_label.config(text=date)
callback_time = ((((23 - time.hour) * 3600000) + ((60 - time.minute) * 60000)) + 100)
self.after(callback_time, self._display_date)<|docstring|>Updates label with date everyday at midnight.<|endoftext|>
|
c16e8eb59157bc804580ecbfb17ee6a3115f7cad0d8e2c0e4a07aa5cf3ef996e
|
def str_to_class(module):
'Obtiene el modelo de la clase ingresada en el path.'
return getattr(sys.modules[__name__], module)
|
Obtiene el modelo de la clase ingresada en el path.
|
andromeda/tasks/reports/report.py
|
str_to_class
|
sango09/andromeda_api_rest
| 1
|
python
|
def str_to_class(module):
return getattr(sys.modules[__name__], module)
|
def str_to_class(module):
return getattr(sys.modules[__name__], module)<|docstring|>Obtiene el modelo de la clase ingresada en el path.<|endoftext|>
|
5e281064880d402f109510dd53189da21660e72a56550f662f2fae92b14e1f29
|
def most_requested_implements(request):
'Grafico de los implementos mas solicitados por los usuarios.'
data = Loans.objects.values_list('implement', flat=True)
implements = collections.Counter(data)
names = []
for implement_id in implements.keys():
x = InventoryLoans.objects.get(pk=implement_id)
names.append(x.implement.name)
values = list(implements.values())
(fig, axs) = plt.subplots(figsize=(10, 4))
axs.yaxis.set_major_locator(MaxNLocator(integer=True))
axs.set_ylabel('Solicitudes')
axs.bar(names, values)
fig.suptitle('Implementos mas solicitados')
return get_image()
|
Grafico de los implementos mas solicitados por los usuarios.
|
andromeda/tasks/reports/report.py
|
most_requested_implements
|
sango09/andromeda_api_rest
| 1
|
python
|
def most_requested_implements(request):
data = Loans.objects.values_list('implement', flat=True)
implements = collections.Counter(data)
names = []
for implement_id in implements.keys():
x = InventoryLoans.objects.get(pk=implement_id)
names.append(x.implement.name)
values = list(implements.values())
(fig, axs) = plt.subplots(figsize=(10, 4))
axs.yaxis.set_major_locator(MaxNLocator(integer=True))
axs.set_ylabel('Solicitudes')
axs.bar(names, values)
fig.suptitle('Implementos mas solicitados')
return get_image()
|
def most_requested_implements(request):
data = Loans.objects.values_list('implement', flat=True)
implements = collections.Counter(data)
names = []
for implement_id in implements.keys():
x = InventoryLoans.objects.get(pk=implement_id)
names.append(x.implement.name)
values = list(implements.values())
(fig, axs) = plt.subplots(figsize=(10, 4))
axs.yaxis.set_major_locator(MaxNLocator(integer=True))
axs.set_ylabel('Solicitudes')
axs.bar(names, values)
fig.suptitle('Implementos mas solicitados')
return get_image()<|docstring|>Grafico de los implementos mas solicitados por los usuarios.<|endoftext|>
|
d341f046f2da17c554861d02ab04cb9325aefb6ea9ece6e0cf9a73f7a5df8afb
|
def graph_users(request):
'Grafico plot del modelo de usuarios.'
user = User.objects.all().values()
df = pd.DataFrame(user, columns=['date_joined'])
data = df['date_joined'].dt.month_name().value_counts()
data = data.sort_values(ascending=True)
data.plot.bar(xlabel='Mes', ylabel='Usuarios', figsize=(8, 8))
return get_image()
|
Grafico plot del modelo de usuarios.
|
andromeda/tasks/reports/report.py
|
graph_users
|
sango09/andromeda_api_rest
| 1
|
python
|
def graph_users(request):
user = User.objects.all().values()
df = pd.DataFrame(user, columns=['date_joined'])
data = df['date_joined'].dt.month_name().value_counts()
data = data.sort_values(ascending=True)
data.plot.bar(xlabel='Mes', ylabel='Usuarios', figsize=(8, 8))
return get_image()
|
def graph_users(request):
user = User.objects.all().values()
df = pd.DataFrame(user, columns=['date_joined'])
data = df['date_joined'].dt.month_name().value_counts()
data = data.sort_values(ascending=True)
data.plot.bar(xlabel='Mes', ylabel='Usuarios', figsize=(8, 8))
return get_image()<|docstring|>Grafico plot del modelo de usuarios.<|endoftext|>
|
ef589031123cae8f55477e65f91d94b497d5b6dffcb02e98e22b043ecac2baaa
|
def get_context_data(self, **kwargs):
'Contexto de datos.'
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
if (self.kwargs['module'] == 'loans'):
context['total_loans'] = Loans.objects.count()
context['implements_total'] = get_total_implements()
context['best'] = get_best_auxiliary(Loans)
elif (self.kwargs['module'] == 'inventory'):
context['implements_total'] = Inventory.objects.count()
context['tech_tabs_total'] = TechnicalDataSheet.objects.count()
context['disabled_implements'] = Inventory.objects.filter(status_implement='Inactivo').count()
elif (self.kwargs['module'] == 'maintenance'):
context['maintenance_total'] = Maintenance.objects.filter(is_active=False).count()
context['implements_maintenance_total'] = Inventory.objects.filter(status_implement='En mantenimiento').count()
context['best_auxiliary_maintenance'] = get_best_auxiliary(Maintenance)
elif (self.kwargs['module'] == 'support'):
context['supports_completed'] = Support.objects.filter(status_support='Completado').count()
context['supports_total'] = Support.objects.count()
context['best_auxiliary_support'] = get_best_auxiliary(Support)
else:
context['module'] = True
return context
|
Contexto de datos.
|
andromeda/tasks/reports/report.py
|
get_context_data
|
sango09/andromeda_api_rest
| 1
|
python
|
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
if (self.kwargs['module'] == 'loans'):
context['total_loans'] = Loans.objects.count()
context['implements_total'] = get_total_implements()
context['best'] = get_best_auxiliary(Loans)
elif (self.kwargs['module'] == 'inventory'):
context['implements_total'] = Inventory.objects.count()
context['tech_tabs_total'] = TechnicalDataSheet.objects.count()
context['disabled_implements'] = Inventory.objects.filter(status_implement='Inactivo').count()
elif (self.kwargs['module'] == 'maintenance'):
context['maintenance_total'] = Maintenance.objects.filter(is_active=False).count()
context['implements_maintenance_total'] = Inventory.objects.filter(status_implement='En mantenimiento').count()
context['best_auxiliary_maintenance'] = get_best_auxiliary(Maintenance)
elif (self.kwargs['module'] == 'support'):
context['supports_completed'] = Support.objects.filter(status_support='Completado').count()
context['supports_total'] = Support.objects.count()
context['best_auxiliary_support'] = get_best_auxiliary(Support)
else:
context['module'] = True
return context
|
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['now'] = timezone.now()
if (self.kwargs['module'] == 'loans'):
context['total_loans'] = Loans.objects.count()
context['implements_total'] = get_total_implements()
context['best'] = get_best_auxiliary(Loans)
elif (self.kwargs['module'] == 'inventory'):
context['implements_total'] = Inventory.objects.count()
context['tech_tabs_total'] = TechnicalDataSheet.objects.count()
context['disabled_implements'] = Inventory.objects.filter(status_implement='Inactivo').count()
elif (self.kwargs['module'] == 'maintenance'):
context['maintenance_total'] = Maintenance.objects.filter(is_active=False).count()
context['implements_maintenance_total'] = Inventory.objects.filter(status_implement='En mantenimiento').count()
context['best_auxiliary_maintenance'] = get_best_auxiliary(Maintenance)
elif (self.kwargs['module'] == 'support'):
context['supports_completed'] = Support.objects.filter(status_support='Completado').count()
context['supports_total'] = Support.objects.count()
context['best_auxiliary_support'] = get_best_auxiliary(Support)
else:
context['module'] = True
return context<|docstring|>Contexto de datos.<|endoftext|>
|
ae9eacdb582b143efc16490fb65acb05e6ed882c67a38938d454bd1d2d3a6a39
|
@profile
def dzip(list1, list2):
'\n Zips elementwise pairs between list1 and list2 into a dictionary. Values\n from list2 can be broadcast onto list1.\n\n Args:\n list1 (sequence): full sequence\n list2 (sequence): can either be a sequence of one item or a sequence of\n equal length to `list1`\n\n SeeAlso:\n util_list.broadcast_zip\n\n Returns:\n dict: similar to dict(zip(list1, list2))\n\n CommandLine:\n python -m utool.util_dict dzip\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}\n >>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}\n >>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [])\n >>> ut.assert_raises(ValueError, dzip, [], [4, 5, 6])\n >>> ut.assert_raises(ValueError, dzip, [], [4])\n >>> ut.assert_raises(ValueError, dzip, [1, 2], [4, 5, 6])\n >>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [4, 5])\n '
try:
len(list1)
except TypeError:
list1 = list(list1)
try:
len(list2)
except TypeError:
list2 = list(list2)
if ((len(list1) == 0) and (len(list2) == 1)):
list2 = []
if ((len(list2) == 1) and (len(list1) > 1)):
list2 = (list2 * len(list1))
if (len(list1) != len(list2)):
raise ValueError(('out of alignment len(list1)=%r, len(list2)=%r' % (len(list1), len(list2))))
return dict(zip(list1, list2))
|
Zips elementwise pairs between list1 and list2 into a dictionary. Values
from list2 can be broadcast onto list1.
Args:
list1 (sequence): full sequence
list2 (sequence): can either be a sequence of one item or a sequence of
equal length to `list1`
SeeAlso:
util_list.broadcast_zip
Returns:
dict: similar to dict(zip(list1, list2))
CommandLine:
python -m utool.util_dict dzip
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [])
>>> ut.assert_raises(ValueError, dzip, [], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [], [4])
>>> ut.assert_raises(ValueError, dzip, [1, 2], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [4, 5])
|
utool/util_dict.py
|
dzip
|
Erotemic/utool
| 8
|
python
|
@profile
def dzip(list1, list2):
'\n Zips elementwise pairs between list1 and list2 into a dictionary. Values\n from list2 can be broadcast onto list1.\n\n Args:\n list1 (sequence): full sequence\n list2 (sequence): can either be a sequence of one item or a sequence of\n equal length to `list1`\n\n SeeAlso:\n util_list.broadcast_zip\n\n Returns:\n dict: similar to dict(zip(list1, list2))\n\n CommandLine:\n python -m utool.util_dict dzip\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}\n >>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}\n >>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [])\n >>> ut.assert_raises(ValueError, dzip, [], [4, 5, 6])\n >>> ut.assert_raises(ValueError, dzip, [], [4])\n >>> ut.assert_raises(ValueError, dzip, [1, 2], [4, 5, 6])\n >>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [4, 5])\n '
try:
len(list1)
except TypeError:
list1 = list(list1)
try:
len(list2)
except TypeError:
list2 = list(list2)
if ((len(list1) == 0) and (len(list2) == 1)):
list2 = []
if ((len(list2) == 1) and (len(list1) > 1)):
list2 = (list2 * len(list1))
if (len(list1) != len(list2)):
raise ValueError(('out of alignment len(list1)=%r, len(list2)=%r' % (len(list1), len(list2))))
return dict(zip(list1, list2))
|
@profile
def dzip(list1, list2):
'\n Zips elementwise pairs between list1 and list2 into a dictionary. Values\n from list2 can be broadcast onto list1.\n\n Args:\n list1 (sequence): full sequence\n list2 (sequence): can either be a sequence of one item or a sequence of\n equal length to `list1`\n\n SeeAlso:\n util_list.broadcast_zip\n\n Returns:\n dict: similar to dict(zip(list1, list2))\n\n CommandLine:\n python -m utool.util_dict dzip\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}\n >>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}\n >>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [])\n >>> ut.assert_raises(ValueError, dzip, [], [4, 5, 6])\n >>> ut.assert_raises(ValueError, dzip, [], [4])\n >>> ut.assert_raises(ValueError, dzip, [1, 2], [4, 5, 6])\n >>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [4, 5])\n '
try:
len(list1)
except TypeError:
list1 = list(list1)
try:
len(list2)
except TypeError:
list2 = list(list2)
if ((len(list1) == 0) and (len(list2) == 1)):
list2 = []
if ((len(list2) == 1) and (len(list1) > 1)):
list2 = (list2 * len(list1))
if (len(list1) != len(list2)):
raise ValueError(('out of alignment len(list1)=%r, len(list2)=%r' % (len(list1), len(list2))))
return dict(zip(list1, list2))<|docstring|>Zips elementwise pairs between list1 and list2 into a dictionary. Values
from list2 can be broadcast onto list1.
Args:
list1 (sequence): full sequence
list2 (sequence): can either be a sequence of one item or a sequence of
equal length to `list1`
SeeAlso:
util_list.broadcast_zip
Returns:
dict: similar to dict(zip(list1, list2))
CommandLine:
python -m utool.util_dict dzip
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [])
>>> ut.assert_raises(ValueError, dzip, [], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [], [4])
>>> ut.assert_raises(ValueError, dzip, [1, 2], [4, 5, 6])
>>> ut.assert_raises(ValueError, dzip, [1, 2, 3], [4, 5])<|endoftext|>
|
f79bc06e5817634638c71c52f672def65fa27166f3a27e2cd5338b2ca670d778
|
def map_dict_vals(func, dict_):
" applies a function to each of the keys in a dictionary\n\n Args:\n func (callable): a function\n dict_ (dict): a dictionary\n\n Returns:\n newdict: transformed dictionary\n\n CommandLine:\n python -m utool.util_dict --test-map_dict_vals\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [1, 2, 3], 'b': []}\n >>> func = len\n >>> newdict = map_dict_vals(func, dict_)\n >>> result = ut.repr2(newdict)\n >>> print(result)\n {'a': 3, 'b': 0}\n "
if (not hasattr(func, '__call__')):
func = func.__getitem__
keyval_list = [(key, func(val)) for (key, val) in six.iteritems(dict_)]
dictclass = (OrderedDict if isinstance(dict_, OrderedDict) else dict)
newdict = dictclass(keyval_list)
return newdict
|
applies a function to each of the keys in a dictionary
Args:
func (callable): a function
dict_ (dict): a dictionary
Returns:
newdict: transformed dictionary
CommandLine:
python -m utool.util_dict --test-map_dict_vals
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [1, 2, 3], 'b': []}
>>> func = len
>>> newdict = map_dict_vals(func, dict_)
>>> result = ut.repr2(newdict)
>>> print(result)
{'a': 3, 'b': 0}
|
utool/util_dict.py
|
map_dict_vals
|
Erotemic/utool
| 8
|
python
|
def map_dict_vals(func, dict_):
" applies a function to each of the keys in a dictionary\n\n Args:\n func (callable): a function\n dict_ (dict): a dictionary\n\n Returns:\n newdict: transformed dictionary\n\n CommandLine:\n python -m utool.util_dict --test-map_dict_vals\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [1, 2, 3], 'b': []}\n >>> func = len\n >>> newdict = map_dict_vals(func, dict_)\n >>> result = ut.repr2(newdict)\n >>> print(result)\n {'a': 3, 'b': 0}\n "
if (not hasattr(func, '__call__')):
func = func.__getitem__
keyval_list = [(key, func(val)) for (key, val) in six.iteritems(dict_)]
dictclass = (OrderedDict if isinstance(dict_, OrderedDict) else dict)
newdict = dictclass(keyval_list)
return newdict
|
def map_dict_vals(func, dict_):
" applies a function to each of the keys in a dictionary\n\n Args:\n func (callable): a function\n dict_ (dict): a dictionary\n\n Returns:\n newdict: transformed dictionary\n\n CommandLine:\n python -m utool.util_dict --test-map_dict_vals\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [1, 2, 3], 'b': []}\n >>> func = len\n >>> newdict = map_dict_vals(func, dict_)\n >>> result = ut.repr2(newdict)\n >>> print(result)\n {'a': 3, 'b': 0}\n "
if (not hasattr(func, '__call__')):
func = func.__getitem__
keyval_list = [(key, func(val)) for (key, val) in six.iteritems(dict_)]
dictclass = (OrderedDict if isinstance(dict_, OrderedDict) else dict)
newdict = dictclass(keyval_list)
return newdict<|docstring|>applies a function to each of the keys in a dictionary
Args:
func (callable): a function
dict_ (dict): a dictionary
Returns:
newdict: transformed dictionary
CommandLine:
python -m utool.util_dict --test-map_dict_vals
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [1, 2, 3], 'b': []}
>>> func = len
>>> newdict = map_dict_vals(func, dict_)
>>> result = ut.repr2(newdict)
>>> print(result)
{'a': 3, 'b': 0}<|endoftext|>
|
66168725d5753306c12270ff0f6af8dbf61e595e4f6d6faefd9eea1980cf7ca4
|
def map_dict_keys(func, dict_):
" applies a function to each of the keys in a dictionary\n\n Args:\n func (callable): a function\n dict_ (dict): a dictionary\n\n Returns:\n newdict: transformed dictionary\n\n CommandLine:\n python -m utool.util_dict --test-map_dict_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [1, 2, 3], 'b': []}\n >>> func = ord\n >>> newdict = map_dict_keys(func, dict_)\n >>> result = ut.repr2(newdict)\n >>> ut.assert_raises(AssertionError, map_dict_keys, len, dict_)\n >>> print(result)\n {97: [1, 2, 3], 98: []}\n "
if (not hasattr(func, '__call__')):
func = func.__getitem__
keyval_list = [(func(key), val) for (key, val) in six.iteritems(dict_)]
dictclass = (OrderedDict if isinstance(dict_, OrderedDict) else dict)
newdict = dictclass(keyval_list)
assert (len(newdict) == len(dict_)), 'multiple input keys were mapped to the same output key'
return newdict
|
applies a function to each of the keys in a dictionary
Args:
func (callable): a function
dict_ (dict): a dictionary
Returns:
newdict: transformed dictionary
CommandLine:
python -m utool.util_dict --test-map_dict_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [1, 2, 3], 'b': []}
>>> func = ord
>>> newdict = map_dict_keys(func, dict_)
>>> result = ut.repr2(newdict)
>>> ut.assert_raises(AssertionError, map_dict_keys, len, dict_)
>>> print(result)
{97: [1, 2, 3], 98: []}
|
utool/util_dict.py
|
map_dict_keys
|
Erotemic/utool
| 8
|
python
|
def map_dict_keys(func, dict_):
" applies a function to each of the keys in a dictionary\n\n Args:\n func (callable): a function\n dict_ (dict): a dictionary\n\n Returns:\n newdict: transformed dictionary\n\n CommandLine:\n python -m utool.util_dict --test-map_dict_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [1, 2, 3], 'b': []}\n >>> func = ord\n >>> newdict = map_dict_keys(func, dict_)\n >>> result = ut.repr2(newdict)\n >>> ut.assert_raises(AssertionError, map_dict_keys, len, dict_)\n >>> print(result)\n {97: [1, 2, 3], 98: []}\n "
if (not hasattr(func, '__call__')):
func = func.__getitem__
keyval_list = [(func(key), val) for (key, val) in six.iteritems(dict_)]
dictclass = (OrderedDict if isinstance(dict_, OrderedDict) else dict)
newdict = dictclass(keyval_list)
assert (len(newdict) == len(dict_)), 'multiple input keys were mapped to the same output key'
return newdict
|
def map_dict_keys(func, dict_):
" applies a function to each of the keys in a dictionary\n\n Args:\n func (callable): a function\n dict_ (dict): a dictionary\n\n Returns:\n newdict: transformed dictionary\n\n CommandLine:\n python -m utool.util_dict --test-map_dict_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [1, 2, 3], 'b': []}\n >>> func = ord\n >>> newdict = map_dict_keys(func, dict_)\n >>> result = ut.repr2(newdict)\n >>> ut.assert_raises(AssertionError, map_dict_keys, len, dict_)\n >>> print(result)\n {97: [1, 2, 3], 98: []}\n "
if (not hasattr(func, '__call__')):
func = func.__getitem__
keyval_list = [(func(key), val) for (key, val) in six.iteritems(dict_)]
dictclass = (OrderedDict if isinstance(dict_, OrderedDict) else dict)
newdict = dictclass(keyval_list)
assert (len(newdict) == len(dict_)), 'multiple input keys were mapped to the same output key'
return newdict<|docstring|>applies a function to each of the keys in a dictionary
Args:
func (callable): a function
dict_ (dict): a dictionary
Returns:
newdict: transformed dictionary
CommandLine:
python -m utool.util_dict --test-map_dict_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [1, 2, 3], 'b': []}
>>> func = ord
>>> newdict = map_dict_keys(func, dict_)
>>> result = ut.repr2(newdict)
>>> ut.assert_raises(AssertionError, map_dict_keys, len, dict_)
>>> print(result)
{97: [1, 2, 3], 98: []}<|endoftext|>
|
5097b19812bedb481e072fb7c85a6b2745be0a6484042fa750607dbfbcf7c601
|
def get_dict_hashid(dict_):
"\n Args:\n dict_ (dict):\n\n Returns:\n int: id hash\n\n References:\n http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary\n\n CommandLine:\n python -m utool.util_dict --test-get_dict_hashid\n python3 -m utool.util_dict --test-get_dict_hashid\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {}\n >>> dict_ = {'a': 'b'}\n >>> dict_ = {'a': {'c': 'd'}}\n >>> #dict_ = {'a': {'c': 'd'}, 1: 143, dict: set}\n >>> #dict_ = {'a': {'c': 'd'}, 1: 143 } non-determenism\n >>> hashid = get_dict_hashid(dict_)\n >>> result = str(hashid)\n >>> print(result)\n mxgkepoboqjerkhb\n\n oegknoalkrkojumi\n "
import utool as ut
raw_text = ut.repr4(dict_, sorted_=True, strvals=True, nl=2)
hashid = ut.hashstr27(raw_text)
return hashid
|
Args:
dict_ (dict):
Returns:
int: id hash
References:
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
CommandLine:
python -m utool.util_dict --test-get_dict_hashid
python3 -m utool.util_dict --test-get_dict_hashid
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {}
>>> dict_ = {'a': 'b'}
>>> dict_ = {'a': {'c': 'd'}}
>>> #dict_ = {'a': {'c': 'd'}, 1: 143, dict: set}
>>> #dict_ = {'a': {'c': 'd'}, 1: 143 } non-determenism
>>> hashid = get_dict_hashid(dict_)
>>> result = str(hashid)
>>> print(result)
mxgkepoboqjerkhb
oegknoalkrkojumi
|
utool/util_dict.py
|
get_dict_hashid
|
Erotemic/utool
| 8
|
python
|
def get_dict_hashid(dict_):
"\n Args:\n dict_ (dict):\n\n Returns:\n int: id hash\n\n References:\n http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary\n\n CommandLine:\n python -m utool.util_dict --test-get_dict_hashid\n python3 -m utool.util_dict --test-get_dict_hashid\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {}\n >>> dict_ = {'a': 'b'}\n >>> dict_ = {'a': {'c': 'd'}}\n >>> #dict_ = {'a': {'c': 'd'}, 1: 143, dict: set}\n >>> #dict_ = {'a': {'c': 'd'}, 1: 143 } non-determenism\n >>> hashid = get_dict_hashid(dict_)\n >>> result = str(hashid)\n >>> print(result)\n mxgkepoboqjerkhb\n\n oegknoalkrkojumi\n "
import utool as ut
raw_text = ut.repr4(dict_, sorted_=True, strvals=True, nl=2)
hashid = ut.hashstr27(raw_text)
return hashid
|
def get_dict_hashid(dict_):
"\n Args:\n dict_ (dict):\n\n Returns:\n int: id hash\n\n References:\n http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary\n\n CommandLine:\n python -m utool.util_dict --test-get_dict_hashid\n python3 -m utool.util_dict --test-get_dict_hashid\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {}\n >>> dict_ = {'a': 'b'}\n >>> dict_ = {'a': {'c': 'd'}}\n >>> #dict_ = {'a': {'c': 'd'}, 1: 143, dict: set}\n >>> #dict_ = {'a': {'c': 'd'}, 1: 143 } non-determenism\n >>> hashid = get_dict_hashid(dict_)\n >>> result = str(hashid)\n >>> print(result)\n mxgkepoboqjerkhb\n\n oegknoalkrkojumi\n "
import utool as ut
raw_text = ut.repr4(dict_, sorted_=True, strvals=True, nl=2)
hashid = ut.hashstr27(raw_text)
return hashid<|docstring|>Args:
dict_ (dict):
Returns:
int: id hash
References:
http://stackoverflow.com/questions/5884066/hashing-a-python-dictionary
CommandLine:
python -m utool.util_dict --test-get_dict_hashid
python3 -m utool.util_dict --test-get_dict_hashid
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {}
>>> dict_ = {'a': 'b'}
>>> dict_ = {'a': {'c': 'd'}}
>>> #dict_ = {'a': {'c': 'd'}, 1: 143, dict: set}
>>> #dict_ = {'a': {'c': 'd'}, 1: 143 } non-determenism
>>> hashid = get_dict_hashid(dict_)
>>> result = str(hashid)
>>> print(result)
mxgkepoboqjerkhb
oegknoalkrkojumi<|endoftext|>
|
c6415d610c15b17a17a961b884c44db02769641c9f8072e01f240aa33370f68b
|
def dict_stack(dict_list, key_prefix=''):
"\n stacks values from two dicts into a new dict where the values are list of\n the input values. the keys are the same.\n\n DEPRICATE in favor of dict_stack2\n\n Args:\n dict_list (list): list of dicts with similar keys\n\n Returns:\n dict dict_stacked\n\n CommandLine:\n python -m utool.util_dict --test-dict_stack\n python -m utool.util_dict --test-dict_stack:1\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_stacked = dict_stack([dict1_, dict2_])\n >>> result = ut.repr2(dict_stacked, sorted_=True)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [4]}\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> # Get equivalent behavior with dict_stack2?\n >>> # Almost, as long as None is not part of the list\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_stacked_ = dict_stack2([dict1_, dict2_])\n >>> dict_stacked = {key: ut.filter_Nones(val) for key, val in dict_stacked_.items()}\n >>> result = ut.repr2(dict_stacked, sorted_=True)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [4]}\n "
dict_stacked_ = defaultdict(list)
for dict_ in dict_list:
for (key, val) in six.iteritems(dict_):
dict_stacked_[(key_prefix + key)].append(val)
dict_stacked = dict(dict_stacked_)
return dict_stacked
|
stacks values from two dicts into a new dict where the values are list of
the input values. the keys are the same.
DEPRICATE in favor of dict_stack2
Args:
dict_list (list): list of dicts with similar keys
Returns:
dict dict_stacked
CommandLine:
python -m utool.util_dict --test-dict_stack
python -m utool.util_dict --test-dict_stack:1
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': 1, 'b': 2}
>>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
>>> dict_stacked = dict_stack([dict1_, dict2_])
>>> result = ut.repr2(dict_stacked, sorted_=True)
>>> print(result)
{'a': [1, 2], 'b': [2, 3], 'c': [4]}
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> # Get equivalent behavior with dict_stack2?
>>> # Almost, as long as None is not part of the list
>>> dict1_ = {'a': 1, 'b': 2}
>>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
>>> dict_stacked_ = dict_stack2([dict1_, dict2_])
>>> dict_stacked = {key: ut.filter_Nones(val) for key, val in dict_stacked_.items()}
>>> result = ut.repr2(dict_stacked, sorted_=True)
>>> print(result)
{'a': [1, 2], 'b': [2, 3], 'c': [4]}
|
utool/util_dict.py
|
dict_stack
|
Erotemic/utool
| 8
|
python
|
def dict_stack(dict_list, key_prefix=):
"\n stacks values from two dicts into a new dict where the values are list of\n the input values. the keys are the same.\n\n DEPRICATE in favor of dict_stack2\n\n Args:\n dict_list (list): list of dicts with similar keys\n\n Returns:\n dict dict_stacked\n\n CommandLine:\n python -m utool.util_dict --test-dict_stack\n python -m utool.util_dict --test-dict_stack:1\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_stacked = dict_stack([dict1_, dict2_])\n >>> result = ut.repr2(dict_stacked, sorted_=True)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [4]}\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> # Get equivalent behavior with dict_stack2?\n >>> # Almost, as long as None is not part of the list\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_stacked_ = dict_stack2([dict1_, dict2_])\n >>> dict_stacked = {key: ut.filter_Nones(val) for key, val in dict_stacked_.items()}\n >>> result = ut.repr2(dict_stacked, sorted_=True)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [4]}\n "
dict_stacked_ = defaultdict(list)
for dict_ in dict_list:
for (key, val) in six.iteritems(dict_):
dict_stacked_[(key_prefix + key)].append(val)
dict_stacked = dict(dict_stacked_)
return dict_stacked
|
def dict_stack(dict_list, key_prefix=):
"\n stacks values from two dicts into a new dict where the values are list of\n the input values. the keys are the same.\n\n DEPRICATE in favor of dict_stack2\n\n Args:\n dict_list (list): list of dicts with similar keys\n\n Returns:\n dict dict_stacked\n\n CommandLine:\n python -m utool.util_dict --test-dict_stack\n python -m utool.util_dict --test-dict_stack:1\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_stacked = dict_stack([dict1_, dict2_])\n >>> result = ut.repr2(dict_stacked, sorted_=True)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [4]}\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> # Get equivalent behavior with dict_stack2?\n >>> # Almost, as long as None is not part of the list\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_stacked_ = dict_stack2([dict1_, dict2_])\n >>> dict_stacked = {key: ut.filter_Nones(val) for key, val in dict_stacked_.items()}\n >>> result = ut.repr2(dict_stacked, sorted_=True)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [4]}\n "
dict_stacked_ = defaultdict(list)
for dict_ in dict_list:
for (key, val) in six.iteritems(dict_):
dict_stacked_[(key_prefix + key)].append(val)
dict_stacked = dict(dict_stacked_)
return dict_stacked<|docstring|>stacks values from two dicts into a new dict where the values are list of
the input values. the keys are the same.
DEPRICATE in favor of dict_stack2
Args:
dict_list (list): list of dicts with similar keys
Returns:
dict dict_stacked
CommandLine:
python -m utool.util_dict --test-dict_stack
python -m utool.util_dict --test-dict_stack:1
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': 1, 'b': 2}
>>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
>>> dict_stacked = dict_stack([dict1_, dict2_])
>>> result = ut.repr2(dict_stacked, sorted_=True)
>>> print(result)
{'a': [1, 2], 'b': [2, 3], 'c': [4]}
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> # Get equivalent behavior with dict_stack2?
>>> # Almost, as long as None is not part of the list
>>> dict1_ = {'a': 1, 'b': 2}
>>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
>>> dict_stacked_ = dict_stack2([dict1_, dict2_])
>>> dict_stacked = {key: ut.filter_Nones(val) for key, val in dict_stacked_.items()}
>>> result = ut.repr2(dict_stacked, sorted_=True)
>>> print(result)
{'a': [1, 2], 'b': [2, 3], 'c': [4]}<|endoftext|>
|
ccdbd0c43b31cd2aa6a890a3e988640f072187e2910479386475af8961c869a1
|
def dict_stack2(dict_list, key_suffix=None, default=None):
"\n Stacks vals from a list of dicts into a dict of lists. Inserts Nones in\n place of empty items to preserve order.\n\n Args:\n dict_list (list): list of dicts\n key_suffix (str): (default = None)\n\n Returns:\n dict: stacked_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> # Usual case: multiple dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [None, 4]}\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: one dict as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict_list = [dict1_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1], 'b': [2]}\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: zero dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = []\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {}\n\n Example3:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: empty dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = [{}]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {}\n\n Example4:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: one dict is empty\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': [1, 2], 'b': [2, 3]}\n >>> dict2_ = {}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [[1, 2], None], 'b': [[2, 3], None]}\n\n Example5:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: disjoint dicts\n >>> from utool.util_dict 
import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': [1, 2], 'b': [2, 3]}\n >>> dict2_ = {'c': 4}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [[1, 2], None], 'b': [[2, 3], None], 'c': [None, 4]}\n\n Example6:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: 3 dicts\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = [{'a': 1}, {'b': 1}, {'c': 1}, {'b': 2}]\n >>> default = None\n >>> dict_stacked = dict_stack2(dict_list, default=default)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1, None, None, None], 'b': [None, 1, None, 2], 'c': [None, None, 1, None]}\n "
if (len(dict_list) > 0):
dict_list_ = [map_dict_vals((lambda x: [x]), kw) for kw in dict_list]
default1 = []
default2 = [default]
accum_ = dict_list_[0]
for dict_ in dict_list_[1:]:
default1.append(default)
accum_ = dict_union_combine(accum_, dict_, default=default1, default2=default2)
stacked_dict = accum_
else:
stacked_dict = {}
if (key_suffix is not None):
stacked_dict = map_dict_keys((lambda x: (x + key_suffix)), stacked_dict)
return stacked_dict
|
Stacks vals from a list of dicts into a dict of lists. Inserts Nones in
place of empty items to preserve order.
Args:
dict_list (list): list of dicts
key_suffix (str): (default = None)
Returns:
dict: stacked_dict
Example:
>>> # ENABLE_DOCTEST
>>> # Usual case: multiple dicts as input
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': 1, 'b': 2}
>>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
>>> dict_list = [dict1_, dict2_]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [1, 2], 'b': [2, 3], 'c': [None, 4]}
Example1:
>>> # ENABLE_DOCTEST
>>> # Corner case: one dict as input
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': 1, 'b': 2}
>>> dict_list = [dict1_]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [1], 'b': [2]}
Example2:
>>> # ENABLE_DOCTEST
>>> # Corner case: zero dicts as input
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_list = []
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{}
Example3:
>>> # ENABLE_DOCTEST
>>> # Corner case: empty dicts as input
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_list = [{}]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{}
Example4:
>>> # ENABLE_DOCTEST
>>> # Corner case: one dict is empty
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': [1, 2], 'b': [2, 3]}
>>> dict2_ = {}
>>> dict_list = [dict1_, dict2_]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [[1, 2], None], 'b': [[2, 3], None]}
Example5:
>>> # ENABLE_DOCTEST
>>> # Corner case: disjoint dicts
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': [1, 2], 'b': [2, 3]}
>>> dict2_ = {'c': 4}
>>> dict_list = [dict1_, dict2_]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [[1, 2], None], 'b': [[2, 3], None], 'c': [None, 4]}
Example6:
>>> # ENABLE_DOCTEST
>>> # Corner case: 3 dicts
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_list = [{'a': 1}, {'b': 1}, {'c': 1}, {'b': 2}]
>>> default = None
>>> dict_stacked = dict_stack2(dict_list, default=default)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [1, None, None, None], 'b': [None, 1, None, 2], 'c': [None, None, 1, None]}
|
utool/util_dict.py
|
dict_stack2
|
Erotemic/utool
| 8
|
python
|
def dict_stack2(dict_list, key_suffix=None, default=None):
"\n Stacks vals from a list of dicts into a dict of lists. Inserts Nones in\n place of empty items to preserve order.\n\n Args:\n dict_list (list): list of dicts\n key_suffix (str): (default = None)\n\n Returns:\n dict: stacked_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> # Usual case: multiple dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [None, 4]}\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: one dict as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict_list = [dict1_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1], 'b': [2]}\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: zero dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = []\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {}\n\n Example3:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: empty dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = [{}]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {}\n\n Example4:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: one dict is empty\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': [1, 2], 'b': [2, 3]}\n >>> dict2_ = {}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [[1, 2], None], 'b': [[2, 3], None]}\n\n Example5:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: disjoint dicts\n >>> from utool.util_dict 
import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': [1, 2], 'b': [2, 3]}\n >>> dict2_ = {'c': 4}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [[1, 2], None], 'b': [[2, 3], None], 'c': [None, 4]}\n\n Example6:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: 3 dicts\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = [{'a': 1}, {'b': 1}, {'c': 1}, {'b': 2}]\n >>> default = None\n >>> dict_stacked = dict_stack2(dict_list, default=default)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1, None, None, None], 'b': [None, 1, None, 2], 'c': [None, None, 1, None]}\n "
if (len(dict_list) > 0):
dict_list_ = [map_dict_vals((lambda x: [x]), kw) for kw in dict_list]
default1 = []
default2 = [default]
accum_ = dict_list_[0]
for dict_ in dict_list_[1:]:
default1.append(default)
accum_ = dict_union_combine(accum_, dict_, default=default1, default2=default2)
stacked_dict = accum_
else:
stacked_dict = {}
if (key_suffix is not None):
stacked_dict = map_dict_keys((lambda x: (x + key_suffix)), stacked_dict)
return stacked_dict
|
def dict_stack2(dict_list, key_suffix=None, default=None):
"\n Stacks vals from a list of dicts into a dict of lists. Inserts Nones in\n place of empty items to preserve order.\n\n Args:\n dict_list (list): list of dicts\n key_suffix (str): (default = None)\n\n Returns:\n dict: stacked_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> # Usual case: multiple dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict2_ = {'a': 2, 'b': 3, 'c': 4}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1, 2], 'b': [2, 3], 'c': [None, 4]}\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: one dict as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': 1, 'b': 2}\n >>> dict_list = [dict1_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1], 'b': [2]}\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: zero dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = []\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {}\n\n Example3:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: empty dicts as input\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = [{}]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {}\n\n Example4:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: one dict is empty\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': [1, 2], 'b': [2, 3]}\n >>> dict2_ = {}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [[1, 2], None], 'b': [[2, 3], None]}\n\n Example5:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: disjoint dicts\n >>> from utool.util_dict 
import * # NOQA\n >>> import utool as ut\n >>> dict1_ = {'a': [1, 2], 'b': [2, 3]}\n >>> dict2_ = {'c': 4}\n >>> dict_list = [dict1_, dict2_]\n >>> dict_stacked = dict_stack2(dict_list)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [[1, 2], None], 'b': [[2, 3], None], 'c': [None, 4]}\n\n Example6:\n >>> # ENABLE_DOCTEST\n >>> # Corner case: 3 dicts\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_list = [{'a': 1}, {'b': 1}, {'c': 1}, {'b': 2}]\n >>> default = None\n >>> dict_stacked = dict_stack2(dict_list, default=default)\n >>> result = ut.repr2(dict_stacked)\n >>> print(result)\n {'a': [1, None, None, None], 'b': [None, 1, None, 2], 'c': [None, None, 1, None]}\n "
if (len(dict_list) > 0):
dict_list_ = [map_dict_vals((lambda x: [x]), kw) for kw in dict_list]
default1 = []
default2 = [default]
accum_ = dict_list_[0]
for dict_ in dict_list_[1:]:
default1.append(default)
accum_ = dict_union_combine(accum_, dict_, default=default1, default2=default2)
stacked_dict = accum_
else:
stacked_dict = {}
if (key_suffix is not None):
stacked_dict = map_dict_keys((lambda x: (x + key_suffix)), stacked_dict)
return stacked_dict<|docstring|>Stacks vals from a list of dicts into a dict of lists. Inserts Nones in
place of empty items to preserve order.
Args:
dict_list (list): list of dicts
key_suffix (str): (default = None)
Returns:
dict: stacked_dict
Example:
>>> # ENABLE_DOCTEST
>>> # Usual case: multiple dicts as input
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': 1, 'b': 2}
>>> dict2_ = {'a': 2, 'b': 3, 'c': 4}
>>> dict_list = [dict1_, dict2_]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [1, 2], 'b': [2, 3], 'c': [None, 4]}
Example1:
>>> # ENABLE_DOCTEST
>>> # Corner case: one dict as input
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': 1, 'b': 2}
>>> dict_list = [dict1_]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [1], 'b': [2]}
Example2:
>>> # ENABLE_DOCTEST
>>> # Corner case: zero dicts as input
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_list = []
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{}
Example3:
>>> # ENABLE_DOCTEST
>>> # Corner case: empty dicts as input
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_list = [{}]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{}
Example4:
>>> # ENABLE_DOCTEST
>>> # Corner case: one dict is empty
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': [1, 2], 'b': [2, 3]}
>>> dict2_ = {}
>>> dict_list = [dict1_, dict2_]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [[1, 2], None], 'b': [[2, 3], None]}
Example5:
>>> # ENABLE_DOCTEST
>>> # Corner case: disjoint dicts
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1_ = {'a': [1, 2], 'b': [2, 3]}
>>> dict2_ = {'c': 4}
>>> dict_list = [dict1_, dict2_]
>>> dict_stacked = dict_stack2(dict_list)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [[1, 2], None], 'b': [[2, 3], None], 'c': [None, 4]}
Example6:
>>> # ENABLE_DOCTEST
>>> # Corner case: 3 dicts
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_list = [{'a': 1}, {'b': 1}, {'c': 1}, {'b': 2}]
>>> default = None
>>> dict_stacked = dict_stack2(dict_list, default=default)
>>> result = ut.repr2(dict_stacked)
>>> print(result)
{'a': [1, None, None, None], 'b': [None, 1, None, 2], 'c': [None, None, 1, None]}<|endoftext|>
|
4eb6169e4620f6ace46aeeb44e6ae7c5f6deaf0325adb895ac8419d4c61f2206
|
def invert_dict(dict_, unique_vals=True):
"\n Reverses the keys and values in a dictionary. Set unique_vals to False if\n the values in the dict are not unique.\n\n Args:\n dict_ (dict_): dictionary\n unique_vals (bool): if False, inverted keys are returned in a list.\n\n Returns:\n dict: inverted_dict\n\n CommandLine:\n python -m utool.util_dict --test-invert_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': 1, 'b': 2}\n >>> inverted_dict = invert_dict(dict_)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {1: 'a', 2: 'b'}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = OrderedDict([(2, 'good',), (1, 'ok',), (0, 'junk',), (None, 'UNKNOWN',)])\n >>> inverted_dict = invert_dict(dict_)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {'good': 2, 'ok': 1, 'junk': 0, 'UNKNOWN': None}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 2}\n >>> inverted_dict = invert_dict(dict_, unique_vals=False)\n >>> inverted_dict = ut.map_dict_vals(sorted, inverted_dict)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {0: ['b', 'c', 'd', 'e'], 1: ['a'], 2: ['f']}\n "
if unique_vals:
inverted_items = [(val, key) for (key, val) in six.iteritems(dict_)]
inverted_dict = type(dict_)(inverted_items)
else:
inverted_dict = group_items(dict_.keys(), dict_.values())
return inverted_dict
|
Reverses the keys and values in a dictionary. Set unique_vals to False if
the values in the dict are not unique.
Args:
dict_ (dict_): dictionary
unique_vals (bool): if False, inverted keys are returned in a list.
Returns:
dict: inverted_dict
CommandLine:
python -m utool.util_dict --test-invert_dict
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': 1, 'b': 2}
>>> inverted_dict = invert_dict(dict_)
>>> result = ut.repr4(inverted_dict, nl=False)
>>> print(result)
{1: 'a', 2: 'b'}
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = OrderedDict([(2, 'good',), (1, 'ok',), (0, 'junk',), (None, 'UNKNOWN',)])
>>> inverted_dict = invert_dict(dict_)
>>> result = ut.repr4(inverted_dict, nl=False)
>>> print(result)
{'good': 2, 'ok': 1, 'junk': 0, 'UNKNOWN': None}
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 2}
>>> inverted_dict = invert_dict(dict_, unique_vals=False)
>>> inverted_dict = ut.map_dict_vals(sorted, inverted_dict)
>>> result = ut.repr4(inverted_dict, nl=False)
>>> print(result)
{0: ['b', 'c', 'd', 'e'], 1: ['a'], 2: ['f']}
|
utool/util_dict.py
|
invert_dict
|
Erotemic/utool
| 8
|
python
|
def invert_dict(dict_, unique_vals=True):
"\n Reverses the keys and values in a dictionary. Set unique_vals to False if\n the values in the dict are not unique.\n\n Args:\n dict_ (dict_): dictionary\n unique_vals (bool): if False, inverted keys are returned in a list.\n\n Returns:\n dict: inverted_dict\n\n CommandLine:\n python -m utool.util_dict --test-invert_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': 1, 'b': 2}\n >>> inverted_dict = invert_dict(dict_)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {1: 'a', 2: 'b'}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = OrderedDict([(2, 'good',), (1, 'ok',), (0, 'junk',), (None, 'UNKNOWN',)])\n >>> inverted_dict = invert_dict(dict_)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {'good': 2, 'ok': 1, 'junk': 0, 'UNKNOWN': None}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 2}\n >>> inverted_dict = invert_dict(dict_, unique_vals=False)\n >>> inverted_dict = ut.map_dict_vals(sorted, inverted_dict)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {0: ['b', 'c', 'd', 'e'], 1: ['a'], 2: ['f']}\n "
if unique_vals:
inverted_items = [(val, key) for (key, val) in six.iteritems(dict_)]
inverted_dict = type(dict_)(inverted_items)
else:
inverted_dict = group_items(dict_.keys(), dict_.values())
return inverted_dict
|
def invert_dict(dict_, unique_vals=True):
"\n Reverses the keys and values in a dictionary. Set unique_vals to False if\n the values in the dict are not unique.\n\n Args:\n dict_ (dict_): dictionary\n unique_vals (bool): if False, inverted keys are returned in a list.\n\n Returns:\n dict: inverted_dict\n\n CommandLine:\n python -m utool.util_dict --test-invert_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': 1, 'b': 2}\n >>> inverted_dict = invert_dict(dict_)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {1: 'a', 2: 'b'}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = OrderedDict([(2, 'good',), (1, 'ok',), (0, 'junk',), (None, 'UNKNOWN',)])\n >>> inverted_dict = invert_dict(dict_)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {'good': 2, 'ok': 1, 'junk': 0, 'UNKNOWN': None}\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 2}\n >>> inverted_dict = invert_dict(dict_, unique_vals=False)\n >>> inverted_dict = ut.map_dict_vals(sorted, inverted_dict)\n >>> result = ut.repr4(inverted_dict, nl=False)\n >>> print(result)\n {0: ['b', 'c', 'd', 'e'], 1: ['a'], 2: ['f']}\n "
if unique_vals:
inverted_items = [(val, key) for (key, val) in six.iteritems(dict_)]
inverted_dict = type(dict_)(inverted_items)
else:
inverted_dict = group_items(dict_.keys(), dict_.values())
return inverted_dict<|docstring|>Reverses the keys and values in a dictionary. Set unique_vals to False if
the values in the dict are not unique.
Args:
dict_ (dict_): dictionary
unique_vals (bool): if False, inverted keys are returned in a list.
Returns:
dict: inverted_dict
CommandLine:
python -m utool.util_dict --test-invert_dict
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': 1, 'b': 2}
>>> inverted_dict = invert_dict(dict_)
>>> result = ut.repr4(inverted_dict, nl=False)
>>> print(result)
{1: 'a', 2: 'b'}
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = OrderedDict([(2, 'good',), (1, 'ok',), (0, 'junk',), (None, 'UNKNOWN',)])
>>> inverted_dict = invert_dict(dict_)
>>> result = ut.repr4(inverted_dict, nl=False)
>>> print(result)
{'good': 2, 'ok': 1, 'junk': 0, 'UNKNOWN': None}
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': 1, 'b': 0, 'c': 0, 'd': 0, 'e': 0, 'f': 2}
>>> inverted_dict = invert_dict(dict_, unique_vals=False)
>>> inverted_dict = ut.map_dict_vals(sorted, inverted_dict)
>>> result = ut.repr4(inverted_dict, nl=False)
>>> print(result)
{0: ['b', 'c', 'd', 'e'], 1: ['a'], 2: ['f']}<|endoftext|>
|
01246d6556853524ffa3df537c8dff941e0b4e7bce167702483250a047571db4
|
def iter_all_dict_combinations_ordered(varied_dict):
'\n Same as all_dict_combinations but preserves order\n '
tups_list = [[(key, val) for val in val_list] for (key, val_list) in six.iteritems(varied_dict)]
dict_iter = (OrderedDict(tups) for tups in it.product(*tups_list))
return dict_iter
|
Same as all_dict_combinations but preserves order
|
utool/util_dict.py
|
iter_all_dict_combinations_ordered
|
Erotemic/utool
| 8
|
python
|
def iter_all_dict_combinations_ordered(varied_dict):
'\n \n '
tups_list = [[(key, val) for val in val_list] for (key, val_list) in six.iteritems(varied_dict)]
dict_iter = (OrderedDict(tups) for tups in it.product(*tups_list))
return dict_iter
|
def iter_all_dict_combinations_ordered(varied_dict):
'\n \n '
tups_list = [[(key, val) for val in val_list] for (key, val_list) in six.iteritems(varied_dict)]
dict_iter = (OrderedDict(tups) for tups in it.product(*tups_list))
return dict_iter<|docstring|>Same as all_dict_combinations but preserves order<|endoftext|>
|
37b71bc6180269af3772a5dc0db50c182457505e80d3d08bbb68a493f2a1006f
|
def all_dict_combinations_ordered(varied_dict):
'\n Same as all_dict_combinations but preserves order\n '
dict_list = list(iter_all_dict_combinations_ordered)
return dict_list
|
Same as all_dict_combinations but preserves order
|
utool/util_dict.py
|
all_dict_combinations_ordered
|
Erotemic/utool
| 8
|
python
|
def all_dict_combinations_ordered(varied_dict):
'\n \n '
dict_list = list(iter_all_dict_combinations_ordered)
return dict_list
|
def all_dict_combinations_ordered(varied_dict):
'\n \n '
dict_list = list(iter_all_dict_combinations_ordered)
return dict_list<|docstring|>Same as all_dict_combinations but preserves order<|endoftext|>
|
67697e278d08e32d5af6ba8ac91076c5383f0ee5eee0ed4d2b5da5265639d270
|
def all_dict_combinations(varied_dict):
"\n all_dict_combinations\n\n Args:\n varied_dict (dict): a dict with lists of possible parameter settings\n\n Returns:\n list: dict_list a list of dicts correpsonding to all combinations of params settings\n\n CommandLine:\n python -m utool.util_dict --test-all_dict_combinations\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}\n >>> dict_list = all_dict_combinations(varied_dict)\n >>> result = str(ut.repr4(dict_list))\n >>> print(result)\n [\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': True},\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': False},\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': None},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': True},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': False},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': None},\n ]\n "
tups_list = [([(key, val) for val in val_list] if isinstance(val_list, list) else [(key, val_list)]) for (key, val_list) in iteritems_sorted(varied_dict)]
dict_list = [dict(tups) for tups in it.product(*tups_list)]
return dict_list
|
all_dict_combinations
Args:
varied_dict (dict): a dict with lists of possible parameter settings
Returns:
list: dict_list a list of dicts correpsonding to all combinations of params settings
CommandLine:
python -m utool.util_dict --test-all_dict_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}
>>> dict_list = all_dict_combinations(varied_dict)
>>> result = str(ut.repr4(dict_list))
>>> print(result)
[
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': True},
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': False},
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': None},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': True},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': False},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': None},
]
|
utool/util_dict.py
|
all_dict_combinations
|
Erotemic/utool
| 8
|
python
|
def all_dict_combinations(varied_dict):
"\n all_dict_combinations\n\n Args:\n varied_dict (dict): a dict with lists of possible parameter settings\n\n Returns:\n list: dict_list a list of dicts correpsonding to all combinations of params settings\n\n CommandLine:\n python -m utool.util_dict --test-all_dict_combinations\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}\n >>> dict_list = all_dict_combinations(varied_dict)\n >>> result = str(ut.repr4(dict_list))\n >>> print(result)\n [\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': True},\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': False},\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': None},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': True},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': False},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': None},\n ]\n "
tups_list = [([(key, val) for val in val_list] if isinstance(val_list, list) else [(key, val_list)]) for (key, val_list) in iteritems_sorted(varied_dict)]
dict_list = [dict(tups) for tups in it.product(*tups_list)]
return dict_list
|
def all_dict_combinations(varied_dict):
"\n all_dict_combinations\n\n Args:\n varied_dict (dict): a dict with lists of possible parameter settings\n\n Returns:\n list: dict_list a list of dicts correpsonding to all combinations of params settings\n\n CommandLine:\n python -m utool.util_dict --test-all_dict_combinations\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}\n >>> dict_list = all_dict_combinations(varied_dict)\n >>> result = str(ut.repr4(dict_list))\n >>> print(result)\n [\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': True},\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': False},\n {'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': None},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': True},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': False},\n {'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': None},\n ]\n "
tups_list = [([(key, val) for val in val_list] if isinstance(val_list, list) else [(key, val_list)]) for (key, val_list) in iteritems_sorted(varied_dict)]
dict_list = [dict(tups) for tups in it.product(*tups_list)]
return dict_list<|docstring|>all_dict_combinations
Args:
varied_dict (dict): a dict with lists of possible parameter settings
Returns:
list: dict_list a list of dicts correpsonding to all combinations of params settings
CommandLine:
python -m utool.util_dict --test-all_dict_combinations
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}
>>> dict_list = all_dict_combinations(varied_dict)
>>> result = str(ut.repr4(dict_list))
>>> print(result)
[
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': True},
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': False},
{'logdist_weight': 0.0, 'pipeline_root': 'vsmany', 'sv_on': None},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': True},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': False},
{'logdist_weight': 1.0, 'pipeline_root': 'vsmany', 'sv_on': None},
]<|endoftext|>
|
3f964718f4577a2a1aef973c537a48f7089a7478ebbc428115b9ea58299b6a6b
|
def all_dict_combinations_lbls(varied_dict, remove_singles=True, allow_lone_singles=False):
"\n returns a label for each variation in a varydict.\n\n It tries to not be oververbose and returns only what parameters are varied\n in each label.\n\n CommandLine:\n python -m utool.util_dict --test-all_dict_combinations_lbls\n python -m utool.util_dict --exec-all_dict_combinations_lbls:1\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> import utool\n >>> from utool.util_dict import * # NOQA\n >>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}\n >>> comb_lbls = utool.all_dict_combinations_lbls(varied_dict)\n >>> result = (utool.repr4(comb_lbls))\n >>> print(result)\n [\n 'logdist_weight=0.0,sv_on=True',\n 'logdist_weight=0.0,sv_on=False',\n 'logdist_weight=0.0,sv_on=None',\n 'logdist_weight=1.0,sv_on=True',\n 'logdist_weight=1.0,sv_on=False',\n 'logdist_weight=1.0,sv_on=None',\n ]\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> import utool as ut\n >>> from utool.util_dict import * # NOQA\n >>> varied_dict = {'logdist_weight': [0.0], 'pipeline_root': ['vsmany'], 'sv_on': [True]}\n >>> allow_lone_singles = True\n >>> comb_lbls = ut.all_dict_combinations_lbls(varied_dict, allow_lone_singles=allow_lone_singles)\n >>> result = (ut.repr4(comb_lbls))\n >>> print(result)\n [\n 'logdist_weight=0.0,pipeline_root=vsmany,sv_on=True',\n ]\n "
is_lone_single = all([(isinstance(val_list, (list, tuple)) and (len(val_list) == 1)) for (key, val_list) in iteritems_sorted(varied_dict)])
if ((not remove_singles) or (allow_lone_singles and is_lone_single)):
multitups_list = [[(key, val) for val in val_list] for (key, val_list) in iteritems_sorted(varied_dict)]
else:
multitups_list = [[(key, val) for val in val_list] for (key, val_list) in iteritems_sorted(varied_dict) if (isinstance(val_list, (list, tuple)) and (len(val_list) > 1))]
combtup_list = list(it.product(*multitups_list))
combtup_list2 = [[((key, val) if isinstance(val, six.string_types) else (key, repr(val))) for (key, val) in combtup] for combtup in combtup_list]
comb_lbls = [','.join([('%s=%s' % (key, val)) for (key, val) in combtup]) for combtup in combtup_list2]
return comb_lbls
|
returns a label for each variation in a varydict.
It tries to not be oververbose and returns only what parameters are varied
in each label.
CommandLine:
python -m utool.util_dict --test-all_dict_combinations_lbls
python -m utool.util_dict --exec-all_dict_combinations_lbls:1
Example:
>>> # ENABLE_DOCTEST
>>> import utool
>>> from utool.util_dict import * # NOQA
>>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}
>>> comb_lbls = utool.all_dict_combinations_lbls(varied_dict)
>>> result = (utool.repr4(comb_lbls))
>>> print(result)
[
'logdist_weight=0.0,sv_on=True',
'logdist_weight=0.0,sv_on=False',
'logdist_weight=0.0,sv_on=None',
'logdist_weight=1.0,sv_on=True',
'logdist_weight=1.0,sv_on=False',
'logdist_weight=1.0,sv_on=None',
]
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_dict import * # NOQA
>>> varied_dict = {'logdist_weight': [0.0], 'pipeline_root': ['vsmany'], 'sv_on': [True]}
>>> allow_lone_singles = True
>>> comb_lbls = ut.all_dict_combinations_lbls(varied_dict, allow_lone_singles=allow_lone_singles)
>>> result = (ut.repr4(comb_lbls))
>>> print(result)
[
'logdist_weight=0.0,pipeline_root=vsmany,sv_on=True',
]
|
utool/util_dict.py
|
all_dict_combinations_lbls
|
Erotemic/utool
| 8
|
python
|
def all_dict_combinations_lbls(varied_dict, remove_singles=True, allow_lone_singles=False):
"\n returns a label for each variation in a varydict.\n\n It tries to not be oververbose and returns only what parameters are varied\n in each label.\n\n CommandLine:\n python -m utool.util_dict --test-all_dict_combinations_lbls\n python -m utool.util_dict --exec-all_dict_combinations_lbls:1\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> import utool\n >>> from utool.util_dict import * # NOQA\n >>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}\n >>> comb_lbls = utool.all_dict_combinations_lbls(varied_dict)\n >>> result = (utool.repr4(comb_lbls))\n >>> print(result)\n [\n 'logdist_weight=0.0,sv_on=True',\n 'logdist_weight=0.0,sv_on=False',\n 'logdist_weight=0.0,sv_on=None',\n 'logdist_weight=1.0,sv_on=True',\n 'logdist_weight=1.0,sv_on=False',\n 'logdist_weight=1.0,sv_on=None',\n ]\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> import utool as ut\n >>> from utool.util_dict import * # NOQA\n >>> varied_dict = {'logdist_weight': [0.0], 'pipeline_root': ['vsmany'], 'sv_on': [True]}\n >>> allow_lone_singles = True\n >>> comb_lbls = ut.all_dict_combinations_lbls(varied_dict, allow_lone_singles=allow_lone_singles)\n >>> result = (ut.repr4(comb_lbls))\n >>> print(result)\n [\n 'logdist_weight=0.0,pipeline_root=vsmany,sv_on=True',\n ]\n "
is_lone_single = all([(isinstance(val_list, (list, tuple)) and (len(val_list) == 1)) for (key, val_list) in iteritems_sorted(varied_dict)])
if ((not remove_singles) or (allow_lone_singles and is_lone_single)):
multitups_list = [[(key, val) for val in val_list] for (key, val_list) in iteritems_sorted(varied_dict)]
else:
multitups_list = [[(key, val) for val in val_list] for (key, val_list) in iteritems_sorted(varied_dict) if (isinstance(val_list, (list, tuple)) and (len(val_list) > 1))]
combtup_list = list(it.product(*multitups_list))
combtup_list2 = [[((key, val) if isinstance(val, six.string_types) else (key, repr(val))) for (key, val) in combtup] for combtup in combtup_list]
comb_lbls = [','.join([('%s=%s' % (key, val)) for (key, val) in combtup]) for combtup in combtup_list2]
return comb_lbls
|
def all_dict_combinations_lbls(varied_dict, remove_singles=True, allow_lone_singles=False):
"\n returns a label for each variation in a varydict.\n\n It tries to not be oververbose and returns only what parameters are varied\n in each label.\n\n CommandLine:\n python -m utool.util_dict --test-all_dict_combinations_lbls\n python -m utool.util_dict --exec-all_dict_combinations_lbls:1\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> import utool\n >>> from utool.util_dict import * # NOQA\n >>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}\n >>> comb_lbls = utool.all_dict_combinations_lbls(varied_dict)\n >>> result = (utool.repr4(comb_lbls))\n >>> print(result)\n [\n 'logdist_weight=0.0,sv_on=True',\n 'logdist_weight=0.0,sv_on=False',\n 'logdist_weight=0.0,sv_on=None',\n 'logdist_weight=1.0,sv_on=True',\n 'logdist_weight=1.0,sv_on=False',\n 'logdist_weight=1.0,sv_on=None',\n ]\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> import utool as ut\n >>> from utool.util_dict import * # NOQA\n >>> varied_dict = {'logdist_weight': [0.0], 'pipeline_root': ['vsmany'], 'sv_on': [True]}\n >>> allow_lone_singles = True\n >>> comb_lbls = ut.all_dict_combinations_lbls(varied_dict, allow_lone_singles=allow_lone_singles)\n >>> result = (ut.repr4(comb_lbls))\n >>> print(result)\n [\n 'logdist_weight=0.0,pipeline_root=vsmany,sv_on=True',\n ]\n "
is_lone_single = all([(isinstance(val_list, (list, tuple)) and (len(val_list) == 1)) for (key, val_list) in iteritems_sorted(varied_dict)])
if ((not remove_singles) or (allow_lone_singles and is_lone_single)):
multitups_list = [[(key, val) for val in val_list] for (key, val_list) in iteritems_sorted(varied_dict)]
else:
multitups_list = [[(key, val) for val in val_list] for (key, val_list) in iteritems_sorted(varied_dict) if (isinstance(val_list, (list, tuple)) and (len(val_list) > 1))]
combtup_list = list(it.product(*multitups_list))
combtup_list2 = [[((key, val) if isinstance(val, six.string_types) else (key, repr(val))) for (key, val) in combtup] for combtup in combtup_list]
comb_lbls = [','.join([('%s=%s' % (key, val)) for (key, val) in combtup]) for combtup in combtup_list2]
return comb_lbls<|docstring|>returns a label for each variation in a varydict.
It tries to not be oververbose and returns only what parameters are varied
in each label.
CommandLine:
python -m utool.util_dict --test-all_dict_combinations_lbls
python -m utool.util_dict --exec-all_dict_combinations_lbls:1
Example:
>>> # ENABLE_DOCTEST
>>> import utool
>>> from utool.util_dict import * # NOQA
>>> varied_dict = {'logdist_weight': [0.0, 1.0], 'pipeline_root': ['vsmany'], 'sv_on': [True, False, None]}
>>> comb_lbls = utool.all_dict_combinations_lbls(varied_dict)
>>> result = (utool.repr4(comb_lbls))
>>> print(result)
[
'logdist_weight=0.0,sv_on=True',
'logdist_weight=0.0,sv_on=False',
'logdist_weight=0.0,sv_on=None',
'logdist_weight=1.0,sv_on=True',
'logdist_weight=1.0,sv_on=False',
'logdist_weight=1.0,sv_on=None',
]
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> from utool.util_dict import * # NOQA
>>> varied_dict = {'logdist_weight': [0.0], 'pipeline_root': ['vsmany'], 'sv_on': [True]}
>>> allow_lone_singles = True
>>> comb_lbls = ut.all_dict_combinations_lbls(varied_dict, allow_lone_singles=allow_lone_singles)
>>> result = (ut.repr4(comb_lbls))
>>> print(result)
[
'logdist_weight=0.0,pipeline_root=vsmany,sv_on=True',
]<|endoftext|>
|
49186f3f405d44511e2f99d6e5db347f67eeb60cca83bcf2b43de98099efd023
|
def build_conflict_dict(key_list, val_list):
"\n Builds dict where a list of values is associated with more than one key\n\n Args:\n key_list (list):\n val_list (list):\n\n Returns:\n dict: key_to_vals\n\n CommandLine:\n python -m utool.util_dict --test-build_conflict_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> key_list = [ 1, 2, 2, 3, 1]\n >>> val_list = ['a', 'b', 'c', 'd', 'e']\n >>> key_to_vals = build_conflict_dict(key_list, val_list)\n >>> result = ut.repr4(key_to_vals)\n >>> print(result)\n {\n 1: ['a', 'e'],\n 2: ['b', 'c'],\n 3: ['d'],\n }\n "
key_to_vals = defaultdict(list)
for (key, val) in zip(key_list, val_list):
key_to_vals[key].append(val)
return key_to_vals
|
Builds dict where a list of values is associated with more than one key
Args:
key_list (list):
val_list (list):
Returns:
dict: key_to_vals
CommandLine:
python -m utool.util_dict --test-build_conflict_dict
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> key_list = [ 1, 2, 2, 3, 1]
>>> val_list = ['a', 'b', 'c', 'd', 'e']
>>> key_to_vals = build_conflict_dict(key_list, val_list)
>>> result = ut.repr4(key_to_vals)
>>> print(result)
{
1: ['a', 'e'],
2: ['b', 'c'],
3: ['d'],
}
|
utool/util_dict.py
|
build_conflict_dict
|
Erotemic/utool
| 8
|
python
|
def build_conflict_dict(key_list, val_list):
"\n Builds dict where a list of values is associated with more than one key\n\n Args:\n key_list (list):\n val_list (list):\n\n Returns:\n dict: key_to_vals\n\n CommandLine:\n python -m utool.util_dict --test-build_conflict_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> key_list = [ 1, 2, 2, 3, 1]\n >>> val_list = ['a', 'b', 'c', 'd', 'e']\n >>> key_to_vals = build_conflict_dict(key_list, val_list)\n >>> result = ut.repr4(key_to_vals)\n >>> print(result)\n {\n 1: ['a', 'e'],\n 2: ['b', 'c'],\n 3: ['d'],\n }\n "
key_to_vals = defaultdict(list)
for (key, val) in zip(key_list, val_list):
key_to_vals[key].append(val)
return key_to_vals
|
def build_conflict_dict(key_list, val_list):
"\n Builds dict where a list of values is associated with more than one key\n\n Args:\n key_list (list):\n val_list (list):\n\n Returns:\n dict: key_to_vals\n\n CommandLine:\n python -m utool.util_dict --test-build_conflict_dict\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> key_list = [ 1, 2, 2, 3, 1]\n >>> val_list = ['a', 'b', 'c', 'd', 'e']\n >>> key_to_vals = build_conflict_dict(key_list, val_list)\n >>> result = ut.repr4(key_to_vals)\n >>> print(result)\n {\n 1: ['a', 'e'],\n 2: ['b', 'c'],\n 3: ['d'],\n }\n "
key_to_vals = defaultdict(list)
for (key, val) in zip(key_list, val_list):
key_to_vals[key].append(val)
return key_to_vals<|docstring|>Builds dict where a list of values is associated with more than one key
Args:
key_list (list):
val_list (list):
Returns:
dict: key_to_vals
CommandLine:
python -m utool.util_dict --test-build_conflict_dict
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> key_list = [ 1, 2, 2, 3, 1]
>>> val_list = ['a', 'b', 'c', 'd', 'e']
>>> key_to_vals = build_conflict_dict(key_list, val_list)
>>> result = ut.repr4(key_to_vals)
>>> print(result)
{
1: ['a', 'e'],
2: ['b', 'c'],
3: ['d'],
}<|endoftext|>
|
53f9380e090292f1a25b3812483f62f1be7d5576210ea4a02852f3dc1971d1af
|
def assert_keys_are_subset(dict1, dict2):
'\n Example:\n >>> # DISABLE_DOCTEST\n >>> dict1 = {1:1, 2:2, 3:3}\n >>> dict2 = {2:3, 3:3}\n >>> assert_keys_are_subset(dict1, dict2)\n >>> #dict2 = {4:3, 3:3}\n '
keys1 = set(dict1.keys())
keys2 = set(dict2.keys())
unknown_keys = keys2.difference(keys1)
assert (len(unknown_keys) == 0), ('unknown_keys=%r' % (unknown_keys,))
|
Example:
>>> # DISABLE_DOCTEST
>>> dict1 = {1:1, 2:2, 3:3}
>>> dict2 = {2:3, 3:3}
>>> assert_keys_are_subset(dict1, dict2)
>>> #dict2 = {4:3, 3:3}
|
utool/util_dict.py
|
assert_keys_are_subset
|
Erotemic/utool
| 8
|
python
|
def assert_keys_are_subset(dict1, dict2):
'\n Example:\n >>> # DISABLE_DOCTEST\n >>> dict1 = {1:1, 2:2, 3:3}\n >>> dict2 = {2:3, 3:3}\n >>> assert_keys_are_subset(dict1, dict2)\n >>> #dict2 = {4:3, 3:3}\n '
keys1 = set(dict1.keys())
keys2 = set(dict2.keys())
unknown_keys = keys2.difference(keys1)
assert (len(unknown_keys) == 0), ('unknown_keys=%r' % (unknown_keys,))
|
def assert_keys_are_subset(dict1, dict2):
'\n Example:\n >>> # DISABLE_DOCTEST\n >>> dict1 = {1:1, 2:2, 3:3}\n >>> dict2 = {2:3, 3:3}\n >>> assert_keys_are_subset(dict1, dict2)\n >>> #dict2 = {4:3, 3:3}\n '
keys1 = set(dict1.keys())
keys2 = set(dict2.keys())
unknown_keys = keys2.difference(keys1)
assert (len(unknown_keys) == 0), ('unknown_keys=%r' % (unknown_keys,))<|docstring|>Example:
>>> # DISABLE_DOCTEST
>>> dict1 = {1:1, 2:2, 3:3}
>>> dict2 = {2:3, 3:3}
>>> assert_keys_are_subset(dict1, dict2)
>>> #dict2 = {4:3, 3:3}<|endoftext|>
|
de7b9eb787256fa74eabd7aaca633da658e7522e20a23b2dbf68f84ac8658cec
|
def update_existing(dict1, dict2, copy=False, assert_exists=False, iswarning=False, alias_dict=None):
"\n updates vals in dict1 using vals from dict2 only if the\n key is already in dict1.\n\n Args:\n dict1 (dict):\n dict2 (dict):\n copy (bool): if true modifies dictionary in place (default = False)\n assert_exists (bool): if True throws error if new key specified (default = False)\n alias_dict (dict): dictionary of alias keys for dict2 (default = None)\n\n Returns:\n dict - updated dictionary\n\n CommandLine:\n python -m utool.util_dict --test-update_existing\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3}\n >>> dict2 = {'a': 2, 'd': 3}\n >>> dict1_ = update_existing(dict1, dict2)\n >>> assert 'd' not in dict1\n >>> assert dict1['a'] == 2\n >>> assert dict1_ is dict1\n "
if assert_exists:
try:
assert_keys_are_subset(dict1, dict2)
except AssertionError as ex:
from utool import util_dbg
util_dbg.printex(ex, iswarning=iswarning, N=1)
if (not iswarning):
raise
if copy:
dict1 = dict(dict1)
if (alias_dict is None):
alias_dict = {}
for (key, val) in six.iteritems(dict2):
key = alias_dict.get(key, key)
if (key in dict1):
dict1[key] = val
return dict1
|
updates vals in dict1 using vals from dict2 only if the
key is already in dict1.
Args:
dict1 (dict):
dict2 (dict):
copy (bool): if true modifies dictionary in place (default = False)
assert_exists (bool): if True throws error if new key specified (default = False)
alias_dict (dict): dictionary of alias keys for dict2 (default = None)
Returns:
dict - updated dictionary
CommandLine:
python -m utool.util_dict --test-update_existing
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict1 = {'a': 1, 'b': 2, 'c': 3}
>>> dict2 = {'a': 2, 'd': 3}
>>> dict1_ = update_existing(dict1, dict2)
>>> assert 'd' not in dict1
>>> assert dict1['a'] == 2
>>> assert dict1_ is dict1
|
utool/util_dict.py
|
update_existing
|
Erotemic/utool
| 8
|
python
|
def update_existing(dict1, dict2, copy=False, assert_exists=False, iswarning=False, alias_dict=None):
"\n updates vals in dict1 using vals from dict2 only if the\n key is already in dict1.\n\n Args:\n dict1 (dict):\n dict2 (dict):\n copy (bool): if true modifies dictionary in place (default = False)\n assert_exists (bool): if True throws error if new key specified (default = False)\n alias_dict (dict): dictionary of alias keys for dict2 (default = None)\n\n Returns:\n dict - updated dictionary\n\n CommandLine:\n python -m utool.util_dict --test-update_existing\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3}\n >>> dict2 = {'a': 2, 'd': 3}\n >>> dict1_ = update_existing(dict1, dict2)\n >>> assert 'd' not in dict1\n >>> assert dict1['a'] == 2\n >>> assert dict1_ is dict1\n "
if assert_exists:
try:
assert_keys_are_subset(dict1, dict2)
except AssertionError as ex:
from utool import util_dbg
util_dbg.printex(ex, iswarning=iswarning, N=1)
if (not iswarning):
raise
if copy:
dict1 = dict(dict1)
if (alias_dict is None):
alias_dict = {}
for (key, val) in six.iteritems(dict2):
key = alias_dict.get(key, key)
if (key in dict1):
dict1[key] = val
return dict1
|
def update_existing(dict1, dict2, copy=False, assert_exists=False, iswarning=False, alias_dict=None):
"\n updates vals in dict1 using vals from dict2 only if the\n key is already in dict1.\n\n Args:\n dict1 (dict):\n dict2 (dict):\n copy (bool): if true modifies dictionary in place (default = False)\n assert_exists (bool): if True throws error if new key specified (default = False)\n alias_dict (dict): dictionary of alias keys for dict2 (default = None)\n\n Returns:\n dict - updated dictionary\n\n CommandLine:\n python -m utool.util_dict --test-update_existing\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3}\n >>> dict2 = {'a': 2, 'd': 3}\n >>> dict1_ = update_existing(dict1, dict2)\n >>> assert 'd' not in dict1\n >>> assert dict1['a'] == 2\n >>> assert dict1_ is dict1\n "
if assert_exists:
try:
assert_keys_are_subset(dict1, dict2)
except AssertionError as ex:
from utool import util_dbg
util_dbg.printex(ex, iswarning=iswarning, N=1)
if (not iswarning):
raise
if copy:
dict1 = dict(dict1)
if (alias_dict is None):
alias_dict = {}
for (key, val) in six.iteritems(dict2):
key = alias_dict.get(key, key)
if (key in dict1):
dict1[key] = val
return dict1<|docstring|>updates vals in dict1 using vals from dict2 only if the
key is already in dict1.
Args:
dict1 (dict):
dict2 (dict):
copy (bool): if true modifies dictionary in place (default = False)
assert_exists (bool): if True throws error if new key specified (default = False)
alias_dict (dict): dictionary of alias keys for dict2 (default = None)
Returns:
dict - updated dictionary
CommandLine:
python -m utool.util_dict --test-update_existing
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict1 = {'a': 1, 'b': 2, 'c': 3}
>>> dict2 = {'a': 2, 'd': 3}
>>> dict1_ = update_existing(dict1, dict2)
>>> assert 'd' not in dict1
>>> assert dict1['a'] == 2
>>> assert dict1_ is dict1<|endoftext|>
|
73dd5d4fa5e794e7d1cd2236438bc3419e25df1d66d1e2abe18164bd16c10226
|
def dict_update_newkeys(dict_, dict2):
' Like dict.update, but does not overwrite items '
for (key, val) in six.iteritems(dict2):
if (key not in dict_):
dict_[key] = val
|
Like dict.update, but does not overwrite items
|
utool/util_dict.py
|
dict_update_newkeys
|
Erotemic/utool
| 8
|
python
|
def dict_update_newkeys(dict_, dict2):
' '
for (key, val) in six.iteritems(dict2):
if (key not in dict_):
dict_[key] = val
|
def dict_update_newkeys(dict_, dict2):
' '
for (key, val) in six.iteritems(dict2):
if (key not in dict_):
dict_[key] = val<|docstring|>Like dict.update, but does not overwrite items<|endoftext|>
|
1bdae108b56429af99f0276c87cd49b88a7b031bc280aaad607929bd536a27b2
|
def is_dicteq(dict1_, dict2_, almosteq_ok=True, verbose_err=True):
' Checks to see if dicts are the same. Performs recursion. Handles numpy '
import utool as ut
assert (len(dict1_) == len(dict2_)), 'dicts are not of same length'
try:
for ((key1, val1), (key2, val2)) in zip(dict1_.items(), dict2_.items()):
assert (key1 == key2), 'key mismatch'
assert (type(val1) == type(val2)), 'vals are not same type'
if (HAVE_NUMPY and np.iterable(val1)):
if (almosteq_ok and ut.is_float(val1)):
assert np.all(ut.almost_eq(val1, val2)), 'float vals are not within thresh'
else:
assert all([np.all((x1 == x2)) for (x1, x2) in zip(val1, val2)]), 'np vals are different'
elif isinstance(val1, dict):
is_dicteq(val1, val2, almosteq_ok=almosteq_ok, verbose_err=verbose_err)
else:
assert (val1 == val2), 'vals are different'
except AssertionError as ex:
if verbose_err:
ut.printex(ex)
return False
return True
|
Checks to see if dicts are the same. Performs recursion. Handles numpy
|
utool/util_dict.py
|
is_dicteq
|
Erotemic/utool
| 8
|
python
|
def is_dicteq(dict1_, dict2_, almosteq_ok=True, verbose_err=True):
' '
import utool as ut
assert (len(dict1_) == len(dict2_)), 'dicts are not of same length'
try:
for ((key1, val1), (key2, val2)) in zip(dict1_.items(), dict2_.items()):
assert (key1 == key2), 'key mismatch'
assert (type(val1) == type(val2)), 'vals are not same type'
if (HAVE_NUMPY and np.iterable(val1)):
if (almosteq_ok and ut.is_float(val1)):
assert np.all(ut.almost_eq(val1, val2)), 'float vals are not within thresh'
else:
assert all([np.all((x1 == x2)) for (x1, x2) in zip(val1, val2)]), 'np vals are different'
elif isinstance(val1, dict):
is_dicteq(val1, val2, almosteq_ok=almosteq_ok, verbose_err=verbose_err)
else:
assert (val1 == val2), 'vals are different'
except AssertionError as ex:
if verbose_err:
ut.printex(ex)
return False
return True
|
def is_dicteq(dict1_, dict2_, almosteq_ok=True, verbose_err=True):
' '
import utool as ut
assert (len(dict1_) == len(dict2_)), 'dicts are not of same length'
try:
for ((key1, val1), (key2, val2)) in zip(dict1_.items(), dict2_.items()):
assert (key1 == key2), 'key mismatch'
assert (type(val1) == type(val2)), 'vals are not same type'
if (HAVE_NUMPY and np.iterable(val1)):
if (almosteq_ok and ut.is_float(val1)):
assert np.all(ut.almost_eq(val1, val2)), 'float vals are not within thresh'
else:
assert all([np.all((x1 == x2)) for (x1, x2) in zip(val1, val2)]), 'np vals are different'
elif isinstance(val1, dict):
is_dicteq(val1, val2, almosteq_ok=almosteq_ok, verbose_err=verbose_err)
else:
assert (val1 == val2), 'vals are different'
except AssertionError as ex:
if verbose_err:
ut.printex(ex)
return False
return True<|docstring|>Checks to see if dicts are the same. Performs recursion. Handles numpy<|endoftext|>
|
6d11757bdad2da7822a6b48e42e54dce3c3525de9f52d1e755545641e6baf26f
|
def dict_subset(dict_, keys, default=util_const.NoParam):
"\n Args:\n dict_ (dict):\n keys (list):\n\n Returns:\n dict: subset dictionary\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}\n >>> keys = ['K', 'dcvs_clip_max']\n >>> d = tuple([])\n >>> subdict_ = dict_subset(dict_, keys)\n >>> result = ut.repr4(subdict_, sorted_=True, newlines=False)\n >>> print(result)\n {'K': 3, 'dcvs_clip_max': 0.2}\n "
if (default is util_const.NoParam):
items = dict_take(dict_, keys)
else:
items = dict_take(dict_, keys, default)
subdict_ = OrderedDict(list(zip(keys, items)))
return subdict_
|
Args:
dict_ (dict):
keys (list):
Returns:
dict: subset dictionary
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}
>>> keys = ['K', 'dcvs_clip_max']
>>> d = tuple([])
>>> subdict_ = dict_subset(dict_, keys)
>>> result = ut.repr4(subdict_, sorted_=True, newlines=False)
>>> print(result)
{'K': 3, 'dcvs_clip_max': 0.2}
|
utool/util_dict.py
|
dict_subset
|
Erotemic/utool
| 8
|
python
|
def dict_subset(dict_, keys, default=util_const.NoParam):
"\n Args:\n dict_ (dict):\n keys (list):\n\n Returns:\n dict: subset dictionary\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}\n >>> keys = ['K', 'dcvs_clip_max']\n >>> d = tuple([])\n >>> subdict_ = dict_subset(dict_, keys)\n >>> result = ut.repr4(subdict_, sorted_=True, newlines=False)\n >>> print(result)\n {'K': 3, 'dcvs_clip_max': 0.2}\n "
if (default is util_const.NoParam):
items = dict_take(dict_, keys)
else:
items = dict_take(dict_, keys, default)
subdict_ = OrderedDict(list(zip(keys, items)))
return subdict_
|
def dict_subset(dict_, keys, default=util_const.NoParam):
"\n Args:\n dict_ (dict):\n keys (list):\n\n Returns:\n dict: subset dictionary\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}\n >>> keys = ['K', 'dcvs_clip_max']\n >>> d = tuple([])\n >>> subdict_ = dict_subset(dict_, keys)\n >>> result = ut.repr4(subdict_, sorted_=True, newlines=False)\n >>> print(result)\n {'K': 3, 'dcvs_clip_max': 0.2}\n "
if (default is util_const.NoParam):
items = dict_take(dict_, keys)
else:
items = dict_take(dict_, keys, default)
subdict_ = OrderedDict(list(zip(keys, items)))
return subdict_<|docstring|>Args:
dict_ (dict):
keys (list):
Returns:
dict: subset dictionary
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'K': 3, 'dcvs_clip_max': 0.2, 'p': 0.1}
>>> keys = ['K', 'dcvs_clip_max']
>>> d = tuple([])
>>> subdict_ = dict_subset(dict_, keys)
>>> result = ut.repr4(subdict_, sorted_=True, newlines=False)
>>> print(result)
{'K': 3, 'dcvs_clip_max': 0.2}<|endoftext|>
|
5270bc99c89ac2f3a28f970ebebdb1c3d14e2113c1f1a2e17a90b44e5b596d67
|
def dict_setdiff(dict_, negative_keys):
'\n returns a copy of dict_ without keys in the negative_keys list\n\n Args:\n dict_ (dict):\n negative_keys (list):\n '
keys = [key for key in six.iterkeys(dict_) if (key not in set(negative_keys))]
subdict_ = dict_subset(dict_, keys)
return subdict_
|
returns a copy of dict_ without keys in the negative_keys list
Args:
dict_ (dict):
negative_keys (list):
|
utool/util_dict.py
|
dict_setdiff
|
Erotemic/utool
| 8
|
python
|
def dict_setdiff(dict_, negative_keys):
'\n returns a copy of dict_ without keys in the negative_keys list\n\n Args:\n dict_ (dict):\n negative_keys (list):\n '
keys = [key for key in six.iterkeys(dict_) if (key not in set(negative_keys))]
subdict_ = dict_subset(dict_, keys)
return subdict_
|
def dict_setdiff(dict_, negative_keys):
'\n returns a copy of dict_ without keys in the negative_keys list\n\n Args:\n dict_ (dict):\n negative_keys (list):\n '
keys = [key for key in six.iterkeys(dict_) if (key not in set(negative_keys))]
subdict_ = dict_subset(dict_, keys)
return subdict_<|docstring|>returns a copy of dict_ without keys in the negative_keys list
Args:
dict_ (dict):
negative_keys (list):<|endoftext|>
|
84898ddbb78a376f5165086ad325521b5813e588fb2ff54f2b31933a3e52b622
|
def delete_dict_keys(dict_, key_list):
"\n Removes items from a dictionary inplace. Keys that do not exist are\n ignored.\n\n Args:\n dict_ (dict): dict like object with a __del__ attribute\n key_list (list): list of keys that specify the items to remove\n\n CommandLine:\n python -m utool.util_dict --test-delete_dict_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'bread': 1, 'churches': 1, 'cider': 2, 'very small rocks': 2}\n >>> key_list = ['duck', 'bread', 'cider']\n >>> delete_dict_keys(dict_, key_list)\n >>> result = ut.repr4(dict_, nl=False)\n >>> print(result)\n {'churches': 1, 'very small rocks': 2}\n\n "
invalid_keys = (set(key_list) - set(dict_.keys()))
valid_keys = (set(key_list) - invalid_keys)
for key in valid_keys:
del dict_[key]
return dict_
|
Removes items from a dictionary inplace. Keys that do not exist are
ignored.
Args:
dict_ (dict): dict like object with a __del__ attribute
key_list (list): list of keys that specify the items to remove
CommandLine:
python -m utool.util_dict --test-delete_dict_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'bread': 1, 'churches': 1, 'cider': 2, 'very small rocks': 2}
>>> key_list = ['duck', 'bread', 'cider']
>>> delete_dict_keys(dict_, key_list)
>>> result = ut.repr4(dict_, nl=False)
>>> print(result)
{'churches': 1, 'very small rocks': 2}
|
utool/util_dict.py
|
delete_dict_keys
|
Erotemic/utool
| 8
|
python
|
def delete_dict_keys(dict_, key_list):
"\n Removes items from a dictionary inplace. Keys that do not exist are\n ignored.\n\n Args:\n dict_ (dict): dict like object with a __del__ attribute\n key_list (list): list of keys that specify the items to remove\n\n CommandLine:\n python -m utool.util_dict --test-delete_dict_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'bread': 1, 'churches': 1, 'cider': 2, 'very small rocks': 2}\n >>> key_list = ['duck', 'bread', 'cider']\n >>> delete_dict_keys(dict_, key_list)\n >>> result = ut.repr4(dict_, nl=False)\n >>> print(result)\n {'churches': 1, 'very small rocks': 2}\n\n "
invalid_keys = (set(key_list) - set(dict_.keys()))
valid_keys = (set(key_list) - invalid_keys)
for key in valid_keys:
del dict_[key]
return dict_
|
def delete_dict_keys(dict_, key_list):
"\n Removes items from a dictionary inplace. Keys that do not exist are\n ignored.\n\n Args:\n dict_ (dict): dict like object with a __del__ attribute\n key_list (list): list of keys that specify the items to remove\n\n CommandLine:\n python -m utool.util_dict --test-delete_dict_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'bread': 1, 'churches': 1, 'cider': 2, 'very small rocks': 2}\n >>> key_list = ['duck', 'bread', 'cider']\n >>> delete_dict_keys(dict_, key_list)\n >>> result = ut.repr4(dict_, nl=False)\n >>> print(result)\n {'churches': 1, 'very small rocks': 2}\n\n "
invalid_keys = (set(key_list) - set(dict_.keys()))
valid_keys = (set(key_list) - invalid_keys)
for key in valid_keys:
del dict_[key]
return dict_<|docstring|>Removes items from a dictionary inplace. Keys that do not exist are
ignored.
Args:
dict_ (dict): dict like object with a __del__ attribute
key_list (list): list of keys that specify the items to remove
CommandLine:
python -m utool.util_dict --test-delete_dict_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'bread': 1, 'churches': 1, 'cider': 2, 'very small rocks': 2}
>>> key_list = ['duck', 'bread', 'cider']
>>> delete_dict_keys(dict_, key_list)
>>> result = ut.repr4(dict_, nl=False)
>>> print(result)
{'churches': 1, 'very small rocks': 2}<|endoftext|>
|
10000c8577570176298c9790cc089454dd5e39adcca6ccbc108395cf7574f8cd
|
def dict_take_gen(dict_, keys, *d):
"\n generate multiple values from a dictionary\n\n Args:\n dict_ (dict):\n keys (list):\n\n Varargs:\n d: if specified is default for key errors\n\n CommandLine:\n python -m utool.util_dict --test-dict_take_gen\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> result = list(dict_take_gen(dict_, keys, None))\n >>> result = ut.repr4(result, nl=False)\n >>> print(result)\n ['a', 'b', 'c', None, None]\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> try:\n >>> print(list(dict_take_gen(dict_, keys)))\n >>> result = 'did not get key error'\n >>> except KeyError:\n >>> result = 'correctly got key error'\n >>> print(result)\n correctly got key error\n "
if isinstance(keys, six.string_types):
keys = keys.split(', ')
if (len(d) == 0):
dictget = dict_.__getitem__
elif (len(d) == 1):
dictget = dict_.get
else:
raise ValueError('len(d) must be 1 or 0')
for key in keys:
if (HAVE_NUMPY and isinstance(key, np.ndarray)):
(yield list(dict_take_gen(dict_, key, *d)))
else:
(yield dictget(key, *d))
|
generate multiple values from a dictionary
Args:
dict_ (dict):
keys (list):
Varargs:
d: if specified is default for key errors
CommandLine:
python -m utool.util_dict --test-dict_take_gen
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> result = list(dict_take_gen(dict_, keys, None))
>>> result = ut.repr4(result, nl=False)
>>> print(result)
['a', 'b', 'c', None, None]
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> try:
>>> print(list(dict_take_gen(dict_, keys)))
>>> result = 'did not get key error'
>>> except KeyError:
>>> result = 'correctly got key error'
>>> print(result)
correctly got key error
|
utool/util_dict.py
|
dict_take_gen
|
Erotemic/utool
| 8
|
python
|
def dict_take_gen(dict_, keys, *d):
"\n generate multiple values from a dictionary\n\n Args:\n dict_ (dict):\n keys (list):\n\n Varargs:\n d: if specified is default for key errors\n\n CommandLine:\n python -m utool.util_dict --test-dict_take_gen\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> result = list(dict_take_gen(dict_, keys, None))\n >>> result = ut.repr4(result, nl=False)\n >>> print(result)\n ['a', 'b', 'c', None, None]\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> try:\n >>> print(list(dict_take_gen(dict_, keys)))\n >>> result = 'did not get key error'\n >>> except KeyError:\n >>> result = 'correctly got key error'\n >>> print(result)\n correctly got key error\n "
if isinstance(keys, six.string_types):
keys = keys.split(', ')
if (len(d) == 0):
dictget = dict_.__getitem__
elif (len(d) == 1):
dictget = dict_.get
else:
raise ValueError('len(d) must be 1 or 0')
for key in keys:
if (HAVE_NUMPY and isinstance(key, np.ndarray)):
(yield list(dict_take_gen(dict_, key, *d)))
else:
(yield dictget(key, *d))
|
def dict_take_gen(dict_, keys, *d):
"\n generate multiple values from a dictionary\n\n Args:\n dict_ (dict):\n keys (list):\n\n Varargs:\n d: if specified is default for key errors\n\n CommandLine:\n python -m utool.util_dict --test-dict_take_gen\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> result = list(dict_take_gen(dict_, keys, None))\n >>> result = ut.repr4(result, nl=False)\n >>> print(result)\n ['a', 'b', 'c', None, None]\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> try:\n >>> print(list(dict_take_gen(dict_, keys)))\n >>> result = 'did not get key error'\n >>> except KeyError:\n >>> result = 'correctly got key error'\n >>> print(result)\n correctly got key error\n "
if isinstance(keys, six.string_types):
keys = keys.split(', ')
if (len(d) == 0):
dictget = dict_.__getitem__
elif (len(d) == 1):
dictget = dict_.get
else:
raise ValueError('len(d) must be 1 or 0')
for key in keys:
if (HAVE_NUMPY and isinstance(key, np.ndarray)):
(yield list(dict_take_gen(dict_, key, *d)))
else:
(yield dictget(key, *d))<|docstring|>generate multiple values from a dictionary
Args:
dict_ (dict):
keys (list):
Varargs:
d: if specified is default for key errors
CommandLine:
python -m utool.util_dict --test-dict_take_gen
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> result = list(dict_take_gen(dict_, keys, None))
>>> result = ut.repr4(result, nl=False)
>>> print(result)
['a', 'b', 'c', None, None]
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> try:
>>> print(list(dict_take_gen(dict_, keys)))
>>> result = 'did not get key error'
>>> except KeyError:
>>> result = 'correctly got key error'
>>> print(result)
correctly got key error<|endoftext|>
|
b402e8e9eb261163719a83f74aca39f1f40d20d27e9a6da4c9645c2a16cb8993
|
def dict_take(dict_, keys, *d):
' get multiple values from a dictionary '
try:
return list(dict_take_gen(dict_, keys, *d))
except TypeError:
return list(dict_take_gen(dict_, keys, *d))[0]
|
get multiple values from a dictionary
|
utool/util_dict.py
|
dict_take
|
Erotemic/utool
| 8
|
python
|
def dict_take(dict_, keys, *d):
' '
try:
return list(dict_take_gen(dict_, keys, *d))
except TypeError:
return list(dict_take_gen(dict_, keys, *d))[0]
|
def dict_take(dict_, keys, *d):
' '
try:
return list(dict_take_gen(dict_, keys, *d))
except TypeError:
return list(dict_take_gen(dict_, keys, *d))[0]<|docstring|>get multiple values from a dictionary<|endoftext|>
|
a6a9d7b28bf92cbd71f7443978d1bd4fdb090b8bc7d19490a6b632a2dcd2cc25
|
def dict_take_pop(dict_, keys, *d):
" like dict_take but pops values off\n\n CommandLine:\n python -m utool.util_dict --test-dict_take_pop\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 'other': None, 'another': 'foo', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> print('before: ' + ut.repr4(dict_))\n >>> result = list(dict_take_pop(dict_, keys, None))\n >>> result = ut.repr4(result, nl=False)\n >>> print('after: ' + ut.repr4(dict_))\n >>> assert len(dict_) == 2\n >>> print(result)\n ['a', 'b', 'c', None, None]\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> print('before: ' + ut.repr4(dict_))\n >>> try:\n >>> print(list(dict_take_pop(dict_, keys)))\n >>> result = 'did not get key error'\n >>> except KeyError:\n >>> result = 'correctly got key error'\n >>> assert len(dict_) == 0\n >>> print('after: ' + ut.repr4(dict_))\n >>> print(result)\n correctly got key error\n "
if (len(d) == 0):
return [dict_.pop(key) for key in keys]
elif (len(d) == 1):
default = d[0]
return [dict_.pop(key, default) for key in keys]
else:
raise ValueError('len(d) must be 1 or 0')
|
like dict_take but pops values off
CommandLine:
python -m utool.util_dict --test-dict_take_pop
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 'a', 'other': None, 'another': 'foo', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> print('before: ' + ut.repr4(dict_))
>>> result = list(dict_take_pop(dict_, keys, None))
>>> result = ut.repr4(result, nl=False)
>>> print('after: ' + ut.repr4(dict_))
>>> assert len(dict_) == 2
>>> print(result)
['a', 'b', 'c', None, None]
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> print('before: ' + ut.repr4(dict_))
>>> try:
>>> print(list(dict_take_pop(dict_, keys)))
>>> result = 'did not get key error'
>>> except KeyError:
>>> result = 'correctly got key error'
>>> assert len(dict_) == 0
>>> print('after: ' + ut.repr4(dict_))
>>> print(result)
correctly got key error
|
utool/util_dict.py
|
dict_take_pop
|
Erotemic/utool
| 8
|
python
|
def dict_take_pop(dict_, keys, *d):
" like dict_take but pops values off\n\n CommandLine:\n python -m utool.util_dict --test-dict_take_pop\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 'other': None, 'another': 'foo', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> print('before: ' + ut.repr4(dict_))\n >>> result = list(dict_take_pop(dict_, keys, None))\n >>> result = ut.repr4(result, nl=False)\n >>> print('after: ' + ut.repr4(dict_))\n >>> assert len(dict_) == 2\n >>> print(result)\n ['a', 'b', 'c', None, None]\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> print('before: ' + ut.repr4(dict_))\n >>> try:\n >>> print(list(dict_take_pop(dict_, keys)))\n >>> result = 'did not get key error'\n >>> except KeyError:\n >>> result = 'correctly got key error'\n >>> assert len(dict_) == 0\n >>> print('after: ' + ut.repr4(dict_))\n >>> print(result)\n correctly got key error\n "
if (len(d) == 0):
return [dict_.pop(key) for key in keys]
elif (len(d) == 1):
default = d[0]
return [dict_.pop(key, default) for key in keys]
else:
raise ValueError('len(d) must be 1 or 0')
|
def dict_take_pop(dict_, keys, *d):
" like dict_take but pops values off\n\n CommandLine:\n python -m utool.util_dict --test-dict_take_pop\n\n Example1:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 'other': None, 'another': 'foo', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> print('before: ' + ut.repr4(dict_))\n >>> result = list(dict_take_pop(dict_, keys, None))\n >>> result = ut.repr4(result, nl=False)\n >>> print('after: ' + ut.repr4(dict_))\n >>> assert len(dict_) == 2\n >>> print(result)\n ['a', 'b', 'c', None, None]\n\n Example2:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {1: 'a', 2: 'b', 3: 'c'}\n >>> keys = [1, 2, 3, 4, 5]\n >>> print('before: ' + ut.repr4(dict_))\n >>> try:\n >>> print(list(dict_take_pop(dict_, keys)))\n >>> result = 'did not get key error'\n >>> except KeyError:\n >>> result = 'correctly got key error'\n >>> assert len(dict_) == 0\n >>> print('after: ' + ut.repr4(dict_))\n >>> print(result)\n correctly got key error\n "
if (len(d) == 0):
return [dict_.pop(key) for key in keys]
elif (len(d) == 1):
default = d[0]
return [dict_.pop(key, default) for key in keys]
else:
raise ValueError('len(d) must be 1 or 0')<|docstring|>like dict_take but pops values off
CommandLine:
python -m utool.util_dict --test-dict_take_pop
Example1:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 'a', 'other': None, 'another': 'foo', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> print('before: ' + ut.repr4(dict_))
>>> result = list(dict_take_pop(dict_, keys, None))
>>> result = ut.repr4(result, nl=False)
>>> print('after: ' + ut.repr4(dict_))
>>> assert len(dict_) == 2
>>> print(result)
['a', 'b', 'c', None, None]
Example2:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {1: 'a', 2: 'b', 3: 'c'}
>>> keys = [1, 2, 3, 4, 5]
>>> print('before: ' + ut.repr4(dict_))
>>> try:
>>> print(list(dict_take_pop(dict_, keys)))
>>> result = 'did not get key error'
>>> except KeyError:
>>> result = 'correctly got key error'
>>> assert len(dict_) == 0
>>> print('after: ' + ut.repr4(dict_))
>>> print(result)
correctly got key error<|endoftext|>
|
9275581e45c9b45380b382667b447e16d4670ecf7dac9363f0742f7d24cd189c
|
def dict_assign(dict_, keys, vals):
' simple method for assigning or setting values with a similar interface\n to dict_take '
for (key, val) in zip(keys, vals):
dict_[key] = val
|
simple method for assigning or setting values with a similar interface
to dict_take
|
utool/util_dict.py
|
dict_assign
|
Erotemic/utool
| 8
|
python
|
def dict_assign(dict_, keys, vals):
' simple method for assigning or setting values with a similar interface\n to dict_take '
for (key, val) in zip(keys, vals):
dict_[key] = val
|
def dict_assign(dict_, keys, vals):
' simple method for assigning or setting values with a similar interface\n to dict_take '
for (key, val) in zip(keys, vals):
dict_[key] = val<|docstring|>simple method for assigning or setting values with a similar interface
to dict_take<|endoftext|>
|
68bc08b54fde8cd52993bcf6529fda5ebd6bd48d04e2c5c6b937865f0e653e3e
|
def dict_where_len0(dict_):
'\n Accepts a dict of lists. Returns keys that have vals with no length\n '
keys = np.array(dict_.keys())
flags = (np.array(list(map(len, dict_.values()))) == 0)
indices = np.where(flags)[0]
return keys[indices]
|
Accepts a dict of lists. Returns keys that have vals with no length
|
utool/util_dict.py
|
dict_where_len0
|
Erotemic/utool
| 8
|
python
|
def dict_where_len0(dict_):
'\n \n '
keys = np.array(dict_.keys())
flags = (np.array(list(map(len, dict_.values()))) == 0)
indices = np.where(flags)[0]
return keys[indices]
|
def dict_where_len0(dict_):
'\n \n '
keys = np.array(dict_.keys())
flags = (np.array(list(map(len, dict_.values()))) == 0)
indices = np.where(flags)[0]
return keys[indices]<|docstring|>Accepts a dict of lists. Returns keys that have vals with no length<|endoftext|>
|
d5dd3be7e5b33922c73db4e7356d6d2f0fc1f8068931379d05c5abbe96dd12fa
|
def get_dict_column(dict_, colx):
"\n Args:\n dict_ (dict_): a dictionary of lists\n colx (int):\n\n CommandLine:\n python -m utool.util_dict --test-get_dict_column\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [0, 1, 2], 'b': [3, 4, 5], 'c': [6, 7, 8]}\n >>> colx = [2, 0]\n >>> retdict_ = get_dict_column(dict_, colx)\n >>> result = ut.repr2(retdict_)\n >>> print(result)\n {'a': [2, 0], 'b': [5, 3], 'c': [8, 6]}\n "
retdict_ = {key: util_list.list_take(val, colx) for (key, val) in six.iteritems(dict_)}
return retdict_
|
Args:
dict_ (dict_): a dictionary of lists
colx (int):
CommandLine:
python -m utool.util_dict --test-get_dict_column
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [0, 1, 2], 'b': [3, 4, 5], 'c': [6, 7, 8]}
>>> colx = [2, 0]
>>> retdict_ = get_dict_column(dict_, colx)
>>> result = ut.repr2(retdict_)
>>> print(result)
{'a': [2, 0], 'b': [5, 3], 'c': [8, 6]}
|
utool/util_dict.py
|
get_dict_column
|
Erotemic/utool
| 8
|
python
|
def get_dict_column(dict_, colx):
"\n Args:\n dict_ (dict_): a dictionary of lists\n colx (int):\n\n CommandLine:\n python -m utool.util_dict --test-get_dict_column\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [0, 1, 2], 'b': [3, 4, 5], 'c': [6, 7, 8]}\n >>> colx = [2, 0]\n >>> retdict_ = get_dict_column(dict_, colx)\n >>> result = ut.repr2(retdict_)\n >>> print(result)\n {'a': [2, 0], 'b': [5, 3], 'c': [8, 6]}\n "
retdict_ = {key: util_list.list_take(val, colx) for (key, val) in six.iteritems(dict_)}
return retdict_
|
def get_dict_column(dict_, colx):
"\n Args:\n dict_ (dict_): a dictionary of lists\n colx (int):\n\n CommandLine:\n python -m utool.util_dict --test-get_dict_column\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'a': [0, 1, 2], 'b': [3, 4, 5], 'c': [6, 7, 8]}\n >>> colx = [2, 0]\n >>> retdict_ = get_dict_column(dict_, colx)\n >>> result = ut.repr2(retdict_)\n >>> print(result)\n {'a': [2, 0], 'b': [5, 3], 'c': [8, 6]}\n "
retdict_ = {key: util_list.list_take(val, colx) for (key, val) in six.iteritems(dict_)}
return retdict_<|docstring|>Args:
dict_ (dict_): a dictionary of lists
colx (int):
CommandLine:
python -m utool.util_dict --test-get_dict_column
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'a': [0, 1, 2], 'b': [3, 4, 5], 'c': [6, 7, 8]}
>>> colx = [2, 0]
>>> retdict_ = get_dict_column(dict_, colx)
>>> result = ut.repr2(retdict_)
>>> print(result)
{'a': [2, 0], 'b': [5, 3], 'c': [8, 6]}<|endoftext|>
|
e78aca5f76556bb67300af227e30a263288e0f55e6321f6e9a0dfa4636c6d657
|
def dictinfo(dict_):
'\n dictinfo\n\n In depth debugging info\n\n Args:\n dict_ (dict):\n\n Returns:\n str\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {}\n >>> result = dictinfo(dict_)\n >>> print(result)\n '
import utool as ut
if (not isinstance(dict_, dict)):
return ('expected dict got %r' % type(dict_))
keys = list(dict_.keys())
vals = list(dict_.values())
num_keys = len(keys)
key_types = list(set(map(type, keys)))
val_types = list(set(map(type, vals)))
fmtstr_ = ('\n' + ut.unindent('\n * num_keys = {num_keys}\n * key_types = {key_types}\n * val_types = {val_types}\n '.strip('\n')))
if (len(val_types) == 1):
if (val_types[0] == np.ndarray):
val_shape_stats = ut.get_stats(set(map(np.shape, vals)), axis=0)
val_shape_stats_str = ut.repr4(val_shape_stats, strvals=True, newlines=False)
val_dtypes = list(set([val.dtype for val in vals]))
fmtstr_ += ut.unindent('\n * val_shape_stats = {val_shape_stats_str}\n * val_dtypes = {val_dtypes}\n '.strip('\n'))
elif (val_types[0] == list):
val_len_stats = ut.get_stats(set(map(len, vals)))
val_len_stats_str = ut.repr4(val_len_stats, strvals=True, newlines=False)
depth = ut.list_depth(vals)
deep_val_types = list(set(ut.list_deep_types(vals)))
fmtstr_ += ut.unindent('\n * list_depth = {depth}\n * val_len_stats = {val_len_stats_str}\n * deep_types = {deep_val_types}\n '.strip('\n'))
if (len(deep_val_types) == 1):
if (deep_val_types[0] == np.ndarray):
deep_val_dtypes = list(set([val.dtype for val in vals]))
fmtstr_ += ut.unindent('\n * deep_val_dtypes = {deep_val_dtypes}\n ').strip('\n')
elif (val_types[0] in [np.uint8, np.int8, np.int32, np.int64, np.float16, np.float32, np.float64]):
val_stats = ut.get_stats(vals)
fmtstr_ += ut.unindent('\n * val_stats = {val_stats}\n ').strip('\n')
fmtstr = fmtstr_.format(**locals())
return ut.indent(fmtstr)
|
dictinfo
In depth debugging info
Args:
dict_ (dict):
Returns:
str
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {}
>>> result = dictinfo(dict_)
>>> print(result)
|
utool/util_dict.py
|
dictinfo
|
Erotemic/utool
| 8
|
python
|
def dictinfo(dict_):
'\n dictinfo\n\n In depth debugging info\n\n Args:\n dict_ (dict):\n\n Returns:\n str\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {}\n >>> result = dictinfo(dict_)\n >>> print(result)\n '
import utool as ut
if (not isinstance(dict_, dict)):
return ('expected dict got %r' % type(dict_))
keys = list(dict_.keys())
vals = list(dict_.values())
num_keys = len(keys)
key_types = list(set(map(type, keys)))
val_types = list(set(map(type, vals)))
fmtstr_ = ('\n' + ut.unindent('\n * num_keys = {num_keys}\n * key_types = {key_types}\n * val_types = {val_types}\n '.strip('\n')))
if (len(val_types) == 1):
if (val_types[0] == np.ndarray):
val_shape_stats = ut.get_stats(set(map(np.shape, vals)), axis=0)
val_shape_stats_str = ut.repr4(val_shape_stats, strvals=True, newlines=False)
val_dtypes = list(set([val.dtype for val in vals]))
fmtstr_ += ut.unindent('\n * val_shape_stats = {val_shape_stats_str}\n * val_dtypes = {val_dtypes}\n '.strip('\n'))
elif (val_types[0] == list):
val_len_stats = ut.get_stats(set(map(len, vals)))
val_len_stats_str = ut.repr4(val_len_stats, strvals=True, newlines=False)
depth = ut.list_depth(vals)
deep_val_types = list(set(ut.list_deep_types(vals)))
fmtstr_ += ut.unindent('\n * list_depth = {depth}\n * val_len_stats = {val_len_stats_str}\n * deep_types = {deep_val_types}\n '.strip('\n'))
if (len(deep_val_types) == 1):
if (deep_val_types[0] == np.ndarray):
deep_val_dtypes = list(set([val.dtype for val in vals]))
fmtstr_ += ut.unindent('\n * deep_val_dtypes = {deep_val_dtypes}\n ').strip('\n')
elif (val_types[0] in [np.uint8, np.int8, np.int32, np.int64, np.float16, np.float32, np.float64]):
val_stats = ut.get_stats(vals)
fmtstr_ += ut.unindent('\n * val_stats = {val_stats}\n ').strip('\n')
fmtstr = fmtstr_.format(**locals())
return ut.indent(fmtstr)
|
def dictinfo(dict_):
'\n dictinfo\n\n In depth debugging info\n\n Args:\n dict_ (dict):\n\n Returns:\n str\n\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {}\n >>> result = dictinfo(dict_)\n >>> print(result)\n '
import utool as ut
if (not isinstance(dict_, dict)):
return ('expected dict got %r' % type(dict_))
keys = list(dict_.keys())
vals = list(dict_.values())
num_keys = len(keys)
key_types = list(set(map(type, keys)))
val_types = list(set(map(type, vals)))
fmtstr_ = ('\n' + ut.unindent('\n * num_keys = {num_keys}\n * key_types = {key_types}\n * val_types = {val_types}\n '.strip('\n')))
if (len(val_types) == 1):
if (val_types[0] == np.ndarray):
val_shape_stats = ut.get_stats(set(map(np.shape, vals)), axis=0)
val_shape_stats_str = ut.repr4(val_shape_stats, strvals=True, newlines=False)
val_dtypes = list(set([val.dtype for val in vals]))
fmtstr_ += ut.unindent('\n * val_shape_stats = {val_shape_stats_str}\n * val_dtypes = {val_dtypes}\n '.strip('\n'))
elif (val_types[0] == list):
val_len_stats = ut.get_stats(set(map(len, vals)))
val_len_stats_str = ut.repr4(val_len_stats, strvals=True, newlines=False)
depth = ut.list_depth(vals)
deep_val_types = list(set(ut.list_deep_types(vals)))
fmtstr_ += ut.unindent('\n * list_depth = {depth}\n * val_len_stats = {val_len_stats_str}\n * deep_types = {deep_val_types}\n '.strip('\n'))
if (len(deep_val_types) == 1):
if (deep_val_types[0] == np.ndarray):
deep_val_dtypes = list(set([val.dtype for val in vals]))
fmtstr_ += ut.unindent('\n * deep_val_dtypes = {deep_val_dtypes}\n ').strip('\n')
elif (val_types[0] in [np.uint8, np.int8, np.int32, np.int64, np.float16, np.float32, np.float64]):
val_stats = ut.get_stats(vals)
fmtstr_ += ut.unindent('\n * val_stats = {val_stats}\n ').strip('\n')
fmtstr = fmtstr_.format(**locals())
return ut.indent(fmtstr)<|docstring|>dictinfo
In depth debugging info
Args:
dict_ (dict):
Returns:
str
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {}
>>> result = dictinfo(dict_)
>>> print(result)<|endoftext|>
|
f545aa873590da90ce41e42147c27ede75f1f5f37ab24754d7a48b8082b03068
|
def dict_find_keys(dict_, val_list):
"\n Args:\n dict_ (dict):\n val_list (list):\n\n Returns:\n dict: found_dict\n\n CommandLine:\n python -m utool.util_dict --test-dict_find_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,\n ... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,\n ... 'lsh': 6, 'kdtree_single': 4}\n >>> val_list = [1]\n >>> found_dict = dict_find_keys(dict_, val_list)\n >>> result = ut.repr2(ut.map_vals(sorted, found_dict))\n >>> print(result)\n {1: ['default', 'kdtree']}\n "
found_dict = {search_val: [key for (key, val) in six.iteritems(dict_) if (val == search_val)] for search_val in val_list}
return found_dict
|
Args:
dict_ (dict):
val_list (list):
Returns:
dict: found_dict
CommandLine:
python -m utool.util_dict --test-dict_find_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,
... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,
... 'lsh': 6, 'kdtree_single': 4}
>>> val_list = [1]
>>> found_dict = dict_find_keys(dict_, val_list)
>>> result = ut.repr2(ut.map_vals(sorted, found_dict))
>>> print(result)
{1: ['default', 'kdtree']}
|
utool/util_dict.py
|
dict_find_keys
|
Erotemic/utool
| 8
|
python
|
def dict_find_keys(dict_, val_list):
"\n Args:\n dict_ (dict):\n val_list (list):\n\n Returns:\n dict: found_dict\n\n CommandLine:\n python -m utool.util_dict --test-dict_find_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,\n ... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,\n ... 'lsh': 6, 'kdtree_single': 4}\n >>> val_list = [1]\n >>> found_dict = dict_find_keys(dict_, val_list)\n >>> result = ut.repr2(ut.map_vals(sorted, found_dict))\n >>> print(result)\n {1: ['default', 'kdtree']}\n "
found_dict = {search_val: [key for (key, val) in six.iteritems(dict_) if (val == search_val)] for search_val in val_list}
return found_dict
|
def dict_find_keys(dict_, val_list):
"\n Args:\n dict_ (dict):\n val_list (list):\n\n Returns:\n dict: found_dict\n\n CommandLine:\n python -m utool.util_dict --test-dict_find_keys\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,\n ... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,\n ... 'lsh': 6, 'kdtree_single': 4}\n >>> val_list = [1]\n >>> found_dict = dict_find_keys(dict_, val_list)\n >>> result = ut.repr2(ut.map_vals(sorted, found_dict))\n >>> print(result)\n {1: ['default', 'kdtree']}\n "
found_dict = {search_val: [key for (key, val) in six.iteritems(dict_) if (val == search_val)] for search_val in val_list}
return found_dict<|docstring|>Args:
dict_ (dict):
val_list (list):
Returns:
dict: found_dict
CommandLine:
python -m utool.util_dict --test-dict_find_keys
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,
... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,
... 'lsh': 6, 'kdtree_single': 4}
>>> val_list = [1]
>>> found_dict = dict_find_keys(dict_, val_list)
>>> result = ut.repr2(ut.map_vals(sorted, found_dict))
>>> print(result)
{1: ['default', 'kdtree']}<|endoftext|>
|
28e0dd9ee8513a688393ef8a0acede353eb02e7195a5b520eeae8637f3046aa1
|
def dict_find_other_sameval_keys(dict_, key):
"\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,\n ... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,\n ... 'lsh': 6, 'kdtree_single': 4}\n >>> key = 'default'\n >>> found_dict = dict_find_keys(dict_, val_list)\n "
value = dict_[key]
found_dict = dict_find_keys(dict_, [value])
other_keys = found_dict[value]
other_keys.remove(key)
return other_keys
|
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,
... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,
... 'lsh': 6, 'kdtree_single': 4}
>>> key = 'default'
>>> found_dict = dict_find_keys(dict_, val_list)
|
utool/util_dict.py
|
dict_find_other_sameval_keys
|
Erotemic/utool
| 8
|
python
|
def dict_find_other_sameval_keys(dict_, key):
"\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,\n ... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,\n ... 'lsh': 6, 'kdtree_single': 4}\n >>> key = 'default'\n >>> found_dict = dict_find_keys(dict_, val_list)\n "
value = dict_[key]
found_dict = dict_find_keys(dict_, [value])
other_keys = found_dict[value]
other_keys.remove(key)
return other_keys
|
def dict_find_other_sameval_keys(dict_, key):
"\n Example:\n >>> # DISABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,\n ... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,\n ... 'lsh': 6, 'kdtree_single': 4}\n >>> key = 'default'\n >>> found_dict = dict_find_keys(dict_, val_list)\n "
value = dict_[key]
found_dict = dict_find_keys(dict_, [value])
other_keys = found_dict[value]
other_keys.remove(key)
return other_keys<|docstring|>Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> dict_ = {'default': 1, 'hierarchical': 5, 'linear': 0, 'kdtree': 1,
... 'composite': 3, 'autotuned': 255, 'saved': 254, 'kmeans': 2,
... 'lsh': 6, 'kdtree_single': 4}
>>> key = 'default'
>>> found_dict = dict_find_keys(dict_, val_list)<|endoftext|>
|
76b429d059fc17b9009013aa1d809939ed772341bb2eca7fb28338cea2afc008
|
@profile
def dict_hist(item_list, weight_list=None, ordered=False, labels=None):
'\n Builds a histogram of items in item_list\n\n Args:\n item_list (list): list with hashable items (usually containing duplicates)\n\n Returns:\n dict : dictionary where the keys are items in item_list, and the values\n are the number of times the item appears in item_list.\n\n CommandLine:\n python -m utool.util_dict --test-dict_hist\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]\n >>> hist_ = dict_hist(item_list)\n >>> result = ut.repr2(hist_)\n >>> print(result)\n {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}\n '
if (labels is None):
hist_ = defaultdict(int)
else:
hist_ = {k: 0 for k in labels}
if (weight_list is None):
for item in item_list:
hist_[item] += 1
else:
for (item, weight) in zip(item_list, weight_list):
hist_[item] += weight
if ordered:
getval = op.itemgetter(1)
key_order = [key for (key, value) in sorted(hist_.items(), key=getval)]
hist_ = order_dict_by(hist_, key_order)
return hist_
|
Builds a histogram of items in item_list
Args:
item_list (list): list with hashable items (usually containing duplicates)
Returns:
dict : dictionary where the keys are items in item_list, and the values
are the number of times the item appears in item_list.
CommandLine:
python -m utool.util_dict --test-dict_hist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]
>>> hist_ = dict_hist(item_list)
>>> result = ut.repr2(hist_)
>>> print(result)
{1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}
|
utool/util_dict.py
|
dict_hist
|
Erotemic/utool
| 8
|
python
|
@profile
def dict_hist(item_list, weight_list=None, ordered=False, labels=None):
'\n Builds a histogram of items in item_list\n\n Args:\n item_list (list): list with hashable items (usually containing duplicates)\n\n Returns:\n dict : dictionary where the keys are items in item_list, and the values\n are the number of times the item appears in item_list.\n\n CommandLine:\n python -m utool.util_dict --test-dict_hist\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]\n >>> hist_ = dict_hist(item_list)\n >>> result = ut.repr2(hist_)\n >>> print(result)\n {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}\n '
if (labels is None):
hist_ = defaultdict(int)
else:
hist_ = {k: 0 for k in labels}
if (weight_list is None):
for item in item_list:
hist_[item] += 1
else:
for (item, weight) in zip(item_list, weight_list):
hist_[item] += weight
if ordered:
getval = op.itemgetter(1)
key_order = [key for (key, value) in sorted(hist_.items(), key=getval)]
hist_ = order_dict_by(hist_, key_order)
return hist_
|
@profile
def dict_hist(item_list, weight_list=None, ordered=False, labels=None):
'\n Builds a histogram of items in item_list\n\n Args:\n item_list (list): list with hashable items (usually containing duplicates)\n\n Returns:\n dict : dictionary where the keys are items in item_list, and the values\n are the number of times the item appears in item_list.\n\n CommandLine:\n python -m utool.util_dict --test-dict_hist\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]\n >>> hist_ = dict_hist(item_list)\n >>> result = ut.repr2(hist_)\n >>> print(result)\n {1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}\n '
if (labels is None):
hist_ = defaultdict(int)
else:
hist_ = {k: 0 for k in labels}
if (weight_list is None):
for item in item_list:
hist_[item] += 1
else:
for (item, weight) in zip(item_list, weight_list):
hist_[item] += weight
if ordered:
getval = op.itemgetter(1)
key_order = [key for (key, value) in sorted(hist_.items(), key=getval)]
hist_ = order_dict_by(hist_, key_order)
return hist_<|docstring|>Builds a histogram of items in item_list
Args:
item_list (list): list with hashable items (usually containing duplicates)
Returns:
dict : dictionary where the keys are items in item_list, and the values
are the number of times the item appears in item_list.
CommandLine:
python -m utool.util_dict --test-dict_hist
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> item_list = [1, 2, 39, 900, 1232, 900, 1232, 2, 2, 2, 900]
>>> hist_ = dict_hist(item_list)
>>> result = ut.repr2(hist_)
>>> print(result)
{1: 1, 2: 4, 39: 1, 900: 3, 1232: 2}<|endoftext|>
|
d1d75b321839911b568eb5b18c4723f18c84a9e395429a25e9afa8358d26836f
|
def range_hist(items, bins):
"\n Bins items into a discrete histogram by values and/or ranges.\n\n items = [1, 2, 3, 4, 5, 6, 7]\n bins = [0, 1, 2, (3, float('inf'))]\n ut.range_hist(items, bins)\n "
big_hist = ut.dict_hist(items)
hist = ut.odict([(b, 0) for b in bins])
for (k, v) in big_hist.items():
for b in bins:
if isinstance(b, (list, tuple)):
if ((k >= b[0]) and (k < b[1])):
hist[b] += v
elif (k == b):
hist[b] += v
return hist
|
Bins items into a discrete histogram by values and/or ranges.
items = [1, 2, 3, 4, 5, 6, 7]
bins = [0, 1, 2, (3, float('inf'))]
ut.range_hist(items, bins)
|
utool/util_dict.py
|
range_hist
|
Erotemic/utool
| 8
|
python
|
def range_hist(items, bins):
"\n Bins items into a discrete histogram by values and/or ranges.\n\n items = [1, 2, 3, 4, 5, 6, 7]\n bins = [0, 1, 2, (3, float('inf'))]\n ut.range_hist(items, bins)\n "
big_hist = ut.dict_hist(items)
hist = ut.odict([(b, 0) for b in bins])
for (k, v) in big_hist.items():
for b in bins:
if isinstance(b, (list, tuple)):
if ((k >= b[0]) and (k < b[1])):
hist[b] += v
elif (k == b):
hist[b] += v
return hist
|
def range_hist(items, bins):
"\n Bins items into a discrete histogram by values and/or ranges.\n\n items = [1, 2, 3, 4, 5, 6, 7]\n bins = [0, 1, 2, (3, float('inf'))]\n ut.range_hist(items, bins)\n "
big_hist = ut.dict_hist(items)
hist = ut.odict([(b, 0) for b in bins])
for (k, v) in big_hist.items():
for b in bins:
if isinstance(b, (list, tuple)):
if ((k >= b[0]) and (k < b[1])):
hist[b] += v
elif (k == b):
hist[b] += v
return hist<|docstring|>Bins items into a discrete histogram by values and/or ranges.
items = [1, 2, 3, 4, 5, 6, 7]
bins = [0, 1, 2, (3, float('inf'))]
ut.range_hist(items, bins)<|endoftext|>
|
a3a40f80ccedc525cf5b27c89502941a52a9bbdc2611fde35b8c7651a52ca3b8
|
def dict_hist_cumsum(hist_, reverse=True):
' VERY HACKY '
import utool as ut
items = hist_.items()
if reverse:
items = sorted(items)[::(- 1)]
else:
items = sorted(items)
key_list = ut.get_list_column(items, 0)
val_list = ut.get_list_column(items, 1)
cumhist_ = dict(zip(key_list, np.cumsum(val_list)))
return cumhist_
|
VERY HACKY
|
utool/util_dict.py
|
dict_hist_cumsum
|
Erotemic/utool
| 8
|
python
|
def dict_hist_cumsum(hist_, reverse=True):
' '
import utool as ut
items = hist_.items()
if reverse:
items = sorted(items)[::(- 1)]
else:
items = sorted(items)
key_list = ut.get_list_column(items, 0)
val_list = ut.get_list_column(items, 1)
cumhist_ = dict(zip(key_list, np.cumsum(val_list)))
return cumhist_
|
def dict_hist_cumsum(hist_, reverse=True):
' '
import utool as ut
items = hist_.items()
if reverse:
items = sorted(items)[::(- 1)]
else:
items = sorted(items)
key_list = ut.get_list_column(items, 0)
val_list = ut.get_list_column(items, 1)
cumhist_ = dict(zip(key_list, np.cumsum(val_list)))
return cumhist_<|docstring|>VERY HACKY<|endoftext|>
|
73a8a0bc074b89092953fe3ff2aff9d1a80eb2e631470afe9ff6d26bd40d98b4
|
def merge_dicts(*args):
"\n add / concatenate / union / join / merge / combine dictionaries\n\n Copies the first dictionary given and then repeatedly calls update using\n the rest of the dicts given in args. Duplicate keys will receive the last\n value specified the list of dictionaries.\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --test-merge_dicts\n\n References:\n http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> x = {'a': 1, 'b': 2}\n >>> y = {'b': 3, 'c': 4}\n >>> mergedict_ = merge_dicts(x, y)\n >>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)\n >>> print(result)\n {'a': 1, 'b': 3, 'c': 4}\n\n "
iter_ = iter(args)
mergedict_ = six.next(iter_).copy()
for dict_ in iter_:
mergedict_.update(dict_)
return mergedict_
|
add / concatenate / union / join / merge / combine dictionaries
Copies the first dictionary given and then repeatedly calls update using
the rest of the dicts given in args. Duplicate keys will receive the last
value specified the list of dictionaries.
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --test-merge_dicts
References:
http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> x = {'a': 1, 'b': 2}
>>> y = {'b': 3, 'c': 4}
>>> mergedict_ = merge_dicts(x, y)
>>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)
>>> print(result)
{'a': 1, 'b': 3, 'c': 4}
|
utool/util_dict.py
|
merge_dicts
|
Erotemic/utool
| 8
|
python
|
def merge_dicts(*args):
"\n add / concatenate / union / join / merge / combine dictionaries\n\n Copies the first dictionary given and then repeatedly calls update using\n the rest of the dicts given in args. Duplicate keys will receive the last\n value specified the list of dictionaries.\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --test-merge_dicts\n\n References:\n http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> x = {'a': 1, 'b': 2}\n >>> y = {'b': 3, 'c': 4}\n >>> mergedict_ = merge_dicts(x, y)\n >>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)\n >>> print(result)\n {'a': 1, 'b': 3, 'c': 4}\n\n "
iter_ = iter(args)
mergedict_ = six.next(iter_).copy()
for dict_ in iter_:
mergedict_.update(dict_)
return mergedict_
|
def merge_dicts(*args):
"\n add / concatenate / union / join / merge / combine dictionaries\n\n Copies the first dictionary given and then repeatedly calls update using\n the rest of the dicts given in args. Duplicate keys will receive the last\n value specified the list of dictionaries.\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --test-merge_dicts\n\n References:\n http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> x = {'a': 1, 'b': 2}\n >>> y = {'b': 3, 'c': 4}\n >>> mergedict_ = merge_dicts(x, y)\n >>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)\n >>> print(result)\n {'a': 1, 'b': 3, 'c': 4}\n\n "
iter_ = iter(args)
mergedict_ = six.next(iter_).copy()
for dict_ in iter_:
mergedict_.update(dict_)
return mergedict_<|docstring|>add / concatenate / union / join / merge / combine dictionaries
Copies the first dictionary given and then repeatedly calls update using
the rest of the dicts given in args. Duplicate keys will receive the last
value specified the list of dictionaries.
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --test-merge_dicts
References:
http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> x = {'a': 1, 'b': 2}
>>> y = {'b': 3, 'c': 4}
>>> mergedict_ = merge_dicts(x, y)
>>> result = ut.repr4(mergedict_, sorted_=True, newlines=False)
>>> print(result)
{'a': 1, 'b': 3, 'c': 4}<|endoftext|>
|
7788bd2ee77848cef830a0d3388d49e8e9dbc9f666e597101a054d8ec50cbc95
|
def dict_union3(dict1, dict2, combine_op=op.add):
"\n Args:\n dict1 (dict):\n dict2 (dict):\n combine_op (func): (default=op.add)\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --exec-dict_union3\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}\n >>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}\n >>> combine_op = op.add\n >>> mergedict_ = dict_union3(dict1, dict2, combine_op)\n >>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))\n >>> print(result)\n mergedict_ = {'a': 1, 'b': 4, 'c': 6, 'd': 9, 'e': 21, 'f': 42}\n "
keys1 = set(dict1.keys())
keys2 = set(dict2.keys())
keys3 = keys1.intersection(keys2)
if ((len(keys3) > 0) and (combine_op is None)):
raise AssertionError('Can only combine disjoint dicts when combine_op is None')
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
for key in keys1.difference(keys3):
dict3[key] = dict1[key]
for key in keys2.difference(keys3):
dict3[key] = dict2[key]
return dict3
|
Args:
dict1 (dict):
dict2 (dict):
combine_op (func): (default=op.add)
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_union3
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> combine_op = op.add
>>> mergedict_ = dict_union3(dict1, dict2, combine_op)
>>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))
>>> print(result)
mergedict_ = {'a': 1, 'b': 4, 'c': 6, 'd': 9, 'e': 21, 'f': 42}
|
utool/util_dict.py
|
dict_union3
|
Erotemic/utool
| 8
|
python
|
def dict_union3(dict1, dict2, combine_op=op.add):
"\n Args:\n dict1 (dict):\n dict2 (dict):\n combine_op (func): (default=op.add)\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --exec-dict_union3\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}\n >>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}\n >>> combine_op = op.add\n >>> mergedict_ = dict_union3(dict1, dict2, combine_op)\n >>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))\n >>> print(result)\n mergedict_ = {'a': 1, 'b': 4, 'c': 6, 'd': 9, 'e': 21, 'f': 42}\n "
keys1 = set(dict1.keys())
keys2 = set(dict2.keys())
keys3 = keys1.intersection(keys2)
if ((len(keys3) > 0) and (combine_op is None)):
raise AssertionError('Can only combine disjoint dicts when combine_op is None')
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
for key in keys1.difference(keys3):
dict3[key] = dict1[key]
for key in keys2.difference(keys3):
dict3[key] = dict2[key]
return dict3
|
def dict_union3(dict1, dict2, combine_op=op.add):
"\n Args:\n dict1 (dict):\n dict2 (dict):\n combine_op (func): (default=op.add)\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --exec-dict_union3\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}\n >>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}\n >>> combine_op = op.add\n >>> mergedict_ = dict_union3(dict1, dict2, combine_op)\n >>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))\n >>> print(result)\n mergedict_ = {'a': 1, 'b': 4, 'c': 6, 'd': 9, 'e': 21, 'f': 42}\n "
keys1 = set(dict1.keys())
keys2 = set(dict2.keys())
keys3 = keys1.intersection(keys2)
if ((len(keys3) > 0) and (combine_op is None)):
raise AssertionError('Can only combine disjoint dicts when combine_op is None')
dict3 = {key: combine_op(dict1[key], dict2[key]) for key in keys3}
for key in keys1.difference(keys3):
dict3[key] = dict1[key]
for key in keys2.difference(keys3):
dict3[key] = dict2[key]
return dict3<|docstring|>Args:
dict1 (dict):
dict2 (dict):
combine_op (func): (default=op.add)
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_union3
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> combine_op = op.add
>>> mergedict_ = dict_union3(dict1, dict2, combine_op)
>>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))
>>> print(result)
mergedict_ = {'a': 1, 'b': 4, 'c': 6, 'd': 9, 'e': 21, 'f': 42}<|endoftext|>
|
43d1d423216fa4b187e5ac2d0740073e3c0f76c2856198ade4e0b2341795d264
|
def dict_intersection(dict1, dict2, combine=False, combine_op=op.add):
"\n Args:\n dict1 (dict):\n dict2 (dict):\n combine (bool): Combines keys only if the values are equal if False else\n values are combined using combine_op (default = False)\n combine_op (func): (default = op.add)\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --exec-dict_intersection\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}\n >>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}\n >>> combine = False\n >>> mergedict_ = dict_intersection(dict1, dict2, combine)\n >>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))\n >>> print(result)\n mergedict_ = {'b': 2, 'c': 3}\n "
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
if combine:
dict_isect = {k: combine_op(dict1[k], dict2[k]) for k in isect_keys}
else:
if isinstance(dict1, OrderedDict):
isect_keys_ = [k for k in dict1.keys() if (k in isect_keys)]
_dict_cls = OrderedDict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(((k, dict1[k]) for k in isect_keys_ if (dict1[k] == dict2[k])))
return dict_isect
|
Args:
dict1 (dict):
dict2 (dict):
combine (bool): Combines keys only if the values are equal if False else
values are combined using combine_op (default = False)
combine_op (func): (default = op.add)
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> combine = False
>>> mergedict_ = dict_intersection(dict1, dict2, combine)
>>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))
>>> print(result)
mergedict_ = {'b': 2, 'c': 3}
|
utool/util_dict.py
|
dict_intersection
|
Erotemic/utool
| 8
|
python
|
def dict_intersection(dict1, dict2, combine=False, combine_op=op.add):
"\n Args:\n dict1 (dict):\n dict2 (dict):\n combine (bool): Combines keys only if the values are equal if False else\n values are combined using combine_op (default = False)\n combine_op (func): (default = op.add)\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --exec-dict_intersection\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}\n >>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}\n >>> combine = False\n >>> mergedict_ = dict_intersection(dict1, dict2, combine)\n >>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))\n >>> print(result)\n mergedict_ = {'b': 2, 'c': 3}\n "
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
if combine:
dict_isect = {k: combine_op(dict1[k], dict2[k]) for k in isect_keys}
else:
if isinstance(dict1, OrderedDict):
isect_keys_ = [k for k in dict1.keys() if (k in isect_keys)]
_dict_cls = OrderedDict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(((k, dict1[k]) for k in isect_keys_ if (dict1[k] == dict2[k])))
return dict_isect
|
def dict_intersection(dict1, dict2, combine=False, combine_op=op.add):
"\n Args:\n dict1 (dict):\n dict2 (dict):\n combine (bool): Combines keys only if the values are equal if False else\n values are combined using combine_op (default = False)\n combine_op (func): (default = op.add)\n\n Returns:\n dict: mergedict_\n\n CommandLine:\n python -m utool.util_dict --exec-dict_intersection\n\n Example:\n >>> # ENABLE_DOCTEST\n >>> from utool.util_dict import * # NOQA\n >>> import utool as ut\n >>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}\n >>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}\n >>> combine = False\n >>> mergedict_ = dict_intersection(dict1, dict2, combine)\n >>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))\n >>> print(result)\n mergedict_ = {'b': 2, 'c': 3}\n "
isect_keys = set(dict1.keys()).intersection(set(dict2.keys()))
if combine:
dict_isect = {k: combine_op(dict1[k], dict2[k]) for k in isect_keys}
else:
if isinstance(dict1, OrderedDict):
isect_keys_ = [k for k in dict1.keys() if (k in isect_keys)]
_dict_cls = OrderedDict
else:
isect_keys_ = isect_keys
_dict_cls = dict
dict_isect = _dict_cls(((k, dict1[k]) for k in isect_keys_ if (dict1[k] == dict2[k])))
return dict_isect<|docstring|>Args:
dict1 (dict):
dict2 (dict):
combine (bool): Combines keys only if the values are equal if False else
values are combined using combine_op (default = False)
combine_op (func): (default = op.add)
Returns:
dict: mergedict_
CommandLine:
python -m utool.util_dict --exec-dict_intersection
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_dict import * # NOQA
>>> import utool as ut
>>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
>>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
>>> combine = False
>>> mergedict_ = dict_intersection(dict1, dict2, combine)
>>> result = ('mergedict_ = %s' % (ut.repr4(mergedict_, nl=False),))
>>> print(result)
mergedict_ = {'b': 2, 'c': 3}<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.